hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e2ae03cc3bf896e5d4e661d346721f2b71e3200c | 2,679 | py | Python | pysnmp/ALVARION-SMI.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/ALVARION-SMI.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/ALVARION-SMI.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module ALVARION-SMI (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ALVARION-SMI
# Produced by pysmi-0.3.4 at Mon Apr 29 17:06:07 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
IpAddress, Unsigned32, ObjectIdentity, TimeTicks, MibIdentifier, Integer32, ModuleIdentity, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, enterprises, Gauge32, NotificationType, Counter32, Counter64, iso = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Unsigned32", "ObjectIdentity", "TimeTicks", "MibIdentifier", "Integer32", "ModuleIdentity", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "enterprises", "Gauge32", "NotificationType", "Counter32", "Counter64", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
alvarionWireless = ModuleIdentity((1, 3, 6, 1, 4, 1, 12394, 1, 10))
if mibBuilder.loadTexts: alvarionWireless.setLastUpdated('200710310000Z')
if mibBuilder.loadTexts: alvarionWireless.setOrganization('Alvarion Ltd.')
alvarionProducts = ObjectIdentity((1, 3, 6, 1, 4, 1, 12394, 1, 10, 1))
if mibBuilder.loadTexts: alvarionProducts.setStatus('current')
alvarionExperiment = ObjectIdentity((1, 3, 6, 1, 4, 1, 12394, 1, 10, 3))
if mibBuilder.loadTexts: alvarionExperiment.setStatus('current')
alvarionModules = ObjectIdentity((1, 3, 6, 1, 4, 1, 12394, 1, 10, 4))
if mibBuilder.loadTexts: alvarionModules.setStatus('current')
alvarionMgmtV2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 12394, 1, 10, 5))
if mibBuilder.loadTexts: alvarionMgmtV2.setStatus('current')
variation = ObjectIdentity((1, 3, 6, 1, 4, 1, 12394, 1, 10, 7))
if mibBuilder.loadTexts: variation.setStatus('current')
mibBuilder.exportSymbols("ALVARION-SMI", variation=variation, PYSNMP_MODULE_ID=alvarionWireless, alvarionProducts=alvarionProducts, alvarionWireless=alvarionWireless, alvarionModules=alvarionModules, alvarionMgmtV2=alvarionMgmtV2, alvarionExperiment=alvarionExperiment)
| 95.678571 | 505 | 0.784621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 941 | 0.35125 |
e2ae2cf58ef27afd0ff34ba3c1c0b0bbf394a5ea | 954 | py | Python | setup.py | Sefrwahed/Alfred | 0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d | [
"MIT"
] | 5 | 2016-09-06T10:29:24.000Z | 2017-02-22T14:07:48.000Z | setup.py | Sefrwahed/Alfred | 0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d | [
"MIT"
] | 66 | 2016-09-06T06:40:24.000Z | 2022-03-11T23:18:05.000Z | setup.py | Sefrwahed/Alfred | 0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d | [
"MIT"
] | 3 | 2016-10-06T15:17:38.000Z | 2016-12-04T13:25:53.000Z | from setuptools import setup
from setuptools import find_packages
required_packages = [
'beautifulsoup4',
'cssselect',
'duckling',
'feedfinder2',
'feedparser',
'idna',
'jieba3k',
'JPype1',
'Logbook',
'lxml',
'newspaper3k',
'nltk',
'Pillow',
'PyQt5',
'python-dateutil',
'PyYAML',
'requests',
'requests-file',
'sip',
'six',
'tldextract',
'wit', ]
def readme():
with open('README.md') as f:
return f.read()
setup(name='alfred',
version='0.1',
description='Modular Bot',
url='https://github.com/Sefrwahed/Alfred',
author='Sefrwahed',
author_email='Sefrwahed@gmail.com',
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires=required_packages,
entry_points={
'console_scripts': [
'alfred = alfred.__main__:main']},
zip_safe=False, )
| 19.875 | 48 | 0.578616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.387841 |
e2af856c04d6440da75265a16b72d785a5cf429e | 2,877 | py | Python | openapi_core/schema/schemas/_format.py | gjo/openapi-core | cabe512fb043d3e95b93fbe7a20b8e2d095d7d99 | [
"BSD-3-Clause"
] | null | null | null | openapi_core/schema/schemas/_format.py | gjo/openapi-core | cabe512fb043d3e95b93fbe7a20b8e2d095d7d99 | [
"BSD-3-Clause"
] | null | null | null | openapi_core/schema/schemas/_format.py | gjo/openapi-core | cabe512fb043d3e95b93fbe7a20b8e2d095d7d99 | [
"BSD-3-Clause"
] | null | null | null | from base64 import b64encode, b64decode
import binascii
from datetime import datetime
from uuid import UUID
from jsonschema._format import FormatChecker
from jsonschema.exceptions import FormatError
from six import binary_type, text_type, integer_types
DATETIME_HAS_STRICT_RFC3339 = False
DATETIME_HAS_ISODATE = False
DATETIME_RAISES = ()
try:
import isodate
except ImportError:
pass
else:
DATETIME_HAS_ISODATE = True
DATETIME_RAISES += (ValueError, isodate.ISO8601Error)
try:
import strict_rfc3339
except ImportError:
pass
else:
DATETIME_HAS_STRICT_RFC3339 = True
DATETIME_RAISES += (ValueError, TypeError)
class StrictFormatChecker(FormatChecker):
def check(self, instance, format):
if format not in self.checkers:
raise FormatError(
"Format checker for %r format not found" % (format, ))
return super(StrictFormatChecker, self).check(
instance, format)
oas30_format_checker = StrictFormatChecker()
@oas30_format_checker.checks('int32')
def is_int32(instance):
return isinstance(instance, integer_types)
@oas30_format_checker.checks('int64')
def is_int64(instance):
return isinstance(instance, integer_types)
@oas30_format_checker.checks('float')
def is_float(instance):
return isinstance(instance, float)
@oas30_format_checker.checks('double')
def is_double(instance):
# float has double precision in Python
# It's double in CPython and Jython
return isinstance(instance, float)
@oas30_format_checker.checks('binary')
def is_binary(instance):
return isinstance(instance, binary_type)
@oas30_format_checker.checks('byte', raises=(binascii.Error, TypeError))
def is_byte(instance):
if isinstance(instance, text_type):
instance = instance.encode()
return b64encode(b64decode(instance)) == instance
@oas30_format_checker.checks("date-time", raises=DATETIME_RAISES)
def is_datetime(instance):
if isinstance(instance, binary_type):
return False
if not isinstance(instance, text_type):
return True
if DATETIME_HAS_STRICT_RFC3339:
return strict_rfc3339.validate_rfc3339(instance)
if DATETIME_HAS_ISODATE:
return isodate.parse_datetime(instance)
return True
@oas30_format_checker.checks("date", raises=ValueError)
def is_date(instance):
if isinstance(instance, binary_type):
return False
if not isinstance(instance, text_type):
return True
return datetime.strptime(instance, "%Y-%m-%d")
@oas30_format_checker.checks("uuid", raises=AttributeError)
def is_uuid(instance):
if isinstance(instance, binary_type):
return False
if not isinstance(instance, text_type):
return True
try:
uuid_obj = UUID(instance)
except ValueError:
return False
return text_type(uuid_obj) == instance
| 24.801724 | 72 | 0.73236 | 308 | 0.107056 | 0 | 0 | 1,848 | 0.642336 | 0 | 0 | 189 | 0.065693 |
e2b006944c9be3b936078a0b7fb57d47d441942b | 602 | py | Python | job-service/tests/test_database.py | anaai/anaai | fa49ba107e7c6d32aef46796452308334517a224 | [
"MIT"
] | 31 | 2022-01-26T21:47:31.000Z | 2022-03-21T12:22:39.000Z | job-service/tests/test_database.py | anaai/anaai | fa49ba107e7c6d32aef46796452308334517a224 | [
"MIT"
] | 3 | 2022-01-24T11:16:05.000Z | 2022-01-28T15:11:19.000Z | job-service/tests/test_database.py | anaai/anaai | fa49ba107e7c6d32aef46796452308334517a224 | [
"MIT"
] | 2 | 2022-02-08T12:07:55.000Z | 2022-02-08T19:45:40.000Z | import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database import Base, get_session
SQLALCHEMY_DATABASE_URL = "sqlite:///./test.db"
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
def override_get_session():
try:
session = TestingSessionLocal()
yield session
finally:
session.close()
@pytest.fixture()
def test_db():
Base.metadata.create_all(bind=engine)
yield
Base.metadata.drop_all(bind=engine)
| 21.5 | 82 | 0.775748 | 0 | 0 | 219 | 0.363787 | 118 | 0.196013 | 0 | 0 | 40 | 0.066445 |
e2b11260dba3afd786662e8f9abbc61d8cd6e587 | 783 | py | Python | holobot/extensions/admin/command_rule_manager_interface.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 1 | 2021-05-24T00:17:46.000Z | 2021-05-24T00:17:46.000Z | holobot/extensions/admin/command_rule_manager_interface.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 41 | 2021-03-24T22:50:09.000Z | 2021-12-17T12:15:13.000Z | holobot/extensions/admin/command_rule_manager_interface.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | null | null | null | from .models import CommandRule
from typing import Optional, Tuple
class CommandRuleManagerInterface:
async def get_rules_by_server(self, server_id: str, start_offset: int, page_size: int, group: Optional[str] = None, subgroup: Optional[str] = None) -> Tuple[CommandRule, ...]:
raise NotImplementedError
async def set_rule(self, rule: CommandRule) -> int:
raise NotImplementedError
async def remove_rule(self, rule_id: int) -> None:
raise NotImplementedError
async def remove_rules_by_server(self, server_id: str) -> None:
raise NotImplementedError
async def can_execute(self, server_id: str, channel_id: str, group: Optional[str], subgroup: Optional[str], command: str) -> bool:
raise NotImplementedError
| 41.210526 | 179 | 0.713921 | 714 | 0.911877 | 0 | 0 | 0 | 0 | 639 | 0.816092 | 0 | 0 |
e2b210571dd44806899488b6c2d460b54d183385 | 349 | py | Python | authentik/lib/utils/errors.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 15 | 2020-01-05T09:09:57.000Z | 2020-11-28T05:27:39.000Z | authentik/lib/utils/errors.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 302 | 2020-01-21T08:03:59.000Z | 2020-12-04T05:04:57.000Z | authentik/lib/utils/errors.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 3 | 2020-03-04T08:21:59.000Z | 2020-08-01T20:37:18.000Z | """error utils"""
from traceback import format_tb
TRACEBACK_HEADER = "Traceback (most recent call last):\n"
def exception_to_string(exc: Exception) -> str:
"""Convert exception to string stackrace"""
# Either use passed original exception or whatever we have
return TRACEBACK_HEADER + "".join(format_tb(exc.__traceback__)) + str(exc)
| 31.727273 | 78 | 0.73639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.452722 |
e2b2765478729a29e2e169e61178953a74d6c9a5 | 967 | py | Python | atlas/foundations_sdk/src/foundations/helpers/queued.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 296 | 2020-03-16T19:55:00.000Z | 2022-01-10T19:46:05.000Z | atlas/foundations_sdk/src/foundations/helpers/queued.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 57 | 2020-03-17T11:15:57.000Z | 2021-07-10T14:42:27.000Z | atlas/foundations_sdk/src/foundations/helpers/queued.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 38 | 2020-03-17T21:06:05.000Z | 2022-02-08T03:19:34.000Z |
_QUEUED_JOBS_KEY = 'projects:global:jobs:queued'
_ARCHIVED_JOBS_KEY = 'projects:global:jobs:archived'
def list_jobs(redis):
return {job_id.decode() for job_id in redis.smembers(_QUEUED_JOBS_KEY)}
def remove_jobs(redis, job_id_project_mapping):
for job_id, project_name in job_id_project_mapping.items():
redis.srem(_QUEUED_JOBS_KEY, job_id)
redis.srem('project:{}:jobs:queued'.format(project_name), job_id)
def job_project_names(redis, list_of_job_ids):
return {job_id: _job_project_name(redis, job_id) for job_id in list_of_job_ids}
def _job_project_name(redis, job_id):
project_name = redis.get('jobs:{}:project'.format(job_id))
if project_name:
return project_name.decode()
def add_jobs_to_archive(redis, list_of_job_ids):
for job_id in list_of_job_ids:
redis.sadd(_ARCHIVED_JOBS_KEY, job_id)
def list_archived_jobs(redis):
return {job_id.decode() for job_id in redis.smembers(_ARCHIVED_JOBS_KEY)} | 37.192308 | 83 | 0.761117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.104447 |
e2b36ae713060194509b1b46febdcb1524cbcc54 | 2,988 | py | Python | hypersolver/src/models.py | Juju-botu/diffeqml-research | aa796c87447e5299ec4f25a07fc4d032afb1f63e | [
"Apache-2.0"
] | 49 | 2020-08-06T12:27:05.000Z | 2022-03-16T12:32:06.000Z | hypersolver/src/models.py | Juju-botu/diffeqml-research | aa796c87447e5299ec4f25a07fc4d032afb1f63e | [
"Apache-2.0"
] | null | null | null | hypersolver/src/models.py | Juju-botu/diffeqml-research | aa796c87447e5299ec4f25a07fc4d032afb1f63e | [
"Apache-2.0"
] | 6 | 2020-12-07T06:10:40.000Z | 2022-03-08T09:23:22.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch ; import torch.nn as nn
import torchdyn; from torchdyn.models import *;
def autograd_trace(x_out, x_in, **kwargs):
"""Standard brute-force means of obtaining trace of the Jacobian, O(d) calls to autograd"""
trJ = 0.
for i in range(x_in.shape[1]):
trJ += torch.autograd.grad(x_out[:, i].sum(), x_in, allow_unused=False, create_graph=True)[0][:, i]
return trJ
class CNF(nn.Module):
def __init__(self, net, trace_estimator=None, noise_dist=None):
super().__init__()
self.net = net
self.trace_estimator = trace_estimator if trace_estimator is not None else autograd_trace;
self.noise_dist, self.noise = noise_dist, None
if self.trace_estimator in REQUIRES_NOISE:
assert self.noise_dist is not None, 'This type of trace estimator requires specification of a noise distribution'
def forward(self, x):
with torch.set_grad_enabled(True):
x_in = torch.autograd.Variable(x[:,1:], requires_grad=True).to(x) # first dimension reserved to divergence propagation
# the neural network will handle the data-dynamics here
x_out = self.net(x_in)
trJ = self.trace_estimator(x_out, x_in, noise=self.noise)
return torch.cat([-trJ[:, None], x_out], 1) + 0*x # `+ 0*x` has the only purpose of connecting x[:, 0] to autograd graph
# special hypersolver version for `HyperHeun`. Can be adapted to the template with
# the appropriate tableau
class HyperHeun(nn.Module):
def __init__(self, f, solvnet):
super().__init__()
self.m = solvnet
self.norm = 1e-3
self.f = f
self.controlled = True
def forward(self, ds, dx, dx_, x, x0):
ds = ds*torch.ones(x.shape[0],1).to(x)
if self.controlled:
xout = torch.cat([x,dx,dx_,x0,ds],1)
else:
xout = torch.cat([x,dx,dx_,ds],1)
xout = self.m(xout)
return xout
def trajectory(self, x0, s_span, nn=1):
traj = []
ds = s_span[1] - s_span[0]
x = x0
for i, s in enumerate(s_span):
dx = self.f(s, x).detach()
dx_ = self.f(s+ds, x + ds*dx).detach()
traj.append(x[None])
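            # Heun (trapezoidal) step plus the learned hypersolver correction, scaled by ds**3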
x = x + .5*ds*(dx + dx_) + nn*(ds**3)*self(ds, dx, dx_, x, x0)
return torch.cat(traj) | 42.685714 | 140 | 0.615462 | 1,930 | 0.645917 | 0 | 0 | 0 | 0 | 0 | 0 | 997 | 0.333668 |
e2b445e024df50d0f60436d23653b554afb1890d | 4,444 | py | Python | acictf/Move ZIG/code.py | benhunter/ctf | 3de1a222ea0034ef15eb6b75585b03a6ee37ec37 | [
"MIT"
] | null | null | null | acictf/Move ZIG/code.py | benhunter/ctf | 3de1a222ea0034ef15eb6b75585b03a6ee37ec37 | [
"MIT"
] | 1 | 2022-03-31T22:44:36.000Z | 2022-03-31T22:44:36.000Z | acictf/Move ZIG/code.py | benhunter/ctf | 3de1a222ea0034ef15eb6b75585b03a6ee37ec37 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import argparse
import socket
import base64
import binascii
# 'argparse' is a very useful library for building python tools that are easy
# to use from the command line. It greatly simplifies the input validation
# and "usage" prompts which really help when trying to debug your own code.
# parser = argparse.ArgumentParser(description="Solver for 'All Your Base' challenge")
# parser.add_argument("ip", help="IP (or hostname) of remote instance")
# parser.add_argument("port", type=int, help="port for remote instance")
# args = parser.parse_args()
ip = 'challenge.acictf.com'
port = 47912
# This tells the computer that we want a new TCP "socket"
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# This says we want to connect to the given IP and port
# socket.connect((args.ip, args.port))
socket.connect((ip, port))
# This gives us a file-like view of the connection which makes reading data
# easier since it handles the buffering of lines for you.
f = socket.makefile()
# while True:
# line = f.readline().strip()
# This iterates over data from the server a line at a time. This can cause
# some unexpected behavior like not seeing "prompts" until after you've sent
# a reply for it (for example, you won't see "answer:" for this problem).
# However, you can still send data and it will be handled correctly.
# Handle the information from the server to extact the problem and build
# the answer string.
# pass # Fill this in with your logic
# Send a response back to the server
# answer = "Clearly not the answer..."
# socket.send((answer + "\n").encode()) # The "\n" is important for the server's
# interpretation of your answer, so make
# sure there is only one sent for each
# answer.
def raw_dec(x):
e = x.encode()
b = bytes(e)
i = int.from_bytes(b, byteorder='big')
return i
def b64_dec(x):
b = base64.b64decode(x)
i = int.from_bytes(b, byteorder='big')
return i
def hex_dec(x):
# return int(binascii.unhexlify(x))
i = int(x, 16)
return i
def oct_dec(x):
d = int(x, 8)
return d
def bin_dec(x):
d = int(x, 2)
return d
def dec_raw(x):
# return str(x)
i = int(x).to_bytes(int(x).bit_length(), byteorder='big').strip(b'\x00')
return i.decode()
def dec_b64(x):
by = x.to_bytes((x.bit_length() + 7) // 8, byteorder='big').strip(b'A')
b64 = base64.b64encode(by)
return b64.decode()
def dec_hex(x):
# by = x.to_bytes(x.bit_length(), byteorder='big')
# h = binascii.hexlify(by)
h = hex(x)
return h[2:]
def dec_oct(x):
o = oct(x)
s = str(o)
return s[2:]
def dec_bin(x):
b = bin(x)
s = str(b)
return s[2:]
# def read_to_dash():
# pass
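# Discard the server's introductory lines until a separator line starting with '-'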
while True:
line = f.readline().strip()
if len(line) > 1 and line[0] == '-':
break
while True:
line = f.readline().strip().split()
print(line)
encode = line[0]
decode = line[2]
print(encode, decode)
src = f.readline().strip()
print(src)
# src to dec
if encode == 'raw':
dec = raw_dec(src)
elif encode == 'b64':
dec = b64_dec(src)
elif encode == 'hex':
dec = hex_dec(src)
elif encode == 'dec':
dec = int(src)
elif encode == 'oct':
dec = oct_dec(src)
elif encode == 'bin':
dec = bin_dec(src)
# dec to target
if decode == 'raw':
target = dec_raw(dec)
elif decode == 'b64':
target = dec_b64(dec)
elif decode == 'hex':
target = dec_hex(dec)
elif decode == 'dec':
target = str(dec)
elif decode == 'oct':
target = dec_oct(dec)
elif decode == 'bin':
target = dec_bin(dec)
# answer = "Clearly not the answer..."
socket.send((target + "\n").encode()) # The "\n" is important for the server's
# interpretation of your answer, so make
# sure there is only one sent for each
# answer.
line = f.readline().strip()
print(line)
line = f.readline().strip()
print(line)
line = f.readline().strip()
print(line)
if 'incorrect' in line:
print('hold up')
line = f.readline().strip()
print(line)
# ACI{for_great_justice_618c35ec}
| 27.263804 | 86 | 0.592484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,009 | 0.45207 |
e2b5a5ee8837e56f2ed115f0216ab56eb944e223 | 222 | py | Python | even-then-odd.py | Omi0604/DCU-Einstein- | b35e2657b8e27904035e881021c9bdf9e51675bb | [
"MIT"
] | null | null | null | even-then-odd.py | Omi0604/DCU-Einstein- | b35e2657b8e27904035e881021c9bdf9e51675bb | [
"MIT"
] | null | null | null | even-then-odd.py | Omi0604/DCU-Einstein- | b35e2657b8e27904035e881021c9bdf9e51675bb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
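# Read integers until "end": evens are printed immediately, odds are collected and printed afterwards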
a = []
b = []
s = input()
while s != "end":
n = int(s)
if n % 2 == 1:
a.append(n)
else:
print(n)
s = input()
i = 0
while i < len(a):
print(a[i])
i = i + 1
| 11.1 | 22 | 0.400901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.121622 |
e2b5e75237ba1c6e2b5426eb208b1203e2003661 | 2,418 | py | Python | sensing/slam/laser_slam/script/laser_transfer_old.py | lnexenl/XTDrone | f0402d44ac3b9a8435cfc67aea769ef659892010 | [
"MIT"
] | 1 | 2021-11-25T03:32:19.000Z | 2021-11-25T03:32:19.000Z | sensing/slam/laser_slam/script/laser_transfer_old.py | lnexenl/XTDrone | f0402d44ac3b9a8435cfc67aea769ef659892010 | [
"MIT"
] | null | null | null | sensing/slam/laser_slam/script/laser_transfer_old.py | lnexenl/XTDrone | f0402d44ac3b9a8435cfc67aea769ef659892010 | [
"MIT"
] | null | null | null | import rospy
from gazebo_msgs.srv import GetModelState
from geometry_msgs.msg import PoseStamped, Pose2D
from nav_msgs.msg import Odometry
from tf2_ros import TransformListener, Buffer
import sys
vehicle_type = sys.argv[1]
vehicle_id = sys.argv[2]
laser_slam_type = sys.argv[3]
rospy.init_node(vehicle_type+vehicle_id+'_'+laser_slam_type+'_laser_transfer')
pose_pub = rospy.Publisher(vehicle_type+'_'+ vehicle_id+"/mavros/vision_pose/pose", PoseStamped, queue_size=1)
local_pose = PoseStamped()
local_pose.header.frame_id = 'map'
laser_scan = Pose2D()
def odm_groundtruth_callback(msg):
global local_pose
local_pose.header.stamp = msg.header.stamp
local_pose.pose.position.z = msg.pose.pose.position.z
def laser_scan_matcher_callback(data):
global laser_scan
laser_scan = data
def laser_scan_matcher():
global local_pose
pose2d_sub = rospy.Subscriber(vehicle_type+'_'+ vehicle_id+"/pose2D", Pose2D, laser_scan_matcher_callback,queue_size=1)
rate = rospy.Rate(100)
while True:
local_pose.header.stamp = rospy.Time.now()
local_pose.pose.position.x = laser_scan.x
local_pose.pose.position.y = laser_scan.y
quaternion = tf.transformations.quaternion_from_euler(0, 0, laser_scan.theta)
local_pose.pose.orientation.x = quaternion[0]
local_pose.pose.orientation.y = quaternion[1]
local_pose.pose.orientation.z = quaternion[2]
local_pose.pose.orientation.w = quaternion[3]
pose_pub.publish(local_pose)
rate.sleep()
def aloam():
global local_pose
    tfBuffer = Buffer()
    tf_listener = TransformListener(tfBuffer)  # keeps tfBuffer populated for the lookup below
    rate = rospy.Rate(30)
while not rospy.is_shutdown():
try:
tfstamped = tfBuffer.lookup_transform('camera_init', 'aft_mapped', rospy.Time(0))
except:
continue
local_pose.header.stamp = rospy.Time().now()
local_pose.header.frame_id = 'map'
local_pose.pose.position = tfstamped.transform.translation
local_pose.pose.orientation = tfstamped.transform.rotation
pose_pub.publish(local_pose)
rate.sleep()
if __name__ == '__main__':
if laser_slam_type == '2d':
odom_groundtruth_sub = rospy.Subscriber('/xtdrone/'+vehicle_type+'_'+ vehicle_id+'/ground_truth/odom', Odometry, odm_groundtruth_callback)
laser_scan_matcher()
elif laser_slam_type == '3d':
aloam()
else:
print('input error')
| 35.043478 | 146 | 0.702233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.066584 |
e2b8694c4053862ecbc9d41a424c50a797c4b8a1 | 5,309 | py | Python | robot-pushing/push_env.py | kvablack/robosuite | 1c5a2723fbe9d39eed5f517ce885d59c40dc9b14 | [
"MIT"
] | null | null | null | robot-pushing/push_env.py | kvablack/robosuite | 1c5a2723fbe9d39eed5f517ce885d59c40dc9b14 | [
"MIT"
] | null | null | null | robot-pushing/push_env.py | kvablack/robosuite | 1c5a2723fbe9d39eed5f517ce885d59c40dc9b14 | [
"MIT"
] | null | null | null | import itertools
import os
import shutil
import numpy as np
import gym
from gym import spaces
import robosuite
from robosuite.controllers import load_controller_config
import robosuite.utils.macros as macros
import imageio, tqdm
from her import HERReplayBuffer
from tianshou.data import Batch
macros.SIMULATION_TIMESTEP = 0.02
np.set_printoptions(suppress=True)
class PushingEnvironment(gym.Env):
def __init__(self, horizon, control_freq, num_obstacles=0, renderable=False):
self.num_obstacles = num_obstacles
self.renderable = renderable
self.env = robosuite.make(
"Push",
robots=["Panda"],
controller_configs=load_controller_config(default_controller="OSC_POSE"),
has_renderer=False,
has_offscreen_renderer=renderable,
render_visual_mesh=renderable,
render_collision_mesh=False,
camera_names=["agentview"] if renderable else None,
control_freq=control_freq,
horizon=horizon,
use_object_obs=True,
use_camera_obs=renderable,
hard_reset=False,
num_obstacles=num_obstacles,
)
low, high = self.env.action_spec
self.action_space = spaces.Box(low=low[:3], high=high[:3])
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=[12 + 6 * num_obstacles])
self.curr_obs = None
self.step_num = None
def seed(self, seed=None):
if seed is not None:
np.random.seed(seed)
self.action_space.seed(seed)
def _get_flat_obs(self, obs):
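        # 12 base features (end-effector position plus three relative position vectors), then 6 more per obstacle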
return np.concatenate([
obs["robot0_eef_pos"],
obs["gripper_to_cube_pos"],
obs["gripper_to_goal_pos"],
obs["cube_to_goal_pos"],
] + list(itertools.chain.from_iterable(zip(
[obs[f"gripper_to_obstacle{i}_pos"] for i in range(self.num_obstacles)],
[obs[f"cube_to_obstacle{i}_pos"] for i in range(self.num_obstacles)]
))))
def reset(self):
self.curr_obs = self.env.reset()
self.step_num = 0
return self._get_flat_obs(self.curr_obs)
def step(self, action):
next_obs, reward, done, info = self.env.step(np.concatenate([action, [0, 0, 0]]))
info["TimeLimit.truncated"] = done
return_obs = self._get_flat_obs(next_obs)
if self.renderable:
info["image"] = self.curr_obs["agentview_image"][::-1]
info["step"] = self.step_num
if done:
info["final_image"] = next_obs["agentview_image"][::-1]
self.curr_obs = next_obs
self.step_num += 1
return return_obs, reward, done, info
def her(self, obs, obs_next):
"""
Takes a list of observations (and next observations) from an entire episode and returns
the HER-modified version of the episode in the form of 4 lists: (obs, obs_next, reward, done).
"""
obs = np.array(obs)
obs_next = np.array(obs_next)
# final cube position
fake_goal = obs_next[-1, :3] - obs_next[-1, 3:6]
# gripper to goal pos
obs[:, 6:9] = obs[:, :3] - fake_goal
obs_next[:, 6:9] = obs_next[:, :3] - fake_goal
# cube to goal pos
obs[:, 9:] = (obs[:, :3] - obs[:, 3:6]) - fake_goal
obs_next[:, 9:] = (obs_next[:, :3] - obs_next[:, 3:6]) - fake_goal
rewards = [self.env.compute_reward(fake_goal, on[:3] - on[3:6], {}) for on in obs_next]
# rewards = []
# for on in obs_next:
# reward = self.compute_reward(fake_goal, on[:3] - on[3:6], {})
# rewards.append(reward)
# if reward == 0:
# break
dones = np.full_like(rewards, False, dtype=bool)
dones[-1] = True
infos = {
"TimeLimit.truncated": dones.copy()
}
return obs[:len(rewards)], obs_next[:len(rewards)], np.array(rewards), dones, infos
def render(self, mode="human"):
assert self.renderable
return self.curr_obs["agentview_image"][::-1]
if __name__ == "__main__":
shutil.rmtree("render")
os.makedirs("render")
env = PushingEnvironment(1, 2, 10, renderable=True)
env.seed(0)
# buf = HERReplayBuffer(env, total_size=20, buffer_num=1)
obs = env.reset()
# for i in range(3):
# buf.add(Batch(
# obs=[obs],
# obs_next=[obs],
# act=[[0, 0, 0]],
# rew=[-100],
# done=[False if i < 2 else True]
# ))
# actions = [[0, 0, 1]] * 2 + [[0, -1, 0]] * 2 + [[1, 0, -1]] * 2 + [[0, 1, 0]] * 3\
# + [[0, 0, 0]] * 2 + [[1, 0, 0]] * 2 + [[0, 1, -1]] + [[-1, 0, 0]] * 4
for i in tqdm.tqdm(range(300)):
# print(env.env.robots[0]._joint_positions)
img = env.render()
imageio.imwrite(f"render/{i:03}.png", img)
obs_next, rew, done, _ = env.step(env.action_space.sample())
# if i == 17:
# done = True
# buf.add(Batch(
# obs=[obs],
# obs_next=[obs_next],
# act=[actions[i]],
# rew=[rew],
# done=[done]
# ))
obs = obs_next
if done:
# env.seed(i // 30 + 10)
env.reset()
| 34.927632 | 102 | 0.563195 | 3,746 | 0.705594 | 0 | 0 | 0 | 0 | 0 | 0 | 1,389 | 0.261631 |
e2b86d9c3452142df7bd7020bf19a4c57caca2de | 3,520 | py | Python | test/weak_agents_tests.py | JakubPetriska/poker-agent-kit | 12c28711c91447c708719454d1fbd224fa03189e | [
"MIT"
] | 19 | 2018-09-21T15:27:09.000Z | 2022-03-09T03:55:21.000Z | test/weak_agents_tests.py | JakubPetriska/poker-agent-kit | 12c28711c91447c708719454d1fbd224fa03189e | [
"MIT"
] | 6 | 2018-05-09T17:09:58.000Z | 2019-07-09T15:15:05.000Z | test/weak_agents_tests.py | JakubPetriska/poker-cfr | 12c28711c91447c708719454d1fbd224fa03189e | [
"MIT"
] | 2 | 2018-09-11T02:49:57.000Z | 2018-11-17T00:29:38.000Z | import unittest
import acpc_python_client as acpc
from tools.constants import Action
from weak_agents.action_tilted_agent import create_agent_strategy, create_agent_strategy_from_trained_strategy, TiltType
from tools.io_util import read_strategy_from_file
from evaluation.exploitability import Exploitability
from tools.game_utils import is_strategies_equal, is_correct_strategy
KUHN_POKER_GAME_FILE_PATH = 'games/kuhn.limit.2p.game'
LEDUC_POKER_GAME_FILE_PATH = 'games/leduc.limit.2p.game'
class WeakAgentsTests(unittest.TestCase):
def test_kuhn_action_tilted_agent_not_crashing(self):
strategy = create_agent_strategy(
KUHN_POKER_GAME_FILE_PATH,
Action.RAISE,
TiltType.ADD,
0.2,
cfr_iterations=20,
cfr_weight_delay=2,
show_progress=False)
self.assertTrue(is_correct_strategy(strategy))
def test_leduc_add_action_tilted_agent_not_crashing(self):
strategy = create_agent_strategy(
LEDUC_POKER_GAME_FILE_PATH,
Action.FOLD,
TiltType.ADD,
0.1,
cfr_iterations=5,
cfr_weight_delay=2,
show_progress=False)
self.assertTrue(is_correct_strategy(strategy))
def test_leduc_multiply_action_tilted_agent_not_crashing(self):
strategy = create_agent_strategy(
LEDUC_POKER_GAME_FILE_PATH,
Action.FOLD,
TiltType.MULTIPLY,
0.1,
cfr_iterations=5,
cfr_weight_delay=2,
show_progress=False)
self.assertTrue(is_correct_strategy(strategy))
def test_kuhn_action_tilted_agent(self):
kuhn_equilibrium, _ = read_strategy_from_file(
KUHN_POKER_GAME_FILE_PATH,
'strategies/kuhn.limit.2p-equilibrium.strategy')
game = acpc.read_game_file(KUHN_POKER_GAME_FILE_PATH)
exploitability = Exploitability(game)
tilted_agent_strategy = create_agent_strategy_from_trained_strategy(
KUHN_POKER_GAME_FILE_PATH,
kuhn_equilibrium,
Action.RAISE,
TiltType.ADD,
0.2)
self.assertTrue(is_correct_strategy(tilted_agent_strategy))
self.assertTrue(not is_strategies_equal(kuhn_equilibrium, tilted_agent_strategy))
equilibrium_exploitability = exploitability.evaluate(kuhn_equilibrium)
raise_add_tilted_exploitability = exploitability.evaluate(tilted_agent_strategy)
self.assertTrue(raise_add_tilted_exploitability > equilibrium_exploitability)
def test_kuhn_action_minus_tilted_agent(self):
kuhn_equilibrium, _ = read_strategy_from_file(
KUHN_POKER_GAME_FILE_PATH,
'strategies/kuhn.limit.2p-equilibrium.strategy')
game = acpc.read_game_file(KUHN_POKER_GAME_FILE_PATH)
exploitability = Exploitability(game)
tilted_agent_strategy = create_agent_strategy_from_trained_strategy(
KUHN_POKER_GAME_FILE_PATH,
kuhn_equilibrium,
Action.CALL,
TiltType.ADD,
-0.5)
self.assertTrue(is_correct_strategy(tilted_agent_strategy))
self.assertTrue(not is_strategies_equal(kuhn_equilibrium, tilted_agent_strategy))
equilibrium_exploitability = exploitability.evaluate(kuhn_equilibrium)
raise_add_tilted_exploitability = exploitability.evaluate(tilted_agent_strategy)
self.assertTrue(raise_add_tilted_exploitability > equilibrium_exploitability)
| 38.681319 | 120 | 0.715909 | 3,022 | 0.858523 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.041761 |
2c33d96452890bf440e51fd5374dc9777c10acd9 | 870 | py | Python | insert_default_data.py | daichi-yoshikawa/flask-boilerplate | 2a136eb713a698955dc78ce07364ac333806e6da | [
"MIT"
] | 1 | 2021-01-04T21:25:24.000Z | 2021-01-04T21:25:24.000Z | insert_default_data.py | daichi-yoshikawa/flask-boilerplate | 2a136eb713a698955dc78ce07364ac333806e6da | [
"MIT"
] | null | null | null | insert_default_data.py | daichi-yoshikawa/flask-boilerplate | 2a136eb713a698955dc78ce07364ac333806e6da | [
"MIT"
] | null | null | null | from app import create_app
from app.models import db
from app.models.role import Role
app = create_app()
app.app_context().push()
DefaultRoles = [
{
'name': 'user',
},
{
'name': 'admin',
},
]
def insert_default_roles():
try:
if Role.query.count() == 0:
print('roles table is empty. Insert default roles.')
for i, role in enumerate(DefaultRoles):
print(f"role:{role['name']} is inserted.")
role['id'] = i + 1
row = Role(**role)
db.session.add(row)
db.session.commit()
print('Default roles are inserted.')
else:
print('roles table has some data already. Skip inserting data.')
except Exception as e:
print(f'Error occurred. {type(e)}: {str(e)}\nExecute rollback.')
db.session.rollback()
print('Rollback done.')
if __name__ == '__main__':
insert_default_roles()
| 21.75 | 70 | 0.618391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.31954 |
2c341aa50389e1550ffc2517e153f369b8431dd7 | 381 | py | Python | 15-more-types/mysum.py | SeirousLee/example-code-2e | 81ec1669a4b8fd098db44a78a3d551287eec7bc9 | [
"MIT"
] | 990 | 2019-03-21T21:17:34.000Z | 2022-03-31T00:55:07.000Z | 15-more-types/mysum.py | FerMatPy/example-code-2e | 980f75032685921e08d5200513a261e83ef5f858 | [
"MIT"
] | 17 | 2019-12-18T18:00:05.000Z | 2022-01-12T14:23:47.000Z | 15-more-types/mysum.py | FerMatPy/example-code-2e | 980f75032685921e08d5200513a261e83ef5f858 | [
"MIT"
] | 276 | 2019-04-06T12:32:00.000Z | 2022-03-29T11:50:47.000Z | import functools
import operator
from collections.abc import Iterable
from typing import overload, Union, TypeVar
T = TypeVar('T')
S = TypeVar('S') # <1>
@overload
def sum(it: Iterable[T]) -> Union[T, int]: ... # <2>
@overload
def sum(it: Iterable[T], /, start: S) -> Union[T, S]: ... # <3>
def sum(it, /, start=0): # <4>
return functools.reduce(operator.add, it, start)
| 25.4 | 64 | 0.64042 | 0 | 0 | 0 | 0 | 137 | 0.35958 | 0 | 0 | 26 | 0.068241 |
2c34649f2cd3a47740d5b157c39c34a7ddd8f853 | 24,987 | py | Python | chess/board.py | quadratic-bit/pygame-chess | 83e8329a0e294008191770e7ddace52572bf7460 | [
"MIT"
] | 3 | 2021-12-22T08:28:21.000Z | 2022-01-05T03:44:50.000Z | chess/board.py | quadratic-bit/pygame-chess | 83e8329a0e294008191770e7ddace52572bf7460 | [
"MIT"
] | null | null | null | chess/board.py | quadratic-bit/pygame-chess | 83e8329a0e294008191770e7ddace52572bf7460 | [
"MIT"
] | null | null | null | from __future__ import annotations
import math
from collections import deque
from typing import Optional, Callable
import numpy as np
import pygame
from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, \
PIECE_INDICES, init_zobrist, MoveFlags, GameState
from chess.utils import load_image, load_font
class Chessboard:
"""Chessboard interface (8x8 field)"""
def __init__(self, light_colour="#F0D9B5", dark_colour="#B58863") -> None:
# Board itself
self._board = np.array([Piece.empty()] * 64)
# Active colour
self._active_colour = PieceColour.White
# Castling rights
self._castling_rights = {
PieceColour.White: {
CastlingType.KingSide: False,
CastlingType.QueenSide: False
},
PieceColour.Black: {
CastlingType.KingSide: False,
CastlingType.QueenSide: False
}
}
# Store piece types as strings
self._get_piece_str = {PieceType.Pawn: "pawn",
PieceType.Knight: "knight",
PieceType.Bishop: "bishop",
PieceType.Rook: "rook",
PieceType.Queen: "queen",
PieceType.King: "king"}
# Store piece move validators
self._get_validator: dict[
PieceType, Callable[[int, int, int, int], bool]] \
= {PieceType.Pawn: self._can_pawn_make,
PieceType.Knight: self._can_knight_make,
PieceType.Bishop: self._can_bishop_make,
PieceType.Rook: self._can_rook_make,
PieceType.Queen: self._can_queen_make,
PieceType.King: self._can_king_make}
# En Passant target
self._en_passant_target: Optional[int] = None
# Half-move clock
self._halfmoves = 0
# Init zobrist hash
self._z_table = init_zobrist()
# Board appearance
self._light_colour = pygame.Color(light_colour)
self._dark_colour = pygame.Color(dark_colour)
self._light_complementary = pygame.Color("#DBAB84")
self._dark_complementary = pygame.Color("#DBC095")
self._move_colour = pygame.Color("#8D80AD")
self._bg_colour = pygame.Color("#443742")
self._side = 100 # px
self._font_size = 45
self._font_gap = 15
self._font = load_font("ubuntumono/UbuntuMono-R.ttf", self._font_size)
self._font_colour = pygame.Color("white")
@property
def board(self) -> np.ndarray:
return self._board
@property
def halfmoves(self) -> int:
return self._halfmoves
@property
def active_colour(self) -> PieceColour:
return self._active_colour
@property
def passive_colour(self) -> PieceColour:
return PieceColour.White if self._active_colour == PieceColour.Black else PieceColour.Black
def hash(self) -> int:
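        # Zobrist hash: XOR one precomputed random key per (square, piece) pair on the board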
h = 0
for i in range(64):
piece = self._board[i]
if piece.Type != PieceType.Empty:
j = PIECE_INDICES[piece.Type.value | piece.Colour.value]
h ^= self._z_table[i][j]
return h
def set_colours(self, light_colour: str, dark_colour: str,
light_complementary: str, dark_complementary: str) -> None:
self._light_colour = pygame.Color(light_colour)
self._dark_colour = pygame.Color(dark_colour)
self._light_complementary = pygame.Color(light_complementary)
self._dark_complementary = pygame.Color(dark_complementary)
def render(self, screen: pygame.Surface,
last_move=None, skip=None, pos=None, game_info=None) -> None:
"""Render chessboard"""
if skip is not None and pos is None:
raise ValueError("skip is not None but pos is None")
screen.fill(self._bg_colour)
group = pygame.sprite.Group()
grabbed_data = None
skip: Optional[tuple[int]]
can_move_now = None if skip is None else self._get_all_piece_moves(skip[0] + skip[1] * 8)
for i, piece in enumerate(self._board):
x, y = i % 8, i // 8
if pos is not None and i in can_move_now:
pygame.draw.rect(screen, self._move_colour,
(x * self._side, y * self._side,
self._side, self._side))
elif last_move is not None and last_move.From == i:
pygame.draw.rect(screen, self._light_complementary,
(x * self._side, y * self._side,
self._side, self._side))
elif last_move is not None and last_move.To == i or (x, y) == skip:
pygame.draw.rect(screen, self._dark_complementary,
(x * self._side, y * self._side,
self._side, self._side))
else:
if (x + y) % 2 == 0:
colour = self._light_colour
else:
colour = self._dark_colour
pygame.draw.rect(screen, colour,
(x * self._side, y * self._side,
self._side, self._side))
if piece.Type == PieceType.Empty:
continue
elif (x, y) == skip:
grabbed_data = f"{self._get_piece_str[piece.Type]}_" \
f"{'w' if piece.Colour == PieceColour.White else 'b'}.png", i, group
else:
PieceSprite(
f"{self._get_piece_str[piece.Type]}_"
f"{'w' if piece.Colour == PieceColour.White else 'b'}"
f".png", i, group)
if grabbed_data is not None:
grabbed_piece = PieceSprite(*grabbed_data)
grabbed_piece.rect.x = pos[0] - 50 # type: ignore
grabbed_piece.rect.y = pos[1] - 50 # type: ignore
group.draw(screen)
text = ["Ход " + ("белых"
if self._active_colour == PieceColour.White
else "чёрных")]
if game_info is not None:
text.extend([f"Оценка: {game_info[0]}",
f"Позиций: {game_info[2]}",
f"Глубина: {game_info[3]}",
f"Время: {game_info[1]}с"])
line_pos = (screen.get_rect().h -
len(text) * (self._font_size + self._font_gap) -
self._font_gap) // 2
for line in text:
line_rendered = self._font.render(line, True, self._font_colour)
l_rect = line_rendered.get_rect()
screen.blit(line_rendered, (800 + (400 - l_rect.w) // 2, line_pos))
line_pos += self._font_size + self._font_gap
def at(self, x: int, y: int) -> Piece:
"""Get piece from position on the board"""
if 0 <= x <= 7 and 0 <= y <= 7:
return self._board[x + y * 8]
return Piece.empty()
def toggle_state(self) -> GameState:
"""Return game state after active colour move"""
other_colour = PieceColour.Black \
if self._active_colour == PieceColour.White \
else PieceColour.White
self._active_colour = other_colour
if self.get_all_moves(other_colour):
return GameState.Continue
elif self.king_is_safe(other_colour):
return GameState.Stalemate
else:
return GameState.Checkmate
def _force_can_make(self, move: Move) -> Optional[Move]:
"""
        Check if the move is correct, adding the corresponding flags to it
(!) Without checking king safety and turn order
"""
# Can't make incorrect move
if move.Captured != self._board[move.To]:
return None
this_piece: Piece = self._board[move.From]
other_piece: Piece = self._board[move.To]
# Can't make move w/o piece itself
if this_piece.Type == PieceType.Empty:
return None
# Can't eat pieces of your colour
if other_piece.Type != PieceType.Empty and \
other_piece.Colour == this_piece.Colour:
return None
# Resolving piece xy coordinates to calculate move possibility
y1, y2 = move.From // 8, move.To // 8
x1, x2 = move.From % 8, move.To % 8
# Castling
if this_piece.Type == PieceType.King and \
y1 == y2 and abs(x1 - x2) == 2 \
and move.Captured == Piece.empty():
castling = CastlingType.QueenSide if x1 - x2 == 2 \
else CastlingType.KingSide
if castling == CastlingType.QueenSide and (
self._board[move.To - 1] != Piece.empty() or
self._board[move.From - 1] != Piece.empty() or
self._board[move.From - 2] != Piece.empty()):
return None
elif castling == CastlingType.KingSide and (
self._board[move.From + 1] != Piece.empty() or
self._board[move.From + 2] != Piece.empty()):
return None
if self._castling_rights[this_piece.Colour][castling]:
lost_castling = {castling}
other_side = CastlingType.KingSide \
if castling == CastlingType.QueenSide \
else CastlingType.QueenSide
if self._castling_rights[this_piece.Colour][other_side]:
lost_castling.add(other_side)
move.Flags = MoveFlags(Castling=castling,
LoseCastling=lost_castling)
else:
return None
elif this_piece.Type == PieceType.King:
# Losing castling rights after king move
lost_castling = set()
if self._castling_rights[this_piece.Colour][CastlingType.KingSide]:
lost_castling.add(CastlingType.KingSide)
if self._castling_rights[this_piece.Colour][CastlingType.QueenSide]:
lost_castling.add(CastlingType.QueenSide)
move.Flags = MoveFlags(LoseCastling=lost_castling)
elif this_piece.Type == PieceType.Rook:
# Losing castling rights after rook move
if x1 == 0 and self._castling_rights[this_piece.Colour][CastlingType.QueenSide]:
move.Flags = MoveFlags(LoseCastling={CastlingType.QueenSide})
elif x1 == 7 and self._castling_rights[this_piece.Colour][CastlingType.KingSide]:
move.Flags = MoveFlags(LoseCastling={CastlingType.KingSide})
elif this_piece.Type == PieceType.Pawn and 0 <= move.To <= 7:
move.Flags = MoveFlags(PawnPromotion=PieceType.Queen)
if self._get_validator[this_piece.Type](x1, y1, x2, y2):
return move
return None
def can_make(self, move: Move) -> Optional[Move]:
"""Check if the move is correct"""
# Checking basic move correctness
completed_move = self._force_can_make(move)
if completed_move is not None:
# Can't capture the king
if self._board[move.To].Type == PieceType.King:
return None
# Checking king safety
self.make_move(move)
safety = self.king_is_safe(self._board[move.To].Colour)
self.unmake_move(move)
return completed_move if safety else None
return None
def make_move(self, move: Move) -> None:
"""
Make move on the board
        Use board.can_make() to check if the move is correct
"""
# Removing castling rights
if move.Flags.LoseCastling is not None:
this_colour = self._board[move.From].Colour
for castling in move.Flags.LoseCastling:
self._castling_rights[this_colour][castling] = False
# Moving piece
self._halfmoves += 1
self._board[move.To] = self._board[move.From]
self._board[move.From] = Piece.empty()
if move.Flags.PawnPromotion is not None:
self._board[move.To] = Piece(move.Flags.PawnPromotion,
self._board[move.To].Colour)
# Doing castling
if move.Flags.Castling is not None:
if move.Flags.Castling == CastlingType.KingSide:
self._board[move.From + 1] = self._board[move.To + 1]
self._board[move.To + 1] = Piece.empty()
else:
self._board[move.From - 1] = self._board[move.To - 2]
self._board[move.To - 2] = Piece.empty()
def unmake_move(self, move: Move) -> None:
"""Unmake move on the board (no additional checking)"""
# Returning castling rights
if move.Flags.LoseCastling is not None:
this_colour = self._board[move.To].Colour
for castling in move.Flags.LoseCastling:
self._castling_rights[this_colour][castling] = True
# Unmoving piece
self._halfmoves -= 1
self._board[move.From] = self._board[move.To]
self._board[move.To] = move.Captured
# Demoting pawn
if move.Flags.PawnPromotion is not None:
self._board[move.From] = Piece(PieceType.Pawn,
self._board[move.From].Colour)
# Undoing castling
if move.Flags.Castling is not None:
if move.Flags.Castling == CastlingType.KingSide:
self._board[move.To + 1] = self._board[move.From + 1]
self._board[move.From + 1] = Piece.empty()
else:
self._board[move.To - 2] = self._board[move.From - 1]
self._board[move.From - 1] = Piece.empty()
def get_all_moves(self, colour: PieceColour, no_castling=False) -> deque[Move]:
moves: deque[Move] = deque()
for i, piece_from in enumerate(self._board):
if piece_from.Type == PieceType.Empty or \
piece_from.Colour != colour:
continue
for j, piece_to in enumerate(self._board):
move = self.can_make(Move(i, j, piece_to))
if move is not None and (not no_castling or move.Flags.Castling is None):
moves.append(move)
return moves
def _get_all_piece_moves(self, pos: int) -> deque[int]:
moves: deque[int] = deque()
for i, piece_to in enumerate(self._board):
move = self.can_make(Move(pos, i, piece_to))
if move is not None:
moves.append(move.To)
return moves
def king_is_safe(self, colour: PieceColour) -> bool:
"""Check if king is safe on current board state"""
king_pos = np.where(self._board == Piece(PieceType.King, colour))[0][0]
king_x, king_y = king_pos % 8, king_pos // 8
right_side = range(king_x + 1, 8)
left_side = range(king_x - 1, -1, -1)
bottom_side = range(king_y + 1, 8)
top_side = range(king_y - 1, -1, -1)
o_colour = PieceColour.White if \
colour == PieceColour.Black else PieceColour.Black
o_pawn = Piece(PieceType.Pawn, o_colour)
o_knight = Piece(PieceType.Knight, o_colour)
o_bishop = Piece(PieceType.Bishop, o_colour)
o_rook = Piece(PieceType.Rook, o_colour)
o_queen = Piece(PieceType.Queen, o_colour)
o_king = Piece(PieceType.King, o_colour)
# Horizontal and vertical
def _line(iter_side: range, const_x: bool) -> bool:
for component in iter_side:
attacking_piece = self.at(king_x, component) \
if const_x \
else self.at(component, king_y)
if attacking_piece.Type != PieceType.Empty:
if attacking_piece == o_rook or \
attacking_piece == o_queen:
return True
return False
return False
if _line(right_side, False) or _line(left_side, False) or \
_line(top_side, True) or _line(bottom_side, True):
return False
# All diagonals
def _diagonal(iter_side_x: range, iter_side_y: range) -> bool:
for x, y in zip(iter_side_x, iter_side_y):
attacking_piece = self.at(x, y)
if attacking_piece.Type != PieceType.Empty:
if attacking_piece == o_bishop or \
attacking_piece == o_queen:
return True
return False
return False
if _diagonal(right_side, bottom_side) or \
_diagonal(left_side, bottom_side) or \
_diagonal(right_side, top_side) or \
_diagonal(left_side, top_side):
return False
# Pawns
sign_ = -1 if colour == PieceColour.White else 1
if self.at(king_x + 1, king_y + sign_) == o_pawn or \
self.at(king_x - 1, king_y + sign_) == o_pawn:
return False
# Knight
if self.at(king_x + 1, king_y + 2) == o_knight or \
self.at(king_x - 1, king_y + 2) == o_knight or \
self.at(king_x + 2, king_y + 1) == o_knight or \
self.at(king_x - 2, king_y + 1) == o_knight or \
self.at(king_x + 1, king_y - 2) == o_knight or \
self.at(king_x - 1, king_y - 2) == o_knight or \
self.at(king_x + 2, king_y - 1) == o_knight or \
self.at(king_x - 2, king_y - 1) == o_knight:
return False
# King
opponent_king_pos = np.where(self._board == o_king)[0][0]
if self._can_king_make(opponent_king_pos % 8,
opponent_king_pos // 8,
king_x, king_y):
return False
return True
def _can_pawn_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if pawn can make move"""
direction = -1 if \
self._board[y1 * 8 + x1].Colour == PieceColour.White \
else 1
to_capture = self._board[y2 * 8 + x2].Type != PieceType.Empty
dx = abs(x2 - x1)
if y2 - y1 == direction and \
((dx == 1 and to_capture) or (dx == 0 and not to_capture)):
return True
return (not to_capture and
(y1 == 1 or y1 == 6) and
y2 - y1 == direction * 2 and
dx == 0 and self._board[y1 * 8 + x1 + direction * 8].Type ==
PieceType.Empty)
@staticmethod
def _can_knight_make(x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if knight can make move"""
dx, dy = abs(x2 - x1), abs(y2 - y1)
return dx == 1 and dy == 2 or dx == 2 and dy == 1
def _can_bishop_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if bishop can make move"""
return (abs(x1 - x2) == abs(y1 - y2)) and self._diagonal_is_free(
x1, y1, x2, y2)
def _can_rook_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if rook can make move"""
return self._horizontal_is_free(x1, y1, x2, y2) \
if y1 == y2 else self._vertical_is_free(x1, y1, x2, y2) \
if x1 == x2 else False
def _can_queen_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if queen can make move"""
return \
self._can_bishop_make(x1, y1, x2, y2) or \
self._can_rook_make(x1, y1, x2, y2)
@staticmethod
def _can_king_make(x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if king can make move"""
return (abs(x2 - x1) < 2 and abs(y2 - y1) < 2) or \
(abs(x1 - x2) == 2 and y1 == y2)
def _diagonal_is_free(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if diagonal is free (not included end points)"""
sign_x = int(math.copysign(1, x2 - x1))
sign_y = int(math.copysign(1, y2 - y1))
for x, y in zip(range(x1 + sign_x, x2, sign_x),
range(y1 + sign_y, y2, sign_y)):
if self._board[y * 8 + x].Type != PieceType.Empty:
return False
return True
def _horizontal_is_free(self, x1: int, y1: int, x2: int, _: int) -> bool:
"""Check if horizontal is free (not included end points)"""
sign = int(math.copysign(1, x2 - x1))
for x in range(x1 + sign, x2, sign):
if self._board[y1 * 8 + x].Type != PieceType.Empty:
return False
return True
def _vertical_is_free(self, x1: int, y1: int, _: int, y2: int) -> bool:
"""Check if vertical is free (not included end points)"""
sign = int(math.copysign(1, y2 - y1))
for y in range(y1 + sign, y2, sign):
if self._board[y * 8 + x1].Type != PieceType.Empty:
return False
return True
@classmethod
def _parse_fen(cls, fen_string: str) -> Chessboard:
"""
Parse FEN string,
use Chessboard.from_fen() instead
"""
# Setup
error_info = f"Invalid FEN string: {fen_string}"
tmp_board = cls()
fen_dict = {"p": PieceType.Pawn,
"n": PieceType.Knight,
"b": PieceType.Bishop,
"r": PieceType.Rook,
"q": PieceType.Queen,
"k": PieceType.King}
fields = fen_string.split()
assert len(fields) == 6, error_info
tmp_position = 0
# Parse First field (Piece Placement)
for sym in fields[0]:
if sym == "/":
assert tmp_position % 8 == 0, error_info
continue
if sym.isdigit():
tmp_position += int(sym)
assert tmp_position < 65, error_info
continue
assert sym.lower() in fen_dict, error_info
clr = PieceColour.White if sym.isupper() else PieceColour.Black
type_ = fen_dict[sym.lower()]
tmp_board._board[tmp_position] = Piece(type_, clr)
tmp_position += 1
assert tmp_position == 64, error_info
# Parse Second Field (Active Color)
if fields[1] == "b":
tmp_board._active_colour = PieceColour.Black
elif fields[1] == "w":
tmp_board._active_colour = PieceColour.White
else:
assert False, error_info
# Parse Third field (Castling Rights)
if fields[2] != "-":
for castling in fields[2]:
if castling.lower() == "q":
tmp_board._castling_rights[
PieceColour.White if castling.isupper()
else PieceColour.Black][CastlingType.QueenSide] = True
elif castling.lower() == "k":
tmp_board._castling_rights[
PieceColour.White if castling.isupper()
else PieceColour.Black][CastlingType.KingSide] = True
else:
assert False, error_info
# Parse Fourth field (Possible En Passant Targets)
alg_cell = fields[3]
if alg_cell != "-":
assert len(alg_cell) == 2, error_info
assert 96 < ord(alg_cell[0]) < 105, error_info
assert alg_cell[1].isdigit() and 0 < int(alg_cell[1]) < 9
tmp_board._en_passant_target = int(
(8 - int(alg_cell[1])) * 8 + ord(alg_cell[0]) - 97)
        # Parse Fifth field (Half-move Clock)
        assert fields[4].isnumeric() and int(fields[4]) >= 0, error_info
        tmp_board._halfmoves = int(fields[4])
        # Parse Sixth field (Full-move Number)
        assert fields[5].isnumeric(), error_info
return tmp_board
@classmethod
def from_fen(cls, fen_string: str) -> Chessboard:
"""Create Chessboard using FEN"""
try:
return cls._parse_fen(fen_string)
except AssertionError as e:
raise ValueError(str(e))
@classmethod
def from_state(cls, state: np.ndarray) -> Chessboard:
"""Create Chessboard using state"""
tmp_board = cls()
tmp_board._board = state
return tmp_board
class PieceSprite(pygame.sprite.Sprite):
"""Piece class for drawing on a board"""
def __init__(self, sprite_img: str, pos: int,
*groups: pygame.sprite.AbstractGroup):
super().__init__(*groups)
self.image = load_image(sprite_img)
self.rect = self.image.get_rect()
self.move_sprite(pos)
def move_sprite(self, position: int) -> None:
self.rect.x = position % 8 * 100 # type: ignore
self.rect.y = position // 8 * 100 # type: ignore
| 43.230104 | 99 | 0.551407 | 24,693 | 0.986654 | 0 | 0 | 4,150 | 0.165821 | 0 | 0 | 2,693 | 0.107604 |
2c34aaffba06e447010de1f928152d6ddfcf4138 | 604 | py | Python | dataset.py | HimanchalChandra/visual-relationship-detection | 74922fbb8a3dc1a15b539a7178acb48256f3ad0c | [
"Apache-2.0"
] | 2 | 2021-04-16T08:33:24.000Z | 2021-10-15T12:21:53.000Z | dataset.py | HimanchalChandra/visual-relationship-detection | 74922fbb8a3dc1a15b539a7178acb48256f3ad0c | [
"Apache-2.0"
] | null | null | null | dataset.py | HimanchalChandra/visual-relationship-detection | 74922fbb8a3dc1a15b539a7178acb48256f3ad0c | [
"Apache-2.0"
] | null | null | null | from datasets.vrd import VrdDataset
def get_dataset(opt, type, transform):
assert opt.dataset in ['vrd', 'visual_genome']
if opt.dataset == 'vrd':
dataset = VrdDataset(opt.dataset_path, opt.num_classes, type, transform)
# elif opt.dataset == 'activitynet':
# training_data = ActivityNet(
# opt.video_path,
# opt.annotation_path,
# 'training',
# False,
# spatial_transform=spatial_transform,
# temporal_transform=temporal_transform,
# target_transform=target_transform)
return dataset
| 30.2 | 80 | 0.624172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 325 | 0.538079 |
2c354ca792cf2e4db55a4069cfe0952baf84ba72 | 4,398 | py | Python | conanfile.py | danimtb/conan-msys_installer | d5f9c1cbe6ef71035e25a26414858b55eb2011ff | [
"MIT"
] | null | null | null | conanfile.py | danimtb/conan-msys_installer | d5f9c1cbe6ef71035e25a26414858b55eb2011ff | [
"MIT"
] | null | null | null | conanfile.py | danimtb/conan-msys_installer | d5f9c1cbe6ef71035e25a26414858b55eb2011ff | [
"MIT"
] | null | null | null | from conans import ConanFile, tools
import os
class MsysBaseInstallerConan(ConanFile):
name = "msys-base_installer"
version = "2013072300"
license = "http://www.mingw.org/license"
url = "http://github.com/danimtb/conan-msys-installer"
settings = "os", "compiler"
build_policy = "missing"
description = "Msys"
build_requires = "7z_installer/1.0@conan/stable"
def configure(self):
if (self.settings.os != "Windows" and self.settings.compiler != "gcc"):
raise Exception("Not valid configuration: %s, %s. %s should be used in Windows, gcc" % (self.settings.os, self.settings.compiler, self.name))
def build(self):
files = {
"msys-bash": "http://prdownloads.sourceforge.net/mingw/bash-3.1.23-1-msys-1.0.18-bin.tar.xz",
"msys-bzip2": "http://prdownloads.sourceforge.net/mingw/bzip2-1.0.6-1-msys-1.0.17-bin.tar.lzma",
"msys-bzip2-dll": "http://prdownloads.sourceforge.net/mingw/libbz2-1.0.6-1-msys-1.0.17-dll-1.tar.lzma",
"msys-core": "http://prdownloads.sourceforge.net/mingw/msysCORE-1.0.19-1-msys-1.0.19-bin.tar.xz",
"msys-core-ext": "http://prdownloads.sourceforge.net/mingw/msysCORE-1.0.19-1-msys-1.0.19-ext.tar.xz",
"msys-core-lic": "http://prdownloads.sourceforge.net/mingw/msysCORE-1.0.19-1-msys-1.0.19-lic.tar.xz",
"msys-core-doc": "http://prdownloads.sourceforge.net/mingw/msysCORE-1.0.19-1-msys-1.0.19-doc.tar.xz",
"msys-coreutils": "http://prdownloads.sourceforge.net/mingw/coreutils-5.97-3-msys-1.0.13-bin.tar.lzma",
"msys-diffutils": "http://prdownloads.sourceforge.net/mingw/diffutils-2.8.7.20071206cvs-3-msys-1.0.13-bin.tar.lzma",
"msys-dos2unix": "http://prdownloads.sourceforge.net/mingw/dos2unix-7.3.2-1-msys-1.0.18-bin.tar.lzma",
"msys-file": "http://prdownloads.sourceforge.net/mingw/file-5.04-1-msys-1.0.13-bin.tar.lzma",
"msys-magic-dll": "http://prdownloads.sourceforge.net/mingw/libmagic-5.04-1-msys-1.0.13-dll-1.tar.lzma",
"msys-findutils": "http://prdownloads.sourceforge.net/mingw/findutils-4.4.2-2-msys-1.0.13-bin.tar.lzma",
"msys-gawk": "http://prdownloads.sourceforge.net/mingw/gawk-3.1.7-2-msys-1.0.13-bin.tar.lzma",
"msys-grep": "http://prdownloads.sourceforge.net/mingw/grep-2.5.4-2-msys-1.0.13-bin.tar.lzma",
"msys-gzip": "http://prdownloads.sourceforge.net/mingw/gzip-1.3.12-2-msys-1.0.13-bin.tar.lzma",
"msys-less": "http://prdownloads.sourceforge.net/mingw/less-436-2-msys-1.0.13-bin.tar.lzma",
"msys-libiconv": "http://prdownloads.sourceforge.net/mingw/libiconv-1.14-1-msys-1.0.17-dll-2.tar.lzma",
"msys-libintl": "http://prdownloads.sourceforge.net/mingw/libintl-0.18.1.1-1-msys-1.0.17-dll-8.tar.lzma",
"msys-make": "http://prdownloads.sourceforge.net/mingw/make-3.81-3-msys-1.0.13-bin.tar.lzma",
"msys-regex-dll": "http://prdownloads.sourceforge.net/mingw/libregex-1.20090805-2-msys-1.0.13-dll-1.tar.lzma",
"msys-sed": "http://prdownloads.sourceforge.net/mingw/sed-4.2.1-2-msys-1.0.13-bin.tar.lzma",
"msys-tar": "http://prdownloads.sourceforge.net/mingw/tar-1.23-1-msys-1.0.13-bin.tar.lzma",
"msys-termcap": "http://prdownloads.sourceforge.net/mingw/termcap-0.20050421_1-2-msys-1.0.13-bin.tar.lzma",
"msys-termcap-dll": "http://prdownloads.sourceforge.net/mingw/libtermcap-0.20050421_1-2-msys-1.0.13-dll-0.tar.lzma",
"msys-texinfo": "http://prdownloads.sourceforge.net/mingw/texinfo-4.13a-2-msys-1.0.13-bin.tar.lzma",
"msys-xz": "http://prdownloads.sourceforge.net/mingw/xz-5.0.3-1-msys-1.0.17-bin.tar.lzma",
"msys-lzma-dll": "http://prdownloads.sourceforge.net/mingw/liblzma-5.0.3-1-msys-1.0.17-dll-5.tar.lzma",
"msys-z-dll": "http://prdownloads.sourceforge.net/mingw/zlib-1.2.7-1-msys-1.0.17-dll.tar.lzma"
}
for util_name in files:
tools.download(files[util_name], util_name)
self.run("7z x %s" % util_name)
self.run("7z x %s~" % util_name)
os.unlink(util_name)
os.unlink("%s~" % util_name)
def package(self):
self.copy("*", dst="", src=".")
def package_info(self):
self.env_info.path.append(os.path.join(self.package_folder, "bin"))
| 67.661538 | 153 | 0.64211 | 4,349 | 0.988859 | 0 | 0 | 0 | 0 | 0 | 0 | 3,097 | 0.704184 |
2c35def8ee1647a9c2698f2968e607d08e8fd841 | 192 | py | Python | App/urls.py | python1801aclchemy/AXF | 64f8d1ceff49a1a9398b06dca8a3bcf9d0c76527 | [
"Apache-2.0"
] | null | null | null | App/urls.py | python1801aclchemy/AXF | 64f8d1ceff49a1a9398b06dca8a3bcf9d0c76527 | [
"Apache-2.0"
] | null | null | null | App/urls.py | python1801aclchemy/AXF | 64f8d1ceff49a1a9398b06dca8a3bcf9d0c76527 | [
"Apache-2.0"
] | null | null | null | from flask_restful import Api
from App.apis import Hello, Home
api = Api()
def init_urls(app):
api.init_app(app=app)
api.add_resource(Hello, "/hello/")
api.add_resource(Home, "/home/") | 17.454545 | 34 | 0.71875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.088542 |
2c367c20e8a7ce50a25b938ae8d9670ea04db4e7 | 1,865 | py | Python | thermal01/seek_to_csv/ir/simpleVideoCamera.py | De-Risking-Strategies/SensorFusionPublic | caa6ef7a5ac8991ee12ce2d5ad9b28e2b2b8ed38 | [
"MIT"
] | null | null | null | thermal01/seek_to_csv/ir/simpleVideoCamera.py | De-Risking-Strategies/SensorFusionPublic | caa6ef7a5ac8991ee12ce2d5ad9b28e2b2b8ed38 | [
"MIT"
] | null | null | null | thermal01/seek_to_csv/ir/simpleVideoCamera.py | De-Risking-Strategies/SensorFusionPublic | caa6ef7a5ac8991ee12ce2d5ad9b28e2b2b8ed38 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import cv2 as cv  # OpenCV alias used by the window cleanup at the end of the script
from irCamera_SeekMosaic import irCamera_SeekMosaic
#from PIL import Image
vlcamera = cv2.VideoCapture(0)
ircamera = irCamera_SeekMosaic(54339)
dsize = (1, 1) # placeholder; recomputed from the thermal frame size before it is used
vlret = False
irret = False
while 1:
if vlcamera is not None:
vlret, visible_image = vlcamera.read()
if ircamera is not None:
irret, thermal_image_data_fixedpoint = ircamera.read()
#resize the image data so we can see it later
height, width = thermal_image_data_fixedpoint.shape[:2]
if height < 320:
# make this a little bit larger to view it clearly
dsize = (width * 4, height * 4)
else:
dsize = (width * 2, height * 2)
thermal_image_data_fixedpoint_enlarged = cv2.resize(thermal_image_data_fixedpoint, dsize)
#find min and max pixel values so we can optimize the image contrast
[minVal, maxVal, minLoc, maxLoc] = cv2.minMaxLoc(thermal_image_data_fixedpoint_enlarged)
#Now create a version of the data to show on the screen or run through an inferencing model
imageDelta = maxVal - minVal
offsetImg = thermal_image_data_fixedpoint_enlarged - minVal
scaledImg = offsetImg / imageDelta
#thermal_img_normalized = cv2.normalize(thermal_image_data_fixedpoint_enlarged, dst=None, alpha=0, beta=65535, norm_type=cv2.NORM_MINMAX)
# then we'll just show the images in OpenCV windows
if vlret:
cv2.imshow('Visible', visible_image)
if irret:
#cv2.imshow('Thermal IR', thermal_img_normalized)
cv2.imshow('Presentation IR', scaledImg)
    k = cv2.waitKey(1)
    if k == 27:  # exit the capture loop on ESC so the cleanup below is reachable
        break
#
# And finally cleanup
#
if vlcamera is not None:
vlcamera.release()
if ircamera is not None:
ircamera.release()
cv.destroyAllWindows()
| 30.080645 | 145 | 0.689008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 602 | 0.322788 |
2c3962395215ba9ce077e3f3b133d79c166c4278 | 1,599 | py | Python | beerhunter/breweries/models.py | zhukovvlad/beerhunt-project | e841f4946c08275e9d189605ffe9026d6657d63f | [
"MIT"
] | null | null | null | beerhunter/breweries/models.py | zhukovvlad/beerhunt-project | e841f4946c08275e9d189605ffe9026d6657d63f | [
"MIT"
] | null | null | null | beerhunter/breweries/models.py | zhukovvlad/beerhunt-project | e841f4946c08275e9d189605ffe9026d6657d63f | [
"MIT"
] | null | null | null | import os
from uuid import uuid4
from django.db import models
from django.urls import reverse
from django.utils.timezone import now as timezone_now
from autoslug import AutoSlugField
from model_utils.models import TimeStampedModel
from django_countries.fields import CountryField
from django.utils.translation import gettext as _
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
def brewery_directory_path_with_uuid(instance, filename):
now = timezone_now()
extension = os.path.splitext(filename)[1]
extension = extension.lower()
uuid_for_url = uuid4()
return f"{now:%Y/%m}/breweries/{uuid_for_url}{instance.pk}{extension}"
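# Illustrative result of the helper above: a file "label.jpg" uploaded in July 2019 ends up
# under a path like "2019/07/breweries/<uuid4><pk>.jpg" (uuid4 and primary key concatenated).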
class Brewery(TimeStampedModel):
title = models.CharField(_('Title of brewery'), max_length=255)
slug = AutoSlugField(
"Brewery Slug",
unique=True,
always_update=False,
populate_from='title'
)
country_of_origin = CountryField(
"Country of Origin", blank=True
)
image = models.ImageField(
upload_to=brewery_directory_path_with_uuid,
default='images/default/fermentation.png',
null=True,
blank=True
)
icon = ImageSpecField(
source='image',
processors=[ResizeToFill(100, 100)],
format='PNG',
options={'quality': 60}
)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("breweries:BreweryDetail", kwargs={"slug": self.slug})
class Meta:
verbose_name = "Brewery"
verbose_name_plural = "Breweries"
| 27.101695 | 77 | 0.695435 | 909 | 0.56848 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.141338 |
2c3a335f0f1ca3f07b9332c7a1feb6ba4df8ba4f | 433 | py | Python | fluent_contents/tests/testapp/content_plugins.py | vinnyrose/django-fluent-contents | 7fa1a58a97099d8e2898a2016bf8abe2ea651b02 | [
"Apache-2.0"
] | null | null | null | fluent_contents/tests/testapp/content_plugins.py | vinnyrose/django-fluent-contents | 7fa1a58a97099d8e2898a2016bf8abe2ea651b02 | [
"Apache-2.0"
] | null | null | null | fluent_contents/tests/testapp/content_plugins.py | vinnyrose/django-fluent-contents | 7fa1a58a97099d8e2898a2016bf8abe2ea651b02 | [
"Apache-2.0"
] | null | null | null | from django.utils.safestring import mark_safe
from fluent_contents.extensions import ContentPlugin, plugin_pool
from fluent_contents.tests.testapp.models import RawHtmlTestItem
@plugin_pool.register
class RawHtmlTestPlugin(ContentPlugin):
"""
The most basic "raw HTML" plugin item, for testing.
"""
model = RawHtmlTestItem
def render(self, request, instance, **kwargs):
return mark_safe(instance.html)
| 28.866667 | 65 | 0.764434 | 231 | 0.533487 | 0 | 0 | 253 | 0.584296 | 0 | 0 | 67 | 0.154734 |
2c3b193f88b8cd9961e5a52478329879cd11b3e2 | 5,525 | py | Python | src/train_DNN/visualize_DNN_few_images.py | StanfordASL/NASA_ULI_Xplane_Simulator | 051fa492b650545442c790cf04a8cdb19632bd02 | [
"Apache-2.0"
] | 4 | 2021-06-28T19:13:58.000Z | 2021-12-10T03:10:38.000Z | src/train_DNN/visualize_DNN_few_images.py | StanfordASL/NASA_ULI_Xplane_Simulator | 051fa492b650545442c790cf04a8cdb19632bd02 | [
"Apache-2.0"
] | null | null | null | src/train_DNN/visualize_DNN_few_images.py | StanfordASL/NASA_ULI_Xplane_Simulator | 051fa492b650545442c790cf04a8cdb19632bd02 | [
"Apache-2.0"
] | 1 | 2021-05-25T20:52:11.000Z | 2021-05-25T20:52:11.000Z | """
Goal: Visualize images from aircraft camera and load as a pytorch dataloader
0. load images and the corresponding state information in labels.csv
1. test a trained DNN and visualize predictions
"""
import sys, os
import torch
import numpy as np
import pandas
import matplotlib.pyplot as plt
# make sure this is a system variable in your bashrc
NASA_ULI_ROOT_DIR=os.environ['NASA_ULI_ROOT_DIR']
import torchvision
from torchvision import transforms
from torch.utils.data import TensorDataset, DataLoader
from PIL import Image
DATA_DIR = os.environ['NASA_DATA_DIR']
CODE_DIR = NASA_ULI_ROOT_DIR + '/src/train_DNN/'
sys.path.append(CODE_DIR)
# where intermediate results are saved
# never save this to the main git repo
SCRATCH_DIR = NASA_ULI_ROOT_DIR + '/scratch/'
UTILS_DIR = NASA_ULI_ROOT_DIR + '/src/utils/'
sys.path.append(UTILS_DIR)
from textfile_utils import *
from model_taxinet import TaxiNetDNN
if __name__ == '__main__':
# CUDA model
torch.cuda.empty_cache()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('found device: ', device)
model_dir = NASA_ULI_ROOT_DIR + '/pretrained_DNN/'
# condition
condition = 'morning'
# larger images require a resnet, downsampled can have a small custom DNN
dataset_type = 'large_images'
# where raw images and csvs are saved
BASE_DATALOADER_DIR = DATA_DIR + '/' + dataset_type + '/' + condition
data_dir = BASE_DATALOADER_DIR + '/' + condition + '_test'
# MODEL
# instantiate the model
model = TaxiNetDNN()
# load the pre-trained model
if device.type == 'cpu':
model.load_state_dict(torch.load(model_dir + '/best_model.pt', map_location=torch.device('cpu')))
else:
model.load_state_dict(torch.load(model_dir + '/best_model.pt'))
model = model.to(device)
model.eval()
# LOSS FUNCTION
loss_func = torch.nn.MSELoss().to(device)
# how often to plot a few images for progress report
# warning: plotting is slow
NUM_PRINT = 2
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
# create a temp dir to visualize a few images
visualization_dir = SCRATCH_DIR + '/test_DNN_taxinet/viz/'
remove_and_create_dir(visualization_dir)
MAX_FILES = 200
# resize to 224 x 224 x 3 for EfficientNets
# prepare image transforms
# warning: you might need to change the normalization values given your dataset's statistics
tfms = transforms.Compose([transforms.Resize((IMAGE_WIDTH, IMAGE_HEIGHT)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225]),])
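    # The mean/std above are the standard ImageNet normalization statistics expected by
    # torchvision backbones such as the EfficientNets mentioned in the comment.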
image_list = [x for x in os.listdir(data_dir) if x.endswith('.png')]
# where the labels for each image (such as distance to centerline) are present
label_file = data_dir + '/labels.csv'
# dataframe of labels
labels_df = pandas.read_csv(label_file, sep=',')
# columns are:
# ['image_filename', 'absolute_time_GMT_seconds', 'relative_time_seconds', 'distance_to_centerline_meters', 'distance_to_centerline_NORMALIZED', 'downtrack_position_meters', 'downtrack_position_NORMALIZED', 'heading_error_degrees', 'heading_error_NORMALIZED', 'period_of_day', 'cloud_type']
for i, image_name in enumerate(image_list):
# open images and apply transforms
fname = data_dir + '/' + str(image_name)
image = Image.open(fname).convert('RGB')
tensor_image_example = tfms(image)
# get the corresponding state information (labels) for each image
specific_row = labels_df[labels_df['image_filename'] == image_name]
# there are many states of interest, you can modify to access which ones you want
dist_centerline_norm = specific_row['distance_to_centerline_NORMALIZED'].item()
# normalized downtrack position
downtrack_position_norm = specific_row['downtrack_position_NORMALIZED'].item()
# normalized heading error
heading_error_norm = specific_row['heading_error_NORMALIZED'].item()
labels = torch.tensor([dist_centerline_norm, downtrack_position_norm])
# run the model and get the loss
inputs = tensor_image_example.unsqueeze(0)
#inputs = tensor_image_example
inputs = inputs.to(device)
labels = labels.to(device)
print(' ')
print('inputs: ', inputs.shape)
print('labels: ', labels)
outputs = model(inputs)
loss = loss_func(outputs, labels).item()
preds = outputs.detach().cpu().numpy()[0]
print('preds: ', preds)
print('loss: ', loss)
print(' ')
# periodically save the images to disk
if i % NUM_PRINT == 0:
plt.imshow(image)
# original image
title_str_1 = ' '.join(['TRUE Dist Centerline: ', str(round(dist_centerline_norm,3)), 'Downtrack Pos. Norm: ', str(round(downtrack_position_norm,3)), '\n', 'Heading Error Norm: ', str(round(heading_error_norm, 3))])
title_str_2 = ' '.join(['PRED Dist Centerline: ', str(round(preds[0],3)), 'Downtrack Pos. Norm: ', str(round(preds[1],3))])
title_str = title_str_1 + '\n' + title_str_2
plt.title(title_str)
plt.savefig(visualization_dir + '/' + str(i) + '.png')
plt.close()
# early terminate for debugging
if i > MAX_FILES:
break
| 35.416667 | 294 | 0.662443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,179 | 0.394389 |
2c3ce123c1b3e7bd690b52526495222fa3c1ade0 | 588 | py | Python | release_type.py | sairam4123/GodotReleaseScriptPython | 2fd2644b0301f20b89b6772a0c93cec6d012f080 | [
"MIT"
] | null | null | null | release_type.py | sairam4123/GodotReleaseScriptPython | 2fd2644b0301f20b89b6772a0c93cec6d012f080 | [
"MIT"
] | null | null | null | release_type.py | sairam4123/GodotReleaseScriptPython | 2fd2644b0301f20b89b6772a0c93cec6d012f080 | [
"MIT"
] | null | null | null | from enum import Enum, auto
class ReleaseLevel(Enum):
alpha = auto()
beta = auto()
release_candidate = auto()
public = auto()
@classmethod
def has_value(cls, value):
        return value in cls._value2member_map_  # keys of this map are the raw enum values
class ReleaseType(Enum):
bugfix = auto()
minor = auto()
major = auto()
hotfix = auto()
@classmethod
def has_value(cls, value):
        return value in cls._value2member_map_  # keys of this map are the raw enum values
def value_from_key(dict_, value):
for key in dict_:
if dict_[key] == value:
return key
return None
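# Illustrative call (not in the original script): value_from_key({"a": 1, "b": 2}, 2) returns "b",
# i.e. despite its name the helper looks up a key by its value.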
| 18.967742 | 55 | 0.620748 | 425 | 0.722789 | 0 | 0 | 198 | 0.336735 | 0 | 0 | 0 | 0 |
2c3dba86ff323fef28c9a80d764d95da5f24f6b8 | 1,559 | py | Python | tests/test_cf_gh_pages_dns_records.py | mondeja/pre-commit-hooks | 226a386dd7cd4e7a9d7bb6c7aaff0fea7cdf269b | [
"BSD-3-Clause"
] | null | null | null | tests/test_cf_gh_pages_dns_records.py | mondeja/pre-commit-hooks | 226a386dd7cd4e7a9d7bb6c7aaff0fea7cdf269b | [
"BSD-3-Clause"
] | 14 | 2021-06-14T12:25:22.000Z | 2022-03-10T20:41:30.000Z | tests/test_cf_gh_pages_dns_records.py | mondeja/pre-commit-hooks | 226a386dd7cd4e7a9d7bb6c7aaff0fea7cdf269b | [
"BSD-3-Clause"
] | null | null | null | """Tests for 'cloudflare-gh-pages-dns' hook."""
import contextlib
import io
import os
import pytest
from hooks.cf_gh_pages_dns_records import check_cloudflare_gh_pages_dns_records
@pytest.mark.skipif(
not os.environ.get("CF_API_KEY"),
reason=(
"Cloudflare user API key defined in 'CF_API_KEY' environment variable"
" needed."
),
)
@pytest.mark.parametrize("quiet", (True, False), ids=("quiet=True", "quiet=False"))
@pytest.mark.parametrize(
("domain", "username", "expected_result", "expected_stderr"),
(
pytest.param(
"hrcgen.ml",
"mondeja",
True,
"",
id="domain=hrcgen.ml-username=mondeja", # configured with GH pages
),
pytest.param(
"foobar.baz",
"mondeja",
False,
(
"The domain 'foobar.baz' was not found being managed by your"
" Cloudflare account.\n"
),
id="domain=foobar.baz-username=mondeja", # inexistent zone
),
# TODO: add example domain to test bad configuration
),
)
def test_check_cloudflare_gh_pages_dns_records(
domain,
username,
expected_result,
expected_stderr,
quiet,
):
stderr = io.StringIO()
with contextlib.redirect_stderr(stderr):
result = check_cloudflare_gh_pages_dns_records(
domain,
username,
quiet=quiet,
)
assert result is expected_result
assert stderr.getvalue() == expected_stderr
| 25.145161 | 83 | 0.594612 | 0 | 0 | 0 | 0 | 1,373 | 0.880693 | 0 | 0 | 517 | 0.331623 |
2c3ef057fa163c3922fff7982a077b7053fa9b3c | 796 | py | Python | config.py | rSimulate/Cosmosium | f2489862b9b747458a6be9b884c9de75bd6eb3d2 | [
"CC-BY-4.0"
] | 18 | 2015-01-02T05:22:43.000Z | 2021-11-12T12:11:12.000Z | config.py | rSimulate/Cosmosium | f2489862b9b747458a6be9b884c9de75bd6eb3d2 | [
"CC-BY-4.0"
] | 3 | 2015-07-14T19:11:54.000Z | 2018-09-17T19:09:52.000Z | config.py | rSimulate/Cosmosium | f2489862b9b747458a6be9b884c9de75bd6eb3d2 | [
"CC-BY-4.0"
] | 4 | 2016-02-24T05:19:07.000Z | 2022-02-15T17:36:37.000Z | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
"""config.py: Default configuration."""
# Server:
SERVER = 'wsgiref'
DOMAIN = 'localhost:7099'
HOST = 'localhost'
PORT = 7099
# Meta:
# Note on making it work in localhost:
# * Open a terminal, then do:
# - sudo gedit /etc/hosts
# * Enter the desired localhost alias for 127.0.0.1:
# - (e.g. 127.0.0.1 mydomain.tld)
# * Don't forget to save the file :)
BASE_URI = 'http://mydomain.tld'
GOOGLE_BASE_URI = 'http://localhost' # Google doesn't seem to accept
# non-working urls, but accepts localhost
# Facebook:
FACEBOOK_CLIENT_ID = 'NULL'
FACEBOOK_CLIENT_SECRET = 'NULL'
# Twitter:
TWITTER_CLIENT_ID = 'NULL'
TWITTER_CLIENT_SECRET = 'NULL'
# Google:
GOOGLE_CLIENT_ID = 'NULL'
GOOGLE_CLIENT_SECRET = 'NULL'
| 23.411765 | 78 | 0.66206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.663317 |
2c3fb2a119544a5df7baf6e26ee12fd13218d950 | 5,046 | py | Python | devsupport/check_loggers/check_loggers.py | bradh/jmisb | 94456903782e08bb7a1909736810f171c2df8f8e | [
"MIT"
] | 26 | 2018-05-31T01:36:10.000Z | 2022-03-23T21:40:31.000Z | devsupport/check_loggers/check_loggers.py | bradh/jmisb | 94456903782e08bb7a1909736810f171c2df8f8e | [
"MIT"
] | 206 | 2018-05-22T17:56:12.000Z | 2022-03-18T10:55:27.000Z | devsupport/check_loggers/check_loggers.py | bradh/jmisb | 94456903782e08bb7a1909736810f171c2df8f8e | [
"MIT"
] | 10 | 2019-03-30T00:53:40.000Z | 2022-03-16T18:27:22.000Z | import os
modules = ['api', 'core']
sourcedirs = []
expectedToHaveNoTest = [ 'api/src/main/java/org/jmisb/api/klv/LdsParser.java',
'api/src/main/java/org/jmisb/api/video/VideoDecodeThread.java',
'api/src/main/java/org/jmisb/api/video/VideoOutput.java',
'api/src/main/java/org/jmisb/api/video/VideoStreamOutput.java',
'api/src/main/java/org/jmisb/api/video/DemuxerUtils.java',
'api/src/main/java/org/jmisb/api/video/MetadataDecodeThread.java',
'api/src/main/java/org/jmisb/api/video/VideoInput.java',
'api/src/main/java/org/jmisb/api/video/StreamDemuxer.java',
'api/src/main/java/org/jmisb/api/video/VideoFileOutput.java',
'api/src/main/java/org/jmisb/api/video/FfmpegLog.java',
'api/src/main/java/org/jmisb/api/video/FileDemuxer.java',
'core/src/main/java/org/jmisb/core/video/FrameConverter.java']
# flag that says whether everything was OK. Any failing check fails the result.
checkPasses = True
def fileHasMatchingLine(filePath, text):
f = open(filePath, 'r')
for line in f.readlines():
if text in line:
return True
return False
def usesJavaUtilLogging(filePath):
return fileHasMatchingLine(filePath, 'java.util.logging')
def hasLogging(filePath):
return fileHasMatchingLine(filePath, 'org.slf4j.Logger')
def isCalledLOGGER(text):
textParts = text.split('=')
leftPart = textParts[0].strip()
variableName = leftPart.split()[-1].strip()
# would be better to pick just one, but two cases isn't too bad
if variableName in ['logger', 'LOGGER']:
return True
else:
print('Unexpected variable name: ' + variableName)
return False
def isPrivateStaticFinal(text):
# TODO: fix final
# return text.startswith('private static final ')
return text.startswith('private static ')
def matchesExpectedFormat(text):
return isCalledLOGGER(text) and isPrivateStaticFinal(text)
def matchesFileName(text, filePath):
fileName = filePath.split('/')[-1]
# print(fileName)
className = fileName.split('.')[0]
# print(className)
# print(text.split('(')[-1])
classInLoggerName = text.split('(')[-1].split('.')[0]
# print(classInLoggerName)
if className == classInLoggerName:
return True
else:
print('Class from filename:' + className + ", but class from logger: " + classInLoggerName)
return False
def checkUsesExpectedLoggerName(filePath):
    global checkPasses  # without this the assignments below create a local instead of updating the module flag
    didFindFactory = False
f = open(filePath, 'r')
for line in f.readlines():
if "LoggerFactory.getLogger" in line:
didFindFactory = True
text = line.strip()
if not matchesExpectedFormat(text):
print('Does not match expected format ' + text)
checkPasses = False
if not matchesFileName(text, filePath):
print('Does not match expected class name ' + text)
checkPasses = False
if not didFindFactory:
print("Did not find expected factory line in " + filePath)
checkPasses = False
def addUsefulSourceFiles(filePath):
if usesJavaUtilLogging(filePath):
filesWithJavaUtilLogging.append(filePath)
if hasLogging(filePath):
filesWithLoggers.append(filePath)
def checkTestFile(testFilePath):
    global checkPasses  # update the module-level flag, not a new local
    fileIsOK = False
f = open(testFilePath, 'r')
for line in f.readlines():
if 'extends LoggerChecks' in line:
fileIsOK = True
break
if 'TestLoggerFactory.getTestLogger' in line:
fileIsOK = True
break
if not fileIsOK:
print(testFilePath + " did not contain the expected test")
checkPasses = False
def checkHasTestCase(sourceFilePath):
# print(sourceFilePath)
testFilePath = sourceFilePath.replace('main', 'test').replace('.java', 'Test.java')
# print(testFilePath)
if not os.path.exists(testFilePath):
if not sourceFilePath.replace('../../', '') in expectedToHaveNoTest:
print('Did not find test case for ' + sourceFilePath + " at " + testFilePath)
elif sourceFilePath.replace('../../', '') in expectedToHaveNoTest:
print('Found unexpected test case for ' + sourceFilePath + " at " + testFilePath)
else:
checkTestFile(testFilePath)
filesWithJavaUtilLogging = []
filesWithLoggers = []
for module in modules:
sourcedir = os.path.join("..", "..", module, "src", "main", "java")
for subdir, dirs, files in os.walk(sourcedir):
for file in files:
filePath = os.path.join(subdir, file)
if not filePath.endswith('.java'):
continue
addUsefulSourceFiles(filePath)
if len(filesWithJavaUtilLogging) > 0:
print('The following files use legacy Java logging:')
for fileName in filesWithJavaUtilLogging:
print('\t' + fileName)
checkPasses = False
print('The following files use SLF4J logging:')
for fileName in filesWithLoggers:
print('\t' + fileName)
checkUsesExpectedLoggerName(fileName)
checkHasTestCase(fileName)
| 35.787234 | 99 | 0.664883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,697 | 0.336306 |
2c40a38e7ff656eb25531ca19651e9e46f6c26f4 | 1,154 | py | Python | commands/pick.py | DaleNaci/AUC | 3a67cc1e5edcbd9440269e36abde0e400bfde515 | [
"Apache-2.0"
] | null | null | null | commands/pick.py | DaleNaci/AUC | 3a67cc1e5edcbd9440269e36abde0e400bfde515 | [
"Apache-2.0"
] | null | null | null | commands/pick.py | DaleNaci/AUC | 3a67cc1e5edcbd9440269e36abde0e400bfde515 | [
"Apache-2.0"
] | null | null | null | import asyncio
import random
import discord
from discord.ext.commands import Bot
from discord.ext import commands
from discord import Color, Embed
# This command randomly picks between the two non-banned maps.
#
# !pick [#] [#]
#
# The two numbers represent the two maps that were not banned from
# running the !maps command.
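#
# Example inferred from the mapping below: "!pick 1 3" replies with an embed whose
# description is randomly either "The Skeld" or "Polus".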
class Pick(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def pick(self, ctx):
# These numbers are the same as the ones listed in !maps
d = {
"1": "The Skeld",
"2": "Mira HQ",
"3": "Polus"
}
msg = ctx.message.content
nums = msg.split(" ")[1:]
# Must be two unique numbers that are 1, 2, 3
if len(nums)!=2 or not all(n in d for n in nums) or nums[0]==nums[1]:
await ctx.send("Invalid Input!")
return
text = d[random.choice(nums)]
embed = Embed(
title="Map",
color=Color.from_rgb(0, 0, 0),
description=text
)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Pick(bot))
| 22.627451 | 77 | 0.57539 | 778 | 0.674177 | 0 | 0 | 694 | 0.601386 | 670 | 0.580589 | 334 | 0.289428 |
2c42e21de4e45b73eacc95f497a0cac35eca60ff | 858 | py | Python | edx/quiz/unique_values.py | spradeepv/dive-into-python | ec27d4686b7b007d21f9ba4f85d042be31ee2639 | [
"MIT"
] | null | null | null | edx/quiz/unique_values.py | spradeepv/dive-into-python | ec27d4686b7b007d21f9ba4f85d042be31ee2639 | [
"MIT"
] | null | null | null | edx/quiz/unique_values.py | spradeepv/dive-into-python | ec27d4686b7b007d21f9ba4f85d042be31ee2639 | [
"MIT"
] | null | null | null | """
Write a Python function that returns a list of keys in aDict that map to integer values that are unique (i.e. values appear exactly once in aDict). The list of keys you return should be sorted in increasing order. (If aDict does not contain any unique values, you should return an empty list.)
This function takes in a dictionary and returns a list.
"""
def uniqueValues(aDict):
l = []
temp = {}
for key, val in aDict.items():
if temp.has_key(val):
if l.count(val) > 0:
l.remove(val)
else:
temp[val] = 1
l.append(val)
li = []
for key, val in aDict.items():
if val in l:
li.append(key)
li.sort()
return li
aDict = {1:1, 2:1, 3:3, 4:2, 5:3}
print uniqueValues({1: 1, 2: 1, 3: 1})
print uniqueValues({1: 1, 3: 2, 6: 0, 7: 0, 8: 4, 10: 0})
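# Expected output of the two calls above (hand-checked): in the first dict the value 1 appears
# three times, so nothing is unique and the result is []; in the second dict the values 1, 2
# and 4 each appear exactly once, giving the sorted key list [1, 3, 8].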
| 31.777778 | 293 | 0.589744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.417249 |
2c45f39a248d430cb808333a3a7cc174b4b12b63 | 6,909 | py | Python | tests/test_svarog.py | dswistowski/svarog | 7432fc6235446f07fe1f6654a877eaadb59eb4f5 | [
"MIT"
] | 4 | 2021-02-11T11:53:28.000Z | 2022-02-16T10:23:11.000Z | tests/test_svarog.py | dswistowski/svarog | 7432fc6235446f07fe1f6654a877eaadb59eb4f5 | [
"MIT"
] | 65 | 2020-08-04T10:40:07.000Z | 2022-03-29T03:03:50.000Z | tests/test_svarog.py | dswistowski/svarog | 7432fc6235446f07fe1f6654a877eaadb59eb4f5 | [
"MIT"
] | 1 | 2021-04-18T15:02:16.000Z | 2021-04-18T15:02:16.000Z | from dataclasses import dataclass
from dataclasses import field
from enum import Enum
from typing import Any
from typing import ClassVar
from typing import Literal
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Union
from uuid import UUID
import pytest
from svarog import Svarog
from svarog.types import NoneType
def test_can_build_primitive_str(forge):
assert forge(str, "the-string") == "the-string"
def test_can_build_primitive_int(forge):
assert forge(int, "3") == 3
def test_can_build_none(forge):
assert forge(NoneType, None) is None
def test_can_build_dataclass(forge):
@dataclass
class A:
foo: str
assert forge(A, {"foo": "bar"}) == A(foo="bar")
def test_can_build_complicated_dataclass(forge):
@dataclass
class A:
foo: int
bar: str
lorem: Sequence[int]
@dataclass
class B:
a: A
b: Optional[A]
c: Sequence[A]
assert forge(
B,
{
"a": {"foo": "3", "bar": "x", "lorem": [1, 3, 4]},
"b": None,
"c": [
{"foo": 1, "bar": 3, "lorem": ["2", "3"]},
{"foo": 2, "bar": 3, "lorem": []},
],
},
) == B(
a=A(foo=3, bar="x", lorem=[1, 3, 4]),
b=None,
c=[A(1, "3", [2, 3]), A(2, "3", [])],
)
@dataclass
class A:
b: "B"
c: "C"
@dataclass
class B:
number: int
@dataclass
class C:
letter: str
def test_can_build_dataclass_from_readme(forge):
assert forge(A, {"b": {"number": 42}, "c": {"letter": "x"}}) == A(B(42), C("x"))
def test_can_build_dataclass_with_optional_values(forge):
@dataclass
class A:
a: int
b: Sequence[str] = field(default_factory=lambda: ["x"])
assert forge(A, {"a": 3}) == A(a=3, b=["x"])
def test_can_build_optional(forge):
assert forge(Optional[str], None) is None
assert forge(Optional[str], "str") == "str"
def test_can_build_list(forge):
assert forge(list, [1, 2, "fooo"]) == [1, 2, "fooo"]
def test_can_build_bare_list(forge):
assert forge(Sequence, (1, 3, "2")) == [1, 3, "2"]
def test_can_build_list_of_any(forge):
assert forge(Sequence[Any], (1, 3, "2")) == [1, 3, "2"]
def test_can_build_typed_list(forge):
assert forge(Sequence[int], (1, 3, "2")) == [1, 3, 2]
def test_can_build_dict(forge):
assert forge(dict, {"foo": "bar"}) == {"foo": "bar"}
def test_can_build_mapping(forge):
assert forge(Mapping, {"foo": "bar"}) == {"foo": "bar"}
def test_can_build_typed_mapping(forge):
assert forge(Mapping[str, int], {"foo": 42, 42: "3"}) == {"foo": 42, "42": 3}
def test_can_build_typed_keys_mapping(forge):
assert forge(Mapping[str, Any], {"foo": 42, 42: "3"}) == {"foo": 42, "42": "3"}
def test_can_build_typed_value_mapping(forge):
assert forge(Mapping[Any, int], {"foo": 42, 42: "3"}) == {"foo": 42, 42: 3}
class WithRef:
def __init__(self, child: Optional["WithRef"]):
self._child = child
def __eq__(self, other: "WithRef") -> bool:
return type(self) == type(other) and self._child == other._child
def test_will_work_with_forward_ref(forge):
assert forge(WithRef, {"child": {"child": None}}) == WithRef(WithRef(None))
def test_non_optional_unions_are_not_supported_yet(forge):
with pytest.raises(NotImplementedError):
assert forge(Union[str, int], "3")
def test_can_apply_simple_types(forge, register_forge):
@dataclass
class A:
uuid: UUID
assert forge(A, {"uuid": "00000000-0000-0000-0000-000000000000"}) == A(UUID(int=0))
def test_can_register_forge(forge, register_forge):
class FooType(Enum):
LOREM = "lorem"
IPSUM = "ipsum"
class FooParams:
types: ClassVar[Mapping[FooType, "FooParams"]] = {}
def __init_subclass__(cls, type: FooType):
cls.types[type] = cls
@classmethod
def for_type(cls, type):
return cls.types[type]
@dataclass
class LoremFooParams(FooParams, type=FooType.LOREM):
lorem: str
@dataclass
class IpsumFooParams(FooParams, type=FooType.IPSUM):
ipsum: int
@dataclass
class Foo:
type: FooType
params: FooParams
@classmethod
def forge(cls, _, data, forge):
foo_type = forge(FooType, data["type"])
return Foo(
type=forge(FooType, foo_type),
params=forge(FooParams.for_type(foo_type), data["params"]),
)
register_forge(Foo, Foo.forge)
assert forge(Foo, {"type": "lorem", "params": {"lorem": "foo-bar"}}) == Foo(
type=FooType.LOREM, params=LoremFooParams("foo-bar")
)
assert forge(Foo, {"type": "ipsum", "params": {"ipsum": 42}}) == Foo(
type=FooType.IPSUM, params=IpsumFooParams(42)
)
def test_can_build_class_without_annotations(forge):
class A:
def __init__(self, a, b):
self._a = a
self._b = b
def __eq__(self, other):
return (
type(self) == type(other)
and self._a == other._a
and self._b == other._b
)
assert forge(A, {"a": 42, "b": "foo-bar"}) == A(42, "foo-bar")
def test_can_build_if_there_is_default(forge):
@dataclass
class A:
x: int = 0
assert forge(A, {"x": 42}) == A(42)
def test_can_build_nested_types_with_args(forge):
@dataclass
class A:
x: Optional[Mapping[str, Any]] = None
assert forge(A, {"x": {"foo": "bar"}}) == A(x={"foo": "bar"})
def test_can_build_literal(forge):
T = Literal["a", "b", "c"]
assert forge(T, "a") == "a"
def test_can_build_literal_key(forge):
@dataclass
class B:
x: Mapping[Literal["a"], Literal["b"]]
assert forge(B, {"x": {"a": "b"}}) == B(x={"a": "b"})
def test_can_do_camel_case(svarog: Svarog):
svarog.enable_snake_case()
@dataclass
class A:
foo: str
lorem_ipsum: str
assert svarog.forge(A, {"Foo": "bar", "LoremIpsum": "lorem"}) == A(
foo="bar", lorem_ipsum="lorem"
)
def test_do_not_camel_case_mappping(svarog: Svarog):
svarog.enable_snake_case()
@dataclass
class A:
lorem_ipsum: Mapping[str, Any]
assert svarog.forge(A, {"LoremIpsum": {"LookAtMe": 42}}) == A(
lorem_ipsum={"LookAtMe": 42}
)
def test_can_have_multiple_filters(svarog: Svarog):
svarog.add_filter(lambda t: True, lambda t, d: d * 2)
svarog.add_filter(lambda t: issubclass(t, float), lambda _, d: d * 1.5)
svarog.add_filter(lambda t: issubclass(t, int), lambda _, d: d * 3)
svarog.add_filter(lambda t: issubclass(t, str), lambda _, d: f"X{d}")
assert svarog.forge(str, "Boo") == "XBooBoo"
assert svarog.forge(int, 4) == 4 * 2 * 3
assert svarog.forge(float, 3.0) == 3.0 * 2 * 1.5
| 23.906574 | 87 | 0.592126 | 1,880 | 0.272109 | 0 | 0 | 1,371 | 0.198437 | 0 | 0 | 665 | 0.096251 |
2c463f3690d4e367a42cbb8add865a987c6ac538 | 16,186 | py | Python | vaqc/vaqc.py | PennLINC/vaqc | 7bc1f26d74e1c84d282744ed0feebd60a69e1267 | [
"MIT"
] | 2 | 2021-05-21T17:46:04.000Z | 2021-05-21T20:34:03.000Z | vaqc/vaqc.py | PennLINC/vaqc | 7bc1f26d74e1c84d282744ed0feebd60a69e1267 | [
"MIT"
] | 2 | 2021-05-20T12:06:10.000Z | 2021-09-28T18:38:46.000Z | vaqc/vaqc.py | PennLINC/vaqc | 7bc1f26d74e1c84d282744ed0feebd60a69e1267 | [
"MIT"
] | null | null | null | import base64
import re
import os.path as op
from io import BytesIO
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
from pathlib import Path
import pandas as pd
import nilearn.image as nim
from dipy.segment.mask import median_otsu
from nipype.utils.filemanip import save_json, load_json
def get_bids_params(fullpath):
bids_patterns = [
r'^(.*/)?(?P<subject_id>sub-[a-zA-Z0-9]+)',
'^.*_(?P<session_id>ses-[a-zA-Z0-9]+)',
'^.*_(?P<task_id>task-[a-zA-Z0-9]+)',
'^.*_(?P<acq_id>acq-[a-zA-Z0-9]+)',
'^.*_(?P<space_id>space-[a-zA-Z0-9]+)',
'^.*_(?P<rec_id>rec-[a-zA-Z0-9]+)',
'^.*_(?P<run_id>run-[a-zA-Z0-9]+)',
'^.*_(?P<dir_id>dir-[a-zA-Z0-9]+)'
]
matches = {"subject_id": None, "session_id": None, "task_id": None, "dir_id": None,
"acq_id": None, "space_id": None, "rec_id": None, "run_id": None}
for pattern in bids_patterns:
pat = re.compile(pattern)
match = pat.search(fullpath)
params = match.groupdict() if match is not None else {}
matches.update(params)
return matches
def reorient_array(data, aff):
# rearrange the matrix to RAS orientation
orientation = nib.orientations.io_orientation(aff)
data_RAS = nib.orientations.apply_orientation(data, orientation)
# In RAS
return nib.orientations.apply_orientation(
data_RAS,
nib.orientations.axcodes2ornt("IPL")
)
def mplfig(data, outfile=None, as_bytes=False):
fig = plt.figure(frameon=False, dpi=data.shape[0])
fig.set_size_inches(float(data.shape[1])/data.shape[0], 1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(data, aspect=1, cmap=plt.cm.Greys_r) # previous aspect="normal"
if outfile:
fig.savefig(outfile, dpi=data.shape[0], transparent=True)
plt.close()
return outfile
if as_bytes:
IObytes = BytesIO()
plt.savefig(IObytes, format='png', dpi=data.shape[0], transparent=True)
IObytes.seek(0)
base64_jpgData = base64.b64encode(IObytes.read())
return base64_jpgData.decode("ascii")
def mplfigcontour(data, outfile=None, as_bytes=False):
fig = plt.figure(frameon=False)
fig.set_size_inches(float(data.shape[1])/data.shape[0], 1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
bg = np.zeros(data.shape)
bg[:] = np.nan
ax.imshow(bg, aspect=1, cmap=plt.cm.Greys_r) # used to be aspect="normal"
ax.contour(data, colors="red", linewidths=0.1)
if outfile:
fig.savefig(outfile, dpi=data.shape[0], transparent=True)
plt.close()
return outfile
if as_bytes:
IObytes = BytesIO()
plt.savefig(IObytes, format='png', dpi=data.shape[0], transparent=True)
IObytes.seek(0)
base64_jpgData = base64.b64encode(IObytes.read())
return base64_jpgData.decode("ascii")
def load_and_reorient(filename):
img = nib.load(filename)
data, aff = img.get_data(), img.affine
data = reorient_array(data, aff)
return data
def make_a_square(data_mat, include_last_dim=True):
"""Applies zero padding to make a 2d matrix a square.
Examples:
---------
>>> too_long = np.arange(4 * 7).reshape((4, 7))
>>> long_squared = make_a_square(too_long)
>>> long_squared.shape
(7, 7)
>>> long_squared.sum(1)
array([ 0, 21, 70, 119, 168, 0, 0])
>>> too_tall = np.arange(6 * 5 * 3).reshape((6, 5, 3))
>>> tall_squared = make_a_square(too_tall)
>>> tall_squared.shape
(6, 6, 6)
>>> tall_2squared = make_a_square(too_tall, include_last_dim=False)
>>> tall_2squared.shape
(6, 6, 3)
"""
shapes = data_mat.shape if include_last_dim else data_mat.shape[:-1]
# Is it already square?
if all([shape == shapes[0] for shape in shapes]):
return data_mat
n_dims_to_pad = len(shapes)
largest_side = np.argmax(shapes)
sides_to_pad = np.arange(n_dims_to_pad).tolist()
sides_to_pad.pop(largest_side)
# Must specify padding for all dims
padding = [(0, 0)] * data_mat.ndim
for side_to_pad in sides_to_pad:
needed_padding = shapes[largest_side] - shapes[side_to_pad]
left_pad = int(needed_padding // 2)
right_pad = needed_padding - left_pad
padding[side_to_pad] = (left_pad, right_pad)
return np.pad(data_mat, padding, "constant", constant_values=(0, 0))
def nearest_square(limit):
answer = 0
while (answer + 1) ** 2 < limit:
answer += 1
if (answer ** 2) == limit:
return answer
else:
return answer + 1
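# Illustrative values (not part of the original module): nearest_square(16) returns 4 because
# 4 * 4 == 16, while nearest_square(17) returns 5, the smallest integer whose square reaches 17.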
def create_sprite_from_tiles(tile, out_file=None, as_bytes=False):
num_slices = tile.shape[-1]
N = nearest_square(num_slices)
M = int(np.ceil(num_slices/N))
# tile is square, so just make a big arr
pix = tile.shape[0]
if len(tile.shape) == 3:
mosaic = np.zeros((N*tile.shape[0], M*tile.shape[0]))
else:
mosaic = np.zeros((N*tile.shape[0], M*tile.shape[0], tile.shape[-2]))
mosaic[:] = np.nan
helper = np.arange(N*M).reshape((N, M))
for t in range(num_slices):
x, y = np.nonzero(helper == t)
xmin = x[0] * pix
xmax = (x[0] + 1) * pix
ymin = y[0] * pix
ymax = (y[0] + 1) * pix
x_span = xmax - xmin
y_span = ymax - ymin
if len(tile.shape) == 3:
mosaic[xmin:xmax, ymin:ymax] = tile[:x_span, :y_span, t]
else:
mosaic[xmin:xmax, ymin:ymax, :] = tile[:x_span, :y_span, :, t]
if as_bytes:
img = mplfig(mosaic, out_file, as_bytes=as_bytes)
return dict(img=img, N=N, M=M, pix=pix, num_slices=num_slices)
if out_file:
img = mplfig(mosaic, out_file), N, M, pix, num_slices
return dict(mosaic=mosaic, N=N, M=M, pix=pix, num_slices=num_slices)
def createSprite4D(dwi_file):
# initialize output dict
output = []
# load the file
dwi = load_and_reorient(dwi_file)[:, :, :, 1:]
# create tiles from center slice on each orientation
for orient in ['sag', 'ax', 'cor']:
axis_tiles = get_middle_slice_tiles(dwi, orient)
# create sprite images for the axis
results = embed_tiles_in_json_sprite(axis_tiles, as_bytes=True)
results['img_type'] = '4dsprite'
results['orientation'] = orient
output.append(results)
return output
def square_and_normalize_slice(slice2d):
tile_data = make_a_square(slice2d)
max_value = np.percentile(tile_data, 98)
tile_data[tile_data > max_value] = max_value
return tile_data / max_value
def embed_tiles_in_json_sprite(tile_list, as_bytes=True, out_file=None):
"""Make a big rectangle containing the images for a brainsprite.
Parameters:
-----------
tile_list : list
List of 2d square numpy arrays to stick in a mosaic
Returns:
--------
mosaic : np.ndarray
Mosaic of tile images
"""
# Tiles are squares
tile_size = tile_list[0].shape[0]
num_tiles = len(tile_list)
num_tile_rows = nearest_square(num_tiles)
num_tile_cols = int(np.ceil(num_tiles/num_tile_rows))
mosaic = np.zeros((num_tile_rows * tile_size,
num_tile_cols * tile_size))
i_indices, j_indices = np.unravel_index(np.arange(num_tiles),
(num_tile_rows, num_tile_cols))
i_tile_offsets = tile_size * i_indices
j_tile_offsets = tile_size * j_indices
for tile, i_offset, j_offset in zip(tile_list, i_tile_offsets,
j_tile_offsets):
mosaic[i_offset:(i_offset + tile_size),
j_offset:(j_offset + tile_size)] = tile
if as_bytes:
img = mplfig(mosaic, out_file, as_bytes=as_bytes)
return dict(img=img, N=num_tile_rows, M=num_tile_cols,
pix=tile_size, num_slices=num_tiles)
return dict(mosaic=mosaic, N=num_tile_rows, M=num_tile_cols,
pix=tile_size, num_slices=num_tiles)
def get_middle_slice_tiles(data, slice_direction):
"""Create a strip of intensity-normalized, square middle slices.
"""
slicer = {"ax": 0, "cor": 1, "sag": 2}
all_data_slicer = [slice(None), slice(None), slice(None)]
num_slices = data.shape[slicer[slice_direction]]
slice_num = int(num_slices / 2)
all_data_slicer[slicer[slice_direction]] = slice_num
middle_slices = data[tuple(all_data_slicer)]
num_slices = middle_slices.shape[2]
slice_tiles = [square_and_normalize_slice(middle_slices[..., mid_slice])
for mid_slice in range(num_slices)]
return slice_tiles
def createB0_ColorFA_Mask_Sprites(b0_file, colorFA_file, mask_file):
colorfa = make_a_square(load_and_reorient(colorFA_file), include_last_dim=False)
b0 = make_a_square(load_and_reorient(b0_file)[:, :, :, 0])
anat_mask = make_a_square(load_and_reorient(mask_file))
# make a b0 sprite
_, mask = median_otsu(b0)
outb0 = create_sprite_from_tiles(b0, as_bytes=True)
outb0['img_type'] = 'brainsprite'
# make a colorFA sprite, masked by b0
Q = make_a_square(colorfa, include_last_dim=False)
Q[np.logical_not(mask)] = np.nan
Q = np.moveaxis(Q, -2, -1)
outcolorFA = create_sprite_from_tiles(Q, as_bytes=True)
outcolorFA['img_type'] = 'brainsprite'
# make an anat mask contour sprite
outmask = create_sprite_from_tiles(
make_a_square(anat_mask, include_last_dim=False))
img = mplfigcontour(outmask.pop("mosaic"), as_bytes=True)
outmask['img'] = img
return outb0, outcolorFA, outmask
def create_report_json(dwi_corrected_file, eddy_rms, eddy_report,
color_fa_file, anat_mask_file,
outlier_indices,
eddy_qc_file,
outpath=op.abspath('./report.json')):
report = {}
report['dwi_corrected'] = createSprite4D(dwi_corrected_file)
b0, colorFA, mask = createB0_ColorFA_Mask_Sprites(dwi_corrected_file,
color_fa_file,
anat_mask_file)
report['b0'] = b0
# report['colorFA'] = colorFA
report['anat_mask'] = mask
report['outlier_volumes'] = outlier_indices.tolist()
with open(eddy_report, 'r') as f:
report['eddy_report'] = f.readlines()
report['eddy_params'] = np.genfromtxt(eddy_rms).tolist()
eddy_qc = load_json(eddy_qc_file)
report['eddy_quad'] = eddy_qc
save_json(outpath, report)
return outpath
def create_bold_Mask_Sprites(bold_file):
boldref = load_and_reorient(str(bold_file).replace("desc-preproc_bold",
"boldref"))
boldmask = load_and_reorient(
str(bold_file).replace("desc-preproc_bold", "desc-brain_mask"))
b0 = boldref
anat_mask = boldmask
# make a boldref sprite
outb0 = create_sprite_from_tiles(b0, as_bytes=True)
outb0['img_type'] = 'brainsprite'
# make an anat mask contour sprite
outmask = create_sprite_from_tiles(
make_a_square(anat_mask, include_last_dim=False))
img = mplfigcontour(outmask.pop("mosaic"), as_bytes=True)
outmask['img'] = img
return outb0, outmask
def get_fmriprep_outlier_volumes_from_confounds(confounds_df):
"""extract which volume numbers are outliers from the fmriprep confounds df.
Returns:
bad_volumes: list
eg [34, 35, 100, 150]
"""
# get the motion columns
motion = confounds_df.filter(regex='motion')
# find any rows with values above 0
return_df = motion[(motion > 0).any(1)]
# return the index (row names) of this df
return list(return_df.index)
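# Sketch of the intended behaviour on hypothetical data: a confounds frame with a
# "motion_outlier00" column that is 1 at rows 34 and 35 (and 0 elsewhere) would yield [34, 35].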
def get_fmriprep_stats_info(bold_corrected_file, confounds_df):
"""Create a dictionary that has single values per interesting thing.
@ziz
eg {"max_fd": 99.4, "max_rmsd":5, "dimension_x": 140, "subject_id}
"""
subject_info = get_bids_params(bold_corrected_file.name)
qc = {'mean_fd': np.nanmean(confounds_df.framewise_displacement),
'max_fd': np.nanmax(confounds_df.framewise_displacement),
'mean_rmsd': np.nanmean(confounds_df.rmsd),
'max_rmsd': np.nanmax(confounds_df.rmsd),
'mean_dvars': np.nanmean(confounds_df.dvars),
'max_dvars': np.nanmax(confounds_df.dvars)}
qc.update(subject_info)
qc['participant_id'] = qc['subject_id']
qc['file_name'] = bold_corrected_file.name.replace(".nii.gz", "").replace(".nii", "")
return qc
def create_bold_report_json(bold_corrected_file, confounds_file, outpath):
"""Creates a json file on disk with images and info about the fmriprep run.
"""
report = {}
report['dwi_corrected'] = createSprite4D(bold_corrected_file)
b0, mask = create_bold_Mask_Sprites(bold_corrected_file)
report['b0'] = b0
report['anat_mask'] = mask
# Load the confounds data
confounds_df = pd.read_csv(str(confounds_file), sep="\t")
# Find the outlier volumes
report['outlier_volumes'] = \
get_fmriprep_outlier_volumes_from_confounds(confounds_df)
report['eddy_params'] = np.nan_to_num(confounds_df[
['framewise_displacement', 'rmsd']].to_numpy()).tolist()
report['eddy_quad'] = {}
report['qc_scores'] = get_fmriprep_stats_info(bold_corrected_file,
confounds_df)
save_json(outpath, report)
return report['qc_scores']
def find_confounds_file(nii_file):
"""Finds the corresponding confounds.tsv file for a bold.nii.gz
Parameters:
nii_file: pathlib.Path
Returns:
confounds_file: pathlib.Path
"""
confounds_options = [str(fname).replace("desc-confounds_timeseries.tsv", "") for
fname in nii_file.parent.glob("*confound*tsv")]
confounds_file, = [fname for fname in confounds_options if
str(nii_file).startswith(fname)]
return Path(confounds_file + "desc-confounds_timeseries.tsv")
def report_from_nii(nii_file):
"""Creates a report json and returns subject QC scores dict.
Parameters:
nii_file: pathlib.Path
"""
output_file = Path(
str(nii_file).replace("desc-preproc_bold.nii.gz", "vaqc.json"))
print("Creating", str(output_file))
confounds_file = find_confounds_file(nii_file)
subject_scores = create_bold_report_json(nii_file, confounds_file,
output_file)
return subject_scores
def process_fmriprep_subject(subject_dir):
"""Creates a QC file and
Parameters:
subject_dir: pathlib.Path
"""
processed_images = list(subject_dir.rglob("**/*desc-preproc*_bold.nii.gz"))
print("found ", "\n\t".join(map(str, processed_images)))
image_qcs = []
for image_file in processed_images:
image_qcs.append(report_from_nii(image_file))
return image_qcs
def process_fmriprep(input_dir):
"""Take an fmriprep output directory and create report data
for the viewer.
Parameters:
input_dir: pathlib.Path
"""
subject_dirs = [_pth for _pth in input_dir.glob("sub-*")
if _pth.is_dir()]
summary_json = input_dir / "vaqc.json"
image_qcs = []
for subject_dir in subject_dirs:
print("Processing directory:", str(subject_dir))
image_qcs += process_fmriprep_subject(subject_dir)
# Write out the root directory vaqc.json
group_report = {
"report_type": "fmriprep_qc_report",
"pipeline": "fmriprep",
"pipeline_version": "20.2.1",
"boilerplate": "",
"metric_explanation": {
'mean_fd': '',
'max_fd': 'maximum absoulte framewise displacement',
'mean_rmsd': 'max RMSD of motion parameters',
'max_rmsd': 'max RMSD of motion parameters',
'mean_dvars': 'mean dvars',
'max_dvars': "maximum absolute dvars",
},
"subjects": image_qcs
}
save_json(str(summary_json), group_report)
return 1
| 33.100204 | 89 | 0.639256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,040 | 0.249598 |
2c46e20944df9fdddc17ddf37076817334c8143f | 1,392 | py | Python | apps/workspaces/migrations/0001_initial.py | fylein/fyle-integrations-platform-connector | 72f5d364deca8d98516e8486ec0ab377a8ceaccc | [
"MIT"
] | null | null | null | apps/workspaces/migrations/0001_initial.py | fylein/fyle-integrations-platform-connector | 72f5d364deca8d98516e8486ec0ab377a8ceaccc | [
"MIT"
] | 1 | 2021-12-08T13:51:14.000Z | 2021-12-08T13:51:14.000Z | apps/workspaces/migrations/0001_initial.py | fylein/fyle-integrations-platform-connector | 72f5d364deca8d98516e8486ec0ab377a8ceaccc | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-11 11:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Workspace',
fields=[
('id', models.AutoField(
help_text='Unique Id to identify a workspace', primary_key=True, serialize=False)),
('name', models.CharField(help_text='Name of the workspace', max_length=255)),
('fyle_org_id', models.CharField(help_text='org id', max_length=255, unique=True)),
('last_synced_at', models.DateTimeField(
help_text='Datetime when expenses were pulled last', null=True)),
('source_synced_at', models.DateTimeField(
help_text='Datetime when source dimensions were pulled', null=True)),
('destination_synced_at', models.DateTimeField(
help_text='Datetime when destination dimensions were pulled', null=True)),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Created at datetime')),
('updated_at', models.DateTimeField(auto_now=True, help_text='Updated at datetime')),
],
options={
'db_table': 'workspaces',
},
),
]
| 39.771429 | 105 | 0.586207 | 1,299 | 0.93319 | 0 | 0 | 0 | 0 | 0 | 0 | 428 | 0.307471 |
2c47a06228a2ffaddf789b3313d684521df077e9 | 3,634 | py | Python | django/test-prefect.py | AllenNeuralDynamics/ephys-framework-tests | ee940afeab54e5e25765a903a6b65f2e95be4c48 | [
"MIT"
] | null | null | null | django/test-prefect.py | AllenNeuralDynamics/ephys-framework-tests | ee940afeab54e5e25765a903a6b65f2e95be4c48 | [
"MIT"
] | 3 | 2022-01-22T04:34:46.000Z | 2022-01-26T02:14:21.000Z | django/test-prefect.py | AllenNeuralDynamics/ephys-framework-tests | ee940afeab54e5e25765a903a6b65f2e95be4c48 | [
"MIT"
] | 2 | 2022-01-21T22:38:27.000Z | 2022-01-25T01:30:09.000Z | import os
os.environ['PREFECT__LOGGING__LEVEL'] = 'DEBUG'
os.environ['DJANGO_ALLOW_ASYNC_UNSAFE'] = 'true'
from prefect import flow, task
import numpy as np
import pandas as pd
from django_pandas.io import read_frame
import helpers
@task
def insert_session(session_id):
from django_connect import connect
connect()
import db.models as d
session = helpers.get_session(session_id)
d.StimulusPresentation.objects.filter(session_id=session_id).delete()
# stimulus types
stim_types = read_frame(d.StimulusType.objects.all())
# stimulus presentations
stim_table = session.stimulus_presentations
stim_table = stim_table.replace({'null':None})
for k in ['phase','size','spatial_frequency']:
stim_table[k] = stim_table[k].apply(helpers.clean_string)
stim_table = stim_table.reset_index()
stim_table = stim_table.merge(stim_types.reset_index(), left_on='stimulus_name', right_on='name', how='left')
stim_table = stim_table.rename(columns={'id':'stimulus_type_id'}).drop(columns=['stimulus_name','name','index'])
stim_table['session_id'] = pd.Series([session.ecephys_session_id]*len(stim_table))
stim_table = stim_table.fillna(np.nan).replace({np.nan:None})
d.StimulusPresentation.objects.bulk_create([ d.StimulusPresentation(**v) for v in stim_table.to_dict(orient='records')])
@task
def list_units(session_id):
from django_connect import connect
connect()
import db.models as d
units = d.Unit.objects.filter(channel__session_probe__session_id=session_id)
return [ int(u.id) for u in units ]
@task
def insert_spike_times(session_id, unit_id):
from django_connect import connect
connect()
import db.models as d
print(f"insert_spike_times: session {session_id}, unit {unit_id}")
st = d.UnitSpikeTimes.objects.filter(unit_id=unit_id).delete()
session = helpers.get_session(session_id)
if unit_id in session.spike_times:
unit_spike_times = session.spike_times[unit_id]
st = d.UnitSpikeTimes(unit_id=unit_id, spike_times=list(unit_spike_times))
st.save()
@task
def insert_trial_spike_counts(unit_id):
from django_connect import connect
connect()
import db.models as d
d.TrialSpikeCounts.objects.filter(unit_id=unit_id).delete()
unit = d.Unit.objects.get(pk=unit_id)
session = unit.channel.session_probe.session
stim_table = d.StimulusPresentation.objects.filter(session=session)
stim_table = read_frame(stim_table)
unit_table = d.Unit.objects.filter(channel__session_probe__session=session)
duration = stim_table.stop_time-stim_table.start_time
spike_times = d.UnitSpikeTimes.objects.filter(unit=unit)
if len(spike_times) == 0:
return
spike_times = np.array(spike_times.first().spike_times)
count = helpers.spike_count(stim_table.start_time,stim_table.stop_time,spike_times)
this_df = pd.DataFrame(data = {
'unit_id':int(unit.id),
'stimulus_id':stim_table.id.values.astype(int),
'spike_count':count,
'spike_rate':np.divide(count,duration)
})
d.TrialSpikeCounts.objects.bulk_create([d.TrialSpikeCounts(**v) for v in this_df.to_dict(orient='records')])
@flow(name="spikes")
def spike_flow(session_id):
r0 = insert_session(session_id)
unit_ids = list_units(session_id, wait_for=[r0])
for unit_id in unit_ids.wait().result():
r1 = insert_spike_times(session_id=session_id, unit_id=unit_id)
insert_trial_spike_counts(unit_id=unit_id, wait_for=[r1])
if __name__ == "__main__":
spike_flow(session_id=732592105)
| 31.877193 | 124 | 0.726472 | 0 | 0 | 0 | 0 | 3,304 | 0.909191 | 0 | 0 | 374 | 0.102917 |
2c47d249a7a2a9897dd96152260a3f88189f02aa | 584 | py | Python | src/db/models/artist_album.py | jsbecerrab/Loka-prueba-backend | d47250a68e3e28375c0b8e0a6cdf78223b0d12cd | [
"MIT"
] | null | null | null | src/db/models/artist_album.py | jsbecerrab/Loka-prueba-backend | d47250a68e3e28375c0b8e0a6cdf78223b0d12cd | [
"MIT"
] | null | null | null | src/db/models/artist_album.py | jsbecerrab/Loka-prueba-backend | d47250a68e3e28375c0b8e0a6cdf78223b0d12cd | [
"MIT"
] | null | null | null | from sqlalchemy import Column, ForeignKey, Integer, DateTime
from sqlalchemy.orm import relationship
from ..database import Base
class Artist_album(Base):
__tablename__ = "artists_albums"
id = Column(Integer, primary_key=True, index=True)
artist_id = Column(Integer, ForeignKey("artists.id"))
album_id = Column(Integer, ForeignKey("albums.id"))
created_at = Column(DateTime, nullable=False)
updated_at = Column(DateTime)
artist = relationship("Artist", back_populates="artists_albums")
album = relationship("Album", back_populates="artists_albums") | 36.5 | 68 | 0.75 | 453 | 0.775685 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.14726 |
2c483b1523e5b0f0877e23893de88f8b0f837561 | 1,099 | py | Python | join/api/join_controller.py | andrequeiroz2/api-join | b8e2415ab25ca94a53ae042686c943e7c203d3fd | [
"MIT"
] | null | null | null | join/api/join_controller.py | andrequeiroz2/api-join | b8e2415ab25ca94a53ae042686c943e7c203d3fd | [
"MIT"
] | null | null | null | join/api/join_controller.py | andrequeiroz2/api-join | b8e2415ab25ca94a53ae042686c943e7c203d3fd | [
"MIT"
] | null | null | null | from flask import request
from firebase_admin import auth
import requests
from join import firebase
import ast
import time
def get_join():
token = request.headers['authorization']
decoded_token = auth.verify_id_token(token)
email = decoded_token['firebase']['identities']['email'][0]
tags_response = requests.get('http://tags:7000/api/users/tags', headers={'authorization': token}, verify=False)
dict_tag = tags_response.content.decode("UTF-8")
resp_tag = ast.literal_eval(dict_tag)
if "error" in resp_tag.keys():
return resp_tag
tasks_response = requests.get('http://tasks:1000/api/users/tasks', headers={'authorization': token}, verify=False)
dict_tasks = tasks_response.content.decode("UTF-8")
resp_tasks = ast.literal_eval(dict_tasks)
resp = {
"msg":"succeess",
"data":[{
"email":email,
"tags":resp_tag["data"][0]["tags"][0],
"tasks":resp_tasks["data"][0]["tasks"][0]
}],
"status":"200"
}
return resp
| 23.891304 | 118 | 0.613285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.2202 |
2c488abf9ca04504184d8340dff0f547466c24fd | 1,110 | py | Python | examples/simple_resource.py | pdyba/lambdalizator | 0371b8d3e25249096a9c7e7cf90fc590a99ad536 | [
"MIT"
] | 3 | 2020-09-26T11:05:32.000Z | 2021-09-25T08:58:10.000Z | examples/simple_resource.py | pdyba/lambdalizator | 0371b8d3e25249096a9c7e7cf90fc590a99ad536 | [
"MIT"
] | 15 | 2020-09-29T12:10:55.000Z | 2021-11-17T10:42:21.000Z | examples/simple_resource.py | pdyba/lambdalizator | 0371b8d3e25249096a9c7e7cf90fc590a99ad536 | [
"MIT"
] | 1 | 2020-09-26T11:05:38.000Z | 2020-09-26T11:05:38.000Z | #!/usr/bin/env python3.8
# coding=utf-8
"""
Simple Lambda Handler
"""
from lbz.dev.server import MyDevServer
from lbz.dev.test import Client
from lbz.exceptions import LambdaFWException
from lbz.resource import Resource
from lbz.response import Response
from lbz.router import add_route
class HelloWorld(Resource):
@add_route("/", method="GET")
def list(self):
return Response({"message": "HelloWorld"})
def handle(event, context):
try:
exp = HelloWorld(event)
resp = exp()
return resp
except Exception: # pylint: disable=broad-except
return LambdaFWException().get_response(context.aws_request_id).to_dict()
class TestHelloWorld:
def setup_method(self) -> None:
# pylint: disable=attribute-defined-outside-init
self.client = Client(resource=HelloWorld)
def test_filter_queries_all_active_when_no_params(self) -> None:
data = self.client.get("/").to_dict()["body"]
assert data == '{"message":"HelloWorld"}'
if __name__ == "__main__":
server = MyDevServer(acls=HelloWorld, port=8001)
server.run()
| 26.428571 | 81 | 0.691892 | 470 | 0.423423 | 0 | 0 | 100 | 0.09009 | 0 | 0 | 219 | 0.197297 |
2c48f48a4bfa46c1cff327814ecfae6eb5228d01 | 2,473 | py | Python | src/python/packages/loudspeakerconfig/createArrayConfigFromSofa.py | s3a-spatialaudio/VISR | 55f6289bc5058d4898106f3520e1a60644ffb3ab | [
"ISC"
] | 17 | 2019-03-12T14:52:22.000Z | 2021-11-09T01:16:23.000Z | src/python/packages/loudspeakerconfig/createArrayConfigFromSofa.py | s3a-spatialaudio/VISR | 55f6289bc5058d4898106f3520e1a60644ffb3ab | [
"ISC"
] | null | null | null | src/python/packages/loudspeakerconfig/createArrayConfigFromSofa.py | s3a-spatialaudio/VISR | 55f6289bc5058d4898106f3520e1a60644ffb3ab | [
"ISC"
] | 2 | 2019-08-11T12:53:07.000Z | 2021-06-22T10:08:08.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu May 3 08:04:22 2018
@author: af5u13
"""
import numpy as np
import os
from .geometry_functions import deg2rad, sph2cart
from loudspeakerconfig import createArrayConfigFile
def createArrayConfigFromSofa( sofaFile, xmlFile = None, lspLabels = None, twoDSetup = False, virtualLoudspeakers = [] ):
"""
    Create a loudspeaker configuration file from a SOFA file containing a number of emitters representing loudspeakers.
Parameters
==========
sofaFile: string
A file path to a SOFA file.
xmlFile: string, optional
Path of the XML output file to be written.
Optional argument, if not provided, the SOFA file path is used with the extension replaced by ".xml"
lspLabels: list of strings, optional
List of loudspeaker labels, must match the number of emitters in the SOFA files.
If not provided, numbered labels are automatically generated.
twoDSetup: bool, optional
        Flag specifying whether the array is to be considered plane (True) or 3D (False).
        Optional value, default is False (3D).
virtualLoudspeakers: list, optional
A list of virtual loudspeakers to be added to the setup. Each entry must be a Python dict as decribed
in the function :py:meth:`loudspeakerconfig.createArrayConfigFile`.
"""
import h5py # Import in the function to avoid a global dependency.
if not os.path.exists( sofaFile ):
raise ValueError( "SOFA file does not exist.")
if xmlFile is None:
xmlFile = os.path.basename(sofaFile) + '.xml'
    fh = h5py.File( sofaFile, 'r' )  # open read-only
ep =fh.get('EmitterPosition')
emitterCoordSystem = ep.attrs['Type'] # This is a required attribute.
emitterCoordSystem = emitterCoordSystem.decode("utf-8") # make it a string.
if emitterCoordSystem == "spherical":
posSph = np.squeeze( np.asarray(ep) )
posSph[:,0] = deg2rad( posSph[:,0] )
posSph[:,1] = deg2rad( posSph[:,1] )
posCart = sph2cart( posSph[:,0], posSph[:,1], posSph[:,2] )
else:
posCart = np.squeeze( np.asarray(ep) )
if twoDSetup:
posCart = posCart[:,0:2]
createArrayConfigFile( xmlFile,
posCart.T,
loudspeakerLabels = lspLabels,
twoDconfig = twoDSetup,
sphericalPositions = True,
virtualLoudspeakers = virtualLoudspeakers )
fh.close()
| 34.830986 | 121 | 0.650627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,245 | 0.503437 |
2c48f750067a643a09b94ad43993a6e0c7fcf3bf | 774 | py | Python | blog_app/migrations/0019_auto_20200901_0727.py | Rxavio/django-blog | 573ff668537465112d355490f19fa8bb8864fde8 | [
"MIT"
] | null | null | null | blog_app/migrations/0019_auto_20200901_0727.py | Rxavio/django-blog | 573ff668537465112d355490f19fa8bb8864fde8 | [
"MIT"
] | null | null | null | blog_app/migrations/0019_auto_20200901_0727.py | Rxavio/django-blog | 573ff668537465112d355490f19fa8bb8864fde8 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-09-01 05:27
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog_app', '0018_auto_20200830_0501'),
]
operations = [
migrations.AddField(
model_name='post',
name='favourite',
field=models.ManyToManyField(blank=True, related_name='favourite', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='status',
field=models.CharField(choices=[('published', 'Published'), ('draft', 'Draft')], default='published', max_length=10),
),
]
| 29.769231 | 129 | 0.630491 | 648 | 0.837209 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.22093 |
2c499a524e96db29eeb7fabf4bede1ea0d293384 | 837 | py | Python | utils.py | avb76/barbershop | 975b501b0c53600909910619e248dff627acaa22 | [
"MIT"
] | null | null | null | utils.py | avb76/barbershop | 975b501b0c53600909910619e248dff627acaa22 | [
"MIT"
] | null | null | null | utils.py | avb76/barbershop | 975b501b0c53600909910619e248dff627acaa22 | [
"MIT"
] | null | null | null | from datetime import datetime, date, timedelta
def hour_generator(start, end, step=10):
while start < end:
yield start
start = start + timedelta(minutes=step)
def create_hour(hour, date=None):
if not date:
return datetime.strptime(hour, '%H:%M')
h = datetime.strptime(hour, '%H:%M')
return datetime(year=date.year, month=date.month, day=date.day, hour=h.hour, minute=h.minute)
def right_intervals(hours):
duration = 40
#bar = Barber.query.get(2)
#hours = bar.get_hours(date.today(), duration)
intervals = int(duration / 10)
    # only start at slots where a full block of `intervals` consecutive slots still fits
    for i in range(len(hours) - intervals + 1):
works = True
for j in range(intervals):
if hours[i + j] != hours[i] + timedelta(minutes=10 * j):
works = False
break
if works:
print(hours[i])
| 27.9 | 97 | 0.600956 | 0 | 0 | 131 | 0.156511 | 0 | 0 | 0 | 0 | 86 | 0.102748 |
2c4ab657772de4e66a6590e0611b2e99cb5d0d0b | 222 | py | Python | display/display/handlers/mailbox/read.py | owlsn/h_crawl | c0431ee6484e61d9339553c3350962ea517749d6 | [
"MIT"
] | null | null | null | display/display/handlers/mailbox/read.py | owlsn/h_crawl | c0431ee6484e61d9339553c3350962ea517749d6 | [
"MIT"
] | 8 | 2021-03-18T20:33:29.000Z | 2022-03-11T23:21:04.000Z | display/display/handlers/mailbox/read.py | owlsn/h_crawl | c0431ee6484e61d9339553c3350962ea517749d6 | [
"MIT"
] | null | null | null | from display.handlers.base import BaseHandler
class MailboxReadHandler(BaseHandler):
def get(self):
title = 'MailboxReadHandler'
self.render('mailbox/read-mail.html', title = title, **self.render_dict) | 37 | 80 | 0.725225 | 175 | 0.788288 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.198198 |
2c4ad1bd8ba2570017282e1dff484a7cee430565 | 194 | py | Python | print_service.py | laashub-sua/demo-print | 76665ffa6e3bd675ffa111ff6c3aabed9b5ea6ec | [
"Apache-2.0"
] | null | null | null | print_service.py | laashub-sua/demo-print | 76665ffa6e3bd675ffa111ff6c3aabed9b5ea6ec | [
"Apache-2.0"
] | null | null | null | print_service.py | laashub-sua/demo-print | 76665ffa6e3bd675ffa111ff6c3aabed9b5ea6ec | [
"Apache-2.0"
] | null | null | null | import convert_pdf_2_jpg
import printer
def do_print(file_path):
if file_path.endswith('.pdf'):
file_path = convert_pdf_2_jpg.do_convert(file_path)
printer.do_print(file_path)
| 21.555556 | 59 | 0.757732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.030928 |
2c4cd55f4acf4c8318a07560161cd0c0d72dabcc | 12,377 | py | Python | plok/tests/test_blog_views.py | jarnoln/plokkeri | 0fd136730fcf1e9839ea8b41fd5aec3987e60ada | [
"MIT"
] | null | null | null | plok/tests/test_blog_views.py | jarnoln/plokkeri | 0fd136730fcf1e9839ea8b41fd5aec3987e60ada | [
"MIT"
] | 2 | 2020-06-05T17:13:39.000Z | 2021-06-01T21:50:01.000Z | plok/tests/test_blog_views.py | jarnoln/plokkeri | 0fd136730fcf1e9839ea8b41fd5aec3987e60ada | [
"MIT"
] | null | null | null | # from unittest import skip
from django.conf import settings
from django.contrib import auth
from django.urls import reverse
from django.test import TestCase
from plok.models import Blog, Article
from .ext_test_case import ExtTestCase
class BlogList(TestCase):
url_name = 'plok:blog_list'
def test_reverse_blog_list(self):
self.assertEqual(reverse(self.url_name), '/list/')
def test_uses_correct_template(self):
response = self.client.get(reverse(self.url_name))
self.assertTemplateUsed(response, 'plok/blog_list.html')
def test_default_context(self):
creator = auth.get_user_model().objects.create(username='creator')
blog1 = Blog.objects.create(created_by=creator, name="test_blog_1", title="Test blog 1")
blog2 = Blog.objects.create(created_by=creator, name="test_blog_2", title="Test blog 2")
response = self.client.get(reverse(self.url_name))
self.assertEqual(response.context['page'], 'blogs')
self.assertEqual(response.context['title'], 'Blogs')
self.assertEqual(response.context['blog_list'].count(), 2)
self.assertEqual(response.context['blog_list'][0], blog1)
self.assertEqual(response.context['blog_list'][1], blog2)
self.assertEqual(response.context['message'], '')
# self.assertEqual(response.context['can_add'], True)
self.assertEqual(response.context['can_add'], False)
class BlogPage(ExtTestCase):
url_name = 'plok:blog'
def test_reverse_blog(self):
self.assertEqual(reverse(self.url_name, args=['test_blog']), '/plok/test_blog/')
def test_uses_correct_template(self):
creator = auth.get_user_model().objects.create(username='creator')
blog = Blog.objects.create(created_by=creator, name="test_blog")
response = self.client.get(reverse(self.url_name, args=[blog.name]))
self.assertTemplateUsed(response, 'plok/blog_detail.html')
def test_get_absolute_url(self):
creator = auth.get_user_model().objects.create(username='creator')
blog = Blog.objects.create(created_by=creator, name="test_blog")
self.assertEqual(blog.get_absolute_url(), reverse(self.url_name, args=[blog.name]))
def test_default_context(self):
creator = auth.get_user_model().objects.create(username='creator')
blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog")
response = self.client.get(reverse(self.url_name, args=[blog.name]))
self.assertEqual(response.context['blog'], blog)
self.assertEqual(response.context['blog'].articles().count(), 0)
self.assertEqual(response.context['title'], 'Test blog')
self.assertEqual(response.context['message'], '')
self.assertEqual(response.context['can_edit'], False)
def test_404_no_blog(self):
response = self.client.get(reverse(self.url_name, args=['test_blog']))
self.assertTemplateUsed(response, '404.html')
def test_cant_edit_if_not_logged_in(self):
creator = auth.get_user_model().objects.create(username='creator')
blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog")
response = self.client.get(reverse(self.url_name, args=[blog.name]))
self.assertEqual(response.context['can_edit'], False)
def test_cant_edit_if_not_creator(self):
creator = auth.get_user_model().objects.create(username='creator')
blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog")
self.create_and_log_in_user()
response = self.client.get(reverse(self.url_name, args=[blog.name]))
self.assertEqual(response.context['can_edit'], False)
def test_shows_articles(self):
creator = auth.get_user_model().objects.create(username='creator')
blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog")
article = Article.objects.create(blog=blog, name="test_article", title="Test article", created_by=creator)
response = self.client.get(reverse(self.url_name, args=[blog.name]))
self.assertEqual(response.context['blog'].articles().count(), 1)
self.assertEqual(response.context['blog'].articles()[0], article)
class CreateBlogPage(ExtTestCase):
url_name = 'plok:blog_create'
def test_reverse_blog_create(self):
self.assertEqual(reverse(self.url_name), '/create/')
def test_uses_correct_template(self):
self.create_and_log_in_user()
response = self.client.get(reverse(self.url_name))
self.assertTemplateUsed(response, 'plok/blog_form.html')
def test_default_context(self):
self.create_and_log_in_user()
self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: 'en-us'})
response = self.client.get(reverse(self.url_name))
self.assertEqual(response.context['title'], 'Create new blog')
self.assertEqual(response.context['message'], '')
def test_can_create_new_blog(self):
self.assertEqual(Blog.objects.all().count(), 0)
self.create_and_log_in_user()
response = self.client.post(reverse(self.url_name), {
'name': 'test_blog',
'title': 'Test blog',
'description': 'For testing'},
follow=True)
self.assertEqual(Blog.objects.all().count(), 1)
self.assertEqual(response.context['blog'].name, 'test_blog')
self.assertEqual(response.context['blog'].title, 'Test blog')
self.assertEqual(response.context['blog'].description, 'For testing')
def test_cant_create_blog_if_not_logged_in(self):
response = self.client.get(reverse(self.url_name), follow=True)
self.assertTemplateUsed(response, 'account/login.html')
def test_cant_create_blog_with_existing_name(self):
user = self.create_and_log_in_user()
Blog.objects.create(created_by=user, name="test_blog", title="Test blog")
self.assertEqual(Blog.objects.all().count(), 1)
self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: 'en-us'})
response = self.client.post(
reverse(self.url_name),
{
'name': 'test_blog',
'title': 'Test blog',
'description': 'For testing'
},
follow=True)
self.assertEqual(Blog.objects.all().count(), 1)
self.assertTemplateUsed(response, 'plok/blog_form.html')
self.assertContains(response, 'Blog with this Name already exists')
class UpdateBlogPage(ExtTestCase):
url_name = 'plok:blog_update'
def test_reverse_blog_update(self):
self.assertEqual(reverse(self.url_name, args=['test_blog']), '/plok/test_blog/update/')
def test_uses_correct_template(self):
user = self.create_and_log_in_user()
blog = Blog.objects.create(created_by=user, name="test_blog")
response = self.client.get(reverse(self.url_name, args=[blog.name]))
self.assertTemplateUsed(response, 'plok/blog_form.html')
def test_404_no_blog(self):
self.create_and_log_in_user()
response = self.client.get(reverse(self.url_name, args=['test_blog']))
self.assertTemplateUsed(response, '404.html')
def test_can_update_blog(self):
user = self.create_and_log_in_user()
Blog.objects.create(created_by=user, name="test_blog", title="Test blog", description="Testing")
self.assertEqual(Blog.objects.all().count(), 1)
response = self.client.post(reverse(self.url_name, args=['test_blog']), {
'title': 'Test blog updated',
'description': 'Updated'},
follow=True)
self.assertEqual(Blog.objects.all().count(), 1)
blog = Blog.objects.all()[0]
self.assertEqual(blog.title, 'Test blog updated')
self.assertEqual(blog.description, 'Updated')
self.assertTemplateUsed(response, 'plok/blog_detail.html')
self.assertEqual(response.context['blog'].title, 'Test blog updated')
self.assertEqual(response.context['blog'].description, 'Updated')
def test_cant_update_blog_if_not_logged_in(self):
creator = auth.get_user_model().objects.create(username='creator')
Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing")
response = self.client.post(reverse(self.url_name, args=['test_blog']), {
'title': 'Test blog updated',
'description': 'Updated'},
follow=True)
blog = Blog.objects.all()[0]
self.assertEqual(blog.title, 'Test blog')
self.assertEqual(blog.description, 'Testing')
self.assertTemplateUsed(response, 'account/login.html')
# self.assertTemplateUsed(response, 'registration/login.html')
def test_cant_update_blog_if_not_creator(self):
creator = auth.get_user_model().objects.create(username='creator')
Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing")
self.create_and_log_in_user()
response = self.client.post(reverse(self.url_name, args=['test_blog']), {
'title': 'Test blog updated',
'description': 'Updated'},
follow=True)
self.assertTemplateUsed(response, 'plok/blog_detail.html')
class DeleteBlogPage(ExtTestCase):
url_name = 'plok:blog_delete'
def test_reverse_blog_delete(self):
self.assertEqual(reverse(self.url_name, args=['test_blog']), '/plok/test_blog/delete/')
def test_uses_correct_template(self):
user = self.create_and_log_in_user()
blog = Blog.objects.create(created_by=user, name="test_blog")
response = self.client.get(reverse(self.url_name, args=[blog.name]))
self.assertTemplateUsed(response, 'plok/blog_confirm_delete.html')
def test_404_no_blog(self):
user = self.create_and_log_in_user()
response = self.client.get(reverse(self.url_name, args=['test_blog']))
self.assertTemplateUsed(response, '404.html')
def test_can_delete_blog(self):
user = self.create_and_log_in_user()
Blog.objects.create(created_by=user, name="test_blog", title="Test blog", description="Testing")
self.assertEqual(Blog.objects.all().count(), 1)
response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True)
self.assertEqual(Blog.objects.all().count(), 0)
def test_cant_delete_blog_if_not_logged_in(self):
creator = auth.get_user_model().objects.create(username='creator')
Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing")
response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True)
# self.assertTemplateUsed(response, 'registration/login.html')
self.assertTemplateUsed(response, 'account/login.html')
def test_cant_delete_blog_if_not_creator(self):
creator = auth.get_user_model().objects.create(username='creator')
Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing")
user = self.create_and_log_in_user()
self.assertEqual(Blog.objects.all().count(), 1)
response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True)
self.assertEqual(Blog.objects.all().count(), 1)
self.assertTemplateUsed(response, '404.html')
def test_cant_delete_blog_if_blog_has_articles(self):
user = self.create_and_log_in_user()
blog = Blog.objects.create(created_by=user, name="test_blog", title="Test blog", description="Testing")
article = Article.objects.create(created_by=user, blog=blog, name="test_article", title="Test article")
self.assertEqual(Blog.objects.all().count(), 1)
self.assertEqual(Article.objects.all().count(), 1)
response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True)
self.assertEqual(Blog.objects.all().count(), 1)
self.assertEqual(Article.objects.all().count(), 1)
self.assertTemplateUsed(response, '404.html')
| 49.907258 | 114 | 0.671568 | 12,127 | 0.979801 | 0 | 0 | 0 | 0 | 0 | 0 | 2,041 | 0.164903 |
2c4d517cd55c0652efb772faaec52634d1b76f2f | 376 | py | Python | pipeline/reach-es-extractor/refparse/utils/__init__.py | wellcometrust/reach | 1aa42c7d8aaf0a91d033af8448a33f37563b0365 | [
"MIT"
] | 11 | 2019-11-04T08:24:00.000Z | 2021-12-16T23:11:47.000Z | pipeline/reach-es-extractor/refparse/utils/__init__.py | wellcometrust/reach | 1aa42c7d8aaf0a91d033af8448a33f37563b0365 | [
"MIT"
] | 274 | 2019-10-30T15:37:17.000Z | 2021-03-25T16:13:36.000Z | pipeline/reach-es-extractor/refparse/utils/__init__.py | wellcometrust/reference-parser | 1aa42c7d8aaf0a91d033af8448a33f37563b0365 | [
"MIT"
] | 3 | 2019-11-12T13:38:14.000Z | 2020-04-16T07:49:04.000Z | from .parse import structure_reference
from .fuzzy_match import FuzzyMatcher
from .file_manager import FileManager
from .serialiser import serialise_matched_reference, serialise_reference
from .exact_match import ExactMatcher
__all__ = [
structure_reference,
FuzzyMatcher,
FileManager,
serialise_matched_reference,
serialise_reference,
ExactMatcher
]
| 25.066667 | 72 | 0.81383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2c4e4b261a88afcb890a4de0d0a8d027edd43bd5 | 8,634 | py | Python | web_site/wx/backends/dj.py | Fixdq/dj-deep | 6712a722c7f620b76f21b1ebf0b618f42eb4a58a | [
"MIT"
] | null | null | null | web_site/wx/backends/dj.py | Fixdq/dj-deep | 6712a722c7f620b76f21b1ebf0b618f42eb4a58a | [
"MIT"
] | null | null | null | web_site/wx/backends/dj.py | Fixdq/dj-deep | 6712a722c7f620b76f21b1ebf0b618f42eb4a58a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on 2014-5-14
Django helper functions
@author: skycrab
@sns_userinfo
def oauth(request):
openid = request.openid
"""
import json
import logging
import base64
from functools import wraps
from django.conf import settings
from django.core.cache import cache
from django.shortcuts import redirect, render_to_response
from django.contrib.auth import login, logout, authenticate
from rest_framework.authtoken.models import Token
from core.redis_number import RedisStat
from django.core.urlresolvers import reverse
from .common import CommonHelper
from wx import class_property, WeixinHelper
from datetime import timedelta
from django.utils import timezone
from shop.models import ShopInfo
from account.models import User
import urllib, urlparse
import time
logger = logging.getLogger('control')
class Helper(CommonHelper):
"""微信具体逻辑帮组类"""
@class_property
def cache(cls):
"""返回cache对象"""
return cache
@class_property
def secret_key(cls):
"""返回cookie加密秘钥"""
return settings.SECRET_KEY
def sns_userinfo_proxy_callback(callback=None):
"""
    Decorator that obtains user info through WeChat web-page authorization
callback(openid, userinfo):
return user
"""
def wrap(func):
@wraps(func)
def inner(request, *args, **kwargs):
if 'MicroMessenger' in request.META.get('HTTP_USER_AGENT', ''):
shop = request.GET.get('shop', None)
if not shop:
response = func(request, *args, **kwargs)
else:
                    # check the payment situation
if request.is_secure():
url = request.build_absolute_uri().replace('https://', 'http://')
return redirect(url)
unionid = request.session.get('unionid', '')
timestamp_now = int(time.time())
ok, unionid = Helper.check_cookie(unionid)
redis = RedisStat()
if ok:
                        # check whether the WeChat user token still exists; if not, authorization is required
redis_info = redis.get(unionid)
if not redis_info:
ok = False
else:
ws = json.loads(redis_info)
if not ws['unionid']:
ok = False
if not ok:
                        # unionid is invalid, re-authorize
state = request.GET.get('state', None)
if state:
                            # aa|bb|cc  aa: closest upstream promoter  bb: who forwarded it  cc: timestamp
state_list = urllib.unquote(state).split('|')
if len(state_list) != 3:
state = '0|0|%d' % timestamp_now
else:
state = '0|0|%d' % timestamp_now
state += '|%s' % shop
rs_id = redis.get('redirect_url_id')
if rs_id:
url_id = redis.incr('redirect_url_id')
else:
url_id = 1
redis.set('redirect_url_id', url_id)
redis.set_ttl('redirect_url_id_%d' % url_id, request.build_absolute_uri(), 60)
                        # redirect to the proxy WeChat authentication server
redirect_url = 'http://%s.control.binli360.com%s' % (shop, reverse('open:proxy_callback'))
scope = 'snsapi_base'
state = 'base|%s|%s' % (state, url_id)
url = WeixinHelper.proxy(redirect_url, scope, state, 'mobile')
return redirect(url)
else:
                        # get the bound User object
user = authenticate(unionid=unionid)
if user:
# token, goc = Token.objects.get_or_create(user=user)
login(request, user)
response = func(request, *args, **kwargs)
# response.set_cookie(shop+'_key', token.key, path='/')
response.set_cookie(shop, unionid, path='/')
else:
response = func(request, *args, **kwargs)
else:
response = func(request, *args, **kwargs)
return response
return inner
return wrap
sns_userinfo = sns_userinfo_proxy_callback()
def sns_userinfo_proxy_test_callback(callback=None):
"""
    Decorator that obtains user info through WeChat web-page authorization
callback(openid, userinfo):
return user
"""
def wrap(func):
@wraps(func)
def inner(request, *args, **kwargs):
if 'MicroMessenger' in request.META.get('HTTP_USER_AGENT', ''):
# logger.debug('sns_userinfo_proxy_test_callback is wechat')
shop = request.GET.get('shop', None)
if not shop:
response = func(request, *args, **kwargs)
else:
                    # check the payment situation
if request.is_secure():
url = request.build_absolute_uri().replace('https://', 'http://')
return redirect(url)
unionid = request.session.get('unionid', '')
timestamp_now = int(time.time())
ok, unionid = Helper.check_cookie(unionid)
# logger.debug('sns_userinfo_proxy_test_callback sessino unionid is : %s' % unionid)
redis = RedisStat()
if ok:
                        # check whether the WeChat user token still exists; if not, authorization is required
redis_info = redis.get(unionid)
if not redis_info:
ok = False
else:
ws = json.loads(redis_info)
if not ws['unionid']:
ok = False
if not ok:
                        # unionid is invalid, re-authorize
state = request.GET.get('state', None)
if state:
                            # aa|bb|cc  aa: closest upstream promoter  bb: who forwarded it  cc: timestamp
state_list = urllib.unquote(state).split('|')
if len(state_list) != 3:
state = '0|0|%d' % timestamp_now
else:
state = '0|0|%d' % timestamp_now
state += '|%s' % shop
rs_id = redis.get('redirect_url_id')
if rs_id:
url_id = redis.incr('redirect_url_id')
else:
url_id = 1
redis.set('redirect_url_id', url_id)
redis.set_ttl('redirect_url_id_%d' % url_id, request.build_absolute_uri(), 60)
                        # redirect to the proxy WeChat authentication server
redirect_url = 'http://%s.control.binli360.com%s' % (shop, reverse('open:proxy_callback_test'))
scope = 'snsapi_base'
state = 'base|%s|%s' % (state, url_id)
url = WeixinHelper.proxy(redirect_url, scope, state, 'mobile')
# logger.debug('sns_userinfo_proxy_test_callback redirect_url is %s' % url)
return redirect(url)
else:
# logger.debug('sns_userinfo_proxy_test_callback unionid is : %s' % unionid)
pass
                    # get the bound User object
user = authenticate(unionid=unionid)
if user:
# token, goc = Token.objects.get_or_create(user=user)
login(request, user)
response = func(request, *args, **kwargs)
# response.set_cookie(shop+'_key', token.key, path='/')
response.set_cookie(shop, unionid, path='/')
else:
response = func(request, *args, **kwargs)
else:
response = func(request, *args, **kwargs)
return response
return inner
return wrap
sns_userinfo_test = sns_userinfo_proxy_test_callback()
| 39.788018 | 120 | 0.457378 | 290 | 0.032179 | 0 | 0 | 7,470 | 0.828895 | 0 | 0 | 2,081 | 0.230914 |
2c4f3ebf2e488446b1771125571b3ec897c6b76a | 3,608 | py | Python | scripts/build/builders/qpg.py | mrninhvn/matter | c577b233db9d2f3a6f87108a062b1699a40c5169 | [
"Apache-2.0"
] | 4 | 2020-09-11T04:32:44.000Z | 2022-03-11T09:06:07.000Z | scripts/build/builders/qpg.py | mrninhvn/matter | c577b233db9d2f3a6f87108a062b1699a40c5169 | [
"Apache-2.0"
] | 6 | 2022-01-19T03:58:32.000Z | 2022-01-21T06:49:15.000Z | scripts/build/builders/qpg.py | mrninhvn/matter | c577b233db9d2f3a6f87108a062b1699a40c5169 | [
"Apache-2.0"
] | 2 | 2021-12-02T09:08:00.000Z | 2021-12-03T07:31:44.000Z | # Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from enum import Enum, auto
from .gn import GnBuilder
class QpgApp(Enum):
LIGHT = auto()
LOCK = auto()
SHELL = auto()
PERSISTENT_STORAGE = auto()
def ExampleName(self):
if self == QpgApp.LIGHT:
return 'lighting-app'
elif self == QpgApp.LOCK:
return 'lock-app'
elif self == QpgApp.SHELL:
return 'shell'
elif self == QpgApp.PERSISTENT_STORAGE:
return 'persistent-storage'
else:
raise Exception('Unknown app type: %r' % self)
def AppNamePrefix(self):
if self == QpgApp.LIGHT:
return 'chip-qpg6105-lighting-example'
elif self == QpgApp.LOCK:
return 'chip-qpg6105-lock-example'
elif self == QpgApp.SHELL:
return 'chip-qpg6105-shell-example'
elif self == QpgApp.PERSISTENT_STORAGE:
return 'chip-qpg6105-persistent_storage-example'
else:
raise Exception('Unknown app type: %r' % self)
def FlashBundleName(self):
if self == QpgApp.LIGHT:
return 'lighting_app.out.flashbundle.txt'
elif self == QpgApp.LOCK:
return 'lock_app.out.flashbundle.txt'
elif self == QpgApp.SHELL:
return 'shell_app.out.flashbundle.txt'
elif self == QpgApp.PERSISTENT_STORAGE:
return 'persistent_storage_app.out.flashbundle.txt'
else:
raise Exception('Unknown app type: %r' % self)
def BuildRoot(self, root):
return os.path.join(root, 'examples', self.ExampleName(), 'qpg')
class QpgBoard(Enum):
QPG6105 = 1
def GnArgName(self):
if self == QpgBoard.QPG6105:
return 'qpg6105'
else:
raise Exception('Unknown board #: %r' % self)
class QpgBuilder(GnBuilder):
def __init__(self,
root,
runner,
app: QpgApp = QpgApp.LIGHT,
board: QpgBoard = QpgBoard.QPG6105,
enable_rpcs: bool = False):
super(QpgBuilder, self).__init__(
root=app.BuildRoot(root),
runner=runner)
self.app = app
self.board = board
self.enable_rpcs = enable_rpcs
def GnBuildArgs(self):
args = ['qpg_target_ic=\"%s\"' % self.board.GnArgName()]
if self.enable_rpcs:
args.append('import("//with_pw_rpc.gni")')
return args
def build_outputs(self):
items = {}
for extension in ["out", "out.map", "out.hex"]:
name = '%s.%s' % (self.app.AppNamePrefix(), extension)
items[name] = os.path.join(self.output_dir, name)
# Figure out flash bundle files and build accordingly
with open(os.path.join(self.output_dir, self.app.FlashBundleName())) as f:
for line in f.readlines():
name = line.strip()
items['flashbundle/%s' %
name] = os.path.join(self.output_dir, name)
return items
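# Illustration (not part of the original file): the enum methods above drive the
# artefact naming, e.g.
#
#     QpgApp.LIGHT.ExampleName()    -> 'lighting-app'
#     QpgApp.LIGHT.AppNamePrefix()  -> 'chip-qpg6105-lighting-example'
#     QpgBoard.QPG6105.GnArgName()  -> 'qpg6105'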
| 32.214286 | 82 | 0.598947 | 2,949 | 0.81735 | 0 | 0 | 0 | 0 | 0 | 0 | 1,149 | 0.318459 |
2c52e367b7b5edf4ce5bca6e592db3079c27edea | 946 | py | Python | Start.py | mn3711698/singlecoin | 63f0154ba17c7a21295b2ff6ef94929cf708a47c | [
"MIT"
] | 33 | 2021-05-14T03:21:53.000Z | 2021-11-07T20:27:53.000Z | Start.py | mn3711698/singlecoin | 63f0154ba17c7a21295b2ff6ef94929cf708a47c | [
"MIT"
] | 2 | 2021-06-04T15:31:01.000Z | 2021-09-25T12:24:02.000Z | Start.py | mn3711698/singlecoin | 63f0154ba17c7a21295b2ff6ef94929cf708a47c | [
"MIT"
] | 14 | 2021-05-14T03:34:30.000Z | 2021-11-10T12:35:39.000Z | # -*- coding: utf-8 -*-
##############################################################################
# Author:QQ173782910
##############################################################################
import logging
from apscheduler.schedulers.background import BlockingScheduler
from RunUse import TradeRun
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=format, filename='log_print.txt')
logger = logging.getLogger('print')
logging.getLogger("apscheduler").setLevel(logging.WARNING) # 设置apscheduler.
if __name__ == '__main__':
RunTrade = TradeRun()
    scheduler = BlockingScheduler() # the scheduled jobs.
    scheduler.add_job(RunTrade.get_kline_data, trigger='cron', second='*/2') # main candlestick (k-line) computation
    scheduler.add_job(RunTrade.get_open_orders, trigger='cron', second='*/2') # open (unfilled) orders
    scheduler.add_job(RunTrade.get_position, trigger='cron', second='*/1') # positions
scheduler.start() | 41.130435 | 85 | 0.603594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 408 | 0.415479 |
2c543d7182008c5df004271e7ae10351c224c8a5 | 2,522 | py | Python | statinf/ml/activations.py | florianfelice/pleas | c25374f36b781ac726bbbc6da21a559499844594 | [
"MIT"
] | 6 | 2019-11-24T01:29:57.000Z | 2021-01-28T16:24:54.000Z | statinf/ml/activations.py | florianfelice/pleas | c25374f36b781ac726bbbc6da21a559499844594 | [
"MIT"
] | null | null | null | statinf/ml/activations.py | florianfelice/pleas | c25374f36b781ac726bbbc6da21a559499844594 | [
"MIT"
] | 6 | 2019-11-22T10:40:37.000Z | 2020-10-31T13:59:19.000Z | import numpy as np
import jax.numpy as jnp
from jax import lax
from jax.scipy.special import expit
# Default activation functions
def sigmoid(x):
"""Sigmoid activation function.
:param x: Input value
:type x: :obj:`float` or :obj:`numpy.array`
:return: Sigmoid activated value: :math:`sigmoid(x) = \\dfrac{1}{1 + e^{-x}}`
:rtype: :obj:`float`
"""
return expit(x)
def relu(x):
"""Rectified Linear Unit activation function.
:param x: Input value
:type x: :obj:`float` or :obj:`numpy.array`
:return: Activated value: :math:`\\mathrm{relu}(x) = \\max(0, x)`
:rtype: :obj:`float`
"""
return jnp.maximum(0, x)
def elu(x, alpha=1.):
"""Exponential Linear Unit activation function.
:param x: Input value
:type x: :obj:`float` or :obj:`numpy.array`
:formula: .. math:: \\mathrm{elu}(x) = \\begin{cases} x, & x > 0\\\\ \\alpha \\left(e^{x} - 1\\right), & x \\le 0 \\end{cases}
:return: Activated value.
:rtype: :obj:`float`
"""
    # zero out the positive branch before expm1 so the unused branch cannot overflow
    safe_x = jnp.where(x > 0, 0., x)
return jnp.where(x > 0, x, alpha * jnp.expm1(safe_x))
def tanh(x):
"""Hyperbolic tangent activation function.
:param x: Input value
:type x: :obj:`float` or :obj:`numpy.array`
:return: Activated value: :math:`\\tanh(x)`
:rtype: :obj:`float`
"""
    return jnp.tanh(x)
def softplus(x):
"""Softplus activation function.
:param x: Input value
:type x: :obj:`float` or :obj:`numpy.array`
:return: Activated value: :math:`\\mathrm{softplus}(x) = \\log(1 + e^{-x})`
:rtype: :obj:`float`
"""
return jnp.log(1 + jnp.exp(-x))
def softmax(x, axis=-1):
"""Softmax activation function.
:param x: Input value
:type x: :obj:`float` or :obj:`numpy.array`
:return: Activated value: :math:`\\mathrm{softmax}(x) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}`
:rtype: :obj:`float`
"""
    # subtract the (gradient-detached) maximum for numerical stability
    un_normalized = jnp.exp(x - lax.stop_gradient(x.max(axis, keepdims=True)))
return un_normalized / un_normalized.sum(axis, keepdims=True)
def logit(x, weights, bias=0):
"""Logistic function
:param x: Input value
:type x: numpy.array
:param weights: Vector of weights :math:`\\beta`
:type weights: numpy.array
:param bias: Vector of bias :math:`\\epsilon`, defaults to 0.
:type bias: numpy.array
:return: Logistic transformation: :math:`\\mathrm{logit}(x, \\beta) = \\dfrac{1}{1 + e^{-x \\beta}}`
:rtype: float
"""
return 1 / (1 + np.exp(-x.dot(weights) + bias))
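# Quick sanity check (a minimal sketch, not part of the original module) applying a
# few of the activations defined above to a small array:
if __name__ == "__main__":
    x = jnp.asarray([-2.0, 0.0, 2.0])
    print(sigmoid(x))   # elementwise 1 / (1 + exp(-x))
    print(relu(x))      # [0. 0. 2.]
    print(softmax(x))   # normalised so it sums to 1 along the last axis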
| 25.474747 | 130 | 0.600714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,836 | 0.727994 |
2c55a44f1708355490f6623e534cfe988d374906 | 45,032 | py | Python | tools/mytools/ARIA/src/py/aria/Network.py | fmareuil/Galaxy_test_pasteur | 6f84fb0fc52e3e7dd358623b5da5354c66e16a5f | [
"CC-BY-3.0"
] | null | null | null | tools/mytools/ARIA/src/py/aria/Network.py | fmareuil/Galaxy_test_pasteur | 6f84fb0fc52e3e7dd358623b5da5354c66e16a5f | [
"CC-BY-3.0"
] | null | null | null | tools/mytools/ARIA/src/py/aria/Network.py | fmareuil/Galaxy_test_pasteur | 6f84fb0fc52e3e7dd358623b5da5354c66e16a5f | [
"CC-BY-3.0"
] | null | null | null | """
Authors: Bardiaux Benjamin
Institut Pasteur, Paris
IBPC, Paris
Copyright (C) 2005 Michael Habeck,
Wolfgang Rieping and Benjamin Bardiaux
No warranty implied or expressed.
All rights reserved.
$Author: bardiaux $
$Revision: 1.1.1.1 $
$Date: 2010/03/23 15:27:24 $
"""
from aria.ariabase import *
from aria.Settings import Settings
from aria.xmlutils import XMLElement, XMLBasePickler
import aria.TypeChecking as TCheck
from aria.Chain import TYPE_NONPOLYMER
import numpy
from time import clock
from aria.AriaPeak import TextPickler
from aria.AriaPeak import ASSIGNMENT_TYPE_DICT, NA, \
HEADER_PROJECT, HEADER_ASSIGNMENT_TYPE, \
HEADER_SEQUENCE_SEPARATION, HEADER_RESTRAINT_DEFINITION, \
HEADER_RESTRAINT_ACTIVE
HEADER_SEQUENCE_SEPARATION = \
"""
# sep: sequence separation s: I: s == 0 (intra-residual)
# Q: s == 1 (sequential)
# S: 2 <= s <= 3 (short)
# M: 4 <= s <= 5 (medium)
# L: s > 5 (long)
# i: inter-monomer
"""[1:-1]
HEADER_DICT = {'project': HEADER_PROJECT,
'assignment_type': HEADER_ASSIGNMENT_TYPE,
'sequence_separation': HEADER_SEQUENCE_SEPARATION,
'restraint_definition': HEADER_RESTRAINT_DEFINITION,
'restraint_active': HEADER_RESTRAINT_ACTIVE}
HEADER_ABBREVIATIONS = \
("""
#
# Abbreviations:
#
%(restraint_definition)s
%(restraint_active)s
#
#
%(assignment_type)s
#
""" % HEADER_DICT)[1:-1]
HEADER_ALL = \
"""
#
# List of distance restraints.
#
# Created by Aria 2.3, %(creation_date)s
#
%(project)s
#
# Restraints used during calculation: %(n_active)d
# Violated: %(n_violated)d
#
%(abbreviations)s
%(sequence_separation)s
#
# n_c: The number of contributions. (see noe_restraints.assignments for
# explicit list of contributions).
#
# net_res: Network-anchoring score per residue.
#
# net_ato: Network-anchoring score per atom.
#
"""[1:]
class NetworkScoreTextPickler(TextPickler):
def encode_common(self, ap):
distance_format = '%.2f'
number = '%d' % ap.getId()
rp = ap.getReferencePeak()
x = rp.getNumber()
try:
ref_peak_number = '%d' % x
except:
ref_peak_number = NA
x = rp.getSpectrum().getName()
try:
ref_peak_spectrum = str(x)
except:
ref_peak_spectrum = NA
x = ap.isActive()
if x:
active = YES
else:
active = NO
at = rp.getAssignmentType()
assignment_type = ASSIGNMENT_TYPE_DICT[at]
# BARDIAUX
net = ap._network
net_res = '%.2f' % ap._network['residue']
net_ato = '%.2f' % ap._network['atom']
values = ref_peak_spectrum, ref_peak_number, number, \
active, net_res, net_ato, assignment_type
return list(values)
def encode(self, ap):
values = self.encode_common(ap)
## contributions
contributions = ap.getContributions()
## take only active contributions
contributions = ap.getActiveContributions()
if len(contributions) == 1:
## get sequence separation
## in case of multuple spin-pairs,
## we just take the first one, since all are
## involve the same two residues
atom1, atom2 = contributions[0].getSpinPairs()[0].getAtoms()
if atom1.getSegid() <> atom2.getSegid():
# we have an inter
values.append('1') # n_c
values.append('i')
return values
seq_pos1 = atom1.getResidue().getNumber()
seq_pos2 = atom2.getResidue().getNumber()
seq_sep = abs(seq_pos1 - seq_pos2)
## intra-residue
if seq_sep == 0:
descr = 'I'
## sequential
elif seq_sep == 1:
descr = 'Q'
## TODO: are these the correct values?
## short range
elif seq_sep <= 3:
descr = 'S'
## medium range
elif seq_sep <= 5:
descr = 'M'
else:
descr = 'L'
values.append('1') # n_c
values.append(descr)
## multiple contributions
else:
values.append(str(len(contributions)))
values.append('-') # sep
return values
def dumps(self, ap):
return '\n'.join(self.encode(ap))
class NetworkAnchoringTextPickler(TextPickler):
HEADER_COMMON = ['ref_spec', 'ref_no', 'id', 'active', 'net_res', 'net_ato', 'a_type']
COLUMNS = {'all' : HEADER_COMMON + ['n_c', 'sep'],}
HEADER = {'all' : HEADER_ALL,}
def __init__(self, settings):
#check_type(settings, 'AriaPeakListTextPicklerSettings')
TextPickler.__init__(self, settings = settings)
def get_column_header(self, _type):
"""
_type is 'ambig' or 'unambig'
"""
if not _type in ('ambig', 'unambig', 'all'):
s = 'Header for peak-type "%s" not known.' % _type
self.error(TypeError, s)
return list(self.COLUMNS[_type])
def encode(self, peak_list, header):
pickler = NetworkScoreTextPickler()
all = map(pickler.encode, peak_list)
## add header
if not len(all):
return header
if len(header) <> len(all[0]):
s = 'Number of columns must match header-length.'
self.error(Exception, s)
header[0] = '# ' + header[0]
## show additional information
active = [p for p in peak_list if p.isActive()]
n_violated = len([p for p in active if p.analysis.isViolated()])
d = self._compile_header_dict()
d['n_violated'] = n_violated
d['n_active'] = len(active)
d['abbreviations'] = HEADER_ABBREVIATIONS
text = self.format_output(all, header = header)
## add \n
text = [line + '\n' for line in text]
## make string
text = ''.join(text)
return text, d
def _write(self, s, filename, gzip = 0):
import os
if s is None:
import aria.tools as tools
tools.touch(filename)
return
if gzip:
from aria.tools import gzip_open as open_func
else:
open_func = open
filename = os.path.expanduser(filename)
f = open_func(filename, 'w')
f.write(s)
f.close()
def _compile_header_dict(self):
from aria.Singleton import ProjectSingleton
import time
from copy import copy
project = ProjectSingleton()
project_settings = project.getSettings()
infra = project.getInfrastructure()
run_path = infra.get_run_path()
d = {'date': project_settings['date'],
'project': project_settings['name'],
'run': project_settings['run'],
'author': project_settings['author'],
'working_directory': run_path}
x = copy(HEADER_DICT)
x['project'] %= d
x['creation_date'] =time.ctime()
return x
def dump_network(self, peak_list, filename, gzip = 0):
if peak_list:
header = self.get_column_header('all')
text, d = self.encode(peak_list, header)
d.update(self._compile_header_dict())
header = (self.HEADER['all'] % d)[1:]
s = header + text
# s = header.replace('\n\n','\n') + text
else:
s = None
return self._write(s, filename, gzip)
class NetworkPsPickler:
def __init__(self, network):
self.peaks = network.peaks
self.p_id = network._protons_id
self.net_res = network.residue_score
self.mol = network.molecule
self.it_n = network.iteration.getNumber()
def get_matrix(self):
# since we just support symmetric dimer
n_chains = len(self.mol.get_chains())
#max_res = len([r for c in self.mol.get_chains() for r in c.getResidues()])
max_res = [c.getResidues()[-1].getNumber() for c in self.mol.get_chains() \
if c.getType() != TYPE_NONPOLYMER]
from aria.Singleton import ProjectSingleton
from aria.DataContainer import DATA_SYMMETRY
project = ProjectSingleton()
sym_settings = project.getData(DATA_SYMMETRY)[0]
if n_chains < 2 or (n_chains > 1 and sym_settings['symmetry_type'] not in ["C2","C3","D2","C5"]):
# monomeric prot or hetero dimer
matrix = numpy.zeros((max_res[0]+1, max_res[0]+1), numpy.float)
for k, r_net in self.net_res.items():
r1, r2 = map(lambda a: a.getNumber(), k)
matrix[r1,r2] = r_net
matrix[r2,r1] = r_net
return matrix, None
else:
# homo-dimer
matrix_a = numpy.zeros((max_res[0]+1, max_res[0]+1), numpy.float)
matrix_r = numpy.zeros((max_res[0]+1, max_res[1]+1), numpy.float)
for k, r_net in self.net_res.items():
r1, r2 = map(lambda a: a.getNumber(), k)
s1, s2 = map(lambda a: a.getChain().getSegid(), k)
if s1 <> s2:
matrix_r[r1,r2] = r_net
matrix_r[r2,r1] = r_net
else:
matrix_a[r1,r2] = r_net
matrix_a[r2,r1] = r_net
return matrix_a, matrix_r
def plot_matrix(self):
# mask zero-values
from matplotlib import rcParams
from numpy import ma
rcParams['numerix'] = 'numpy'
pylab = self.pylab
msg = ""
matrix_a, matrix_r = self.get_matrix()
first_res = [c.getResidues()[0].getNumber() for c in self.mol.get_chains() if c.getType() != TYPE_NONPOLYMER]
max_res = [c.getResidues()[-1].getNumber() for c in self.mol.get_chains() if c.getType() != TYPE_NONPOLYMER]
if matrix_r is not None:
ax1 = pylab.subplot(2,1,1)
#matrix = matrix_r[1:,1:]
matrix = matrix_r[first_res[0]:,first_res[1]:]
X = ma.array(matrix, mask = numpy.equal(matrix, 0.))
xyticks = (first_res[0], max_res[0], first_res[1], max_res[1])
kw = {'origin':'lower',
'interpolation':'nearest',
'aspect' : 'equal',
'extent' : xyticks}
pylab.imshow(X, cmap=pylab.cm.Reds, **kw)
pylab.grid()
pylab.colorbar(orientation = 'vertical')
pylab.ylabel("Residue Number (Inter-molecular)")
#pylab.setp( ax1.get_xticklabels(), visible=False)
pylab.subplot(212)#, sharex=ax1)
#pos = pylab.axes([0.85, 0.1, 0.04, 0.8])
#pylab.colorbar(cax = pos)#, orientation = 'horizontal')
msg = " (Intra-molecular)"
matrix = matrix_a[first_res[0]:,first_res[0]:]
#matrix = matrix_a[1:,1:]
X = ma.array(matrix, mask = numpy.equal(matrix, 0.))
xyticks = (first_res[0], max_res[0], first_res[0], max_res[0])
kw = {'origin':'lower',
'interpolation':'nearest',
'aspect' : 'equal',
'extent' : xyticks}
pylab.imshow(X, cmap=pylab.cm.Reds, **kw)
if len(msg):
orientation = 'vertical'
else:
orientation = 'horizontal'
pylab.colorbar(orientation = orientation)
pylab.grid()
pylab.xlabel("Residue Number")
pylab.ylabel("Residue Number" + msg)
def plot_profile(self, type, n):
pylab = self.pylab
if type not in ['residue', 'atom']:
return
colors = {'residue' : 'b',
'atom' : 'r'}
scores = [p._network[type] for p in self.peaks]
nbins = int(max(scores))
#nbins = 1 + int(numpy.log(len(scores))/numpy.log(2))
nbins = int(1.0 + 3.3 * numpy.log(len(scores)))
pylab.subplot(2, 1, n)
pylab.hist(scores, bins = nbins +1, facecolor = colors[type])
pylab.xlabel("Network Anchoring score per %s" % type)
pylab.ylabel("Number of Peaks")
def plot(self, path):
try:
import matplotlib
matplotlib.use('PS', warn=False)
except:
return
import matplotlib.pylab as pylab
self.pylab = pylab
pylab.figure(num=1, figsize=(8,11))
pylab.clf()
pylab.figtext(0.3,0.95, 'Network Anchoring for iteration %s' % str(self.it_n))
pylab.figtext(0.3,0.90, 'Network Anchoring scores distribution')
self.plot_profile('residue', 1)
self.plot_profile('atom', 2)
pylab.subplots_adjust(top = 0.85)
pylab.figure(num=2, figsize=(8,11))
pylab.clf()
pylab.figtext(0.3,0.95, 'Residue-wise Network Anchoring scores for iteration %d' % self.it_n)
self.plot_matrix()
pylab.figure(1)
pylab.savefig(path +'_dist.ps', papertype='a4', dpi = 72)
pylab.figure(2)
pylab.savefig(path + '_2D.ps', papertype='a4', dpi = 72)
class NetworkSettings(Settings):
def create(self):
from aria.Settings import NonNegativeFloat
from aria.Settings import YesNoChoice
d = {}
# public settings
descr = "Network anchoring removes restraints which are not surrounded by a network of active restraints."
d['enabled'] = YesNoChoice(description = descr)
descr = "High network-anchoring score per residue for a peak to be active."
d['high_residue_threshold'] = NonNegativeFloat(description = descr)
descr = """Minimal network-anchoring score per residue for a peak to be active. (In combination with \"min_atom_threshold\")"""
d['min_residue_threshold'] = NonNegativeFloat(description = descr)
descr = """Minimal network-anchoring score per atoms for a peak to be active. (In combination with \"min_residue_threshold\")"""
d['min_atom_threshold'] = NonNegativeFloat(description = descr)
# private
descr = "Maximal distance for covalent inter-proton distance."
d['distance_max'] = NonNegativeFloat(description = descr)
descr = "Maximal network anchoring score for covalent distance."
d['v_max'] = NonNegativeFloat(description = descr)
descr = "Minimal network anchoring score for intraresidual/sequential distance."
d['v_min'] = NonNegativeFloat(description = descr)
return d
def create_default_values(self):
d = {}
d['enabled'] = NO
d['high_residue_threshold'] = 4.
d['min_residue_threshold'] = 1.0
d['min_atom_threshold'] = 0.25
d['distance_max'] = 5.5
d['v_max'] = 1.0
d['v_min'] = 0.1
return d
class CovalentConstraint:
def __init__(self, id, atom1, atom2, distance):
self.atom1 = atom1
self.atom2 = atom2
self.distance = distance
self.id = id
def getId(self):
return self.id
def getScore(self):
return 0.
def getAtoms(self):
return (self.atom1, self.atom2)
def getDistance(self):
return self.distance
def __str__(self):
s = "CovalentConstraint(id=%d, atoms=%s, d=%5.3f)" % (self.id, self.getAtoms(), self.distance)
return s
class NetworkAnchoring(AriaBaseClass):
def __init__(self, settings):
TCheck.check_type(settings, 'NetworkSettings')
AriaBaseClass.__init__(self)
self.setSettings(settings)
self.anchoring = None
self.peaks = None
self.getSettings()['v_min'] = 0.1
self.getSettings()['v_max'] = 1.0
self.getSettings()['distance_max'] = 5.5
def setup(self):
"""
Setup some lists and matrices.
"""
from sets import Set
if self.anchoring is not None:
            # if we already have a network, just recreate self._c_id with the copied contributions
self.message('Retrieving Network ...')
self._c_id = {}
self._c_id[-1] = [] # covalent
for p in self.peaks:
for c in p.getContributions():
for sp in c.getSpinPairs():
sid = sp.getId() + 1
self._c_id.setdefault(sid, Set())
self._c_id[sid].add(c)
self.addDistanceRestraints()
return 1
# if we run network_anchoring for 1st time, create all list and spinpair matrices
self.message('Initializing ...')
if not self.peaks:
return 0
# list with all protons
if self._is_noesy_only:
self._protons_id = [a for c in self.molecule.get_chains() for r in c.getResidues() \
for a in r.getAtoms() if a.isProton()]
else:
self._protons_id = [a for c in self.molecule.get_chains() for r in c.getResidues() \
for a in r.getAtoms() if a.isProton() or a.getType() in ['N','C']]
self._protons_id.sort(lambda a,b: cmp(a.getId(), b.getId()))
# dict with protons id as key, and indices in self._protons_id as values
self._protons_num = {}
for a in range(0, len(self._protons_id)):
self._protons_num[self._protons_id[a].getId()] = a
# list with protons residues number
# add chain levels to residues numbering
self._residues_num = {}
for c in self.molecule.get_chains():
cid = c.getSegid()
self._residues_num[cid] = [a.getResidue().getNumber() for a in self._protons_id]# if a.getSegid() == cid]
# dict with residues number as key and list of protons ids as values
self._residues_id = {}
for c in self.molecule.get_chains():
cid = c.getSegid()
self._residues_id[cid] = {}
for a in range(0, len(self._protons_id)):
r, cid = self._protons_id[a].getResidue().getNumber(), self._protons_id[a].getSegid()
self._residues_id[cid].setdefault(r, [])
self._residues_id[cid][r].append(a)
# dict with SpinPair.getId() + 1 as key and Set of contributions as values
self._c_id = {}
self._c_id[-1] = []
# dict with SpinPair.getId() + 1 as key and spinpair as values
self.spinpairs = {}
for p in self.peaks:
for c in p.getContributions():
for sp in c.getSpinPairs():
sid = sp.getId() + 1
self._c_id.setdefault(sid, Set())
self._c_id[sid].add(c)
if not self.spinpairs.has_key(sid):
self.spinpairs[sid] = sp
# add additional distance restraints
self.addDistanceRestraints()
        # matrix to hold whether 2 protons are connected by a spinpair (1), covalently (2), or not connected (0)
self._sp = numpy.zeros((len(self._protons_id), len(self._protons_id)))
# matrix to store the id of the spinpair connecting 2 atoms
self._sp_id = numpy.zeros((len(self._protons_id), len(self._protons_id)))
# matrix to store covalent score of a spinpair
self._sp_cov_scores = numpy.zeros((len(self._protons_id), len(self._protons_id)))
# matrix to store sum of contributions volumes of each spinpair
self._sp_sum_scores = numpy.zeros(len(self.spinpairs.keys()) , numpy.float)
for spid, sp in self.spinpairs.items():
a, b = sp.getAtoms()
a, b = self._protons_num[a.getId()], self._protons_num[b.getId()]
self._sp[a][b] = 1
self._sp[b][a] = 1
self._sp_id[a][b] = spid
self._sp_id[b][a] = spid
self.addCovalentConstraints()
self.addStructureRestraints()
for spid, sp in self.spinpairs.items():
a, b = sp.getAtoms()
a, b = self._protons_num[a.getId()], self._protons_num[b.getId()]
cov_score = self._get_covalent_score(a, b)
self._sp_cov_scores[a][b] = cov_score
self._sp_cov_scores[b][a] = cov_score
return 1
def setDefaultNetworkScores(self, s):
for p in self.peaks:
contribs = p.getContributions()
n = len(contribs)
[c.setNetworkScore(s/n) for c in contribs]
# use additional distance restraints
def addDistanceRestraints(self):
"""
        Distance constraints
"""
# get list of DistanceRestraints valid for NA
restraints = []
restraint_list = self.iteration.getDistanceRestraints()
for l, r in restraint_list.items():
if l.getListSource()['add_to_network'] == YES:
restraints += r
if not restraints:
return
from sets import Set
for r in restraints:
for c in r.getContributions():
for sp in c.getSpinPairs():
sid = sp.getId() + 1
self._c_id.setdefault(sid, Set())
self._c_id[sid].add(c)
if not self.spinpairs.has_key(sid):
self.spinpairs[sid] = sp
def addStructureRestraints(self):
check = {}
        vmax = self.getSettings()['v_max']
        n = 0  # counter for the structure-based constraints added below
for c in self.molecule.get_chains():
residues = c.getResidues()
atoms = [a for r in residues for a in r.getAtoms() if a.isProton() and a.getName() in ['HA', 'H']]
for i in range(0, len(atoms)-1):
for j in range(i+1, len(atoms)):
a, b = atoms[i], atoms[j]
id = (min(a.getId(),b.getId()), max(a.getId(),b.getId()))
if not check.has_key(id):
check[id] = 1
res1 =int(a.getResidue().getNumber())
str1 = a.getResidue().getStructure()
t1 = a.getName()
res2 = int(b.getResidue().getNumber())
str2 = b.getResidue().getStructure()
t2 = b.getName()
if str1 == "" or str2 == "":
continue
sep = abs(res1 - res2)
if sep > 4:
continue
both_H = str1 == str2 and str1[0] == 'H'
both_B = str1 == str2 and str1[0] == 'B'
if not both_B or not both_H:
continue
HA_HN = (t1 == 'HA' and t2 == 'H') or \
(t1 == 'H' and t2 == 'HA')
HN_HN = (t1 == t2) and (t1 == 'H')
# check if valid constraints in SS
d = 0
# Sheets, dHA,HN(i,i+1)
if both_B and HA_HN and sep == 1:
d = 1
if both_H:
if HA_HN and sep <= 4:
d = 1
if HN_HN and sep <= 2:
d = 1
if d:
##cc = CovalentConstraint(n, a, b, d)
a, b = self._protons_num[a.getId()], self._protons_num[b.getId()]
if self._sp_id[a][b] == 0:
self._sp_id[a][b] = -1
if self._sp_id[b][a] == 0:
self._sp_id[b][a] = -1
self._sp[a][b] = 2
self._sp[b][a] = 2
self._sp_cov_scores[a][b] = vmax
self._sp_cov_scores[b][a] = vmax
n+= 1
def addCovalentConstraints(self):
"""
        Covalent constraints
"""
dmax = self.getSettings()['distance_max']
vmax = self.getSettings()['v_max']
from aria.CovalentDistances import CovalentDistances
cd = CovalentDistances()
check = {}
n = 0
for c in self.molecule.get_chains():
residues = c.getResidues()
for r in range(len(residues)-1):
atoms = residues[r].getAtoms() + residues[r+1].getAtoms()
# NOESY
atoms = [a for a in atoms if a.isProton()]
for i in range(0, len(atoms)-1):
for j in range(i+1, len(atoms)):
aa, bb = atoms[i], atoms[j]
id = (min(aa.getId(),bb.getId()), max(aa.getId(),bb.getId()))
if not check.has_key(id):
check[id] = 1
d = cd.areConnected(aa, bb)
if d:
cc = CovalentConstraint(n, aa, bb, d)
a, b = self._protons_num[aa.getId()], self._protons_num[bb.getId()]
if self._sp_id[a][b] == 0:
self._sp_id[a][b] = -1
if self._sp_id[b][a] == 0:
self._sp_id[b][a] = -1
self._sp[a][b] = 2
self._sp[b][a] = 2
self._sp_cov_scores[a][b] = vmax
self._sp_cov_scores[b][a] = vmax
# valid also for hetero atom
if self._is_noesy_only:
continue
ah, bh = aa.getHeteroAtom(), bb.getHeteroAtom()
if ah and bh and (ah.getType() in ['N','C'] and bh.getType() in ['N','C']) :
ai, bi = self._protons_num[ah.getId()], self._protons_num[bh.getId()]
if self._sp_id[ai][bi] == 0:
self._sp_id[ai][bi] = -1
if self._sp_id[bi][ai] == 0:
self._sp_id[bi][ai] = -1
self._sp[ai][bi] = 2
self._sp[bi][ai] = 2
self._sp_cov_scores[ai][bi] = vmax
self._sp_cov_scores[bi][ai] = vmax
n+= 1
## # cov_score
## for spid, sp in self.spinpairs.items():
## a, b = sp.getAtoms()
## d = cd.areConnected(a, b)
## if d:
## map(lambda c: (c.setCovalentScore(1.)), self._c_id[spid])
def create_network(self):
"""
create the network itself
        dictionary : key = spid
value = Set of gammas
"""
if self.anchoring is not None:
return
self.message('Creating network ...')
from sets import Set
self.anchoring = {}
#t1 = clock()
for spid, sp in self.spinpairs.items():
a, b = sp.getAtoms()
sa, sb = a.getSegid(), b.getSegid()
a, b = self._protons_num[a.getId()], self._protons_num[b.getId()]
# dim0
r = self._residues_num[sa][a]
res_bound = []
for i in range(r-1, r+2):
if self._residues_id[sa].has_key(i):
res_bound += self._residues_id[sa][i]
x = numpy.take(self._sp, res_bound, axis = 0)
both_0 = x[:,a] * x[:,b]
x0 = [res_bound[i] for i in numpy.flatnonzero(both_0)]
# dim1
r = self._residues_num[sb][b]
res_bound = []
for i in range(r-1, r+2):
if self._residues_id[sb].has_key(i):
res_bound += self._residues_id[sb][i]
x = numpy.take(self._sp, res_bound, axis = 1)
both_1 = x[a,:] * x[b,:]
x1 = [res_bound[i] for i in numpy.flatnonzero(both_1)]
x12 = Set(x0).union(x1)
self.anchoring[spid] = x12
self.message("Done.")
def _get_covalent_score(self, id_a, id_b):
"""
        score according to covalent structure (a and b are two atoms)
            { Vmax  if covalent constraint
        S = { Vmin  if intraresidual/sequential connectivity
            { 0     if long-range connectivity
"""
# argument : contribution ? => then get max distance from contribution's spinpairs (use ISPA Model)
# a spin pairs ?
# 2 atoms
vmin = self.getSettings()['v_min']
vmax = self.getSettings()['v_max']
if self._sp[id_a][id_b] == 2:
covalent_score = vmax
else:
if self._isSequential(id_a, id_b):
covalent_score = vmin
else:
covalent_score = 0.
return covalent_score
def _heaviside(self, x):
if x < 0:
return 0.
elif x == 0:
return .5
else:
return 1.
def _isSequential(self, id_a, id_b):
sa, sb = self._protons_id[id_a].getSegid(), self._protons_id[id_b].getSegid()
if sa <> sb:
return 0
else:
return abs(self._residues_num[sa][id_a] - self._residues_num[sb][id_b]) <= 1
def _sumContribScore(self):
self._sp_sum_scores = {}
for spid, contribs in self._c_id.items():
s = numpy.sum([c.getScore()/len(c.getSpinPairs()) for c in contribs])
self._sp_sum_scores[spid] = s
def updateContributionsNetworkScores(self):
"""
        calculate the network score for each contribution and update network_score
"""
contribs_scores = {}
#t = clock()
self._sumContribScore()
#t = clock()
v_min = self.getSettings()['v_min']
for k, gammas in self.anchoring.items():
sp = self.spinpairs[k]
score = 0.
a, b = sp.getAtoms()
id_a = self._protons_num[a.getId()]
id_b = self._protons_num[b.getId()]
gammas = list(gammas)
# a-g
#g_scores_a = numpy.take(self._sp_sum_scores, numpy.take( self._sp_id[id_a,:], gammas))
g_scores_a = [self._sp_sum_scores[x] for x in numpy.take( self._sp_id[id_a,:], gammas)]
cov_scores_a = numpy.take(self._sp_cov_scores[id_a,:], gammas)
nus_a = numpy.where(numpy.greater(g_scores_a, cov_scores_a), g_scores_a, cov_scores_a)
nus_a *= numpy.greater(nus_a - v_min, 0)
# b-g
#g_scores_b = numpy.take(self._sp_sum_scores, numpy.take( self._sp_id[id_b,:], gammas))
g_scores_b = [self._sp_sum_scores[x] for x in numpy.take( self._sp_id[id_b,:], gammas)]
cov_scores_b = numpy.take(self._sp_cov_scores[id_b,:], gammas)
nus_b = numpy.where(numpy.greater(g_scores_b, cov_scores_b), g_scores_b, cov_scores_b)
nus_b *= numpy.greater(nus_b - v_min, 0)
score = numpy.sum(numpy.sqrt(nus_a * nus_b))
contribs = self._c_id[k]
for c in contribs:
contribs_scores.setdefault(c, [])
contribs_scores[c].append(score)
for c, ss in contribs_scores.items():
c.setNetworkScore(numpy.sum(ss)/len(ss))#/len(ss)
for p in self.peaks:
contribs = p.getContributions()
scores = numpy.array([c.getNetworkScore() for c in contribs])
#covalent = numpy.array([c.getCovalentScore() for c in contribs])
#covalent = numpy.greater(covalent, 1.)
#zero_scores_covalent = numpy.equal(scores, 0) * covalent
#scores = numpy.where(zero_scores_covalent, 1., scores)
sum_scores = numpy.sum(scores)
if sum_scores > 0.:
scores /= sum_scores
map(lambda c,s : (c.setNetworkScore(s)), contribs, scores)
#self.message("Done %5.3f" % (clock() -t))
def updateContributionsScores(self):
"""
        calculate the score of each contribution and update it
"""
for p in self.peaks:
contribs = p.getContributions()
#mask = [c.isInter() for c in contribs]
scores = numpy.array([c.getNetworkScore() * c.getWeight() for c in contribs])
#numpy.putmask(scores, mask, scores * 1.5)
sum_scores = numpy.sum(scores)
if sum_scores > 0.:
scores /= sum_scores
map(lambda c,s : (c.setScore(s)), contribs, scores)
#self.message("Done %5.3f" % (clock() -t))
def dump_text(self):
settings = None
peak_list = self.peaks
itn = self.iteration.getNumber()
infra = self.project.getInfrastructure()
import os
from aria.Protocol import REPORT_NOE_RESTRAINTS
path = infra.get_iteration_path(itn)
filename = os.path.join(path, REPORT_NOE_RESTRAINTS + '.network')
pickler = NetworkAnchoringTextPickler(settings)
pickler.dump_network(peak_list, filename, gzip = 0)
self.message('Network-Anchoring scores (text) written (%s).' % filename)
def dump_ps(self):
itn = self.iteration.getNumber()
infra = self.project.getInfrastructure()
import os
from aria.Protocol import REPORT_NOE_RESTRAINTS
path = infra.get_iteration_path(itn)
path = os.path.join(path, 'graphics/network')
np = NetworkPsPickler(self)
try:
np.plot(path)
except Exception, msg:
import aria.tools as tools
self.warning(tools.last_traceback())
msg = 'Error during creation of %s.network.' % REPORT_NOE_RESTRAINTS
self.warning(msg)
def _dump_scores(self, old_weights):
## save scores
s = ""
n = 0
for p in self.peaks:
pnetscores = self.getPeakNetScores(p)
for c in p.getContributions():
s += "NETWORK : I %4d %5d OW %5.3f W %5.3f N %5.3f S %5.3f Nres %5.3f Nat %5.3f\n" \
%(p.getId(), c.getId(), old_weights[n], c.getWeight(), c.getNetworkScore(), \
c.getScore(), pnetscores['residue'], pnetscores['atom'])
n += 1
itn = self.iteration.getNumber()
infra = self.project.getInfrastructure()
import os
path = os.path.join(infra.get_iteration_path(itn), "scores.dat")
f = open(path, 'w')
f.write(s)
f.close()
s = ''
for k, v in self.residue_score.items():
s += "%d %d %.4f\n" % (k[0],k[1], v)
path = os.path.join(infra.get_iteration_path(itn), "res_scores.dat")
f = open(path, 'w')
f.write(s)
f.close()
def getPeakNetScores(self, p):
score = {'residue' : 0.,
'atom' : 0.}
for c in p.getContributions():
res = [0,1]
for a in res:
res[a] = c.getSpinSystems()[a].getAtoms()[0].getResidue()
score['residue'] += self.getResNetScore(res) * c.getScore()
score['atom'] += c.getNetworkScore()/len(c.getSpinPairs()) * c.getScore()
return score
def getResNetScore(self, residues):
residues.sort(lambda a,b: cmp(a.getNumber(), b.getNumber()))
key = tuple(residues)
#r1, r2 = residues[0].getNumber(), residues[1].getNumber()
#key = (min((r1, r2)), max((r1, r2)))
return self.residue_score[key]
def analyze(self):
"""
        Analyse contribution scores and remove invalid ones
"""
self.message('Analyzing ...')
self.result = {}
result = {}
# compute net score per residue pairs
self.residue_score = {}
for spid, sp in self.spinpairs.items():
a, b = sp.getAtoms()
r1, r2 = a.getResidue(), b.getResidue()
#sa, sb = a.getSegid(), b.getSegid()
#a, b = self._protons_num[a.getId()], self._protons_num[b.getId()]
#r1, r2 = self._residues_num[sa][a], self._residues_num[sb][b]
key = [r1, r2]
key.sort(lambda a,b: cmp(a.getNumber(), b.getNumber()))
#key = (min((r1, r2)), max((r1, r2)))
key = tuple(key)
self.residue_score.setdefault(key, 0.)
sc = max([c.getNetworkScore() for c in self._c_id[spid]])
self.residue_score[key] += sc
contribs = [c for p in self.peaks for c in p.getContributions()]
scores = [c.getScore() for c in contribs]
total = len(contribs)
#eliminated = [c for c in contribs if c.getScore() <= 0.]
eliminated = numpy.sum(numpy.less_equal(scores, 0.))
self.result['total'] = total
self.result['eliminated'] = eliminated
self.result['ratio'] = self.result['eliminated']*100./float(total)
## SET SCORE as Weight
old_weights = [c.getWeight() for c in contribs]
## save scores
#self._dump_scores(old_weights)
[c.setWeight(c.getScore()) for c in contribs]
####################################################
        # FILTER PEAKS according to Nres and Natom
# First rule : <Nres>p >= Nhigh
# OR
# second rule : <Nres>p >= Nres_min AND <Natom>p >= Natom_min
#Nhigh = 4.
#Nres_min = 1.
#Nat_min = 0.25
s = self.getSettings()
for p in self.peaks:
res_score = self.getPeakNetScores(p)
p._network = res_score
if p.getReferencePeak().isReliable():
continue
## if not p.isAmbiguous() and p.getActiveContributions() and p.getActiveContributions()[0].isInter():
## continue
if not (res_score['residue'] >= s['high_residue_threshold'] or
(res_score['residue'] >= s['min_residue_threshold'] and res_score['atom'] >= s['min_atom_threshold'])):
p.isActive(0)
def update_scores(self):
self.setDefaultNetworkScores(1.)
#[ c.setScore(c.getNetworkScore() * c.getWeight()) for p in self.peaks for c in p.getContributions()]
self.updateContributionsScores()
n = 0
while n < 3:
self.message("Round %d ..." % n)
t = clock()
self._round = n
self.updateContributionsNetworkScores()
self.updateContributionsScores()
self.debug('Time: %ss' % str(clock() - t))
n += 1
def run(self, iteration):
"""
run network anchoring.
"""
self.iteration = iteration
self.peaks = iteration.getPeakList()
restraints = []
restraint_list = self.iteration.getDistanceRestraints()
for l, r in restraint_list.items():
if l.getListSource()['filter_contributions'] == YES and \
l.getListSource()['run_network_anchoring'] == YES :
restraints += r
self.peaks += restraints
self._is_noesy_only = 1
# check if we have non H-H pairs
for p in self.peaks:
contributions = p.getActiveContributions()
if not contributions:
continue
atom1, atom2 = contributions[0].getSpinPairs()[0].getAtoms()
            if not atom1.isProton() and not atom2.isProton():
self._is_noesy_only = 0
break
from aria.Singleton import ProjectSingleton
self.project = ProjectSingleton()
self.molecule = self.project.getMolecule()
        # 1) initialize
done = self.setup()
if not done:
s = 'Aborting. No valid peaks or restraints.'
self.warning(s)
return
        # 2) create network
t1 = clock()
self.create_network()
self.debug('Time: %ss' % str(clock() - t1))
        # 3) assign network scores to contributions
self.update_scores()
# 4) Analysis
t1 = clock()
self.analyze()
s = 'Done. %(eliminated)d/%(total)d (%(ratio)5.2f %%) assignment possibilities removed.\n'
self.message(s % self.result)
self.debug('Time: %ss' % str(clock() - t1))
# 5) logs
self.dump_text()
self.dump_ps()
#self.halt()
class NetworkXMLPickler(XMLBasePickler):
def _xml_state(self, x):
e = XMLElement()
e.enabled = x['enabled']
e.high_residue_threshold = x['high_residue_threshold']
e.min_residue_threshold = x['min_residue_threshold']
e.min_atom_threshold = x['min_atom_threshold']
return e
def load_from_element(self, e):
s = NetworkSettings()
s['enabled'] = str(e.enabled)
s['high_residue_threshold'] = float(e.high_residue_threshold)
s['min_residue_threshold'] = float(e.min_residue_threshold)
s['min_atom_threshold'] = float(e.min_atom_threshold)
return s
NetworkSettings._xml_state = NetworkXMLPickler()._xml_state
## TEST
if __name__ == '__main__':
molecule_file = '~/devel/aria2.2_release/test/run3/data/sequence/hrdc.xml'
ariapeaks_file='~/devel/aria2.2_release/test/run3/structures/it0/noe_restraints.pickle'
project_file = '~/devel/aria2.2_release/test/werner2.xml'
# read molecule
import aria.AriaXML as AriaXML
pickler = AriaXML.AriaXMLPickler()
molecule = pickler.load(molecule_file)
# read pickled ariapeak list
from aria.tools import Load
aria_peaks = Load(ariapeaks_file)
project = pickler.load(project_file)
project.ccpn_data_sources = ()
project.read_molecule()
ns = project.getProtocol().getSettings()['iteration_settings'][0]['network_anchoring_settings']
N = NetworkAnchoring(ns)
class it:
def __init__(self, peaks, n):
self.peaks = peaks
self.n = n
def getPeakList(self):
return self.peaks
def getNumber(self):
return self.n
N.run(it(aria_peaks, 0))
#N.dump()
| 30.4476 | 144 | 0.504641 | 41,967 | 0.931937 | 0 | 0 | 0 | 0 | 0 | 0 | 9,482 | 0.210561 |
2c567e06fe0b4a514046c47e0475e1aaaccbe7e1 | 482 | py | Python | website/wiki/plugins/images/migrations/0002_auto_20151118_1811.py | Bournvita1998/serc | 5cdbe0ea89451c56bdb05b3bb6d178aad45c3a74 | [
"MIT"
] | null | null | null | website/wiki/plugins/images/migrations/0002_auto_20151118_1811.py | Bournvita1998/serc | 5cdbe0ea89451c56bdb05b3bb6d178aad45c3a74 | [
"MIT"
] | 18 | 2020-06-05T18:17:40.000Z | 2022-03-11T23:25:21.000Z | e/mail-relay/web/wiki/plugins/images/migrations/0002_auto_20151118_1811.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | 2 | 2016-12-13T10:02:39.000Z | 2019-05-16T05:58:16.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wiki_images', '0001_initial'),
]
operations = [
migrations.AlterModelTable(
name='image',
table='wiki_images_image',
),
migrations.AlterModelTable(
name='imagerevision',
table='wiki_images_imagerevision',
),
]
| 20.956522 | 46 | 0.593361 | 373 | 0.773859 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.244813 |
2c56c451f1b5f49b1faacf083b8cc56481d39d30 | 19,134 | bzl | Python | xls/contrib/xlscc/build_rules/xlscc_rules.bzl | RobSpringer/xls | a5521c7ecbd1a071828760cf429d74810f248681 | [
"Apache-2.0"
] | null | null | null | xls/contrib/xlscc/build_rules/xlscc_rules.bzl | RobSpringer/xls | a5521c7ecbd1a071828760cf429d74810f248681 | [
"Apache-2.0"
] | null | null | null | xls/contrib/xlscc/build_rules/xlscc_rules.bzl | RobSpringer/xls | a5521c7ecbd1a071828760cf429d74810f248681 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build rules to compile with xlscc"""
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load(
"//xls/build_rules:xls_common_rules.bzl",
"append_default_to_args",
"args_to_string",
"get_output_filename_value",
"is_args_valid",
)
load(
"//xls/build_rules:xls_config_rules.bzl",
"CONFIG",
"enable_generated_file_wrapper",
)
load("//xls/build_rules:xls_providers.bzl", "ConvIRInfo")
load(
"//xls/build_rules:xls_ir_rules.bzl",
"append_xls_ir_opt_ir_generated_files",
"get_xls_ir_opt_ir_generated_files",
"xls_ir_opt_ir_attrs",
"xls_ir_opt_ir_impl",
)
load(
"//xls/build_rules:xls_codegen_rules.bzl",
"append_xls_ir_verilog_generated_files",
"get_xls_ir_verilog_generated_files",
"xls_ir_verilog_attrs",
"xls_ir_verilog_impl",
)
load("//xls/build_rules:xls_toolchains.bzl", "xls_toolchain_attr")
_CC_FILE_EXTENSION = ".cc"
_H_FILE_EXTENSION = ".h"
_INC_FILE_EXTENSION = ".inc"
_IR_FILE_EXTENSION = ".ir"
_PROTOBIN_FILE_EXTENSION = ".protobin"
_BINARYPB_FILE_EXTENSION = ".binarypb"
_DEFAULT_XLSCC_ARGS = {
"dump_ir_only": "True",
"top": "Run",
}
def _append_xls_cc_ir_generated_files(args, basename):
"""Returns a dictionary of arguments appended with filenames generated by the 'xls_cc_ir' rule.
Args:
args: A dictionary of arguments.
basename: The file basename.
Returns:
Returns a dictionary of arguments appended with filenames generated by the 'xls_cc_ir' rule.
"""
args.setdefault("ir_file", basename + _IR_FILE_EXTENSION)
return args
def _get_xls_cc_ir_generated_files(args):
"""Returns a list of filenames generated by the 'xls_cc_ir' rule found in 'args'.
Args:
args: A dictionary of arguments.
Returns:
Returns a list of files generated by the 'xls_cc_ir' rule found in 'args'.
"""
return [args.get("ir_file")]
def _get_runfiles_for_xls_cc_ir(ctx):
"""Returns the runfiles from a 'xls_cc_ir' ctx.
Args:
ctx: The current rule's context object.
Returns:
The runfiles from a 'xls_cc_ir' ctx.
"""
transitive_runfiles = []
runfiles = ctx.runfiles(files = [ctx.file.src] + [ctx.file.block] +
ctx.files._default_cc_header_files +
ctx.files._default_synthesis_header_files +
ctx.files.src_deps)
transitive_runfiles.append(ctx.attr
._xlscc_tool[DefaultInfo].default_runfiles)
transitive_runfiles.append(ctx.attr
._default_cc_header_files[DefaultInfo].default_runfiles)
transitive_runfiles.append(ctx.attr
._default_synthesis_header_files[DefaultInfo].default_runfiles)
for dep in ctx.attr.src_deps:
transitive_runfiles.append(dep[DefaultInfo].default_runfiles)
runfiles = runfiles.merge_all(transitive_runfiles)
return runfiles
def _get_transitive_built_files_for_xls_cc_ir(ctx):
"""Returns the transitive built files from a 'xls_cc_ir' ctx.
Args:
ctx: The current rule's context object.
Returns:
The transitive built files from a 'xls_cc_ir' ctx.
"""
transitive_built_files = []
transitive_built_files.append(ctx.attr.src[DefaultInfo].files)
transitive_built_files.append(ctx.attr.block[DefaultInfo].files)
transitive_built_files.append(ctx.attr._xlscc_tool[DefaultInfo].files)
transitive_built_files.append(ctx.attr
._default_cc_header_files[DefaultInfo].files)
transitive_built_files.append(ctx.attr
._default_synthesis_header_files[DefaultInfo].files)
for dep in ctx.attr.src_deps:
transitive_built_files.append(dep[DefaultInfo].files)
if not transitive_built_files:
return None
return transitive_built_files
def _xls_cc_ir_impl(ctx):
"""The implementation of the 'xls_cc_ir' rule.
Converts a C/C++ source file to an IR file.
Args:
ctx: The current rule's context object.
Returns:
A tuple with the following elements in the order presented:
1. The ConvIRInfo provider
1. The list of built files.
1. The runfiles.
"""
XLSCC_FLAGS = (
"module_name",
"block_pb",
"top",
"package",
"clang_args_file",
"defines",
"include_dirs",
"meta_out",
"dump_ir_only",
)
xlscc_args = append_default_to_args(
ctx.attr.xlscc_args,
_DEFAULT_XLSCC_ARGS,
)
# Append to user paths.
xlscc_args["include_dirs"] = (
xlscc_args.get("include_dirs", "") + ",${PWD},./," +
ctx.genfiles_dir.path + "," + ctx.bin_dir.path + "," +
"xls/contrib/xlscc/synth_only," +
"xls/contrib/xlscc/synth_only/ac_compat," +
ctx.attr._default_cc_header_files.label.workspace_root # This must the last directory in the list.
)
# Append to user defines.
xlscc_args["defines"] = (
xlscc_args.get("defines", "") + "__SYNTHESIS__," +
"__AC_OVERRIDE_OVF_UPDATE_BODY=,__AC_OVERRIDE_OVF_UPDATE2_BODY="
)
is_args_valid(xlscc_args, XLSCC_FLAGS)
my_args = args_to_string(xlscc_args)
ir_filename = get_output_filename_value(
ctx,
"ir_file",
ctx.attr.name + _IR_FILE_EXTENSION,
)
ir_file = ctx.actions.declare_file(ir_filename)
# Get runfiles
runfiles = _get_runfiles_for_xls_cc_ir(ctx)
ctx.actions.run_shell(
outputs = [ir_file],
# The IR converter executable is a tool needed by the action.
tools = [ctx.executable._xlscc_tool],
# The files required for converting the C/C++ source file.
inputs = runfiles.files,
command = "{} {} --block_pb {} {} > {}".format(
ctx.executable._xlscc_tool.path,
ctx.file.src.path,
ctx.file.block.path,
my_args,
ir_file.path,
),
mnemonic = "ConvertXLSCC",
progress_message = "Converting XLSCC file: %s" % (ctx.file.src.path),
)
return [ConvIRInfo(conv_ir_file = ir_file), [ir_file], runfiles]
_xls_cc_ir_attrs = {
"src": attr.label(
doc = "The C/C++ source file containing the top level block. A " +
"single source file must be provided. The file must have a '" +
_CC_FILE_EXTENSION + "' extension.",
mandatory = True,
allow_single_file = [_CC_FILE_EXTENSION],
),
"block": attr.label(
doc = "Protobuf describing top-level block interface. A single " +
"source file single source file must be provided. The file " +
"must have a '" + _PROTOBIN_FILE_EXTENSION + "' or a '" +
_BINARYPB_FILE_EXTENSION + "' extension.",
mandatory = True,
allow_single_file = [
_PROTOBIN_FILE_EXTENSION,
_BINARYPB_FILE_EXTENSION,
],
),
"src_deps": attr.label_list(
doc = "Additional source files for the rule. The file must have a " +
_CC_FILE_EXTENSION + ", " + _H_FILE_EXTENSION + " or " +
_INC_FILE_EXTENSION + " extension.",
allow_files = [
_CC_FILE_EXTENSION,
_H_FILE_EXTENSION,
_INC_FILE_EXTENSION,
],
),
"xlscc_args": attr.string_dict(
doc = "Arguments of the XLSCC conversion tool.",
),
"ir_file": attr.output(
doc = "Filename of the generated IR. If not specified, the " +
"target name of the bazel rule followed by an " +
_IR_FILE_EXTENSION + " extension is used.",
),
"_xlscc_tool": attr.label(
doc = "The target of the XLSCC executable.",
default = Label("//xls/contrib/xlscc:xlscc"),
allow_single_file = True,
executable = True,
cfg = "exec",
),
"_default_cc_header_files": attr.label(
doc = "Default C/C++ header files for xlscc.",
default = Label("@com_github_hlslibs_ac_types//:ac_types_as_data"),
cfg = "target",
),
"_default_synthesis_header_files": attr.label(
doc = "Default synthesis header files for xlscc.",
default = Label("//xls/contrib/xlscc:synth_only_headers"),
cfg = "target",
),
}
def _xls_cc_ir_impl_wrapper(ctx):
"""The implementation of the 'xls_cc_ir' rule.
Wrapper for xls_cc_ir_impl. See: xls_cc_ir_impl.
Args:
ctx: The current rule's context object.
Returns:
ConvIRInfo provider
DefaultInfo provider
"""
ir_conv_info, built_files, runfiles = _xls_cc_ir_impl(ctx)
return [
ir_conv_info,
DefaultInfo(
files = depset(
direct = built_files,
transitive = _get_transitive_built_files_for_xls_cc_ir(ctx),
),
runfiles = runfiles,
),
]
xls_cc_ir = rule(
doc = """A build rule that converts a C/C++ source file to an IR file.
Examples:
1) A simple IR conversion example. Assume target 'a_block_pb' is
defined.
```
xls_cc_ir(
name = "a_ir",
src = "a.cc",
block = ":a_block_pb",
)
```
""",
implementation = _xls_cc_ir_impl_wrapper,
attrs = dicts.add(
_xls_cc_ir_attrs,
CONFIG["xls_outs_attrs"],
),
)
def xls_cc_ir_macro(
name,
src,
block,
src_deps = [],
xlscc_args = {},
enable_generated_file = True,
enable_presubmit_generated_file = False,
**kwargs):
"""A macro that instantiates a build rule generating an IR file from a C/C++ source file.
    The macro instantiates a rule that converts a C/C++ source file to an IR
    file and invokes the 'enable_generated_file_wrapper' function. The generated
    files are listed in the outs attribute of the rule.
Examples:
1) A simple IR conversion example. Assume target 'a_block_pb' is defined.
```
xls_cc_ir(
name = "a_ir",
src = "a.cc",
block = ":a_block_pb",
)
```
Args:
name: The name of the rule.
src: The C/C++ source file containing the top level block. A single source
file must be provided. The file must have a '.cc' extension.
      block: Protobuf describing top-level block interface. A single source file
        must be provided. The file must have a '.protobin'
or a '.binarypb' extension.
src_deps: Additional source files for the rule. The file must have a
'.cc', '.h' or '.inc' extension.
xlscc_args: Arguments of the XLSCC conversion tool.
enable_generated_file: See 'enable_generated_file' from
'enable_generated_file_wrapper' function.
enable_presubmit_generated_file: See 'enable_presubmit_generated_file'
from 'enable_generated_file_wrapper' function.
**kwargs: Keyword arguments. Named arguments.
"""
# Type check input
if type(name) != type(""):
fail("Argument 'name' must be of string type.")
if type(src) != type(""):
fail("Argument 'src' must be of string type.")
if type(block) != type(""):
fail("Argument 'block' must be of string type.")
if type(src_deps) != type([]):
fail("Argument 'src_deps' must be of list type.")
if type(xlscc_args) != type({}):
fail("Argument 'xlscc_args' must be of dictionary type.")
if type(enable_generated_file) != type(True):
fail("Argument 'enable_generated_file' must be of boolean type.")
if type(enable_presubmit_generated_file) != type(True):
fail("Argument 'enable_presubmit_generated_file' must be " +
"of boolean type.")
# Append output files to arguments.
kwargs = _append_xls_cc_ir_generated_files(kwargs, name)
xls_cc_ir(
name = name,
src = src,
block = block,
src_deps = src_deps,
xlscc_args = xlscc_args,
outs = _get_xls_cc_ir_generated_files(kwargs),
**kwargs
)
enable_generated_file_wrapper(
wrapped_target = name,
enable_generated_file = enable_generated_file,
enable_presubmit_generated_file = enable_presubmit_generated_file,
**kwargs
)
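# A sketch of passing tool arguments through the macro above (the argument
# names mirror the XLSCC_FLAGS tuple in _xls_cc_ir_impl; the values are
# illustrative):
#
#   xls_cc_ir_macro(
#       name = "a_ir",
#       src = "a.cc",
#       block = ":a_block_pb",
#       xlscc_args = {
#           "top": "Run",
#           "defines": "A_DEFINE=1",
#       },
#   )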
def _xls_cc_verilog_impl(ctx):
"""The implementation of the 'xls_cc_verilog' rule.
Converts a C/C++ file to an IR, optimizes the IR, and generates a verilog
file from the optimized IR.
Args:
ctx: The current rule's context object.
Returns:
ConvIRInfo provider.
OptIRInfo provider.
CodegenInfo provider.
DefaultInfo provider.
"""
ir_conv_info, ir_conv_built_files, ir_conv_runfiles = _xls_cc_ir_impl(ctx)
ir_opt_info, opt_ir_built_files, opt_ir_runfiles = xls_ir_opt_ir_impl(
ctx,
ir_conv_info.conv_ir_file,
)
codegen_info, verilog_built_files, verilog_runfiles = xls_ir_verilog_impl(
ctx,
ir_opt_info.opt_ir_file,
)
runfiles = ir_conv_runfiles.merge_all([opt_ir_runfiles, verilog_runfiles])
return [
ir_conv_info,
ir_opt_info,
codegen_info,
DefaultInfo(
files = depset(
direct = ir_conv_built_files + opt_ir_built_files +
verilog_built_files,
transitive = _get_transitive_built_files_for_xls_cc_ir(ctx),
),
runfiles = runfiles,
),
]
_cc_verilog_attrs = dicts.add(
_xls_cc_ir_attrs,
xls_ir_opt_ir_attrs,
xls_ir_verilog_attrs,
CONFIG["xls_outs_attrs"],
xls_toolchain_attr,
)
xls_cc_verilog = rule(
doc = """A build rule that generates a Verilog file from a C/C++ source file.
Examples:
1) A simple example. Assume target 'a_block_pb' is defined.
```
xls_cc_verilog(
name = "a_verilog",
src = "a.cc",
block = ":a_block_pb",
codegen_args = {
"generator": "combinational",
"module_name": "A",
"top": "A_proc",
},
)
```
""",
implementation = _xls_cc_verilog_impl,
attrs = _cc_verilog_attrs,
)
def xls_cc_verilog_macro(
name,
src,
block,
verilog_file,
src_deps = [],
xlscc_args = {},
opt_ir_args = {},
codegen_args = {},
enable_generated_file = True,
enable_presubmit_generated_file = False,
**kwargs):
"""A macro that instantiates a build rule generating a Verilog file from a C/C++ source file.
    The macro instantiates a build rule that generates a Verilog file from
    a C/C++ source file. The build rule executes the core functionality of the
following macros:
1. xls_cc_ir (converts a C/C++ file to an IR),
1. xls_ir_opt_ir (optimizes the IR), and,
    1. xls_ir_verilog (generates a Verilog file).
Examples:
1) A simple example. Assume target 'a_block_pb' is defined.
```
xls_cc_verilog(
name = "a_verilog",
src = "a.cc",
block = ":a_block_pb",
codegen_args = {
"generator": "combinational",
"module_name": "A",
"top": "A_proc",
},
)
```
Args:
name: The name of the rule.
src: The C/C++ source file containing the top level block. A single source
file must be provided. The file must have a '.cc' extension.
      block: Protobuf describing top-level block interface. A single source file
        must be provided. The file must have a '.protobin'
or a '.binarypb' extension.
verilog_file: The filename of Verilog file generated. The filename must
have a '.v' extension.
src_deps: Additional source files for the rule. The file must have a
'.cc', '.h' or '.inc' extension.
xlscc_args: Arguments of the XLSCC conversion tool.
opt_ir_args: Arguments of the IR optimizer tool. For details on the
arguments, refer to the opt_main application at
//xls/tools/opt_main.cc. Note: the 'top'
argument is not assigned using this attribute.
codegen_args: Arguments of the codegen tool. For details on the arguments,
refer to the codegen_main application at
//xls/tools/codegen_main.cc.
enable_generated_file: See 'enable_generated_file' from
'enable_generated_file_wrapper' function.
enable_presubmit_generated_file: See 'enable_presubmit_generated_file'
from 'enable_generated_file_wrapper' function.
**kwargs: Keyword arguments. Named arguments.
"""
# Type check input
if type(name) != type(""):
fail("Argument 'name' must be of string type.")
if type(src) != type(""):
fail("Argument 'src' must be of string type.")
if type(block) != type(""):
fail("Argument 'block' must be of string type.")
if type(verilog_file) != type(""):
fail("Argument 'verilog_file' must be of string type.")
if type(src_deps) != type([]):
fail("Argument 'src_deps' must be of list type.")
if type(xlscc_args) != type({}):
fail("Argument 'xlscc_args' must be of dictionary type.")
if type(opt_ir_args) != type({}):
fail("Argument 'opt_ir_args' must be of dictionary type.")
if type(codegen_args) != type({}):
fail("Argument 'codegen_args' must be of dictionary type.")
if type(enable_generated_file) != type(True):
fail("Argument 'enable_generated_file' must be of boolean type.")
if type(enable_presubmit_generated_file) != type(True):
fail("Argument 'enable_presubmit_generated_file' must be " +
"of boolean type.")
# Append output files to arguments.
kwargs = _append_xls_cc_ir_generated_files(kwargs, name)
kwargs = append_xls_ir_opt_ir_generated_files(kwargs, name)
kwargs = append_xls_ir_verilog_generated_files(kwargs, name, codegen_args)
xls_cc_verilog(
name = name,
src = src,
block = block,
verilog_file = verilog_file,
src_deps = src_deps,
xlscc_args = xlscc_args,
opt_ir_args = opt_ir_args,
codegen_args = codegen_args,
outs = _get_xls_cc_ir_generated_files(kwargs) +
get_xls_ir_opt_ir_generated_files(kwargs) +
get_xls_ir_verilog_generated_files(kwargs, codegen_args) +
[verilog_file],
**kwargs
)
enable_generated_file_wrapper(
wrapped_target = name,
enable_generated_file = enable_generated_file,
enable_presubmit_generated_file = enable_presubmit_generated_file,
**kwargs
)
| 32.989655 | 107 | 0.64064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,939 | 0.519442 |
2c57630305a7b0574563ea2a6b2e98b3d7e35805 | 3,099 | py | Python | Wasteland/api/save_and_load.py | Tonysun1/Explore-Z-Wasteland | 77be93f8b7838cd0b6b42e03607ba5662fc25b4c | [
"MIT"
] | 1 | 2018-10-24T03:48:12.000Z | 2018-10-24T03:48:12.000Z | Wasteland/api/save_and_load.py | Tonysun-rpi/Explore-Z-Wasteland | 77be93f8b7838cd0b6b42e03607ba5662fc25b4c | [
"MIT"
] | null | null | null | Wasteland/api/save_and_load.py | Tonysun-rpi/Explore-Z-Wasteland | 77be93f8b7838cd0b6b42e03607ba5662fc25b4c | [
"MIT"
] | 1 | 2018-10-22T19:14:38.000Z | 2018-10-22T19:14:38.000Z | from api.class_declaration import Profile, Storage, Base, Bag
# TODO: This S/L api uses txt files for the time being, should use databases in the future
# questions: what's in the save_file?
# save_file is a dictionary contains the following keys:
# profile, bag, base, and storage
# They are all "class", but bag and storage contains a list(need to keep the position of items)
# what's in the profile?
# name, level, gender, age, hunger, health, attack, armor
# what's in the base?
# name, level, storage
# what's in the bag?
# name, volume, items(list)
# what's in the storage?
# name, volume, items(list)
# save_file: the object that contains all the information
# filename: filename for save file
def save(save_file, filename):
f = open(filename, "w")
for i in save_file.keys():
#f.write(i)
for j in save_file[i]:
#if the class is profile
if (i == "P"):
f.write("Profile: ")
f.write(j.name+" ")
f.write("{} ".format(j.level))
f.write("{} ".format(j.gender))
f.write("{} ".format(j.age))
f.write("{} ".format(j.hunger))
f.write("{} ".format(j.health))
f.write("{} ".format(j.attack))
f.write("{} ".format(j.armor))
f.write("\n")
#if the class is base
if (i == "Bs"):
f.write("Base: ")
f.write(j.name+" ")
f.write("{} ".format(j.level))
#if the class is storage
f.write(j.storage.name + " ")
f.write("{} ".format(j.storage.volume))
f.write("{} ".format(j.storage.item))
f.write("\n")
#if the class is storage
if (i == "S"):
f.write("Storage: ")
f.write(j.name+" ")
f.write("{} ".format(j.volume))
f.write("{} ".format(j.item))
f.write("\n")
#if the class is bag
if (i == "Bg"):
f.write("Bag: ")
f.write(j.name+" ")
f.write("{} ".format(j.volume))
f.write("{} ".format(j.item))
f.write("\n")
# save_file: the object that contains all the information
# filename: filename for save file
def load(save_file, filename):
f = open(filename, "r")
content = f.read().splitlines()
for i in content:
partial = i.split(" ")
#print(partial)
#if Profile
if (partial[0] == "Profile:"):
profile = Profile(partial[1], partial[2], partial[3], partial[4], partial[5], partial[6], partial[7], partial[8])
save_file["P"].append(profile)
#if Storage
if (partial[0] == "Storage:"):
storage = Storage(partial[1], partial[2], partial[3])
save_file["S"].append(storage)
#if Base
if (partial[0] == "Base:"):
storage = Storage(partial[3], partial[4], partial[5])
base = Base(partial[1], partial[2], storage)
save_file["Bs"].append(base)
#if Bag
if (partial[0] == "Bag:"):
bag = Bag(partial[1], partial[2], partial[3])
save_file["Bg"].append(bag)
# testing
profile = Profile("try_profile", 10, 1, 5, 0, 100, 10, 10)
storage = Storage("try_storage", 1, [])
base = Base("try_base", 10, storage)
bag = Bag("try_bag", 1, [])
save_file = {"P": [profile], "S": [storage], "Bs": [base], "Bg": [bag]}
load_file = {"P": [], "S": [], "Bs": [], "Bg": []}
#save(save_file, "try1.txt")
load(load_file, "try1.txt")
| 26.487179 | 116 | 0.604389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,219 | 0.393353 |
2c57a4960adf0bbfbabeb5de8f75f2496f33aa17 | 3,538 | py | Python | learning/run_learning.py | lmzintgraf/MultiMAuS | 629a64ecb1f2587283611a5cdc15802cfeaf3bf5 | [
"MIT"
] | 14 | 2017-08-16T09:57:13.000Z | 2022-03-22T06:37:17.000Z | learning/run_learning.py | lmzintgraf/MultiMAuS | 629a64ecb1f2587283611a5cdc15802cfeaf3bf5 | [
"MIT"
] | null | null | null | learning/run_learning.py | lmzintgraf/MultiMAuS | 629a64ecb1f2587283611a5cdc15802cfeaf3bf5 | [
"MIT"
] | 8 | 2017-11-21T13:26:06.000Z | 2022-03-24T23:54:35.000Z | import numpy as np
import os
from datetime import datetime
from pytz import timezone
import matplotlib.pyplot as plt
from agent_qlean import QLearnAgent
from agent_bandit import BanditAgent
from environment import Environment
from simulator import parameters
from simulator.transaction_model import TransactionModel
from experiments import rewards
from authenticators.simple_authenticators import RandomAuthenticator, \
HeuristicAuthenticator, OracleAuthenticator, NeverSecondAuthenticator, \
AlwaysSecondAuthenticator
auths = [
# (Environment(BanditAgent(do_reward_shaping=True)), 'Bandit (reward shaping)'),
# (RandomAuthenticator(), 'Random'),
# (OracleAuthenticator(), 'Oracle'),
# (HeuristicAuthenticator(50), 'Heuristic'),
# (NeverSecondAuthenticator(), 'NeverSecond'),
# (AlwaysSecondAuthenticator(), 'AlwaysSecond'),
(Environment(QLearnAgent('zero', 0.01, 0.1, 0.1, False)), 'Q-Learn'),
(Environment(QLearnAgent('zero', 0.01, 0.1, 0.1, True)), 'Q-Learn with reward shaping'),
(Environment(BanditAgent()), 'Bandit'),
(Environment(BanditAgent(do_reward_shaping=True)), 'Bandit with reward shaping'),
]
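# Each entry above pairs an authenticator (or an Environment wrapping a
# learning agent) with the label used for the plot legend below.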
authenticator = None
auth_name = ''
for k in range(len(auths)):
if auth_name != 'Q-Learning (from scratch)':
authenticator, auth_name = auths[k]
else: # if we just did Q-Learning, run it again with the pre-trained one
auth_name = 'Q-Learning (pre-trained)'
seed = 666
print("-----")
print(auth_name)
print("-----")
sum_monetary_rewards = None
for i in range(1):
# the parameters for the simulation
params = parameters.get_default_parameters()
params['seed'] = seed
params['init_satisfaction'] = 0.9
params['stay_prob'] = [0.9, 0.6]
params['num_customers'] = 100
params['num_fraudsters'] = 10
params['end_date'] = datetime(2016, 12, 31).replace(tzinfo=timezone('US/Pacific'))
path = 'results/{}_{}_{}_{}_{}_{}'.format(auth_name,
seed,
int(params['init_satisfaction']*10),
params['num_customers'],
params['num_fraudsters'],
params['end_date'].year)
if os.path.exists(path+'.npy'):
monetary_rewards = np.load(path+'.npy')
else:
# get the model for transactions
model = TransactionModel(params, authenticator=authenticator)
# run
while not model.terminated:
model.step()
agent_vars = model.log_collector.get_agent_vars_dataframe()
agent_vars.index = agent_vars.index.droplevel(1)
monetary_rewards = rewards.monetary_reward_per_timestep(agent_vars)
np.save(path, monetary_rewards)
if sum_monetary_rewards is None:
sum_monetary_rewards = monetary_rewards
else:
sum_monetary_rewards += monetary_rewards
seed += 1
sum_monetary_rewards /= (i+1)
if k == 0:
color = 'r'
elif k == 1:
color = 'r--'
elif k == 2:
color = 'b'
elif k == 3:
color = 'b--'
plt.plot(range(len(monetary_rewards)), np.cumsum(monetary_rewards), color, label=auth_name)
plt.xlabel('time step')
plt.ylabel('monetary reward (cumulative)')
plt.legend()
plt.tight_layout()
plt.show()
| 32.759259 | 97 | 0.610232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 828 | 0.234031 |
2c5818a982167d62d1701034e950a7696ca4f6f2 | 1,484 | py | Python | ex095a.py | emerfelippini/Curso_em_video-Aulas_Python | 5b1d78b259732bb9bbad27cd30ce91bba77c5ef0 | [
"MIT"
] | null | null | null | ex095a.py | emerfelippini/Curso_em_video-Aulas_Python | 5b1d78b259732bb9bbad27cd30ce91bba77c5ef0 | [
"MIT"
] | null | null | null | ex095a.py | emerfelippini/Curso_em_video-Aulas_Python | 5b1d78b259732bb9bbad27cd30ce91bba77c5ef0 | [
"MIT"
] | null | null | null | print('\033[1;32mANALISANDO DADOS\033[m')
jogador = dict()
gols = list()
jogadores = list()
print('-=' * 50)
while True:
gols.clear()
jogador['nome'] = str(input('Nome do jogador: '))
partidas = int(input('Quantas partidas esse jogador jogou: '))
for cont in range(1, partidas + 1, +1):
partida = int(input(f'* Quantos gols ele marcou na {cont}ª partida: '))
gols.append(partida)
jogador['gols'] = gols[:]
jogador['total de gols'] = sum(gols)
jogadores.append(jogador.copy())
escolha = str(input('Deseja continuar [S/N]? ')).upper().strip()[0]
while escolha not in 'SN':
print('Opção inválida.')
escolha = str(input('Deseja continuar [S/N]? ')).upper().strip()[0]
if escolha == 'N':
break
print('-=' * 30)
print('-=' * 50)
print(f'\033[1;32m{"COD":<20}{"NOME":<10}{"GOLS":>10}{"TOTAL":>15}\033[m')
for k, v in enumerate(jogadores):
print(f'{k:<20}', end='')
for d in v.values():
print(f'{str(d):<15}', end='')
print()
print('-='* 40)
while True:
resp = int(input('Mostrar dados de qual jogador [999] para parar: '))
if resp == 999:
break
while resp > len(jogadores):
print('Escolha inválida')
resp = int(input('Mostrar dados de qual jogador [999] para parar: '))
print(f'Levantamento do jogador {jogadores[resp]["nome"]}:')
for pos, cont in enumerate(jogadores[resp]['gols']):
print(f'=> Na partida {pos + 1} fez {cont} gols.') | 37.1 | 79 | 0.58558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 581 | 0.390195 |
2c5c09b1bffec3d9d337a43ebc68475a241f04d3 | 229 | py | Python | farmy/modules/led.py | farmy-maker/farmy-py | e21cc816073e62d34a84e82a8dbc3075cb9c4d47 | [
"Apache-2.0"
] | 1 | 2017-09-28T07:44:25.000Z | 2017-09-28T07:44:25.000Z | farmy/modules/led.py | farmy-maker/farmy-py | e21cc816073e62d34a84e82a8dbc3075cb9c4d47 | [
"Apache-2.0"
] | null | null | null | farmy/modules/led.py | farmy-maker/farmy-py | e21cc816073e62d34a84e82a8dbc3075cb9c4d47 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from modules.controller import Controller
LED_PIN = 23 # pin of led
if __name__ == "__main__":
controller = Controller(LED_PIN)
controller.run(50, 1)
print("Led Start for a second with 50% power")
| 20.818182 | 50 | 0.694323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.331878 |
2c5e49aea9d8124efaadeb1e5de150138508a7bb | 2,608 | py | Python | docs/conf.py | guillaume-wisniewski/elpis | 550c350fd0098751b9a502a253bc4066f15c47db | [
"Apache-2.0"
] | 118 | 2018-11-25T22:00:11.000Z | 2022-03-18T10:18:33.000Z | docs/conf.py | guillaume-wisniewski/elpis | 550c350fd0098751b9a502a253bc4066f15c47db | [
"Apache-2.0"
] | 189 | 2019-01-25T01:37:59.000Z | 2022-02-16T02:31:23.000Z | docs/conf.py | guillaume-wisniewski/elpis | 550c350fd0098751b9a502a253bc4066f15c47db | [
"Apache-2.0"
] | 34 | 2018-11-28T20:31:38.000Z | 2022-01-27T12:20:59.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'Elpis'
copyright = '2020, The University of Queensland'
author = 'Ben Foley, Nicholas Lambourne, Nay San'
# The full version, including alpha/beta/rc tags
release = '0.96.0'
master_doc = 'index'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx_autodoc_typehints',
'recommonmark'
]
# Show undocumented members in docs
autodoc_default_options = {
'undoc-members': True,
}
# Mock to get RTD docs to compile
autodoc_mock_imports = ["pytest"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# We also exclude the "ugly" auto-generated elpis.rst file and replace it with our own.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'elpis/elpis.rst']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_logo = '_static/img/logo.png'
html_theme_options = {
'logo_only': True,
}
github_url = 'https://github.com/CoEDL/elpis'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'style.css',
]
# -- Extension configuration -------------------------------------------------
| 33.435897 | 87 | 0.664494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,160 | 0.828221 |
2c5f547ed3f8ba2162e991dbf9194c4f2334bfdf | 1,417 | py | Python | launchkey/factories/service.py | bgroveben/launchkey-python | c102d76040221059e7b87d96496edb1be3824d3b | [
"MIT"
] | 1 | 2018-12-06T04:42:35.000Z | 2018-12-06T04:42:35.000Z | launchkey/factories/service.py | bgroveben/launchkey-python | c102d76040221059e7b87d96496edb1be3824d3b | [
"MIT"
] | 1 | 2018-12-11T22:31:03.000Z | 2018-12-11T22:31:03.000Z | launchkey/factories/service.py | bgroveben/launchkey-python | c102d76040221059e7b87d96496edb1be3824d3b | [
"MIT"
] | null | null | null | from .base import BaseFactory
from launchkey import LAUNCHKEY_PRODUCTION
from launchkey.clients import ServiceClient
class ServiceFactory(BaseFactory):
"""Factory for creating clients when representing a LaunchKey Service Profile"""
def __init__(self, service_id, private_key, url=LAUNCHKEY_PRODUCTION, testing=False, transport=None):
"""
:param service_id: UUID for the requesting service
:param private_key: PEM formatted private key string
:param url: URL for the LaunchKey API
:param testing: Boolean stating whether testing mode is being used. This will determine whether SSL validation
occurs.
        :param transport: Instantiated transport object. The default and currently only supported transport is
launchkey.transports.JOSETransport. If you wish to set encryption or hashing algorithms, this is where you would
do it. IE: JOSETransport(jwt_algorithm="RS512", jwe_cek_encryption="RSA-OAEP",
jwe_claims_encryption="A256CBC-HS512", content_hash_algorithm="S256")
"""
super(ServiceFactory, self).__init__('svc', service_id, private_key, url, testing, transport)
def make_service_client(self):
"""
Retrieves a client to make service calls.
:return: launchkey.clients.ServiceClient
"""
return ServiceClient(self._issuer_id, self._transport)
| 48.862069 | 120 | 0.714891 | 1,297 | 0.915314 | 0 | 0 | 0 | 0 | 0 | 0 | 937 | 0.661256 |
2c5f8611dedadefc6301d38ec8d0cd11d6e649b6 | 6,762 | py | Python | mvsnet/mvs_data_generation/utils.py | ubiquity6/MVSNet | 7dc026acb019d270e79de7be4a5cfcb33863127f | [
"MIT"
] | 7 | 2019-07-15T08:49:38.000Z | 2019-11-30T01:09:12.000Z | mvsnet/mvs_data_generation/utils.py | ubiquity6/MVSNet | 7dc026acb019d270e79de7be4a5cfcb33863127f | [
"MIT"
] | 10 | 2019-07-17T00:00:29.000Z | 2022-03-11T23:50:36.000Z | mvsnet/mvs_data_generation/utils.py | ubiquity6/MVSNet | 7dc026acb019d270e79de7be4a5cfcb33863127f | [
"MIT"
] | 3 | 2019-08-02T09:06:32.000Z | 2021-07-06T11:49:55.000Z | #!/usr/bin/env python
from __future__ import print_function
import os
import errno
import time
import math
import re
import sys
import scipy
import imageio
import cv2
import numpy as np
import json
from random import Random
from tensorflow.python.lib.io import file_io
import logging
from mvsnet.utils import setup_logger
"""
Helper package for reading and writing MVS depth map training data
"""
"""
Copyright 2019, Chris Heinrich, Ubiquity6.
"""
def center_image(img):
""" normalize image input """
img = img.astype(np.float32)
var = np.var(img, axis=(0, 1), keepdims=True)
mean = np.mean(img, axis=(0, 1), keepdims=True)
return (img - mean) / (np.sqrt(var) + 0.00000001)
def center_images(images):
for i in range(len(images)):
images[i] = center_image(images[i])
return images
def copy_and_center_images(ims):
""" Returns a copy of the images, centered"""
images = []
for i in range(len(ims)):
images.append(center_image(ims[i]))
return images
def center_image_cluster(img):
""" normalize image input. Same as above, except there are assumed
to be a block of images stacked along the 0 axis"""
img = img.astype(np.float32)
std = np.std(img)
mean = np.mean(img)
return (img - mean) / (std + 0.00000001)
def scale_camera(cam, scale=1):
""" resize input in order to produce sampled depth map """
new_cam = np.copy(cam)
# focal:
new_cam[1][0][0] = cam[1][0][0] * scale
new_cam[1][1][1] = cam[1][1][1] * scale
# principle point:
new_cam[1][0][2] = cam[1][0][2] * scale
new_cam[1][1][2] = cam[1][1][2] * scale
return new_cam
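# For example (illustrative numbers): scale_camera(cam, 0.5) halves fx, fy, cx
# and cy so the intrinsics match an image that has been downsampled by half.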
def scale_mvs_camera(cams, scale=1):
""" resize input in order to produce sampled depth map """
for view in range(len(cams)):
cams[view] = scale_camera(cams[view], scale=scale)
return cams
def scale_image(image, scale=1, interpolation='linear'):
""" resize image using cv2 """
if interpolation == 'linear':
return cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
if interpolation == 'nearest':
return cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
def scale_and_reshape_depth(depth_image, output_scale):
# Scale depth image to output_scale * image_scale
depth = np.copy(depth_image)
depth = scale_image(
depth, scale=output_scale, interpolation='nearest')
# Increase rank of depth array and set shape[2] = 1
depth_shape = (
depth.shape[0], depth.shape[1], 1)
return np.reshape(depth, depth_shape)
def reshape_depth(depth):
depth_shape = (
depth.shape[0], depth.shape[1], 1)
return np.reshape(depth, depth_shape)
def scale_mvs_input(images, cams, depth_image=None, scale=1):
""" resize input to fit into the memory """
for view in range(len(images)):
images[view] = scale_image(images[view], scale=scale)
cams[view] = scale_camera(cams[view], scale=scale)
if depth_image is None:
return images, cams
else:
depth_image = scale_image(
depth_image, scale=scale, interpolation='nearest')
return images, cams, depth_image
def crop_mvs_input(images, cams, width, height, base_image_size, depth_image=None):
""" resize images and cameras to fit the network (so both dimensions are divisible by base_image_size ) """
# crop images and cameras
for view in range(len(images)):
h, w = images[view].shape[0:2]
new_h = h
new_w = w
if new_h > height:
new_h = height
else:
new_h = int(math.ceil(h / base_image_size)
* base_image_size)
if new_w > width:
new_w = width
else:
new_w = int(math.ceil(w / base_image_size)
* base_image_size)
start_h = int(math.ceil((h - new_h) / 2))
start_w = int(math.ceil((w - new_w) / 2))
finish_h = start_h + new_h
finish_w = start_w + new_w
images[view] = images[view][start_h:finish_h, start_w:finish_w]
# Shift the principal point
cams[view][1][0][2] = cams[view][1][0][2] - start_w
cams[view][1][1][2] = cams[view][1][1][2] - start_h
# crop depth image
if not depth_image is None:
depth_image = depth_image[start_h:finish_h, start_w:finish_w]
return images, cams, depth_image
else:
return images, cams
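# Worked example (illustrative numbers): with h=1216, w=1936, height=1056,
# width=1920 and base_image_size=8, each view is centre-cropped to 1056x1920
# (start_h=80, start_w=8) and the principal point of each camera is shifted
# by (-8, -80) accordingly.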
def mask_depth_image(depth_image, min_depth, max_depth):
""" mask out-of-range pixel to zero """
ret, depth_image = cv2.threshold(
depth_image, min_depth, 100000, cv2.THRESH_TOZERO)
ret, depth_image = cv2.threshold(
depth_image, max_depth, 100000, cv2.THRESH_TOZERO_INV)
depth_image = np.expand_dims(depth_image, 2)
return depth_image
def flip_cams(cams, depth_num):
""" Modifies cams to be compatible with MVSNet GRU regularization"""
cams[0][1, 3, 0] = cams[0][1, 3, 0] + \
(depth_num - 1) * cams[0][1, 3, 1]
cams[0][1, 3, 1] = -cams[0][1, 3, 1]
return cams
def write_cam(file, cam):
# f = open(file, "w")
f = file_io.FileIO(file, "w")
f.write('extrinsic\n')
for i in range(0, 4):
for j in range(0, 4):
f.write(str(cam[0][i][j]) + ' ')
f.write('\n')
f.write('\n')
f.write('intrinsic\n')
for i in range(0, 3):
for j in range(0, 3):
f.write(str(cam[1][i][j]) + ' ')
f.write('\n')
f.write('\n' + str(cam[1][3][0]) + ' ' + str(cam[1][3][1]) +
' ' + str(cam[1][3][2]) + ' ' + str(cam[1][3][3]) + '\n')
f.close()
def write_depth_map(file_path, image):
# convert to int and clip to range of [0, 2^16 -1]
image = np.clip(image, 0, 65535).astype(np.uint16)
imageio.imsave(file_path, image)
file_path_scaled = file_path.replace('.png', '_scaled.png')
# Rescales the image so max distance is 6.5 meters, making contrast more visible
# This is purely for visualization purposes
depth_scale = 20
image_scaled = np.clip(image*depth_scale, 0, 65535).astype(np.uint16)
imageio.imsave(file_path_scaled, image_scaled)
def write_confidence_map(file_path, image):
# we convert probabilities in range [0,1] to ints in range [0, 2^16-1]
scale_factor = 65535
image *= scale_factor
image = np.clip(image, 0, 65535).astype(np.uint16)
imageio.imsave(file_path, image)
def write_image(file_path, image):
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
imageio.imsave(file_path, image.astype(np.uint8))
def mkdir_p(dirname):
try:
        os.makedirs(dirname)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
| 29.528384 | 111 | 0.629104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,332 | 0.196983 |
2c6117485781c2c31bb5fbf18702c529d404a85e | 1,820 | py | Python | kite-python/kite_ml/kite/name_encoder/scope_encoder.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 17 | 2022-01-10T11:01:50.000Z | 2022-03-25T03:21:08.000Z | kite-python/kite_ml/kite/name_encoder/scope_encoder.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 1 | 2022-01-13T14:28:47.000Z | 2022-01-13T14:28:47.000Z | kite-python/kite_ml/kite/name_encoder/scope_encoder.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 7 | 2022-01-07T03:58:10.000Z | 2022-03-24T07:38:20.000Z | from typing import Dict, Any
import tensorflow as tf
from ..utils.segmented_data import SegmentedIndices, SegmentedIndicesFeed
from ..graph_encoder.embeddings import NodeEmbeddings
class Encoder(object):
def __init__(self, nodes: NodeEmbeddings):
self._nodes = nodes
self._build()
def _build(self):
self._build_placeholders()
self._build_scope_state()
def _build_placeholders(self):
with tf.name_scope('placeholders'):
# shape [number of variables in batch]
            # sample_ids[i] = s means that variable i is part of sample s in the batch
self._variable_node_ids = SegmentedIndices('variable_node_ids')
def _build_scope_state(self):
with tf.name_scope('build_scope_state'):
# [num variable nodes in batch]
self._variable_nodes_embedded: tf.Tensor = tf.gather(
self._nodes.embeddings,
self._variable_node_ids.indices,
name='scope_nodes_embedded',
)
# reduce across variable nodes in each graph in the batch
# shape [batch size, graph embedding depth]
self._scope_state: tf.Tensor = tf.segment_max(
self._variable_nodes_embedded,
self._variable_node_ids.sample_ids,
name='scope_state',
)
def feed_dict(self, feed: SegmentedIndicesFeed) -> Dict[tf.Tensor, Any]:
return self._variable_node_ids.feed_dict(feed)
def placeholders_dict(self) -> Dict[str, tf.Tensor]:
return self._variable_node_ids.dict()
def scope_state(self) -> tf.Tensor:
"""
:return: representation of all the variables in scope, shape [batch size, graph embedding depth]
"""
return self._scope_state
| 34.339623 | 104 | 0.644505 | 1,633 | 0.897253 | 0 | 0 | 0 | 0 | 0 | 0 | 461 | 0.253297 |
2c620ae2549d713c2a5bbc3e9b322fba02a44587 | 83,399 | py | Python | scripts/app.py | ArtellaPipe/artella-launcher | 0da785a6c1ee50ef8da972f5929d344948c635a5 | [
"MIT"
] | null | null | null | scripts/app.py | ArtellaPipe/artella-launcher | 0da785a6c1ee50ef8da972f5929d344948c635a5 | [
"MIT"
] | 6 | 2020-03-08T20:12:52.000Z | 2021-12-13T20:29:38.000Z | scripts/app.py | ArtellaPipe/artella-launcher | 0da785a6c1ee50ef8da972f5929d344948c635a5 | [
"MIT"
] | null | null | null | import os
import re
import sys
import json
import time
import psutil
import shutil
import appdirs
import zipfile
import tarfile
import argparse
import platform
import requests
import traceback
import contextlib
import subprocess
import webbrowser
import logging.config
from pathlib2 import Path
from bs4 import BeautifulSoup
from backports import tempfile
from packaging.version import Version, InvalidVersion
try:
from urlparse import urlparse
except Exception:
from urllib.parse import urlparse
try:
from urllib2 import Request, urlopen
except ImportError:
from urllib.request import Request, urlopen
try:
import PySide
from PySide.QtCore import *
from PySide.QtGui import *
except ImportError:
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from PySide2.QtGui import *
logging_name = '__logging__.ini'
logging_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), logging_name)
if not os.path.isfile(logging_path):
logging_path = os.path.join(os.path.dirname(sys.executable), logging_name)
if not os.path.isfile(logging_path):
if hasattr(sys, '_MEIPASS'):
logging_path = os.path.join(sys._MEIPASS, 'resources', logging_name)
logging.config.fileConfig(logging_path, disable_existing_loggers=False)
LOGGER = logging.getLogger('artellapipe-updater')
ARTELLA_NEXT_VERSION_FILE_NAME = 'version_to_run_next'
def is_windows():
return sys.platform.startswith('win')
def is_mac():
return sys.platform == 'darwin'
def is_linux():
return 'linux' in sys.platform
class ArtellaSplash(QSplashScreen, object):
def __init__(self, pixmap):
self._offset = QPoint()
super(ArtellaSplash, self).__init__(pixmap)
def mousePressEvent(self, event):
"""
        Overrides base QSplashScreen mousePressEvent function
:param event: QMouseEvent
"""
self._offset = event.pos()
def mouseMoveEvent(self, event):
"""
        Overrides base QSplashScreen mouseMoveEvent function
:param event: QMouseEvent
"""
x = event.globalX()
y = event.globalY()
x_w = self._offset.x()
y_w = self._offset.y()
self.move(x - x_w, y - y_w)
class ArtellaUpdaterException(Exception, object):
def __init__(self, exc):
if type(exc) in [str, unicode]:
exc = Exception(exc)
msg = '{} | {}'.format(exc, traceback.format_exc())
LOGGER.exception(msg)
traceback.print_exc()
QMessageBox.critical(None, 'Error', msg)
class ArtellaUpdater(QWidget, object):
def __init__(
self, app, project_name, project_type, app_version, deployment_repository, documentation_url=None,
deploy_tag=None, install_env_var=None, requirements_file_name=None, force_venv=False,
splash_path=None, script_path=None, requirements_path=None, artellapipe_configs_path=None,
dev=False, update_icon=False, parent=None):
super(ArtellaUpdater, self).__init__(parent=parent)
self._config_data = self._read_config()
if app and update_icon:
app.setWindowIcon(QIcon(self._get_resource(self._get_app_config('icon'))))
self._dev = dev
self._requirements_path = requirements_path if requirements_path else None
self._artella_configs_path = artellapipe_configs_path if artellapipe_configs_path else None
# We force development mode when we force a specific requirements file
if self._requirements_path and os.path.isfile(self._requirements_path):
self._dev = True
self._project_name = self._get_app_config('name') or project_name
self._project_type = self._get_app_config('type') or project_type
self._app_version = self._get_app_config('version') or app_version
self._repository = self._get_app_config('repository') or deployment_repository
self._splash_path = self._get_resource(self._get_app_config('splash')) or splash_path
self._force_venv = force_venv
self._venv_info = dict()
if self._project_name and not self._dev:
for proc in psutil.process_iter():
if proc.name().startswith(self._project_name) and proc.pid != psutil.Process().pid:
proc.kill()
self._setup_logger()
self._setup_config()
self._setup_ui()
QApplication.instance().processEvents()
self._install_path = None
self._selected_tag_index = None
self._documentation_url = documentation_url if documentation_url else self._get_default_documentation_url()
self._install_env_var = install_env_var if install_env_var else self._get_default_install_env_var()
self._requirements_file_name = requirements_file_name if requirements_file_name else 'requirements.txt'
self._all_tags = list()
self._deploy_tag = deploy_tag if deploy_tag else self._get_deploy_tag()
self._script_path = script_path if script_path and os.path.isfile(script_path) else self._get_script_path()
self._artella_app = 'lifecycler' if self._project_type == 'indie' else 'artella'
# If not valid tag is found we close the application
if not self._deploy_tag:
sys.exit()
valid_load = self._load()
if not valid_load:
sys.exit()
@property
def project_name(self):
return self._project_name
@property
def repository(self):
return self._repository
@property
def install_env_var(self):
return self._install_env_var
def get_clean_name(self):
"""
Return name of the project without spaces and lowercase
:return: str
"""
return self._project_name.replace(' ', '').lower()
def get_current_os(self):
"""
        Return the current OS the script is being executed on
:return:
"""
os_platform = platform.system()
if os_platform == 'Windows':
return 'Windows'
elif os_platform == 'Darwin':
return 'MacOS'
elif os_platform == 'Linux':
return 'Linux'
else:
raise Exception('No valid OS platform detected: {}!'.format(os_platform))
def get_config_data(self):
"""
Returns data in the configuration file
:return: dict
"""
data = dict()
config_path = self._get_config_path()
if not os.path.isfile(config_path):
return data
with open(config_path, 'r') as config_file:
try:
data = json.load(config_file)
except Exception:
data = dict()
return data
def is_python_installed(self):
"""
Returns whether current system has Python installed or not
:return: bool
"""
process = self._run_subprocess(commands_list=['python', '-c', 'quit()'], shell=False)
process.wait()
return True if process.returncode == 0 else False
def is_pip_installed(self):
"""
Returns whether pip is installed or not
:return: bool
"""
process = self._run_subprocess(commands_list=['pip', '-V'])
process.wait()
return True if process.returncode == 0 else False
def is_virtualenv_installed(self):
"""
Returns whether virtualenv is installed or not
:return: bool
"""
try:
process = self._run_subprocess(commands_list=['virtualenv', '--version'], shell=False)
process.wait()
except Exception:
return False
return True if process.returncode == 0 else False
def _read_config(self):
"""
Internal function that retrieves config data stored in executable
:return: dict
"""
data = {}
config_file_name = 'config.json'
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), config_file_name)
if not os.path.isfile(config_path):
config_path = os.path.join(os.path.dirname(sys.executable), 'resources', config_file_name)
if not os.path.isfile(config_path):
if hasattr(sys, '_MEIPASS'):
config_path = os.path.join(sys._MEIPASS, 'resources', config_file_name)
if not os.path.isfile(config_path):
return data
try:
with open(config_path) as config_file:
data = json.load(config_file)
except RuntimeError as exc:
raise Exception(exc)
return data
def _get_app_config(self, config_name):
"""
Returns configuration parameter stored in configuration, if exists
:param config_name: str
:return: str
"""
if not self._config_data:
return None
return self._config_data.get(config_name, None)
def _get_script_path(self):
script_path = None
config_file_name = 'launcher.py'
script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), config_file_name)
if not os.path.isfile(script_path):
script_path = os.path.join(os.path.dirname(sys.executable), 'resources', config_file_name)
if not os.path.isfile(script_path):
if hasattr(sys, '_MEIPASS'):
script_path = os.path.join(sys._MEIPASS, 'resources', config_file_name)
LOGGER.info('Launcher Script: "{}"'.format(script_path))
return script_path
def _get_resource(self, resource_name):
resource_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources', resource_name)
if not os.path.isfile(resource_path):
resource_path = os.path.join(os.path.dirname(sys.executable), 'resources', resource_name)
if not os.path.isfile(resource_path):
if hasattr(sys, '_MEIPASS'):
resource_path = os.path.join(sys._MEIPASS, 'resources', resource_name)
LOGGER.info("Retrieving resource: {} >>> {}".format(resource_name, resource_path))
return resource_path
def _set_splash_text(self, new_text):
self._progress_text.setText(new_text)
QApplication.instance().processEvents()
def _setup_ui(self):
splash_pixmap = QPixmap(self._splash_path)
self._splash = ArtellaSplash(splash_pixmap)
self._splash.setWindowFlags(Qt.FramelessWindowHint)
splash_layout = QVBoxLayout()
splash_layout.setContentsMargins(5, 2, 5, 2)
splash_layout.setSpacing(2)
splash_layout.setAlignment(Qt.AlignBottom)
self._splash.setLayout(splash_layout)
label_style = """
QLabel
{
background-color: rgba(100, 100, 100, 100);
color: white;
border-radius: 5px;
}
"""
self._version_lbl = QLabel('v0.0.0')
self._version_lbl.setStyleSheet(label_style)
version_font = self._version_lbl.font()
version_font.setPointSize(10)
self._version_lbl.setFont(version_font)
self._artella_status_icon = QLabel()
self._artella_status_icon.setPixmap(QPixmap(self._get_resource('artella_off.png')).scaled(QSize(30, 30)))
install_path_icon = QLabel()
install_path_icon.setPixmap(QPixmap(self._get_resource('disk.png')).scaled(QSize(25, 25)))
self._install_path_lbl = QLabel('Install Path: ...')
self._install_path_lbl.setStyleSheet(label_style)
install_path_font = self._install_path_lbl.font()
install_path_font.setPointSize(8)
self._install_path_lbl.setFont(install_path_font)
deploy_tag_icon = QLabel()
deploy_tag_icon.setPixmap(QPixmap(self._get_resource('tag.png')).scaled(QSize(25, 25)))
self._deploy_tag_combo = QComboBox()
info_layout = QVBoxLayout()
info_layout.setContentsMargins(5, 5, 5, 5)
info_layout.setSpacing(10)
buttons_style = """
QPushButton:!hover
{
background-color: rgba(100, 100, 100, 100);
color: white;
border-radius: 5px;
}
QPushButton:hover
{
background-color: rgba(50, 50, 50, 100);
color: white;
border-radius: 5px;
}
QPushButton:pressed
{
background-color: rgba(15, 15, 15, 100);
color: white;
border-radius: 5px;
}
"""
self._launch_btn = QPushButton('Launch')
self._launch_btn.setStyleSheet(buttons_style)
self._launch_btn.setFixedWidth(150)
self._launch_btn.setFixedHeight(30)
self._launch_btn.setIconSize(QSize(40, 40))
self._launch_btn.setIcon(QPixmap(self._get_resource('play.png')))
self._close_btn = QPushButton('')
self._close_btn.setFlat(True)
self._close_btn.setFixedSize(QSize(30, 30))
self._close_btn.setIconSize(QSize(25, 25))
self._close_btn.setIcon(QPixmap(self._get_resource('close.png')))
self._open_install_folder_btn = QPushButton('Open Install Folder')
self._open_install_folder_btn.setStyleSheet(buttons_style)
self._open_install_folder_btn.setFixedWidth(150)
self._open_install_folder_btn.setFixedHeight(30)
self._open_install_folder_btn.setIconSize(QSize(25, 25))
self._open_install_folder_btn.setIcon(QPixmap(self._get_resource('search_folder.png')))
self._reinstall_btn = QPushButton('Reinstall')
self._reinstall_btn.setStyleSheet(buttons_style)
self._reinstall_btn.setFixedWidth(75)
self._reinstall_btn.setFixedHeight(30)
self._reinstall_btn.setIconSize(QSize(15, 15))
self._reinstall_btn.setIcon(QPixmap(self._get_resource('reinstall.png')))
self._uninstall_btn = QPushButton('Uninstall')
self._uninstall_btn.setStyleSheet(buttons_style)
self._uninstall_btn.setFixedWidth(75)
self._uninstall_btn.setFixedHeight(30)
self._uninstall_btn.setIconSize(QSize(20, 20))
self._uninstall_btn.setIcon(QPixmap(self._get_resource('uninstall.png')))
uninstall_reinstall_layout = QHBoxLayout()
uninstall_reinstall_layout.setSpacing(2)
uninstall_reinstall_layout.setContentsMargins(2, 2, 2, 2)
uninstall_reinstall_layout.addWidget(self._reinstall_btn)
uninstall_reinstall_layout.addWidget(self._uninstall_btn)
self._buttons_layout = QVBoxLayout()
self._buttons_layout.setContentsMargins(5, 5, 5, 5)
self._buttons_layout.setSpacing(2)
self._buttons_layout.addWidget(self._launch_btn)
self._buttons_layout.addWidget(self._open_install_folder_btn)
self._buttons_layout.addLayout(uninstall_reinstall_layout)
self._info_tag_btn = QPushButton()
self._info_tag_btn.setFlat(True)
self._info_tag_btn.setFixedSize(QSize(25, 25))
self._info_tag_btn.setIconSize(QSize(18, 18))
info_icon = QIcon()
info_icon.addPixmap(QPixmap(self._get_resource('info.png')).scaled(QSize(25, 25)))
self._info_tag_btn.setIcon(info_icon)
self._refresh_tag_btn = QPushButton()
self._refresh_tag_btn.setFlat(True)
self._refresh_tag_btn.setFixedSize(QSize(25, 25))
self._refresh_tag_btn.setIconSize(QSize(18, 18))
refresh_icon = QIcon()
refresh_icon.addPixmap(QPixmap(self._get_resource('refresh.png')).scaled(QSize(25, 25)))
self._refresh_tag_btn.setIcon(refresh_icon)
self._progress_text = QLabel('Setting {} ...'.format(self._project_name.title()))
self._progress_text.setAlignment(Qt.AlignCenter)
self._progress_text.setStyleSheet("QLabel { background-color : rgba(0, 0, 0, 180); color : white; }")
font = self._progress_text.font()
font.setPointSize(10)
self._progress_text.setFont(font)
second_layout = QHBoxLayout()
second_layout.setContentsMargins(5, 5, 5, 5)
second_layout.setSpacing(5)
second_layout.addItem(QSpacerItem(10, 0, QSizePolicy.Expanding, QSizePolicy.Preferred))
second_layout.addLayout(self._buttons_layout)
second_layout.addItem(QSpacerItem(10, 0, QSizePolicy.Expanding, QSizePolicy.Preferred))
splash_layout.addLayout(second_layout)
splash_layout.addWidget(self._progress_text)
self._artella_status_icon.setParent(self._splash)
self._version_lbl.setParent(self._splash)
self._close_btn.setParent(self._splash)
install_path_icon.setParent(self._splash)
self._install_path_lbl.setParent(self._splash)
deploy_tag_icon.setParent(self._splash)
self._deploy_tag_combo.setParent(self._splash)
self._info_tag_btn.setParent(self._splash)
self._refresh_tag_btn.setParent(self._splash)
self._artella_status_icon.setFixedSize(QSize(45, 45))
self._version_lbl.setFixedSize(50, 20)
install_path_icon.setFixedSize(QSize(35, 35))
self._install_path_lbl.setFixedSize(QSize(200, 20))
deploy_tag_icon.setFixedSize(QSize(35, 35))
self._deploy_tag_combo.setFixedSize(QSize(150, 20))
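# The splash has no layout for these overlay widgets, so they are positioned manually on top of the pixmap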
height = 5
self._version_lbl.move(10, self._splash.height() - 48)
self._artella_status_icon.move(5, height)
height += self._artella_status_icon.height() - 5
install_path_icon.move(5, height)
self._install_path_lbl.move(install_path_icon.width(), height + self._install_path_lbl.height() // 2 - 5)
height += install_path_icon.height() - 5
deploy_tag_icon.move(5, height)
height = height + self._deploy_tag_combo.height() // 2 - 5
self._deploy_tag_combo.move(deploy_tag_icon.width(), height)
self._info_tag_btn.move(self._deploy_tag_combo.width() + self._info_tag_btn.width() + 10, height - 2)
if not self._dev:
self._refresh_tag_btn.move(self._deploy_tag_combo.width() + self._refresh_tag_btn.width() + 10, height - 2)
else:
self._refresh_tag_btn.move(
self._deploy_tag_combo.width() + self._refresh_tag_btn.width() + self._info_tag_btn.width() + 10,
height - 2)
self._close_btn.move(self._splash.width() - self._close_btn.width() - 5, 0)
self._deploy_tag_combo.setFocusPolicy(Qt.NoFocus)
combo_width = 5
if self._dev:
self._deploy_tag_combo.setEnabled(False)
combo_width = 0
self._deploy_tag_combo.setStyleSheet("""
QComboBox:!editable
{
background-color: rgba(100, 100, 100, 100);
color: white;
border-radius: 5px;
padding: 1px 0px 1px 3px;
}
QComboBox::drop-down:!editable
{
background: rgba(50, 50, 50, 100);
border-top-right-radius: 5px;
border-bottom-right-radius: 5px;
image: none;
width: %dpx;
}
""" % combo_width)
self._close_btn.setVisible(False)
self._launch_btn.setVisible(False)
self._open_install_folder_btn.setVisible(False)
self._uninstall_btn.setVisible(False)
self._reinstall_btn.setVisible(False)
self._info_tag_btn.setVisible(False)
self._refresh_tag_btn.setVisible(False)
self._deploy_tag_combo.currentIndexChanged.connect(self._on_selected_tag)
self._close_btn.clicked.connect(sys.exit)
self._open_install_folder_btn.clicked.connect(self._on_open_installation_folder)
self._launch_btn.clicked.connect(self.launch)
self._reinstall_btn.clicked.connect(self._on_reinstall)
self._uninstall_btn.clicked.connect(self._on_uninstall)
self._info_tag_btn.clicked.connect(self._on_open_tag_info)
self._refresh_tag_btn.clicked.connect(self._on_refresh_tag)
self._splash.show()
self._splash.raise_()
def _open_folder(self, path=None):
"""
Opens a folder in the file explorer in a platform-independent way
If no path is passed the current directory will be opened
:param path: str, folder path to open
"""
if path is None:
path = os.path.curdir
if sys.platform == 'darwin':
self._check_call(commands_list=['open', '--', path])
elif sys.platform == 'linux2':
self._run_subprocess(commands_list=['xdg-open', path])
elif sys.platform in ('win32', 'win64', 'windows'):
new_path = path.replace('/', '\\')
try:
self._check_call(commands_list=['explorer', new_path], shell=False)
except Exception:
pass
def _clean_folder(self, folder):
"""
Internal function that removes all the contents in the given folder
:param folder: str
"""
if not folder or not os.path.isdir(folder):
LOGGER.warning('Impossible to remove "{}"'.format(folder))
return
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(e)
def _setup_environment(self, clean=False):
if not self._install_path:
self._show_error('Impossible to setup virtual environment because install path is not defined!')
return False
if self._dev and not hasattr(sys, 'real_prefix'):
self._show_error('Current Python "{}" is not installed in a virtual environment!'.format(
os.path.dirname(sys.executable)))
return False
LOGGER.info("Setting Virtual Environment")
venv_path = self._get_venv_folder_path()
orig_force_env = self._force_venv
if clean and os.path.isdir(venv_path):
self._close_processes()
self._clean_folder(venv_path)
self._force_venv = True
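# (Re)create the virtual environment when a rebuild is forced or when the folder does not exist yet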
if self._force_venv or not os.path.isdir(venv_path):
self._close_processes()
self._create_venv(force=True)
self._force_venv = orig_force_env
root_path = os.path.dirname(venv_path)
if is_windows():
venv_scripts = os.path.join(venv_path, 'Scripts')
venv_python = os.path.join(venv_scripts, 'python.exe')
pip_exe = os.path.join(venv_scripts, 'pip.exe')
else:
# macOS and Linux virtual environments share the same bin/ layout
venv_scripts = os.path.join(venv_path, 'bin')
venv_python = os.path.join(venv_scripts, 'python')
pip_exe = os.path.join(venv_scripts, 'pip')
venv_info = dict()
venv_info['root_path'] = root_path
venv_info['venv_folder'] = venv_path
venv_info['venv_scripts'] = venv_scripts
venv_info['venv_python'] = venv_python
venv_info['pip_exe'] = pip_exe
self._venv_info = venv_info
LOGGER.info("Virtual Environment Info: {}".format(venv_info))
# TODO: Check that all info contained in venv_info is valid
return True
def _close_processes(self):
"""
Internal function that closes all opened Python processes but the current one
"""
for proc in psutil.process_iter():
if (proc.name().startswith('python') or proc.name().startswith(self._project_name)) \
and proc.pid != psutil.Process().pid:
LOGGER.debug('Killing Python process: {}'.format(proc.name()))
proc.kill()
def _get_app_name(self):
"""
Returns name of the app
:return: str
"""
return '{}_app'.format(self.get_clean_name())
def _get_app_folder(self):
"""
Returns folder where app data is located
:return: str
"""
logger_name = self._get_app_name()
logger_path = os.path.dirname(appdirs.user_data_dir(logger_name))
if not os.path.isdir(logger_path):
os.makedirs(logger_path)
if not os.path.isdir(logger_path):
QMessageBox.critical(
self,
'Impossible to retrieve app data folder',
'Impossible to retrieve app data folder.\n\n'
'Please contact TD.'
)
return
return logger_path
def _check_setup(self):
"""
Internal function that checks if environment is properly configured
"""
self._set_splash_text('Checking if Python is installed ...')
if not self.is_python_installed():
LOGGER.warning('No Python Installation found!')
QMessageBox.warning(
self,
'No Python Installation found in {}'.format(self.get_current_os()),
'No valid Python installation found in your computer.\n\n'
'Please follow instructions in {0} Documentation to install Python in your computer\n\n'
'Click "Ok" to open {0} Documentation in your web browser'.format(self._project_name)
)
webbrowser.open(self._get_default_documentation_url())
return False
self._set_splash_text('Checking if pip is installed ...')
if not self.is_pip_installed():
LOGGER.warning('No pip Installation found!')
QMessageBox.warning(
self,
'No pip Installation found in {}'.format(self.get_current_os()),
'No valid pip installation found in your computer.\n\n'
'Please follow instructions in {0} Documentation to install pip in your computer\n\n'
'Click "Ok" to open {0} Documentation in your web browser'.format(self._project_name)
)
webbrowser.open(self._get_default_documentation_url())
return False
self._set_splash_text('Checking if virtualenv is installed ...')
if not self.is_virtualenv_installed():
LOGGER.warning('No virtualenv Installation found!')
LOGGER.info('Installing virtualenv ...')
process = self._run_subprocess(commands_list=['pip', 'install', 'virtualenv'])
process.wait()
if not self.is_virtualenv_installed():
LOGGER.warning('Impossible to install virtualenv using pip.')
QMessageBox.warning(
self,
'Impossible to install virtualenv in {}'.format(self.get_current_os()),
'Was not possible to install virtualenv in your computer.\n\n'
'Please contact your project TD.'
)
return False
LOGGER.info('virtualenv installed successfully!')
return True
def _init_tags_combo(self):
all_releases = self._get_all_releases()
try:
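# Block signals while populating the combo so _on_selected_tag is not triggered for every added item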
self._deploy_tag_combo.blockSignals(True)
for release in all_releases:
self._deploy_tag_combo.addItem(release)
finally:
if self._deploy_tag:
deploy_tag_index = [i for i in range(self._deploy_tag_combo.count())
if self._deploy_tag_combo.itemText(i) == self._deploy_tag]
if deploy_tag_index:
self._selected_tag_index = deploy_tag_index[0]
self._deploy_tag_combo.setCurrentIndex(self._selected_tag_index)
if not self._selected_tag_index:
self._selected_tag_index = self._deploy_tag_combo.currentIndex()
self._deploy_tag_combo.blockSignals(False)
def _load(self, clean=False):
"""
Internal function that initializes Artella App
"""
valid_check = self._check_setup()
if not valid_check:
return False
install_path = self._set_installation_path()
if not install_path:
return False
self._version_lbl.setText(str('v{}'.format(self._app_version)))
self._install_path_lbl.setText(install_path)
self._install_path_lbl.setToolTip(install_path)
self._init_tags_combo()
valid_venv = self._setup_environment(clean=clean)
if not valid_venv:
return False
if not self._venv_info:
LOGGER.warning('No Virtual Environment info retrieved ...')
return False
valid_install = self._setup_deployment()
if not valid_install:
return False
valid_artella = self._setup_artella()
if not valid_artella:
self._artella_status_icon.setPixmap(QPixmap(self._get_resource('artella_error.png')).scaled(QSize(30, 30)))
self._artella_status_icon.setToolTip('Error while connecting to Artella server!')
return False
else:
self._artella_status_icon.setPixmap(QPixmap(self._get_resource('artella_ok.png')).scaled(QSize(30, 30)))
self._artella_status_icon.setToolTip('Artella Connected!')
self._set_splash_text('{} Launcher is ready to launch!'.format(self._project_name))
self._close_btn.setVisible(True)
self._info_tag_btn.setVisible(True)
# We check that stored config path exits
stored_path = self._get_app_config(self._install_env_var)
if stored_path and not os.path.isdir(stored_path):
self._set_config(self._install_env_var, '')
path_install = self._get_installation_path()
is_installed = path_install and os.path.isdir(path_install)
if is_installed:
self._launch_btn.setVisible(True)
if not self._dev:
self._open_install_folder_btn.setVisible(True)
self._reinstall_btn.setVisible(True)
self._uninstall_btn.setVisible(True)
else:
self._refresh_tag_btn.setVisible(True)
else:
QMessageBox.warning(
self,
'Was not possible to install {} environment.'.format(self._project_name),
'Was not possible to install {} environment.\n\n'
'Relaunch the app. If the problem persists, please contact your project TD'.format(
self._project_name))
return True
def launch(self):
if not self._venv_info:
LOGGER.warning(
'Impossible to launch {} Launcher because Virtual Environment Setup is not valid!'.format(
self._project_name))
return False
py_exe = self._venv_info['venv_python']
if not self._script_path or not os.path.isfile(self._script_path):
raise Exception('Impossible to find launcher script!')
LOGGER.info('Executing {} Launcher ...'.format(self._project_name))
paths_to_register = self._get_paths_to_register()
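# Build the command that starts the real launcher script with the virtual environment's Python interpreter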
process_cmd = '"{}" "{}" --project-name {} --install-path "{}" --paths-to-register "{}" --tag "{}"'.format(
py_exe, self._script_path, self.get_clean_name(), self._install_path, '"{0}"'.format(
' '.join(paths_to_register)), self._deploy_tag)
if self._artella_configs_path:
process_cmd += ' --artella-configs-path "{}"'.format(self._artella_configs_path)
if self._dev:
process_cmd += ' --dev'
process = self._run_subprocess(command=process_cmd, close_fds=True)
self._splash.close()
# if not self._dev:
# time.sleep(3)
# QApplication.instance().quit()
# sys.exit()
def _check_installation_path(self, install_path):
"""
Returns whether or not given path is valid
:param install_path: str
:return: bool
"""
if not install_path or not os.path.isdir(install_path):
return False
return True
def _set_installation_path(self):
"""
Returns the installation path if it is already set by the user; otherwise a dialog to select it will appear
:return: str
"""
path_updated = False
install_path = self._get_installation_path()
# Remove older installations
self._set_splash_text('Searching old installation ...')
old_installation = False
if os.path.isdir(install_path):
for d in os.listdir(install_path):
if d == self.get_clean_name():
old_dir = os.path.join(install_path, d)
content = os.listdir(old_dir)
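# Heuristic: a valid install contains the standard virtualenv folders; any other layout is treated as an old installation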
if is_windows():
if 'Include' not in content or 'Lib' not in content or 'Scripts' not in content:
old_installation = True
break
elif is_mac():
if 'include' not in content or 'lib' not in content or 'bin' not in content:
old_installation = True
break
if old_installation:
LOGGER.info("Old installation found. Removing ...")
self._set_config(self.install_env_var, '')
self._set_splash_text('Removing old installation ...')
res = QMessageBox.question(
self._splash, 'Old installation found',
'All the contents in the following folder will be removed: \n\t{}\n\nDo you want to continue?'.format(
install_path), QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
if res == QMessageBox.Yes:
shutil.rmtree(install_path)
QMessageBox.information(
self._splash,
'Relaunch the tool',
'Next time you launch the tool you will need to select a new installation path')
return False
if not install_path or not os.path.isdir(install_path):
self._set_splash_text('Select {} installation folder ...'.format(self._project_name))
install_path = QFileDialog.getExistingDirectory(
None, 'Select Installation Path for {}'.format(self._project_name))
if not install_path:
LOGGER.info('Installation cancelled by user')
QMessageBox.information(
self._splash,
'Installation cancelled',
'Installation cancelled by user')
return False
if not os.path.isdir(install_path):
LOGGER.info('Selected Path does not exist!')
QMessageBox.information(
self,
'Selected Path does not exist',
'Selected Path: "{}" does not exist. '
'Installation cancelled!'.format(install_path))
return False
path_updated = True
self._set_splash_text('Checking if Install Path is valid ...')
LOGGER.info('>>>>>> Checking Install Path: {}'.format(install_path))
valid_path = self._check_installation_path(install_path)
if not valid_path:
LOGGER.warning('Selected Install Path is not valid!')
return
if path_updated:
self._set_splash_text('Registering new install path ...')
valid_update_config = self._set_config(self.install_env_var, install_path)
if not valid_update_config:
return
self._set_splash_text('Install Path: {}'.format(install_path))
LOGGER.info('>>>>>> Install Path: {}'.format(install_path))
self._install_path = install_path
return install_path
def _setup_logger(self):
"""
Setup logger used by the app
"""
logger_name = self._get_app_name()
logger_path = self._get_app_folder()
logger_file = os.path.normpath(os.path.join(logger_path, '{}.log'.format(logger_name)))
fh = logging.FileHandler(logger_file)
fh.setLevel(logging.DEBUG)
fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
LOGGER.addHandler(fh)
LOGGER.info('\n')
LOGGER.info('{} Logger: "{}"'.format(self._project_name, logger_file))
LOGGER.info("=" * 150)
LOGGER.debug('Starting {} App'.format(self._project_name))
LOGGER.info("=" * 150)
def _clean_old_config(self):
"""
Function used to clean the configuration file created by old versions of the launcher
"""
current_os = self.get_current_os()
if current_os == 'Windows':
config_directory = Path(os.getenv('APPDATA') or '~')
elif current_os == 'MacOS':
config_directory = Path('~', 'Library', 'Preferences')
else:
config_directory = Path(os.getenv('XDG_CONFIG_HOME') or '~/.config')
old_config_path = config_directory.joinpath(Path('{}/.config'.format(self.get_clean_name())))
if old_config_path.exists():
LOGGER.info('Old Configuration found in "{}". Removing ...'.format(str(old_config_path)))
try:
os.remove(str(old_config_path))
except RuntimeError as exc:
msg = 'Impossible to remove old configuration file: {} | {}'.format(exc, traceback.format_exc())
self._show_error(msg)
return False
LOGGER.info('Old Configuration file removed successfully!')
return True
def _setup_config(self):
"""
Internal function that creates an empty configuration file if it is not already created
:return: str
"""
self._clean_old_config()
config_file = self._get_config_path()
if not os.path.isfile(config_file):
LOGGER.info('Creating {} App Configuration File: {}'.format(self._project_name, config_file))
with open(config_file, 'w') as cfg:
json.dump({}, cfg)
if not os.path.isfile(config_file):
QMessageBox.critical(
self,
'Impossible to create configuration file',
'Impossible to create configuration file.\n\n'
'Please contact TD.'
)
return
LOGGER.info('Configuration File found: "{}"'.format(config_file))
return config_file
def _get_installation_path(self):
"""
Returns current installation path stored in config file
:return: str
"""
if self._dev:
if hasattr(sys, 'real_prefix'):
install_path = os.path.dirname(os.path.dirname(sys.executable))
else:
install_path = os.path.dirname(sys.executable)
else:
config_data = self.get_config_data()
install_path = config_data.get(self.install_env_var, '')
return install_path
def _get_default_documentation_url(self):
"""
Internal function that returns a default value for the documentation URL taking into account the project name
:return: str
"""
return 'https://{}-short-film.github.io/{}-docs/pipeline/'.format(self._project_name, self.get_clean_name())
def _get_deploy_repository_url(self, release=False):
"""
Internal function that returns a default path for the deploy repository taking into account the project name
:param release: bool, Whether to retrieve releases path or the package to download
:return: str
"""
if release:
return 'https://github.com/{}/releases'.format(self._repository)
else:
return 'https://github.com/{}/archive/{}.tar.gz'.format(self._repository, self._deploy_tag)
def _sanitize_github_version(self, version):
"""extract what appears to be the version information"""
s = re.search(r'([0-9]+([.][0-9]+)+(rc[0-9]?)?)', version)
if s:
return s.group(1)
else:
return version.strip()
def _get_all_releases(self):
"""
Internal function that returns a list with all released versions of the deploy repository taking into account
the project name
:return: list(str)
"""
if self._dev:
return ['DEV']
all_versions = list()
repository = self._get_deploy_repository_url(release=True)
if not repository:
msg = '> Project {} GitHub repository is not valid! {}'.format(self._project_name.title(), repository)
self._show_error(msg)
return None
if repository.startswith('https://github.com/'):
repository = "/".join(repository.split('/')[3:5])
release_url = "https://github.com/{}/releases".format(repository)
response = requests.get(release_url, headers={'Connection': 'close'})
html = response.text
LOGGER.debug('Parsing HTML of {} GitHub release page ...'.format(self._project_name.title()))
soup = BeautifulSoup(html, 'lxml')
releases = soup.findAll(class_='release-entry')
for release in releases:
release_a = release.find("a")
if not release_a:
continue
the_version = release_a.text
if 'Latest' in the_version:
label_latest = release.find(class_='label-latest', recursive=False)
if label_latest:
the_version = release.find(class_='css-truncate-target').text
the_version = self._sanitize_github_version(the_version)
else:
the_version = self._sanitize_github_version(the_version)
if the_version not in all_versions:
all_versions.append(the_version)
return all_versions
def _get_deploy_tag(self):
"""
Internal function that returns the current tag that should be used for deployment
:return: str
"""
if self._dev:
return 'DEV'
config_data = self.get_config_data()
deploy_tag = config_data.get('tag', '')
latest_deploy_tag = self._get_latest_deploy_tag()
if not latest_deploy_tag:
return None
if not deploy_tag:
deploy_tag = latest_deploy_tag
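# Compare the stored tag with the latest published release and offer to upgrade when a newer one exists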
deploy_tag_v = Version(deploy_tag)
latest_tag_v = Version(latest_deploy_tag)
if latest_tag_v > deploy_tag_v:
res = QMessageBox.question(
self._splash, 'Newer version found: {}'.format(latest_deploy_tag),
'Current Version: {}\nNew Version: {}\n\nDo you want to install the new version?'.format(
deploy_tag, latest_deploy_tag), QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
if res == QMessageBox.Yes:
self._set_config('tag', latest_deploy_tag)
deploy_tag = latest_deploy_tag
LOGGER.info("Deploy Tag to use: {}".format(deploy_tag))
return deploy_tag
def _get_latest_deploy_tag(self, sniff=True, validate=True, format='version', pre=False):
"""
Returns last deployed version of the given repository in GitHub
:return: str
"""
if self._dev:
return 'DEV'
self._all_tags = list()
version = None
description = None
data = None
repository = self._get_deploy_repository_url(release=True)
if not repository:
msg = '> Project {} GitHub repository is not valid! {}'.format(self._project_name.title(), repository)
self._show_error(msg)
return None
if repository.startswith('https://github.com/'):
repository = "/".join(repository.split('/')[3:5])
if sniff:
release_url = "https://github.com/{}/releases".format(repository)
response = requests.get(release_url, headers={'Connection': 'close'})
html = response.text
LOGGER.debug('Parsing HTML of {} GitHub release page ...'.format(self._project_name.title()))
soup = BeautifulSoup(html, 'lxml')
r = soup.find(class_='release-entry')
while r:
break_out = False
if 'release-timeline-tags' in r['class']:
for release in r.find_all(class_='release-entry', recursive=False):
release_a = release.find("a")
if not release_a:
continue
the_version = release_a.text
the_version = self._sanitize_github_version(the_version)
if validate:
try:
LOGGER.debug("Trying version {}.".format(the_version))
v = Version(the_version)
if not v.is_prerelease or pre:
LOGGER.debug("Good version {}.".format(the_version))
version = the_version
break_out = True
break
except InvalidVersion:
# move on to next thing to parse it
msg = 'Encountered invalid version {}.'.format(the_version)
self._show_error(msg)
else:
version = the_version
break
if break_out:
break
else:
LOGGER.debug("Inside formal release")
# formal release
if pre:
label_latest = r.find(class_='label-prerelease', recursive=False)
else:
label_latest = r.find(class_='label-latest', recursive=False)
if label_latest:
the_version = r.find(class_='css-truncate-target').text
the_version = self._sanitize_github_version(the_version)
# check if version is ok and not a prerelease; move on to next tag otherwise
if validate:
try:
v = Version(the_version)
if not v.is_prerelease or pre:
version = the_version
# extra info for json output
if format == 'json':
description = r.find(class_='markdown-body')
if not description:
description = r.find(class_='commit-desc')
if description:
description = description.text
break
else:
LOGGER.debug("Found a pre-release version: {}. Trying next.".format(the_version))
except InvalidVersion:
# move on to next thing to parse it
msg = 'Encountered invalid version {}.'.format(the_version)
self._show_error(msg)
else:
version = the_version
break
r = r.find_next_sibling(class_='release-entry', recursive=False)
if not version:
msg = 'Impossible to retrieve {} latest release version from GitHub!'.format(self._project_name.title())
self._show_error(msg)
return None
if validate:
try:
Version(version)
except InvalidVersion:
msg = 'Got invalid version: {}'.format(version)
self._show_error(msg)
return None
# return the release if we've reached far enough:
if format == 'version':
return version
elif format == 'json':
if not data:
data = {}
if description:
description = description.strip()
data['version'] = version
data['description'] = description
return json.dumps(data)
def _get_default_install_env_var(self):
"""
Internal function that returns a default env var
:return: str
"""
return '{}_install'.format(self.get_clean_name())
def _get_config_path(self):
"""
Internal function that returns path where configuration file is located
:return: str
"""
config_name = self._get_app_name()
config_path = self._get_app_folder()
config_file = os.path.normpath(os.path.join(config_path, '{}.cfg'.format(config_name)))
return config_file
def _set_config(self, config_name, config_value):
"""
Sets configuration and updates the file
:param config_name: str
:param config_value: object
"""
config_path = self._get_config_path()
if not os.path.isfile(config_path):
LOGGER.warning(
'Impossible to update configuration file because it does not exists: "{}"'.format(config_path))
return False
config_data = self.get_config_data()
config_data[config_name] = config_value
with open(config_path, 'w') as config_file:
json.dump(config_data, config_file)
return True
def _create_venv(self, force=False):
"""
Internal function that creates virtual environment
:param force: bool
:return: bool
"""
venv_path = self._get_venv_folder_path()
if self._check_venv_folder_exists() and not force:
LOGGER.info('Virtual Environment already exists: "{}"'.format(venv_path))
return True
if force and self._check_venv_folder_exists() and os.path.isdir(venv_path):
LOGGER.info('Forcing the removal of Virtual Environment folder: "{}"'.format(venv_path))
self._set_splash_text('Removing already existing virtual environment ...')
shutil.rmtree(venv_path)
self._set_splash_text('Creating Virtual Environment: "{}"'.format(venv_path))
process = self._run_subprocess(commands_list=['virtualenv', venv_path], shell=False)
process.wait()
return True if process.returncode == 0 else False
def _get_venv_folder_path(self):
"""
Returns path where virtual environment folder should be located
:return: str
"""
if not self._install_path:
return
if self._dev:
return os.path.normpath(self._install_path)
else:
return os.path.normpath(os.path.join(self._install_path, self.get_clean_name()))
def _get_paths_to_register(self):
"""
Returns paths that will be registered in sys.path during DCC environment loading
:return: list(str)
"""
paths_to_register = [self._get_installation_path()]
if self._dev:
lib_site_folder = os.path.join(self._install_path, 'Lib', 'site-packages')
else:
lib_site_folder = os.path.join(self._install_path, self.get_clean_name(), 'Lib', 'site-packages')
if os.path.isdir(lib_site_folder):
paths_to_register.append(lib_site_folder)
return paths_to_register
def _check_venv_folder_exists(self):
"""
Returns whether the virtual environment folder for this project exists
:return: bool
"""
# Check the actual virtual environment folder path (not the env var name used to store the install path)
venv_path = self._get_venv_folder_path()
if not venv_path:
return False
return os.path.isdir(venv_path)
def _try_download_unzip_deployment_requirements(self, deployment_url, download_path, dirname):
valid_download = self._download_file(deployment_url, download_path)
if not valid_download:
return False
try:
valid_unzip = self._unzip_file(filename=download_path, destination=dirname, remove_sub_folders=[])
except Exception:
valid_unzip = False
if not valid_unzip:
return False
return True
def _download_deployment_requirements(self, dirname):
"""
Internal function that downloads the current deployment requirements
"""
self._set_splash_text('Downloading {} Deployment Information ...'.format(self._project_name))
deployment_url = self._get_deploy_repository_url()
if not deployment_url:
msg = 'Deployment URL not found!'
self._show_error(msg)
return False
response = requests.get(deployment_url, headers={'Connection': 'close'})
if response.status_code != 200:
msg = 'Deployment URL is not valid: "{}"'.format(deployment_url)
self._show_error(msg)
return False
repo_name = urlparse(deployment_url).path.rsplit("/", 1)[-1]
download_path = os.path.join(dirname, repo_name)
valid_status = False
total_tries = 0
self._set_splash_text('Downloading and Unzipping Deployment Data ...')
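# Remote downloads can fail intermittently, so retry the download/unzip a limited number of times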
while not valid_status:
if total_tries > 10:
break
valid_status = self._try_download_unzip_deployment_requirements(deployment_url, download_path, dirname)
total_tries += 1
if not valid_status:
LOGGER.warning('Retrying download and unzip of deployment data: {}'.format(total_tries))
if not valid_status:
msg = 'Something went wrong during the download and unzipping of: {}'.format(deployment_url)
self._show_error(msg)
return False
self._set_splash_text('Searching Requirements File: {}'.format(self._requirements_file_name))
requirement_path = None
for root, dirs, files in os.walk(dirname):
for name in files:
if name == self._requirements_file_name:
requirement_path = os.path.join(root, name)
break
if not requirement_path:
msg = 'No file named: {} found in deployment repository!'.format(self._requirements_file_name)
self._show_error(msg)
return False
LOGGER.debug('Requirements File for Deployment "{}" found: "{}"'.format(deployment_url, requirement_path))
self._requirements_path = requirement_path
return True
def _install_deployment_requirements(self):
if not self._venv_info:
self._show_error(
'Impossible to install Deployment Requirements because Virtual Environment is not configured!')
return False
if not self._requirements_path or not os.path.isfile(self._requirements_path):
self._show_error(
'Impossible to install Deployment Requirements because file does not exists:\n\n"{}"'.format(
self._requirements_path)
)
return False
pip_exe = self._venv_info.get('pip_exe', None)
if not pip_exe or not os.path.isfile(pip_exe):
self._show_error(
'Impossible to install Deployment Requirements because pip not found installed in '
'Virtual Environment:\n\n"{}"'.format(pip_exe)
)
return False
self._set_splash_text('Installing {} Requirements. Please wait ...'.format(self._project_name))
LOGGER.info('Installing Deployment Requirements with PIP: {}'.format(pip_exe))
pip_cmd = '"{}" install --upgrade --no-cache -r "{}"'.format(pip_exe, self._requirements_path)
LOGGER.info('Launching pip command: {}'.format(pip_cmd))
try:
if is_windows():
start_time = time.time()
LOGGER.info('\nPip install --> first try ...')
process = self._run_subprocess(command=pip_cmd)
output, error = process.communicate()
LOGGER.info('Pip install --> first try ---> executed in {} seconds\n!'.format(time.time() - start_time))
LOGGER.info(output)
LOGGER.error(error)
# We retry twice because sometimes pip fails when trying to install new packages
start_time = time.time()
LOGGER.info('\nPip install --> second try ...')
process = self._run_subprocess(command=pip_cmd)
output, error = process.communicate()
LOGGER.info(output)
LOGGER.error(error)
LOGGER.info('Pip install --> second try ---> executed in {} seconds\n!'.format(time.time() - start_time))
if error:
show_error = False
error_split = error.split('\n')
for error_str in error_split:
if not error_str or error_str.startswith(
('DEPRECATION:', 'WARNING:', 'You should consider upgrading via')):
continue
else:
show_error = True
break
if show_error:
error_dlg = AppErrorDialog(error)
error_dlg.exec_()
return False
except Exception as exc:
raise ArtellaUpdaterException(exc)
return True
def _setup_deployment(self):
if not self._venv_info:
return False
if self._dev:
if self._install_path and self._requirements_path and os.path.isfile(self._requirements_path):
valid_install = self._install_deployment_requirements()
if not valid_install:
LOGGER.info("Error while installing requirements. Trying to uninstall ...")
res = QMessageBox.question(
self._splash,
'Impossible to install/update tools properly',
'Current tools installation is not valid.\n\nDo you want to clean current installation?\n\n'
'If you press Yes, next time you launch the application, you will need to select a '
'new installation path and tools will be fully reinstalled.',
buttons=QMessageBox.Yes | QMessageBox.No)
if res == QMessageBox.Yes:
self._on_uninstall(force=True)
return False
return True
with tempfile.TemporaryDirectory() as temp_dirname:
valid_download = self._download_deployment_requirements(temp_dirname)
if not valid_download or not self._requirements_path or not os.path.isfile(self._requirements_path):
return False
valid_install = self._install_deployment_requirements()
if not valid_install:
LOGGER.info("Error while installing requirements. Trying to uninstall ...")
res = QMessageBox.question(
self._splash,
'Impossible to install/update tools properly',
'Current tools installation is not valid.\n\nDo you want to clean current installation?\n\n'
'If you press Yes, next time you launch the application, you will need to select a '
'new installation path and tools will be fully reinstalled.',
buttons=QMessageBox.Yes | QMessageBox.No)
if res == QMessageBox.Yes:
self._on_uninstall(force=True)
return False
return True
def _setup_artella(self):
"""
Internal function that initializes Artella
"""
self._set_splash_text('Updating Artella Paths ...')
self._update_artella_paths()
self._set_splash_text('Closing Artella App instances ...')
# For now we do not check if Artella was closed or not
self._close_all_artella_app_processes()
self._set_splash_text('Launching Artella App ...')
self._launch_artella_app()
return True
def _download_file(self, filename, destination):
"""
Downloads given file into given target path
:param filename: str
:param destination: str
:return: bool
"""
def _chunk_report(bytes_so_far, total_size):
"""
Function that updates progress bar with current chunk
:param bytes_so_far: int
:param total_size: int
:return:
"""
percent = float(bytes_so_far) / total_size
percent = round(percent * 100, 2)
msg = "Downloaded %d of %d bytes (%0.2f%%)" % (bytes_so_far, total_size, percent)
self._set_splash_text(msg)
LOGGER.info(msg)
def _chunk_read(response, destination, chunk_size=8192, report_hook=None):
"""
Function that reads a chunk of a download operation
:param response: str
:param destination: str
:param chunk_size: int
:param report_hook: fn
:return: int
"""
with open(destination, 'ab') as dst_file:
# Message.get() exists on the header objects returned by both Python 2 and Python 3
rsp = response.info().get('Content-Length')
if not rsp:
return
total_size = rsp.strip()
total_size = int(total_size)
bytes_so_far = 0
while 1:
chunk = response.read(chunk_size)
dst_file.write(chunk)
bytes_so_far += len(chunk)
if not chunk:
break
if report_hook:
report_hook(bytes_so_far=bytes_so_far, total_size=total_size)
dst_file.close()
return bytes_so_far
LOGGER.info('Downloading file {} to temporary folder -> {}'.format(os.path.basename(filename), destination))
try:
dst_folder = os.path.dirname(destination)
if not os.path.exists(dst_folder):
LOGGER.info('Creating Download Folder: "{}"'.format(dst_folder))
os.makedirs(dst_folder)
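# Browser-like request headers so the remote server does not reject the automated download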
hdr = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) '
'Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
req = Request(filename, headers=hdr)
data = urlopen(req)
_chunk_read(response=data, destination=destination, report_hook=_chunk_report)
except Exception as exc:
raise Exception(exc)
if os.path.exists(destination):
LOGGER.info('Files downloaded successfully!')
return True
else:
msg = 'Error when downloading files. Maybe server is down! Try it later'
self._show_error(msg)
return False
def _unzip_file(self, filename, destination, remove_first=True, remove_sub_folders=None):
"""
Unzips given file in given folder
:param filename: str
:param destination: str
:param remove_first: bool
:param remove_sub_folders: list(str)
"""
LOGGER.info('Unzipping file {} to --> {}'.format(filename, destination))
try:
if remove_first and remove_sub_folders:
LOGGER.info('Removing old installation ...')
for sub_folder in remove_sub_folders:
p = os.path.join(destination, sub_folder)
LOGGER.info('\t{}'.format(p))
if os.path.exists(p):
shutil.rmtree(p)
if not os.path.exists(destination):
LOGGER.info('Creating destination folders ...')
QApplication.instance().processEvents()
os.makedirs(destination)
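# Pick the extraction backend based on the archive extension (tar.gz, tar or zip)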
if filename.endswith('.tar.gz'):
zip_ref = tarfile.open(filename, 'r:gz')
elif filename.endswith('.tar'):
zip_ref = tarfile.open(filename, 'r:')
else:
zip_ref = zipfile.ZipFile(filename, 'r')
zip_ref.extractall(destination)
zip_ref.close()
return True
except Exception as exc:
raise Exception(exc)
def _get_artella_data_folder(self):
"""
Returns last version Artella folder installation
:return: str
"""
if is_mac():
artella_folder = os.path.join(os.path.expanduser('~/Library/Application Support/'), 'Artella')
elif is_windows():
if self._project_type == 'indie':
artella_folder = os.path.join(os.getenv('PROGRAMDATA'), 'Artella')
else:
artella_folder = os.path.join(os.getenv('ProgramFiles(x86)'), 'Artella')
else:
return None
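# Indie installs keep the active Artella version in a version file; fall back to scanning the install folder when it is missing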
if self._project_type == 'indie':
version_file = os.path.join(artella_folder, ARTELLA_NEXT_VERSION_FILE_NAME)
if os.path.isfile(version_file):
with open(version_file) as f:
artella_app_version = f.readline()
if artella_app_version is not None:
artella_folder = os.path.join(artella_folder, artella_app_version)
else:
artella_folder = [
os.path.join(artella_folder, name) for name in os.listdir(artella_folder) if os.path.isdir(
os.path.join(artella_folder, name)) and name != 'ui']
if len(artella_folder) == 1:
artella_folder = artella_folder[0]
else:
LOGGER.info('Artella folder not found!')
LOGGER.debug('ARTELLA FOLDER: {}'.format(artella_folder))
if not os.path.exists(artella_folder):
QMessageBox.information(
self._splash,
'Artella Folder not found!',
'Artella App Folder "{}" does not exist! Make sure that Artella is installed in your computer!'.format(
artella_folder))
return artella_folder
def _update_artella_paths(self):
"""
Updates system path to add artella paths if they are not already added
:return:
"""
# Artella update paths is only needed for Artella Indie projects
if self._project_type != 'indie':
return
artella_folder = self._get_artella_data_folder()
LOGGER.debug('Updating Artella paths from: {0}'.format(artella_folder))
if artella_folder is not None and os.path.exists(artella_folder):
for subdir, dirs, files in os.walk(artella_folder):
if subdir not in sys.path:
LOGGER.debug('Adding Artella path: {0}'.format(subdir))
sys.path.append(subdir)
def _close_all_artella_app_processes(self):
"""
Closes all Artella app (lifecycler.exe) processes
:return:
"""
try:
proc_name = self._artella_app
if is_windows():
proc_name = '{}.exe'.format(proc_name)
for proc in psutil.process_iter():
if proc.name() == proc_name:
LOGGER.debug('Killing Artella App process: {}'.format(proc.name()))
proc.kill()
return True
except RuntimeError as exc:
msg = 'Error while closing Artella app instances using psutil library | {}'.format(exc)
self._show_error(msg)
return False
def _get_artella_app(self):
"""
Returns path where Artella path is installed
:return: str
"""
if is_windows():
if self._project_type == 'indie':
artella_folder = os.path.dirname(self._get_artella_data_folder())
else:
artella_folder = self._get_artella_data_folder()
return os.path.join(artella_folder, self._artella_app)
elif is_mac():
if self._project_type == 'indie':
artella_folder = os.path.dirname(self._get_artella_data_folder())
return os.path.join(artella_folder, self._artella_app)
else:
artella_folder = '/System/Applications'
return os.path.join(artella_folder, 'Artella Drive.app')
def _get_artella_program_folder(self):
"""
Returns folder where Artella shortcuts are located
:return: str
"""
# TODO: This only works on Windows, find a cross-platform way of doing this
return os.path.join(os.environ['PROGRAMDATA'], 'Microsoft', 'Windows', 'Start Menu', 'Programs', 'Artella')
def _get_artella_launch_shortcut(self):
"""
Returns path where Launch Artella shortcut is located
:return: str
"""
# TODO: This only works on Windows, find a cross-platform way of doing this
return os.path.join(self._get_artella_program_folder(), 'Launch Artella.lnk')
def _launch_artella_app(self):
"""
Executes Artella App
"""
if is_mac():
if self._project_type == 'indie':
artella_app_file = self._get_artella_app() + '.bundle'
else:
artella_app_file = self._get_artella_app()
else:
if self._project_type == 'indie':
artella_app_file = self._get_artella_launch_shortcut()
else:
artella_app_file = self._get_artella_app() + '.exe'
artella_app_file = artella_app_file
LOGGER.info('Artella App File: {0}'.format(artella_app_file))
if os.path.exists(artella_app_file):
LOGGER.info('Launching Artella App ...')
LOGGER.debug('Artella App File: {0}'.format(artella_app_file))
# os.startfile is only available on Windows; on macOS the app bundle is launched with 'open'
if is_windows():
os.startfile('"{}"'.format(artella_app_file.replace('\\', '//')))
else:
self._run_subprocess(commands_list=['open', artella_app_file], shell=False)
def _on_open_tag_info(self):
"""
Internal callback function that is called when tag info button is clicked by user
Opens webpage of the release in the user browser
"""
if self._dev:
webbrowser.open('https://github.com/{}/releases'.format(self._repository))
else:
webbrowser.open('https://github.com/{}/releases/tag/{}'.format(self._repository, self._deploy_tag))
def _on_refresh_tag(self):
"""
Internal callback function that is called when tag refresh button is clicked by user
Forces requirements to be in current deployment version
"""
self._load(clean=False)
def _on_selected_tag(self, new_index):
"""
Internal callback function that is called when a new tag is selected in the tags combo box
:param new_index: int
"""
new_tag = self._deploy_tag_combo.itemText(new_index)
if not new_tag:
msg = 'New Tag "{}" is not valid!'.format(new_tag)
self._show_error(msg)
return
res = QMessageBox.question(
self._splash, 'Installing tag version: "{}"'.format(new_tag),
'Are you sure you want to install this version: "{}"?'.format(new_tag),
QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
if res == QMessageBox.Yes:
LOGGER.info("Installing tag version: {}".format(new_tag))
self._deploy_tag = new_tag
self._selected_tag_index = new_index
self._set_config('tag', new_tag)
self._load(clean=True)
else:
try:
self._deploy_tag_combo.blockSignals(True)
self._deploy_tag_combo.setCurrentIndex(self._selected_tag_index)
finally:
self._deploy_tag_combo.blockSignals(False)
def _on_open_installation_folder(self):
"""
Internal callback function that is called when the user press Open Installation Folder button
"""
install_path = self._get_installation_path()
if install_path and os.path.isdir(install_path) and len(os.listdir(install_path)) != 0:
self._open_folder(install_path)
else:
LOGGER.warning('{} environment not installed!'.format(self._project_name))
def _on_reinstall(self):
"""
Internal callback function that is called when reinstall button is clicked by user
Removes the current virtual environment setup and creates a new one
"""
question_flags = QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No
res = QMessageBox.question(
self._splash, 'Reinstalling {} Tools'.format(self.get_clean_name()),
'Are you sure you want to reinstall {} Tools?'.format(self._project_name), question_flags)
if res == QMessageBox.Yes:
self._load(clean=True)
def _on_uninstall(self, force=False):
"""
Internal callback function that is called when the user press Uninstall button
Removes environment variable and Tools folder
:return:
"""
question_flags = QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No
install_path = self._get_installation_path()
if install_path and os.path.isdir(install_path):
dirs_to_remove = [os.path.join(install_path, self.get_clean_name())]
res = None
if not force:
res = QMessageBox.question(
self._splash, 'Uninstalling {} Tools'.format(self._project_name),
'Are you sure you want to uninstall {} Tools?\n\nFolder/s that will be removed \n\t{}'.format(
self._project_name, '\n\t'.join(dirs_to_remove)), question_flags)
if res == QMessageBox.Yes or force:
try:
for d in dirs_to_remove:
if os.path.isdir(d):
shutil.rmtree(d, ignore_errors=True)
elif os.path.isfile(d):
os.remove(d)
after_files = os.listdir(self._install_path)
if not after_files:
try:
os.rmdir(self._install_path)
except Exception:
pass
self._set_config(self._install_env_var, '')
if not force:
QMessageBox.information(
self._splash, '{} Tools uninstalled'.format(self._project_name),
'{} Tools uninstalled successfully! App will be closed now!'.format(self._project_name))
QApplication.instance().quit()
except Exception as e:
self._set_config(self._install_env_var, '')
QMessageBox.critical(
self._splash, 'Error during {} Tools uninstall process'.format(self._project_name),
'Error during {} Tools uninstall: {} | {}\n\n'
'You will need to remove following folders manually:\n\n{}'.format(
self._project_name, e, traceback.format_exc(), '\n\t'.join(dirs_to_remove)))
else:
msg = '{} tools are not installed! Launch any DCC first!'.format(self._project_name)
QMessageBox.information(
self._splash, '{} Tools are not installed'.format(self._project_name),
msg
)
LOGGER.warning(msg)
def _run_subprocess(self, command=None, commands_list=None, close_fds=False, hide_console=True,
stdout=None, stderr=None, shell=True):
if not commands_list:
commands_list = list()
creation_flags = 0
if hide_console and not self._dev:
creation_flags = 0x08000000 # No window
stdout = stdout or subprocess.PIPE
stderr = stderr or subprocess.PIPE
if sys.version_info[0] == 2:
stdin = open(os.devnull, 'wb')
else:
stdin = subprocess.DEVNULL
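# On Windows (Python 2) close_fds cannot be combined with redirected standard handles, so stdout is detached in that case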
if close_fds:
stdout = None
if command:
if is_windows():
if close_fds:
process = subprocess.Popen(
command, close_fds=close_fds, creationflags=creation_flags, stdout=stdout)
else:
process = subprocess.Popen(
command, close_fds=close_fds, creationflags=creation_flags,
stdout=stdout, stdin=stdin, stderr=stderr
)
elif is_mac():
process = subprocess.Popen(command, close_fds=close_fds, stdout=stdout, shell=shell)
else:
process = subprocess.Popen(command, close_fds=close_fds, stdout=stdout)
elif commands_list:
if is_windows():
process = subprocess.Popen(
commands_list, close_fds=close_fds, creationflags=creation_flags,
stdout=stdout, stdin=stdin, stderr=stderr)
elif is_mac():
process = subprocess.Popen(commands_list, close_fds=close_fds, stdout=stdout, shell=shell)
else:
process = subprocess.Popen(commands_list, close_fds=close_fds, stdout=stdout)
else:
msg = "Impossible to launch subprocess: command={}, commands_list={}, close_fds={}, hide_console={}".format(
command, commands_list, close_fds, hide_console)
self._show_error(msg)
return None
return process
def _check_call(self, commands_list, shell=True):
if not commands_list:
msg = "Impossible to launch subprocess: commands_list={}".format(commands_list)
self._show_error(msg)
return None
process = subprocess.check_call(commands_list, shell=shell)
return process
def _show_error(self, msg, title='Error'):
LOGGER.error(msg)
QMessageBox.critical(self._splash, title, msg)
@contextlib.contextmanager
def application():
app = QApplication.instance()
if not app:
app = QApplication(sys.argv)
yield app
app.exec_()
else:
yield app
class AppErrorDialog(QDialog, object):
def __init__(self, exc_trace, parent=None):
self._trace = exc_trace
super(AppErrorDialog, self).__init__(parent=parent)
self.setWindowTitle('Artella Launcher - Error')
self.setWindowIcon(QIcon(self._get_resource('artella_ok.png')))
self.ui()
self.setup_signals()
def ui(self):
self.main_layout = QVBoxLayout()
self.main_layout.setContentsMargins(2, 2, 2, 2)
self.main_layout.setSpacing(2)
self.setLayout(self.main_layout)
self._error_text = QPlainTextEdit(str(self._trace) if self._trace else '')
self._error_text.setReadOnly(True)
self._error_text.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.main_layout.addWidget(self._error_text)
buttons_lyt = QHBoxLayout()
self._copy_to_clipboard_btn = QPushButton('Copy to Clipboard')
buttons_lyt.addStretch()
buttons_lyt.addWidget(self._copy_to_clipboard_btn)
self.main_layout.addLayout(buttons_lyt)
def setup_signals(self):
self._copy_to_clipboard_btn.clicked.connect(self._on_copy_to_clipboard)
def _get_resource(self, resource_name):
resource_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources', resource_name)
if not os.path.isfile(resource_path):
resource_path = os.path.join(os.path.dirname(sys.executable), 'resources', resource_name)
if not os.path.isfile(resource_path):
if hasattr(sys, '_MEIPASS'):
resource_path = os.path.join(sys._MEIPASS, 'resources', resource_name)
LOGGER.info("Retrieving resource: {} >>> {}".format(resource_name, resource_path))
return resource_path
def _on_copy_to_clipboard(self):
clipboard = QApplication.clipboard()
clipboard.setText(self._error_text.toPlainText(), QClipboard.Clipboard)
if clipboard.supportsSelection():
clipboard.setText(self._error_text.toPlainText(), QClipboard.Selection)
if __name__ == '__main__':
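# Example invocation (hypothetical values, adjust to your project):
#   python updater.py --project-name myproject --project-type indie \
#       --version 0.0.0 --repository user/project-deployment --dev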
parser = argparse.ArgumentParser()
parser.add_argument('--project-name', required=False)
parser.add_argument('--project-type', required=False)
parser.add_argument('--version', required=False, default="0.0.0")
parser.add_argument('--repository', required=False)
parser.add_argument('--icon-path', required=False, default=None)
parser.add_argument('--splash-path', required=False, default=None)
parser.add_argument('--script-path', required=False, default=None)
parser.add_argument('--requirements-path', required=False, default=None)
parser.add_argument('--artellapipe-configs-path', required=False, default=None)
parser.add_argument('--dev', required=False, default=False, action='store_true')
args = parser.parse_args()
with application() as app:
icon_path = args.icon_path
if icon_path and os.path.isfile(icon_path):
app.setWindowIcon(QIcon(icon_path))
else:
icon_path = None
new_app = None
valid_app = False
try:
new_app = ArtellaUpdater(
app=app,
project_name=args.project_name,
project_type=args.project_type,
app_version=args.version,
deployment_repository=args.repository,
splash_path=args.splash_path,
script_path=args.script_path,
requirements_path=args.requirements_path,
artellapipe_configs_path=args.artellapipe_configs_path,
dev=args.dev,
update_icon=not bool(icon_path)
)
valid_app = True
except Exception as exc:
raise ArtellaUpdaterException(exc)
| 39.394898 | 120 | 0.598245 | 79,864 | 0.957613 | 172 | 0.002062 | 414 | 0.004964 | 0 | 0 | 19,302 | 0.231442 |
2c626c431b8296a5e75d2393ce09f53461517c55 | 4,873 | py | Python | acme/tf/variable_utils_test.py | wookayin/acme | 71b2ab8577a118c103718f034fa62c5ad2c0fd97 | [
"Apache-2.0"
] | null | null | null | acme/tf/variable_utils_test.py | wookayin/acme | 71b2ab8577a118c103718f034fa62c5ad2c0fd97 | [
"Apache-2.0"
] | null | null | null | acme/tf/variable_utils_test.py | wookayin/acme | 71b2ab8577a118c103718f034fa62c5ad2c0fd97 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acme.tf.variable_utils."""
import threading
from acme.testing import fakes
from acme.tf import utils as tf2_utils
from acme.tf import variable_utils as tf2_variable_utils
import sonnet as snt
import tensorflow as tf
_MLP_LAYERS = [50, 30]
_INPUT_SIZE = 28
_BATCH_SIZE = 8
_UPDATE_PERIOD = 2
class VariableClientTest(tf.test.TestCase):
def setUp(self):
super().setUp()
# Create two instances of the same model.
self._actor_model = snt.nets.MLP(_MLP_LAYERS)
self._learner_model = snt.nets.MLP(_MLP_LAYERS)
# Create variables first.
input_spec = tf.TensorSpec(shape=(_INPUT_SIZE,), dtype=tf.float32)
tf2_utils.create_variables(self._actor_model, [input_spec])
tf2_utils.create_variables(self._learner_model, [input_spec])
def test_update_and_wait(self):
# Create a variable source (emulating the learner).
np_learner_variables = tf2_utils.to_numpy(self._learner_model.variables)
variable_source = fakes.VariableSource(np_learner_variables)
# Create a variable client (emulating the actor).
variable_client = tf2_variable_utils.VariableClient(
variable_source, {'policy': self._actor_model.variables})
# Create some random batch of test input:
x = tf.random.normal(shape=(_BATCH_SIZE, _INPUT_SIZE))
# Before copying variables, the models have different outputs.
self.assertNotAllClose(self._actor_model(x), self._learner_model(x))
# Update the variable client.
variable_client.update_and_wait()
# After copying variables (by updating the client), the models are the same.
self.assertAllClose(self._actor_model(x), self._learner_model(x))
def test_update(self):
# Create a barrier to be shared between the test body and the variable
# source. The barrier will block until, in this case, two threads call
# wait(). Note that the (fake) variable source will call it within its
# get_variables() call.
barrier = threading.Barrier(2)
# Create a variable source (emulating the learner).
np_learner_variables = tf2_utils.to_numpy(self._learner_model.variables)
variable_source = fakes.VariableSource(np_learner_variables, barrier)
# Create a variable client (emulating the actor).
variable_client = tf2_variable_utils.VariableClient(
variable_source, {'policy': self._actor_model.variables},
update_period=_UPDATE_PERIOD)
# Create some random batch of test input:
x = tf.random.normal(shape=(_BATCH_SIZE, _INPUT_SIZE))
# Create variables by doing the computation once.
learner_output = self._learner_model(x)
actor_output = self._actor_model(x)
del learner_output, actor_output
for _ in range(_UPDATE_PERIOD):
# Before the update period is reached, the models have different outputs.
self.assertNotAllClose(self._actor_model.variables,
self._learner_model.variables)
# Before the update period is reached, the variable client should not make
# any requests for variables.
self.assertIsNone(variable_client._future)
variable_client.update()
# Make sure the last call created a request for variables and reset the
# internal call counter.
self.assertIsNotNone(variable_client._future)
self.assertEqual(variable_client._call_counter, 0)
future = variable_client._future
for _ in range(_UPDATE_PERIOD):
# Before the barrier allows the variables to be released, the models have
# different outputs.
self.assertNotAllClose(self._actor_model.variables,
self._learner_model.variables)
variable_client.update()
# Make sure no new requests are made.
self.assertEqual(variable_client._future, future)
# Calling wait() on the barrier will now allow the variables to be copied
# over from source to client.
barrier.wait()
# Update once more to ensure the variables are copied over.
while variable_client._future is not None:
variable_client.update()
# After a number of update calls, the variables should be the same.
self.assertAllClose(self._actor_model.variables,
self._learner_model.variables)
if __name__ == '__main__':
tf.test.main()
| 36.365672 | 80 | 0.732403 | 3,900 | 0.800328 | 0 | 0 | 0 | 0 | 0 | 0 | 2,093 | 0.42951 |
2c62b01797bc951466927b86c37a5f651bd8ad8f | 1,390 | py | Python | auth/views.py | KenMwaura1/zoo_pitch | c83edf6fb53bdfc3739bedbea258f9ffc6f6925c | [
"MIT"
] | 2 | 2021-09-19T04:45:44.000Z | 2021-09-19T18:37:16.000Z | auth/views.py | KenMwaura1/zoo_pitch | c83edf6fb53bdfc3739bedbea258f9ffc6f6925c | [
"MIT"
] | null | null | null | auth/views.py | KenMwaura1/zoo_pitch | c83edf6fb53bdfc3739bedbea258f9ffc6f6925c | [
"MIT"
] | null | null | null | from flask import flash, render_template, redirect, request, url_for
from flask_login import login_required, login_user, logout_user
from . import auth
from .forms import UserLoginForm, UserRegForm
from app.commands import db
from app.models import User
from app.send_email import mail_message
@auth.route('/login', methods=['GET', 'POST'])
def login():
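    # check the submitted password via User.verify_password and honour an optional ?next= redirect on success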
form = UserLoginForm()
if form.validate_on_submit():
user = db.session.query(User).filter_by(username=form.username.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
return render_template('auth/login.html', loginform=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
@auth.route('/signup', methods=["GET", "POST"])
def signup():
form = UserRegForm()
print(form)
if form.validate_on_submit():
user = User(email=form.email.data, username=form.username.data, password=form.password.data)
user.save_user()
mail_message("Welcome to Zoo-Pitch","email/user_welcome",user.email,user=user)
return redirect(url_for('auth.login'))
return render_template('auth/sign-up.html', reg_form=form)
| 35.641026 | 100 | 0.709353 | 0 | 0 | 0 | 0 | 1,087 | 0.782014 | 0 | 0 | 198 | 0.142446 |
2c6431e095521b9b05fd679eb3f519fd1974a1c5 | 81 | py | Python | python/sys_argv.py | mkanenobu/trashbox | c691dbf9a07991fd42304020c8aac58e1e4b9644 | [
"WTFPL"
] | 2 | 2020-05-11T13:43:27.000Z | 2020-07-31T11:57:19.000Z | python/sys_argv.py | mkanenobu/trashbox | c691dbf9a07991fd42304020c8aac58e1e4b9644 | [
"WTFPL"
] | 2 | 2020-09-27T02:35:38.000Z | 2021-03-08T08:33:02.000Z | python/sys_argv.py | mkanenobu/trashbox | c691dbf9a07991fd42304020c8aac58e1e4b9644 | [
"WTFPL"
] | 1 | 2020-05-11T13:44:04.000Z | 2020-05-11T13:44:04.000Z | import sys
# 引数を変数varに代入してprintで返す
var = sys.argv[1]
print(type(var))
print(var)
| 13.5 | 23 | 0.753086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.457944 |
2c6833152550ccb91e9895b97f5563c2931c78f6 | 104 | py | Python | qapi/protocols/cryptography/key_distribution/exceptions.py | seunomonije/quantum-programming-api | b2d45cdbf13b8e4d3917d9bea6317898da71aa33 | [
"Apache-2.0"
] | 1 | 2021-03-13T20:59:17.000Z | 2021-03-13T20:59:17.000Z | qapi/protocols/cryptography/key_distribution/exceptions.py | yaleqc/quantum-programming-api | 9467cf89e138eab0ae08e7bb1a378338f7703a0a | [
"Apache-2.0"
] | null | null | null | qapi/protocols/cryptography/key_distribution/exceptions.py | yaleqc/quantum-programming-api | 9467cf89e138eab0ae08e7bb1a378338f7703a0a | [
"Apache-2.0"
] | 1 | 2021-01-10T04:19:05.000Z | 2021-01-10T04:19:05.000Z | class InvalidBitstringError(BaseException):
pass
class InvalidQuantumKeyError(BaseException):
pass
| 17.333333 | 44 | 0.836538 | 101 | 0.971154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2c68876005bb1a85d38b827172c2039fc408b1dc | 562 | py | Python | app/api/controller/user_controller.py | ChegeBryan/black-bandana | 6ef8f62c4e9d4415c6f6f1cc7cd8240ae21e9ce3 | [
"MIT"
] | 2 | 2019-01-05T07:01:13.000Z | 2019-03-17T08:11:19.000Z | app/api/controller/user_controller.py | ChegeBryan/black-bandana | 6ef8f62c4e9d4415c6f6f1cc7cd8240ae21e9ce3 | [
"MIT"
] | 3 | 2019-01-23T21:09:04.000Z | 2020-11-20T07:40:16.000Z | app/api/controller/user_controller.py | ChegeBryan/black-bandana | 6ef8f62c4e9d4415c6f6f1cc7cd8240ae21e9ce3 | [
"MIT"
] | null | null | null | """ User api endpoints """
from flask import request
from flask_restplus import Resource
from ..util.dto import UserDto
from ..service.user import save_new_user
api = UserDto.api
_user = UserDto.user
@api.route('/users')
class Users(Resource):
"""
User resource for the API
"""
@api.doc('Create a new user')
@api.expect(_user, validate=True)
@api.response(201, 'Successfully registered user')
def post(self):
""" Create new user post method """
data = request.get_json()
return save_new_user(data=data)
| 20.814815 | 54 | 0.66726 | 333 | 0.592527 | 0 | 0 | 354 | 0.629893 | 0 | 0 | 159 | 0.282918 |
2c6b0ca51dcc10f9f6f3f231db822aa6a2ba9c93 | 7,367 | py | Python | run.py | danielenricocahall/Rotorcraft-Safety | 446b59c23581ec39cfe98db924411c1927af5cc9 | [
"MIT"
] | null | null | null | run.py | danielenricocahall/Rotorcraft-Safety | 446b59c23581ec39cfe98db924411c1927af5cc9 | [
"MIT"
] | null | null | null | run.py | danielenricocahall/Rotorcraft-Safety | 446b59c23581ec39cfe98db924411c1927af5cc9 | [
"MIT"
] | 1 | 2021-07-26T19:05:09.000Z | 2021-07-26T19:05:09.000Z | import os
import zipfile
from typing import List, Tuple, Dict
import numpy as np
import pandas as pd
import requests
import structlog
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from tensorflow import keras, one_hot
from tensorflow.keras import layers
from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras.layers import ReLU
plt.rcParams.update({'figure.figsize': (16.0, 12.0)})
_LOGGER = structlog.get_logger(__file__)
HEADER_COLUMN = 12
LABEL_COLUMN = 'False Warning'
TEXT_COLUMN = 'Text'
def download_file(url: str, local_dir: str = '.', local_filename: str = '') -> str:
"""
Downloads a file from a provided url to a local directory
:param url: URL to download the file from
:param local_dir: Local directory to download the file to (created if it does not exist)
:param local_filename: What to name the file when saved
    (if empty or None, the original filename taken from the URL is used)
:return: the name of the file which was saved
"""
os.makedirs(f'{local_dir}', exist_ok=True)
local_filename = local_filename if local_filename else url.split('/')[-1]
if os.path.exists(f'{local_dir}/{local_filename}'):
_LOGGER.info(f'{local_dir}/{local_filename} already exists. Skipping download.')
else:
_LOGGER.info(f"Downloading file from {url} to {local_dir}/{local_filename}.")
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(f'./{local_dir}/{local_filename}', 'wb') as f:
for chunk in r.iter_content(chunk_size=128):
f.write(chunk)
_LOGGER.info(f"Finished saving file from {url} to {local_dir}/{local_filename}.")
return f'{local_dir}/{local_filename}'
def unzip_file(path_to_zip_file: str, dir_to_extract_to: str) -> str:
"""
Unzips a zip file to a provided directory
    :param path_to_zip_file: path to the zip file
:param dir_to_extract_to: directory to extract zip file
:return: full path to unzipped file (assuming there is only one)
"""
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(dir_to_extract_to)
return f'{dir_to_extract_to}/{zip_ref.namelist()[0]}'
def load_data(path_to_file: str) -> pd.DataFrame:
"""
Loads excel data from a supplied path into a Pandas dataframe
:param path_to_file: path to excel file
:return: Pandas dataframe containing contents of excel spreadsheet
"""
_LOGGER.info(f"Started loading the excel data from {path_to_file} into a dataframe - this may take a while. "
f"You may want to grab a coffee.")
df = pd.read_excel(path_to_file, engine='openpyxl', header=HEADER_COLUMN)
_LOGGER.info(f"Finished loading the excel data from {path_to_file} into a dataframe.")
return df
def vectorize(df: pd.DataFrame, **kwargs) -> Tuple[np.array, List[str]]:
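    # TF-IDF bag-of-words: one row per document in the Text column, one column per vocabulary term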
_LOGGER.info("Converting text to feature matrix")
vectorizer = TfidfVectorizer(**kwargs)
sparse_matrix = vectorizer.fit_transform(df[TEXT_COLUMN])
feature_matrix = sparse_matrix.todense()
return feature_matrix, vectorizer.get_feature_names()
def extract_and_encode_labels(df: pd.DataFrame) -> Tuple[np.array, Dict[str, int]]:
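    # map each unique label value to an integer index and keep the mapping for decoding predictions later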
label_mapping = dict((label, i) for i, label in enumerate(df[LABEL_COLUMN].unique()))
labels = list(df[LABEL_COLUMN].map(label_mapping))
return np.array(labels), label_mapping
if __name__ == "__main__":
local_dir = './data'
compute_features = not os.path.exists(f'{local_dir}/feature_data.csv')
model_type = "knn"
if compute_features:
# download the file
path_to_downloaded_zip_file = download_file(
'https://www.fire.tc.faa.gov/zip/MasterModelVersion3DDeliverable.zip',
local_dir)
# unzip the file
path_to_file = unzip_file(path_to_downloaded_zip_file, local_dir)
# load the file into a Pandas dataframe
df = load_data(path_to_file)
# save preprocessed data to save time for future runs
df.to_csv(f'{local_dir}/feature_data.csv')
else:
# don't go through the hassle of preprocessing if we already have the preprocessed data saved
df = pd.read_csv(f'{local_dir}/feature_data.csv')
count_of_no_text = len(df[df[TEXT_COLUMN].isnull()])
df = df.dropna(subset=[TEXT_COLUMN])
_LOGGER.info(f"Dropped {count_of_no_text} records because {TEXT_COLUMN} was null or NaN")
count_of_null_labels = len(df[df[LABEL_COLUMN].isnull()])
df = df.dropna(subset=[LABEL_COLUMN])
_LOGGER.info(f"Dropped {count_of_null_labels} records because {LABEL_COLUMN} was null or NaN")
# create a sparse feature matrix of size n x m,
# where n = number of documents, m = number of words in vocabulary
feature_matrix, feature_names = vectorize(df, min_df=0.001)
labels, label_mapping = extract_and_encode_labels(df)
num_labels = len(label_mapping)
num_features = feature_matrix.shape[1]
X_train, X_test, y_train, y_test = train_test_split(feature_matrix, labels, test_size=0.05, random_state=1)
_LOGGER.info(f"Training on {X_train.shape[0]} samples, validating on {X_test.shape[0]} samples.")
_LOGGER.info(f"Number of features: {num_features}")
if model_type == "mlp":
        # one-hot encode the split targets so they match the CategoricalCrossentropy loss used below
        y_train = one_hot(y_train, num_labels)
        y_test = one_hot(y_test, num_labels)
inputs = keras.Input(shape=(num_features,))
layer_1 = layers.Dense(8192, activation=ReLU())(inputs)
layer_2 = layers.Dense(2048, activation=ReLU())(layer_1)
layer_3 = layers.Dense(512, activation=ReLU())(layer_2)
layer_4 = layers.Dense(128, activation=ReLU())(layer_3)
layer_5 = layers.Dense(32, activation=ReLU())(layer_4)
layer_6 = layers.Dense(8, activation=ReLU())(layer_5)
outputs = layers.Dense(num_labels, activation="softmax")(layer_6)
model = keras.Model(inputs=inputs, outputs=outputs)
_LOGGER.info(model.summary())
model.compile(
optimizer=keras.optimizers.Adamax(), # Optimizer
loss=keras.losses.CategoricalCrossentropy(), # Loss function to minimize
metrics=[keras.metrics.Accuracy()] # List of metrics to monitor
)
model.fit(X_train, y_train,
validation_data=(X_test, y_test), shuffle=True, epochs=200, batch_size=64,
callbacks=[CSVLogger('./results.csv')])
model.save('model')
elif model_type == "rf":
rf = RandomForestClassifier(n_jobs=-1)
rf.fit(X_train, y_train)
training_acc = rf.score(X_train, y_train)
validation_acc = rf.score(X_test, y_test)
_LOGGER.info(f"Training accuracy with Random Forest: {training_acc}")
_LOGGER.info(f"Validation accuracy with Random Forest: {validation_acc}")
elif model_type == "knn":
knn = KNeighborsClassifier(n_neighbors=5, n_jobs=-1)
knn.fit(X_train, y_train)
training_acc = knn.score(X_train, y_train)
validation_acc = knn.score(X_test, y_test)
_LOGGER.info(f"Training accuracy with kNN: {training_acc}")
_LOGGER.info(f"Validation accuracy with kNN: {validation_acc}") | 43.85119 | 113 | 0.696213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,577 | 0.349803 |
2c6b4180cafccab3b589b0d309ed2441b3687e2d | 1,465 | py | Python | src/views/send/target_address_form_view.py | Kevingislason/bitcoin_hardware_wallet_ui | 226983546c7c8838ca8bc72accdd6adbd8013446 | [
"MIT"
] | null | null | null | src/views/send/target_address_form_view.py | Kevingislason/bitcoin_hardware_wallet_ui | 226983546c7c8838ca8bc72accdd6adbd8013446 | [
"MIT"
] | 5 | 2021-06-02T03:21:46.000Z | 2022-03-12T00:55:35.000Z | src/views/send/target_address_form_view.py | Kevingislason/abacus_wallet_bridge | 226983546c7c8838ca8bc72accdd6adbd8013446 | [
"MIT"
] | null | null | null | from PyQt6.QtCore import *
from PyQt6.QtGui import *
from PyQt6.QtWidgets import *
class TargetAddressForm(QWidget):
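    # a single horizontal row: "Pay to:" label, the address QLineEdit (74-character limit), and a trailing spacer for alignment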
def __init__(self):
super().__init__()
self.layout = QHBoxLayout()
self.setLayout(self.layout)
self.target_address_label = QLabel("Pay to:")
self.target_address_label.size_policy = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Fixed)
self.target_address_label.size_policy.setHorizontalStretch(1)
self.target_address_label.setSizePolicy(self.target_address_label.size_policy)
self.target_address_input = QLineEdit()
self.target_address_input.setMaxLength(74) # max address length
self.target_address_input.size_policy = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Fixed)
self.target_address_input.size_policy.setHorizontalStretch(8)
self.target_address_input.setSizePolicy(self.target_address_input.size_policy)
self.target_address_spacer = QLabel("")
self.target_address_spacer.size_policy = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Fixed)
self.target_address_spacer.size_policy.setHorizontalStretch(1)
self.target_address_spacer.setSizePolicy(self.target_address_spacer.size_policy)
self.layout.addWidget(self.target_address_label)
self.layout.addWidget(self.target_address_input)
self.layout.addWidget(self.target_address_spacer)
| 44.393939 | 116 | 0.763823 | 1,379 | 0.941297 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.02116 |
2c6c2e152a67cf3da0646f687e2033d8230c0268 | 518 | py | Python | tests/test_overwrite_store.py | schwa-lab/libschwa-python | aebe5b0cf91e55b9e054ecff46a6e74fcd19f490 | [
"MIT"
] | 5 | 2015-03-23T17:19:18.000Z | 2017-06-07T18:24:50.000Z | tests/test_overwrite_store.py | schwa-lab/libschwa-python | aebe5b0cf91e55b9e054ecff46a6e74fcd19f490 | [
"MIT"
] | null | null | null | tests/test_overwrite_store.py | schwa-lab/libschwa-python | aebe5b0cf91e55b9e054ecff46a6e74fcd19f490 | [
"MIT"
] | null | null | null | # vim: set et nosi ai ts=2 sts=2 sw=2:
# coding: utf-8
from __future__ import absolute_import, print_function, unicode_literals
import unittest
from schwa import dr
class Node(dr.Ann):
label = dr.Field()
class Doc(dr.Doc):
store = dr.Store(Node)
class Test(unittest.TestCase):
def _test_example(self, doc):
doc.store = None
def test_example(self):
R = 'Cannot overwrite a store (.*)'
d = Doc()
d.store.create()
self.assertRaisesRegexp(ValueError, R, lambda: self._test_example(d))
| 19.185185 | 73 | 0.69305 | 343 | 0.662162 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.162162 |
2c6d51fc5d957d883989b9ddb2ae351293818f24 | 12,645 | py | Python | agents_using_gym/dqn/interactive.py | jarbus/multiagent-particle-envs | 0811fa35f0e7860940b1d7a5f450541f416ff75c | [
"MIT"
] | 8 | 2019-07-14T22:10:20.000Z | 2021-11-12T08:31:41.000Z | agents_using_gym/dqn/interactive.py | jarbus/multiagent-particle-envs | 0811fa35f0e7860940b1d7a5f450541f416ff75c | [
"MIT"
] | 8 | 2019-07-14T19:44:05.000Z | 2019-08-12T22:28:29.000Z | agents_using_gym/dqn/interactive.py | jarbus/multiagent-particle-envs | 0811fa35f0e7860940b1d7a5f450541f416ff75c | [
"MIT"
] | 3 | 2019-07-30T21:38:42.000Z | 2020-04-21T13:47:11.000Z | #!/usr/bin/env python
import os,sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import argparse
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios
import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
from tqdm import tqdm
from PIL import Image
if __name__ == '__main__':
# parse arguments
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-s', '--scenario', default='simple.py', help='Path of the scenario Python script.')
args = parser.parse_args()
# load scenario from script
scenario = scenarios.load(args.scenario).Scenario()
# create world
world = scenario.make_world()
# create multiagent environment
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, info_callback=None, shared_viewer = False)
# render call to create viewer window (necessary only for interactive policies)
env.render()
# execution loop
obs_n = env.reset()
DISCOUNT = 0.99
REPLAY_MEMORY_SIZE = 50_000 # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000 # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 64 # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5 # Terminal states (end of episodes)
MODEL_NAME = '2x256'
MIN_REWARD = 20 # For model save
MEMORY_FRACTION = 0.20
# Environment settings
EPISODES = 200
# Exploration settings
epsilon = 1 # not a constant, going to be decayed
EPSILON_DECAY = 0.99975
MIN_EPSILON = 0.001
# Stats settings
AGGREGATE_STATS_EVERY = 50 # episodes
SHOW_PREVIEW = False
# For stats
ep_rewards = [[-200],[-200],[-200]]
# For more repetitive results
random.seed(1)
np.random.seed(1)
tf.set_random_seed(1)
    # Memory fraction, used mostly when training multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
# Create models folder
if not os.path.isdir('models'):
os.makedirs('models')
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
# Overriding init to set initial step and writer (we want one log file for all .fit() calls)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.step = 1
self.writer = tf.summary.FileWriter(self.log_dir)
# Overriding this method to stop creating default log writer
def set_model(self, model):
pass
# Overrided, saves logs with our step number
# (otherwise every .fit() will start writing from 0th step)
def on_epoch_end(self, epoch, logs=None):
self.update_stats(**logs)
# Overrided
# We train for one batch only, no need to save anything at epoch end
def on_batch_end(self, batch, logs=None):
pass
# Overrided, so won't close writer
def on_train_end(self, _):
pass
# Custom method for saving own metrics
# Creates writer, writes custom metrics and closes writer
def update_stats(self, **stats):
self._write_logs(stats, self.step)
# Agent class
class DQNAgent:
def __init__(self,i):
self.index=i
# Main model
self.model = self.create_model()
# Target network
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
# An array with last n steps for training
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
# Custom tensorboard object
self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}-{}".format(MODEL_NAME, self.index,int(time.time())))
# Used to count when to update target network with main network's weights
self.target_update_counter = 0
def create_model(self):
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=(10, 10, 3))) # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
            model.add(Dense(5, activation='linear'))  # ACTION_SPACE_SIZE = how many choices (5)
model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
return model
# Adds step's data to a memory replay array
# (observation space, action, reward, new observation space, done)
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
# Trains main network every step during episode
def train(self, terminal_state, step):
# Start training only if certain number of samples is already saved
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
# Get a minibatch of random samples from memory replay table
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
# Get current states from minibatch, then query NN model for Q values
current_states = np.array([transition[0] for transition in minibatch])/255
current_qs_list = self.model.predict(current_states)
# Get future states from minibatch, then query NN model for Q values
# When using target network, query it, otherwise main network should be queried
new_current_states = np.array([transition[3] for transition in minibatch])/255
future_qs_list = self.target_model.predict(new_current_states)
X = []
y = []
# Now we need to enumerate our batches
for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
# If not a terminal state, get new q from future states, otherwise set it to 0
# almost like with Q Learning, but we use just part of equation here
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
# Update Q value for given state
current_qs = current_qs_list[index]
current_qs[action] = new_q
# And append to our training data
X.append(current_state)
y.append(current_qs)
# Fit on all samples as one batch, log only on terminal state
self.model.fit(np.array(X)/255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
# Update target network counter every episode
if terminal_state:
self.target_update_counter += 1
# If counter reaches set value, update target network with weights of main network
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
# Queries main network for Q values given current observation space (environment state)
def get_qs(self, state):
return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
d = {1: (100, 0, 0),
2: (0, 100, 0),
3: (0, 0, 100),
4: (25,25,25)}
def getobs(obsn):
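        # paint the coordinates packed in the observation vector onto a 10x10 RGB grid matching the CNN input shape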
        env = np.zeros((10, 10, 3), dtype=np.uint8) # starts an rgb of our size
obs=obsn.copy()
for i in obs:
i=int((i+1)/0.2)
env[int(obs[0])][int(obs[1])][0]+=100 # sets the food location tile to green color
env[int(obs[2])][int(obs[3])][1]+=100
img = Image.fromarray(env, 'RGB') # reading to rgb. Apparently. Even tho color definitions are bgr. ???
img=np.array(img)
return img
def getobsi(obsn):
        env = np.zeros((10, 10, 3), dtype=np.uint8) # starts an rgb of our size
obs=obsn.copy()
for i in obs:
i=int((i+1)/0.2)
env[int(obs[2])][int(obs[3])][0]+=100 # sets the food location tile to green color
env[int(obs[4])][int(obs[5])][1]+=100 # sets the enemy location to red
env[int(obs[6])][int(obs[7])][2] +=100 # sets the player tile to blue
env[int(obs[8])][int(obs[9])][0] +=25
env[int(obs[8])][int(obs[9])][1] +=25
env[int(obs[8])][int(obs[9])][2] +=25
env[int(obs[10])][int(obs[11])][0] +=25
env[int(obs[10])][int(obs[11])][1] +=25
env[int(obs[10])][int(obs[11])][2] +=25
env[int(obs[12])][int(obs[13])][0] +=25
env[int(obs[12])][int(obs[13])][1] +=25
env[int(obs[12])][int(obs[13])][2] +=25
img = Image.fromarray(env, 'RGB') # reading to rgb. Apparently. Even tho color definitions are bgr. ???
img=np.array(img)
return img
# create interactive policies for each agent
policies = [DQNAgent(i) for i in range(env.n)]
for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
episode_reward=[0,0,0]
step=1
for i, policy in enumerate(policies):
policy.tensorboard.step=episode
# query for action from each agent's policy
obs_n=env.reset()
done = False
while not done:
act_n = []
action_n=[]
for i, policy in enumerate(policies):
act = np.zeros(5)
if np.random.random() > epsilon:
# Get action from Q table
action = np.argmax(policy.get_qs(getobs(obs_n[i])))
else:
# Get random action
action = np.random.randint(0, 5)
act[action]+=1.0
action_n.append(action)
act_n.append(act)
# step environment
newobs_n, reward_n, done_n, _ = env.step(act_n)
if step>=100:
done=True
for i, policy in enumerate(policies):
episode_reward[i]+=reward_n[i]
policy.update_replay_memory((getobs(obs_n[i]), action_n[i], reward_n[i], getobs(newobs_n[i]), done))
policy.train(done, step)
obs_n=newobs_n
step+=1
#if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
if episode % 50==1:
env.render()
for i, policy in enumerate(policies):
ep_rewards[i].append(episode_reward[i])
if not episode % AGGREGATE_STATS_EVERY or episode == 1:
average_reward = sum(ep_rewards[i][-AGGREGATE_STATS_EVERY:])/len(ep_rewards[i][-AGGREGATE_STATS_EVERY:])
min_reward = min(ep_rewards[i][-AGGREGATE_STATS_EVERY:])
max_reward = max(ep_rewards[i][-AGGREGATE_STATS_EVERY:])
policy.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon)
# Save model, but only when min reward is greater or equal a set value
if min_reward >= MIN_REWARD:
policy.model.save(f'models/{MODEL_NAME+str(policy.index)}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
if epsilon > MIN_EPSILON:
epsilon *= EPSILON_DECAY
epsilon = max(MIN_EPSILON, epsilon)
| 39.88959 | 191 | 0.594069 | 5,729 | 0.453064 | 0 | 0 | 0 | 0 | 0 | 0 | 3,571 | 0.282404 |
2c6de534fc4e463aad9241c1d824830542522b58 | 1,101 | py | Python | generate/web/render.py | traines-source/climate-change-cartograms | 5ade2d7e5985c317a0ce0c7159f7f47d24566fa3 | [
"MIT"
] | null | null | null | generate/web/render.py | traines-source/climate-change-cartograms | 5ade2d7e5985c317a0ce0c7159f7f47d24566fa3 | [
"MIT"
] | null | null | null | generate/web/render.py | traines-source/climate-change-cartograms | 5ade2d7e5985c317a0ce0c7159f7f47d24566fa3 | [
"MIT"
] | null | null | null | import jinja2
import gettext
import markdown
from pathlib import Path
import sys
sys.path.append('../')
import utils
credits_html = ""
with open("credits.md", "r") as file:
credits_html = markdown.markdown(file.read())
env = jinja2.Environment(
extensions=['jinja2.ext.i18n'],
loader=jinja2.FileSystemLoader('./')
)
mappings = utils.read_json("../emissions/mappings.json")
binaries = []
binaries.extend(mappings["year"]["mapping"])
binaries.extend(mappings["parameters"]["mapping"])
binaries.extend(mappings["metrics"]["mapping"])
binaries.extend(mappings["impacts"]["mapping"])
locales = ["en", "de"]
for locale in locales:
print(locale)
tr = gettext.translation(domain='messages', localedir='locale/', languages=[locale])
tr.install()
env.install_gettext_translations(tr, newstyle=True)
tm = env.get_template('index.tmpl.html')
html = tm.render(mappings=mappings, binaries=binaries, credits=credits_html)
Path("working/"+locale).mkdir(parents=True, exist_ok=True)
with open("working/"+locale+"/index.html", "w") as outf:
outf.write(html) | 28.973684 | 88 | 0.705722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.202543 |
2c6dec3157e23cf5a69e188d1d748411cfc9f657 | 1,263 | py | Python | camper/db/tests/test_registration_form.py | mrtopf/camper | 7016539f92202bbea608c6d53ce19097d4ad931d | [
"MIT"
] | 13 | 2016-03-13T02:33:39.000Z | 2021-04-01T13:09:12.000Z | camper/db/tests/test_registration_form.py | comlounge/camper | 7016539f92202bbea608c6d53ce19097d4ad931d | [
"MIT"
] | 122 | 2016-03-10T09:28:09.000Z | 2021-09-07T23:49:05.000Z | camper/db/tests/test_registration_form.py | mrtopf/camper | 7016539f92202bbea608c6d53ce19097d4ad931d | [
"MIT"
] | 5 | 2017-01-11T22:00:57.000Z | 2020-04-26T14:03:32.000Z | from camper.db import BarcampSchema, Barcamp
import datetime
def test_get_empty_registration_form(barcamps, barcamp):
barcamps.save(barcamp)
barcamp = barcamps.by_slug("barcamp")
assert barcamp.registration_form == []
def test_add_to_registration_form(barcamps, barcamp):
barcamps.save(barcamp)
field = {
'name' : 'fullname',
'title' : 'Your full name, please',
'fieldtype' : 'textfield',
'description' : 'enter your full name here',
'required' : False,
}
barcamp = barcamps.by_slug("barcamp")
barcamp.registration_form.append(field)
barcamp.save()
barcamp = barcamps.by_slug("barcamp")
assert len(barcamp.registration_form) == 1
def test_save_registration_data(barcamps, barcamp):
barcamps.save(barcamp)
# create the field
field = {
'name' : 'fullname',
'title' : 'Your full name, please',
'fieldtype' : 'textfield',
'description' : 'enter your full name here',
'required' : False,
}
barcamp = barcamps.by_slug("barcamp")
barcamp.registration_form.append(field)
barcamp.save()
barcamp = barcamps.by_slug("barcamp")
# use the field
assert len(barcamp.registration_form) == 1
| 29.372093 | 56 | 0.646873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 316 | 0.250198 |
2c7005ff55f4d4af986e8e9ec3ba70d8376e5301 | 1,487 | py | Python | source/payment/models.py | codebam/avrod_python_login | dd4679c372d85c363f1057462c6abc448f8284c9 | [
"BSD-3-Clause"
] | null | null | null | source/payment/models.py | codebam/avrod_python_login | dd4679c372d85c363f1057462c6abc448f8284c9 | [
"BSD-3-Clause"
] | 7 | 2020-06-05T20:02:11.000Z | 2021-09-22T18:05:28.000Z | source/payment/models.py | codebam/avrod_python_login | dd4679c372d85c363f1057462c6abc448f8284c9 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
import datetime
class Session(models.Model):
session_key = models.CharField(max_length=64)
created_at = models.DateTimeField(auto_now_add=True)
class License(models.Model):
license_key = models.CharField(max_length=32, primary_key=True, unique=True)
created_at = models.DateTimeField(auto_now_add=True)
@classmethod
def create(cls, license_key):
license = cls(license_key=license_key)
return license
class Customer(models.Model):
customer_id = models.CharField(max_length=18, unique=True, primary_key=True)
user_id = models.OneToOneField(User, on_delete=models.CASCADE)
@classmethod
def create(cls, customer_id, user_id):
customer = cls(customer_id=customer_id, user_id=user_id)
return customer
class Subscription(models.Model):
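    # links a customer to a license and records the subscription's renewal date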
sub_id = models.CharField(max_length=18, unique=True, primary_key=True)
customer_id = models.ForeignKey(Customer, on_delete=models.CASCADE, null=True)
license_key = models.ForeignKey(License, on_delete=models.CASCADE, null=True)
renewal_date = models.DateField()
created_at = models.DateTimeField(auto_now_add=True)
@classmethod
def create(cls, sub_id, customer_id, license_key, renewal_date):
subscription = cls(sub_id=sub_id, customer_id=customer_id, license_key=license_key, renewal_date=renewal_date)
return subscription
| 35.404762 | 118 | 0.755884 | 1,353 | 0.909886 | 0 | 0 | 488 | 0.328178 | 0 | 0 | 0 | 0 |
2c70f5ee336b36bda237297efa046d3ee2a8c63f | 109 | py | Python | job/celery_tasks/demo.py | RockFeng0/flask_demo | cf5631935de628f65c37f32aa7875cecc37f707b | [
"MIT"
] | null | null | null | job/celery_tasks/demo.py | RockFeng0/flask_demo | cf5631935de628f65c37f32aa7875cecc37f707b | [
"MIT"
] | null | null | null | job/celery_tasks/demo.py | RockFeng0/flask_demo | cf5631935de628f65c37f32aa7875cecc37f707b | [
"MIT"
] | 1 | 2020-06-27T14:04:53.000Z | 2020-06-27T14:04:53.000Z | #! python3
# -*- encoding: utf-8 -*-
from job import celery
@celery.task()
def add(x, y):
return x+y
| 9.909091 | 25 | 0.587156 | 0 | 0 | 0 | 0 | 44 | 0.40367 | 0 | 0 | 35 | 0.321101 |
2c712ba3bbbd5dbef50c22c0533027e482779dd3 | 82 | py | Python | config.py | outbreakdm/Dead-Matter-Discord-Bot-Player-Counter | c51335afa52aa01fc3ee037e29bbd79665af516a | [
"MIT"
] | null | null | null | config.py | outbreakdm/Dead-Matter-Discord-Bot-Player-Counter | c51335afa52aa01fc3ee037e29bbd79665af516a | [
"MIT"
] | null | null | null | config.py | outbreakdm/Dead-Matter-Discord-Bot-Player-Counter | c51335afa52aa01fc3ee037e29bbd79665af516a | [
"MIT"
] | null | null | null | BOT_TOKEN = "754938335214174210"
SERVER_ADDRESS = ("192.154.227.44:7797", 27016)
| 20.5 | 47 | 0.743902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.5 |
2c720583f4ebee4e37aa78c9477f1e28a11ae206 | 34,994 | py | Python | mykits/mykit.py | mo-han/mo-han-toolbox | 1f907d42aea9574c34f10474cc7194dbace6ff02 | [
"MIT"
] | 24 | 2019-12-08T03:56:32.000Z | 2021-10-02T13:26:37.000Z | mykits/mykit.py | mo-han/mo-han-toolbox | 1f907d42aea9574c34f10474cc7194dbace6ff02 | [
"MIT"
] | 2 | 2020-04-27T14:20:01.000Z | 2020-07-17T06:05:33.000Z | mykits/mykit.py | mo-han/mo-han-toolbox | 1f907d42aea9574c34f10474cc7194dbace6ff02 | [
"MIT"
] | 10 | 2019-08-06T01:11:28.000Z | 2021-07-19T08:45:11.000Z | #!/usr/bin/env python3
# encoding=utf8
"""This tool heavily depends on `mylib` package, make sure `mylib` folder is in the same path with this tool."""
import cmd
import shlex
from argparse import ArgumentParser, REMAINDER
from collections import defaultdict
from pprint import pprint
from send2trash import send2trash
import mylib.__deprecated__
import mylib.easy
import mylib.ext.ostk
from mylib.__deprecated__ import fs_inplace_rename, fs_inplace_rename_regex, list_files, list_dirs
from mylib.cli import arg_type_pow2, arg_type_range_factory, add_dry_run
from mylib.easy import *
from mylib.easy.argparse import CompactHelpFormatterWithDefaults
from mylib.ext.tricks import Attreebute, eval_or_str, deco_factory_exit_on_keyboard_interrupt
from mylib.ext import fstk, tui
from mylib.ext.fstk import make_path, ctx_pushd
from mylib.ext.ostk import clipboard, set_console_title
rtd = Attreebute() # runtime data
tui_lp = tui.LinePrinter()
an = AttrName()
common_parser_kwargs = {'formatter_class': CompactHelpFormatterWithDefaults}
ap = ArgumentParser(**common_parser_kwargs)
sub = ap.add_subparsers(title='sub-commands')
class HasParser:
parser: ArgumentParser
@classmethod
def run(cls):
pass
def has_parser_done(cls: HasParser):
cls.parser.set_defaults(target=cls.run)
return cls
class MyKitCmd(cmd.Cmd):
last_not_repeat = None
def __init__(self):
super(MyKitCmd, self).__init__()
self.prompt = __class__.__name__ + ':\n'
self._stop = None
self._done = None
def precmd(self, line):
if line:
tui_lp.l(shorter=1)
self._done = False
return line
def postcmd(self, stop, line):
if self._done:
tui_lp.l(shorter=1)
return self._stop
def emptyline(self):
return
def default(self, line):
try:
argv_l = shlex.split(line)
rtd.args = args = ap.parse_args(argv_l)
func = args.target
if func not in [cmd_mode_func, gui_mode]:
self._done = func
return func()
else:
self._done = None
except SystemExit:
pass
def do_quit(self, line):
self._stop = True
do_exit = do_q = do_quit
def do_repeat(self, line):
if self.last_not_repeat:
return self.onecmd(self.last_not_repeat)
do_r = do_repeat
def onecmd(self, line):
super(MyKitCmd, self).onecmd(line)
if self.lastcmd not in ('r', 'repeat'):
self.last_not_repeat = self.lastcmd
def main():
# from mylib.os_util import ensure_sigint_signal
# ensure_sigint_signal()
rtd.args = args = ap.parse_args()
try:
target = args.target
except AttributeError:
target = cmd_mode_func
target()
def add_sub_parser(name: str, aliases: list = None, desc: str = None, target=None) -> ArgumentParser:
aliases = aliases or []
sub_parser = sub.add_parser(name, aliases=aliases, help=desc, description=desc, **common_parser_kwargs)
if target:
sub_parser.set_defaults(target=target)
return sub_parser
def test_only():
print('ok')
test = add_sub_parser('test', [], 'for testing...')
test.set_defaults(target=test_only)
def gui_mode():
pass
def cmd_mode_func():
set_console_title(MyKitCmd.__name__)
MyKitCmd().cmdloop()
cmd_mode = add_sub_parser('cmd', ['cli'], 'command line interactive mode')
cmd_mode.set_defaults(target=cmd_mode_func)
def merge_zip_files_func():
from more_itertools import all_equal
from fs import open_fs
from fs.compress import write_zip
from fs.copy import copy_fs_if_newer
from filetype import guess
def ask_for_dst_path():
return tui.prompt_input('? Input the path of the ZIP file to merge into: ')
def is_zip_file(path: str):
mime = guess(path).mime
if mime == 'application/zip':
return True
else:
print('! Not a ZIP file: {path}')
return False
args = rtd.args
dry_run = args.dry_run
auto_yes = args.yes
src_l = args.src or mylib.ext.ostk.clipboard.list_path()
src_l = [s for s in src_l if is_zip_file(s)]
if len(src_l) < 2:
print(f'! at least 2 zip files')
return
print('# Merge all below ZIP files:')
print('\n'.join(src_l))
dbx_l = [mylib.easy.split_path_dir_base_ext(p) for p in src_l]
if all_equal([d for d, b, x in dbx_l]):
common_dir = dbx_l[0][0]
else:
common_dir = ''
if all_equal([x for d, b, x in dbx_l]):
common_ext = dbx_l[0][-1]
else:
common_ext = ''
if common_dir and common_ext:
common_base = os.path.commonprefix([b for d, b, x in dbx_l]).strip()
if common_base:
tmp_dst = mylib.easy.join_path_dir_base_ext(common_dir, common_base, common_ext)
if auto_yes or tui.prompt_confirm(f'? Merge into ZIP file "{tmp_dst}"', default=True):
dst = tmp_dst
else:
dst = ask_for_dst_path()
else:
dst = ask_for_dst_path()
elif common_dir:
if auto_yes or tui.prompt_confirm(f'? Put merged ZIP file into this dir "{common_dir}"', default=True):
filename = tui.prompt_input(f'? Input the basename of the ZIP file to merge into: ')
dst = fstk.make_path(common_dir, filename)
else:
dst = ask_for_dst_path()
else:
dst = ask_for_dst_path()
if dry_run:
print(f'@ Merge into ZIP file "{dst}"')
return
print(f'* Merge into ZIP file "{dst}"')
with open_fs('mem://tmp') as tmp:
for s in src_l:
with open_fs(f'zip://{s}') as z:
copy_fs_if_newer(z, tmp) # todo: seem check time of ZIP-FS but not files inside
write_zip(tmp, dst)
for s in src_l:
if s == dst:
continue
send2trash(s)
print(f'# Trash <- {s}')
merge_zip_files = add_sub_parser('merge.zip.files', ['mg.zip'], 'merge multiple files', merge_zip_files_func)
add_dry_run(merge_zip_files)
merge_zip_files.add_argument('src', nargs='*')
merge_zip_files.add_argument('-y', '--yes', help='auto confirm yes', action='store_true')
def tag_filter_files_func():
from mylib.easy.filename_tags import EnclosedFilenameTagsSet
args = rtd.args
ext_rm = set(args.X or [])
ext_kp = set(args.x or [])
tag_rm = set(args.T or [])
tag_kp = set(args.t or [])
dry = args.dry_run
rm = defaultdict(set)
kp = defaultdict(set)
for f in fstk.files_from_iter(args.src or mylib.ext.ostk.clipboard.list_path(), recursive=False):
ft = EnclosedFilenameTagsSet(f)
ext = ft.extension
prefix = ft.before_tags
if any(map(ft.has_tag, tag_kp)) or ext in ext_kp:
kp[prefix].add(f)
elif any(map(ft.has_tag, tag_rm)) or ext in ext_rm:
rm[prefix].add(f)
else:
kp[prefix].add(f)
for prefix, rm_set in rm.items():
kp_set = kp.get(prefix, set())
if kp_set:
print(f'@ {prefix}')
for f in kp_set:
print(f'# {f}')
for f in rm_set - kp_set:
print(f'- {f}')
if not dry:
send2trash(f)
tag_filter_files = add_sub_parser('tag.filter.files', [], 'filter files by tags and ext')
tag_ff = tag_filter_files
tag_ff.set_defaults(target=tag_filter_files_func)
tag_ff.add_argument('src', nargs='*')
tag_ff.add_argument('-D', '--dry-run', action='store_true')
tag_ff.add_argument('-X', dest='X', metavar='ext', nargs='*', help='files with these extensions will be removed')
tag_ff.add_argument('-x', dest='x', metavar='ext', nargs='*', help='files with these extensions will be kept')
tag_ff.add_argument('-T', dest='T', metavar='tag', nargs='*', help='files with these tags will be removed')
tag_ff.add_argument('-t', dest='t', metavar='tag', nargs='*', help='files with these tags will be kept')
def catalog_files_by_year_func():
import shutil
args = rtd.args
suffix_l = args.suffix or ['']
dry_run = args.dry_run
files = (p for p in fstk.files_from_iter(args.src or mylib.ext.ostk.clipboard.list_path()) if
any(map(p.endswith, suffix_l)))
for f in files:
dirname, basename = os.path.split(f)
year = re.findall(r'(\d{4})-\d{2}-\d{2}', basename)
if not year:
continue
year = year[0]
new_dir = make_path(dirname, year)
print(f'{new_dir} <- {basename}')
if not dry_run:
with ctx_pushd(new_dir, ensure_dst=True):
shutil.move(f, basename)
catalog_files_by_year = add_sub_parser('catalog.files.year', ['clf.yr'],
'catalog files into sub-folders by year (search ISO 8601 date in filename)')
catalog_files_by_year.set_defaults(target=catalog_files_by_year_func)
catalog_files_by_year.add_argument('-x', '--suffix', metavar='ext', nargs='*')
catalog_files_by_year.add_argument('-D', '--dry-run', action='store_true')
catalog_files_by_year.add_argument('src', nargs='*')
@has_parser_done
class GetCloudflareIP(HasParser):
parser = add_sub_parser('cfip', [], 'get cloudflare ip addresses from hostmonit.com')
parser.add_argument('file', help='write whole data to JSON file', nargs='?')
parser.add_argument('-L', '--list', action='store_true')
parser.add_argument('-P', '--isp', choices=('CM', 'CT', 'CU'))
parser.add_argument('-H', '--hostname')
@classmethod
def run(cls):
from mylib.tools.mykit_parts import list_several_cloudflare_ipaddr
args = rtd.args
file = args.file
as_list = args.list
isp = args.isp
hostname = args.hostname
list_several_cloudflare_ipaddr(file, hostname, as_list, isp)
def video_guess_crf_func():
from mylib.ffmpeg_alpha import guess_video_crf, file_is_video
args = rtd.args
path_l = [path for path in list_files(args.src or clipboard) if file_is_video(path)]
codec = args.codec
work_dir = args.work_dir
redo = args.redo
auto_clean = not args.no_clean
for path in path_l:
tui_lp.l()
tui_lp.p(path)
try:
tui_lp.p(guess_video_crf(src=path, codec=codec, work_dir=work_dir, redo=redo, auto_clean=auto_clean))
except (KeyError, ZeroDivisionError) as e:
tui_lp.p(f'! {repr(e)}')
tui_lp.p(f'- {path}')
video_guess_crf = add_sub_parser('video.crf.guess', ['crf'], 'guess CRF parameter value of video file')
video_guess_crf.set_defaults(target=video_guess_crf_func)
video_guess_crf.add_argument('src', nargs='*')
video_guess_crf.add_argument('-c', '--codec', nargs='?')
video_guess_crf.add_argument('-w', '--work-dir')
video_guess_crf.add_argument('-R', '--redo', action='store_true')
video_guess_crf.add_argument('-L', '--no-clean', action='store_true')
@has_parser_done
class FlatDir(HasParser):
parser = add_sub_parser('flatten-directory', ['flat-dir'])
parser.add_argument('-p', f'--prefix', action='store_true')
parser.add_argument('-D', '--dry-run', action='store_true')
parser.add_argument('src', nargs='*')
@classmethod
def run(cls):
from mylib.tools.mykit_parts import flat_dir
args = rtd.args
prefix = args.prefix
dry_run = args.dry_run
src = args.src or mylib.ext.ostk.clipboard.list_path()
# print(prefix, dry_run, src)
flat_dir(src, prefix, dry_run)
@has_parser_done
class MoveIntoDir(HasParser):
parser = add_sub_parser('move-into-directory', ['mvd'])
parser.add_argument('-D', '--dry-run', action='store_true')
parser.add_argument('-a', '--alias', nargs='*', help='list, show, set or delete dst mapping aliases')
parser.add_argument('-d', '--sub-dir', action='store_true', help='into sub-directory by name')
parser.add_argument('-p', '--pattern')
parser.add_argument('dst', nargs='?', help='dest dir')
parser.add_argument('src', nargs='*')
@classmethod
@deco_factory_exit_on_keyboard_interrupt(2)
def run(cls):
from mylib.tools.mykit_parts import move_into_dir
args = rtd.args
src = args.src or mylib.ext.ostk.clipboard.list_path()
dst = args.dst
alias = args.alias
dry_run = args.dry_run
sub_dir = args.sub_dir
pattern = args.pattern
move_into_dir(src, dst, pattern, alias, dry_run, sub_dir)
def tail_filter_files_func():
from mylib.ext.ostk import filter_filename_tail, join_filename_tail
args = rtd.args
tk = set(args.tails_keep or [])
xk = set(args.extensions_keep or [])
tg = set(args.tails_gone or [])
xg = set(args.extensions_gone or [])
dry = args.dry_run
src = list_files(args.src or clipboard, recursive=False)
from collections import defaultdict
keep = defaultdict(list)
gone = defaultdict(list)
for dn, fn, tail, ext in filter_filename_tail(src, tk | tg, tk, xk):
keep[(dn, fn)].append((tail, ext))
for dn, fn, tail, ext in filter_filename_tail(src, tk | tg, tg, xg):
gone[(dn, fn)].append((tail, ext))
for g in gone:
if g in keep:
dn, fn = g
tui_lp.l()
print(f'* {os.path.join(dn, fn)}')
for tail, ext in keep[g]:
print(f'@ {tail} {ext}')
for tail, ext in gone[g]:
print(f'- {tail} {ext}')
if not dry:
send2trash(join_filename_tail(dn, fn, tail, ext))
tail_filter_files = add_sub_parser('tail.filter.files', [], 'filter files by filename tails and extensions')
tail_ff = tail_filter_files
tail_ff.set_defaults(target=tail_filter_files_func)
tail_ff.add_argument('-t', '--tails-keep', nargs='*', metavar='tail', help='keep files with these tails')
tail_ff.add_argument('-x', '--extensions-keep', nargs='*', metavar='ext', help='keep files with these extensions')
tail_ff.add_argument('-T', '--tails-gone', nargs='*', metavar='tail', help='remove files with these tails')
tail_ff.add_argument('-X', '--extensions-gone', nargs='*', metavar='ext', help='remove files with these extensions')
tail_ff.add_argument('-D', '--dry-run', action='store_true')
tail_ff.add_argument('src', nargs='*')
def cookies_write_func():
import json
from mylib.web_client import convert_cookies_json_to_netscape
args = rtd.args
files = args.file or list_files(clipboard, recursive=False)
verbose = args.verbose
for fp in files:
tui_lp.l()
print(f'* {fp}')
data = input('# input cookies data, or copy data to clipboard and press enter:\n')
if not data:
print(f'# empty input, paste from clipboard')
data = clipboard.get()
if verbose:
pprint(data)
try:
j = json.loads(data)
c = convert_cookies_json_to_netscape(j, disable_filepath=True)
except json.decoder.JSONDecodeError:
c = data
if verbose:
pprint(c)
with open(fp, 'w') as f:
f.write(c)
cookies_write = add_sub_parser('cookies.write', ['cwr'], 'write cookies file')
cookies_write.set_defaults(target=cookies_write_func)
cookies_write.add_argument('file', nargs='*')
cookies_write.add_argument('-v', '--verbose', action='store_true')
def ccj_func():
from mylib.web_client import convert_cookies_file_json_to_netscape
files = rtd.args.file or list_files(clipboard, recursive=False)
for fp in files:
print(f'* {fp}')
convert_cookies_file_json_to_netscape(fp)
cookies_conv_json = add_sub_parser('cookies.conv.json', ['ccj'], 'convert .json cookies file')
cookies_conv_json.set_defaults(target=ccj_func)
cookies_conv_json.add_argument('file', nargs='*')
def ffmpeg_img2vid_func():
from mylib.ffmpeg_alpha import FFmpegRunnerAlpha, FFmpegArgsList, parse_kw_opt_str
ff = FFmpegRunnerAlpha(banner=False, overwrite=True)
ff.logger.setLevel('INFO')
args = rtd.args
images = args.images
output = args.output or f'{os.path.split(os.path.abspath(images))[0]}.mp4'
res_fps = args.res_fps
keywords = args.keyword or ()
ffmpeg_options = args.opt or ()
output_args = FFmpegArgsList()
if os.path.isdir(os.path.dirname(images)):
images_l = [images]
else:
images_l = [os.path.join(folder, images) for folder in mylib.ext.ostk.clipboard.list_path() if
os.path.isdir(folder)]
for i in images_l:
if output in ('mp4', 'webm'):
o = f'{os.path.realpath(os.path.dirname(i))}.{output}'
else:
o = output
for kw in keywords:
output_args.add(*parse_kw_opt_str(kw))
output_args.add(*ffmpeg_options)
try:
tui_lp.l()
print(i)
print(o)
ff.img2vid(i, res_fps, o, output_args)
except KeyboardInterrupt:
exit(2)
ffmpeg_img2vid = add_sub_parser('ffmpeg.img2vid', ['img2vid'], 'convert images (frames) into video using ffmpeg')
ffmpeg_img2vid.set_defaults(target=ffmpeg_img2vid_func)
ffmpeg_img2vid.add_argument('-i', '--images', help='input images, e.g. "%%03d.jpg"', required=True)
ffmpeg_img2vid.add_argument('-o', '--output', help='output video', nargs='?')
ffmpeg_img2vid.add_argument('-r', '--res-fps', metavar='WxH@FPS', required=True)
ffmpeg_img2vid.add_argument('-k', '--keyword', nargs='*', help='')
ffmpeg_img2vid.add_argument('opt', help='ffmpeg options (better insert -- before them)', nargs='*')
def ffmpeg_func():
from mylib.ffmpeg_alpha import kw_video_convert
args = rtd.args
source = args.source or clipboard
keywords = args.keywords or ()
video_filters = args.video_filters
cut_points = args.cut_points
output_path = args.output_path
overwrite = args.overwrite
redo_origin = args.redo_origin
verbose = args.verbose
dry_run = args.dry_run
opts = args.opts
if verbose:
print(args)
for filepath in mylib.__deprecated__.list_files(source, recursive=False):
kw_video_convert(filepath, keywords=keywords, vf=video_filters, cut_points=cut_points, dest=output_path,
overwrite=overwrite, redo=redo_origin, verbose=verbose, dry_run=dry_run, ffmpeg_opts=opts)
ffmpeg = add_sub_parser('wrap.ffmpeg', ['ffmpeg', 'ff'], 'convert video file using ffmpeg')
ffmpeg.set_defaults(target=ffmpeg_func)
ffmpeg.add_argument('-s', '--source', nargs='*', metavar='path', help='if omitted, will try paths in clipboard')
ffmpeg.add_argument('-k', '--keywords', metavar='kw', nargs='*')
ffmpeg.add_argument('-vf', '--video-filters', nargs='*')
ffmpeg.add_argument('-t', '--time-cut', dest='cut_points', metavar='ts', nargs='*')
ffmpeg.add_argument('-o', '--output-path')
ffmpeg.add_argument('-O', '--overwrite', action='store_true')
ffmpeg.add_argument('-R', '--redo-origin', action='store_true')
ffmpeg.add_argument('-v', '--verbose', action='count', default=0)
ffmpeg.add_argument('-D', '--dry-run', action='store_true')
ffmpeg.add_argument('opts', nargs='*', help='ffmpeg options (insert -- before opts)')
def ffprobe_func():
from ffmpeg import probe
from pprint import pprint
file = rtd.args.file
ss = rtd.args.select_streams
if not file:
file = mylib.ext.ostk.clipboard.list_path()[0]
if ss:
pprint(probe(file, select_streams=ss))
else:
pprint(probe(file))
ffprobe = add_sub_parser('wrap.ffprobe', ['ffprobe', 'ffp'], 'json format ffprobe on a file')
ffprobe.set_defaults(target=ffprobe_func)
ffprobe.add_argument('-s', '--select-streams')
ffprobe.add_argument('file', nargs='?')
def file_type_func():
from filetype import guess
files = rtd.args.file
if rtd.args.print_no_path:
fmt = '{type}'
else:
fmt = '{type} ({file})'
if not files:
        files = mylib.ext.ostk.clipboard.list_path(exist_only=True)
for f in files:
try:
print(fmt.format(type=guess(f).mime, file=f))
except AttributeError:
print('N/A')
file_type = add_sub_parser('filetype', ['ftype', 'ft'], 'get file type by path')
file_type.set_defaults(target=file_type_func)
file_type.add_argument('file', nargs='*')
file_type.add_argument('-P', '--print-no-path', action='store_true')
def pip2pi_func():
from mylib.pip2pi_x import libpip2pi_commands_x
import sys
argv0 = ' '.join(sys.argv[:2]) + ' --'
sys.argv = [argv0] + rtd.args.arg
libpip2pi_commands_x.pip2pi(['pip2pi'] + rtd.args.arg)
pip2pi = add_sub_parser('pip2pi', [], 'modified pip2pi (from pip2pi)')
pip2pi.set_defaults(target=pip2pi_func)
pip2pi.add_argument('arg', nargs='*', help='arguments propagated to pip2pi, put a -- before them')
def dir2pi_func():
from mylib.pip2pi_x import libpip2pi_commands_x
import sys
argv0 = ' '.join(sys.argv[:2]) + ' --'
sys.argv = [argv0] + rtd.args.arg
libpip2pi_commands_x.dir2pi(['dir2pi'] + rtd.args.arg)
dir2pi = add_sub_parser('dir2pi', [], 'modified dir2pi (from pip2pi)')
dir2pi.set_defaults(target=dir2pi_func)
dir2pi.add_argument('arg', nargs='*', help='arguments propagated to dir2pi, put a -- before them')
def ytdl_func():
from mylib.youtube_dl_x import youtube_dl_main_x
import sys
argv0 = ' '.join(sys.argv[:2]) + ' --'
sys.argv = [argv0] + rtd.args.param
youtube_dl_main_x()
ytdl = add_sub_parser('ytdl', [], 'youtube-dl with modifications: [iwara.tv] fix missing uploader')
ytdl.set_defaults(target=ytdl_func)
ytdl.add_argument('param', nargs='*', help='argument(s) propagated to youtube-dl (preferably put a -- before them)')
def regex_rename_func():
args = rtd.args
source = args.source
recursive = args.recursive
pattern = args.pattern
replace = args.replace
only_basename = args.only_basename
dry_run = args.dry_run
only_dirs = args.only_dirs
if only_dirs:
src_l = list_dirs(source or clipboard, recursive=recursive)
else:
src_l = list_files(source or clipboard, recursive=recursive)
for src in src_l:
try:
fs_inplace_rename_regex(src, pattern, replace, only_basename, dry_run)
except OSError as e:
print(repr(e))
regex_rename = add_sub_parser('rename.regex', ['regren', 'rern', 'rrn'], 'regex rename file(s) or folder(s)')
regex_rename.set_defaults(target=regex_rename_func)
regex_rename.add_argument('-B', '--not-only-basename', dest='only_basename', action='store_false')
regex_rename.add_argument('-D', '--dry-run', action='store_true')
regex_rename.add_argument('-d', '--only-dirs', action='store_true')
regex_rename.add_argument('-s', '--source')
regex_rename.add_argument('-r', '--recursive', action='store_true')
regex_rename.add_argument('pattern')
regex_rename.add_argument('replace')
def rename_func():
args = rtd.args
source = args.source
recursive = args.recursive
pattern = args.pattern
replace = args.replace
only_basename = args.only_basename
dry_run = args.dry_run
only_dirs = args.only_dirs
if only_dirs:
src_l = list_dirs(source or clipboard, recursive=recursive)
else:
src_l = list_files(source or clipboard, recursive=recursive)
# print(source)
# print(src_l)
for src in src_l:
try:
fs_inplace_rename(src, pattern, replace, only_basename, dry_run)
except OSError as e:
print(repr(e))
rename = add_sub_parser('rename', ['ren'], 'rename file(s) or folder(s)')
rename.set_defaults(target=rename_func)
rename.add_argument('-B', '--not-only-basename', dest='only_basename', action='store_false')
rename.add_argument('-D', '--dry-run', action='store_true')
rename.add_argument('-d', '--only-dirs', action='store_true')
rename.add_argument('-s', '--source')
rename.add_argument('-r', '--recursive', action='store_true')
rename.add_argument('pattern')
rename.add_argument('replace')
def run_from_lines_func():
import os
args = rtd.args
source = args.source
dry_run = args.dry_run
cmd_fmt = ' '.join(args.command) or input('< ')
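    # normalize the placeholder: a bare "{}" becomes "{line}", and if no placeholder is
    # present the current line is appended as a quoted argument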
if '{}' in cmd_fmt:
cmd_fmt = cmd_fmt.replace('{}', '{line}')
if '{line}' not in cmd_fmt:
cmd_fmt += ' "{line}"'
print('>', cmd_fmt, file=sys.stderr)
if source == ':clipboard.path':
lines = mylib.ext.ostk.clipboard.list_path()
elif source == ':clipboard':
lines = str(clipboard.get()).splitlines()
elif source:
with open(source, 'r') as fd:
lines = fd.readlines()
else:
lines = []
try:
for line in lines:
line = line.strip()
if not line:
continue
command = cmd_fmt.format(line=line.strip())
print('#', command, file=sys.stderr)
if not dry_run:
os.system(command)
except KeyboardInterrupt:
sys.exit(2)
run_from_lines = add_sub_parser(
'run.from.lines', ['run.lines', 'rl'],
    'given lines from a file, the clipboard, etc.; the formatted command is executed for each line')
run_from_lines.set_defaults(target=run_from_lines_func)
run_from_lines.add_argument('-s', '--source', help='":clipboard", ":clipboard.path", or path of file (text lines)')
run_from_lines.add_argument('command', nargs='*')
run_from_lines.add_argument('-D', '--dry-run', action='store_true')
def dukto_x_func():
from mylib.dukto import run, copy_recv_text, config_at
from threading import Thread
from queue import Queue
args = rtd.args
config_at.server.text.queue = Queue()
config_at.server.msg_echo = args.echo
t = Thread(target=copy_recv_text, args=(args.file, args.clipboard))
t.daemon = True
ndrop_args = rtd.args.ndrop_args
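    # drop any leading "--" separators so the remaining arguments can be handed to ndrop unchanged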
while ndrop_args and ndrop_args[0] == '--':
ndrop_args.pop(0)
sys.argv[0] = 'mykit dukto-x'
sys.argv[1:] = ndrop_args
t.start()
run()
dukto_x = add_sub_parser('dukto-x', ['dukto'],
'extended dukto server, remainder arguments conform to ndrop')
dukto_x.set_defaults(target=dukto_x_func)
dukto_x.add_argument('-f', '--copy-text-to-file', metavar='file', dest='file')
dukto_x.add_argument('-c', '--copy-text-to-clipboard', action='store_true', dest='clipboard')
dukto_x.add_argument('-e', '--echo', action='store_true')
dukto_x.add_argument('ndrop_args', metavar='[--] arguments for ndrop', nargs=REMAINDER)
def url_from_clipboard():
from expykit import os
from mylib.easy.text import regex_find
from html import unescape
args = rtd.args
pattern = args.pattern
try:
t = os.clipboard.get_html()
except AttributeError:
t = os.clipboard.get()
if not pattern:
urls = []
elif pattern == 'ed2k':
p = r'ed2k://[^/]+/'
urls = regex_find(p, t, dedup=True)
elif pattern == 'magnet':
p = r'magnet:[^\s"]+'
urls = regex_find(p, unescape(t), dedup=True)
elif pattern == 'iwara':
from mylib.sites.iwara import find_video_url
urls = find_video_url(t)
elif pattern in ('pornhub', 'ph'):
from mylib.sites.pornhub import find_url_in_text
urls = find_url_in_text(t)
elif pattern in ('youtube', 'ytb'):
from mylib.sites.youtube import find_url_in_text
urls = find_url_in_text(t)
elif pattern in ('bilibili', 'bili'):
from mylib.sites.bilibili.__to_be_deprecated__ import find_url_in_text
urls = find_url_in_text(t)
elif pattern in ('hc.fyi', 'hentai.cafe', 'hentaicafe'):
p = r'https://hentai.cafe/hc.fyi/\d+'
urls = regex_find(p, t, dedup=True)
else:
urls = regex_find(pattern, t)
urls = '\r\n'.join(urls)
os.clipboard.clear()
os.clipboard.set(urls)
print(urls)
clipboard_findurl = add_sub_parser('clipboard.findurl', ['cb.url', 'cburl'],
'find URLs from clipboard, then copy found URLs back to clipboard')
clipboard_findurl.set_defaults(target=url_from_clipboard)
clipboard_findurl.add_argument('pattern', help='URL pattern, or website name')
def clipboard_rename_func():
from mylib.gui_old import rename_dialog
for f in mylib.ext.ostk.clipboard.list_path():
rename_dialog(f)
clipboard_rename = add_sub_parser('clipboard.rename', ['cb.ren', 'cbren'], 'rename files in clipboard')
clipboard_rename.set_defaults(target=clipboard_rename_func)
def potplayer_rename_func():
from mylib.enchant.potplayer import PotPlayerKit
args = rtd.args
PotPlayerKit().rename_file_gui(alt_tab=args.no_keep_front)
potplayer_rename = add_sub_parser('potplayer.rename', ['pp.ren', 'ppren'], 'rename media file opened in PotPlayer')
potplayer_rename.set_defaults(target=potplayer_rename_func)
potplayer_rename.add_argument('-F', '--no-keep-front', action='store_true', help='do not keep PotPlayer in front')
def bilibili_download_func():
from mylib.sites.bilibili.__to_be_deprecated__ import download_bilibili_video
args = rtd.args
if args.verbose:
print(args)
download_bilibili_video(**vars(args))
bilibili_download = add_sub_parser('bilibili.download', ['bldl'], 'bilibili video downloader (source-patched you-get)')
bilibili_download.set_defaults(target=bilibili_download_func)
bilibili_download.add_argument('url')
bilibili_download.add_argument('-v', '--verbose', action='store_true')
bilibili_download.add_argument('-f', '--force', action='store_true')
bilibili_download.add_argument('-c', '--cookies', metavar='FILE')
bilibili_download.add_argument('-i', '--info', action='store_true')
bilibili_download.add_argument('-l', '--playlist', action='store_true', help='BUGGY! DO NOT USE!')
bilibili_download.add_argument('-o', '--output', metavar='dir')
bilibili_download.add_argument('-p', '--parts', nargs='*', metavar='N')
bilibili_download.add_argument('-q', '--qn-want', type=int, metavar='N',
help='120, 116, 112, 80, 74, 64, 48, 32, 16, 0')
bilibili_download.add_argument('-Q', '--qn-max', type=int, metavar='N', default=116,
                               help='max qn (quality number), defaults to 116 (1080P60), not 120 (4K).')
bilibili_download.add_argument('-C', '--no-caption', dest='caption', action='store_false')
bilibili_download.add_argument('-A', '--no-moderate-audio', dest='moderate_audio', action='store_false',
help='by default the best quality audio is NOT used, '
                                    'instead, a moderate quality (~128kbps) is chosen, which is good enough. '
                                    'this option forces choosing the best quality audio stream')
def json_edit_func():
from mylib.ext.fstk import write_json_file
from mylib.ext.fstk import read_json_file
args = rtd.args
file = args.file or list_files(clipboard)[0]
indent = args.indent
delete = args.delete
item_l = args.item
d = read_json_file(file)
if delete:
def handle(key, value):
if key in d:
if value:
if d[key] == value:
del d[key]
else:
del d[key]
else:
def handle(key, value):
d[key] = value
for item in item_l:
k, v = map(eval_or_str, item.split('=', maxsplit=1))
handle(k, v)
write_json_file(file, d, indent=indent)
json_edit = add_sub_parser('json.edit', ['jse'], 'edit JSON file')
json_edit.set_defaults(target=json_edit_func)
json_edit.add_argument('-f', '--file', nargs='?')
json_edit.add_argument('-i', '--indent', type=int, default=4)
json_edit.add_argument('-d', '--delete', action='store_true')
json_edit.add_argument('item', nargs='+')
def json_key_func():
from mylib.ext.fstk import read_json_file
args = rtd.args
d = read_json_file(args.file)
print(d[args.key])
json_key = add_sub_parser('json.getkey', ['jsk'], 'find in JSON file by key')
json_key.set_defaults(target=json_key_func)
json_key.add_argument('file', help='JSON file to query')
json_key.add_argument('key', help='query key')
def update_json_file():
from mylib.ext.fstk import write_json_file
from mylib.ext.fstk import read_json_file
args = rtd.args
old, new = args.old, args.new
d = read_json_file(old)
d.update(read_json_file(new))
write_json_file(old, d, indent=args.indent)
json_update = add_sub_parser('json.update', ['jsup'], 'update <old> JSON file with <new>')
json_update.set_defaults(target=update_json_file)
json_update.add_argument('old', help='JSON file with old data')
json_update.add_argument('new', help='JSON file with new data')
json_update.add_argument('-t', '--indent', type=int, default=4, metavar='N')
def view_similar_images():
from mylib.picture import view_similar_images_auto
args = rtd.args
kwargs = {
'thresholds': args.thresholds,
'hashtype': args.hashtype,
'hashsize': args.hashsize,
'trans': args.transpose,
'dryrun': args.dry_run,
}
    # use a list (not a generator) so the emptiness check below actually works
    dir_l = [p for p in (args.dir or mylib.ext.ostk.clipboard.list_path()) if os.path.isdir(p)]
if dir_l:
for d in dir_l:
with ctx_pushd(d):
view_similar_images_auto(**kwargs)
else:
view_similar_images_auto(**kwargs)
img_sim_view = add_sub_parser('img.sim.view', ['vsi'], 'view similar images in current working directory')
img_sim_view.set_defaults(target=view_similar_images)
img_sim_view.add_argument('dir', nargs='*')
img_sim_view.add_argument(
'-t', '--thresholds', type=arg_type_range_factory(float, '0<x<=1'), nargs='+', metavar='N',
help='(multiple) similarity thresholds')
img_sim_view.add_argument(
'-H', '--hashtype', type=str, choices=[s + 'hash' for s in ('a', 'd', 'p', 'w')], help='image hash type')
img_sim_view.add_argument(
'-s', '--hashsize', type=arg_type_pow2, metavar='N',
    help='the side size of the image hash square, must be an integer power of 2')
img_sim_view.add_argument(
'-T', '--no-transpose', action='store_false', dest='transpose',
help='do not find similar images for transposed variants (rotated, flipped)')
img_sim_view.add_argument(
'-D', '--dry-run', action='store_true', help='find similar images, but without viewing them')
def move_ehviewer_images():
from mylib.sites.ehentai import ehviewer_images_catalog
args = rtd.args
ehviewer_images_catalog(args.src or mylib.ext.ostk.clipboard.list_path()[0],
dry_run=args.dry_run, db_json_path=args.db_json or 'ehdb.json')
ehv_img_mv = add_sub_parser('ehv.img.mv', ['ehvmv'],
'move ehviewer downloaded images into folders')
ehv_img_mv.set_defaults(target=move_ehviewer_images)
ehv_img_mv.add_argument('-D', '--dry-run', action='store_true')
ehv_img_mv.add_argument('-j', '--db-json')
ehv_img_mv.add_argument('-s', '--src', nargs='?')
if __name__ == '__main__':
main()
| 36.225673 | 119 | 0.655884 | 3,566 | 0.101903 | 0 | 0 | 2,294 | 0.065554 | 0 | 0 | 7,342 | 0.209807 |
2c76dcc1048590c3fb8ac98732063ed2e3b0d867 | 8,282 | py | Python | tests/audit_task_test.py | danieldiamond/dbt-sugar | 0645722cb52cf9eb685d65b556beb9b4c2d3cbcf | [
"Apache-2.0"
] | 94 | 2020-12-21T20:00:38.000Z | 2022-03-31T13:53:00.000Z | tests/audit_task_test.py | danieldiamond/dbt-sugar | 0645722cb52cf9eb685d65b556beb9b4c2d3cbcf | [
"Apache-2.0"
] | 410 | 2020-12-19T09:25:59.000Z | 2022-03-23T04:20:40.000Z | tests/audit_task_test.py | danieldiamond/dbt-sugar | 0645722cb52cf9eb685d65b556beb9b4c2d3cbcf | [
"Apache-2.0"
] | 16 | 2021-03-29T23:11:35.000Z | 2022-03-10T11:27:26.000Z | from pathlib import Path
from unittest.mock import call
import pytest
from dbt_sugar.core.clients.dbt import DbtProfile
from dbt_sugar.core.config.config import DbtSugarConfig
from dbt_sugar.core.flags import FlagParser
from dbt_sugar.core.main import parser
from dbt_sugar.core.task.audit import AuditTask
from dbt_sugar.core.task.base import COLUMN_NOT_DOCUMENTED
FIXTURE_DIR = Path(__file__).resolve().parent
def __init_descriptions(datafiles):
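    """Build an AuditTask wired to the test fixtures, preloaded with canned dbt column descriptions."""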
flag_parser = FlagParser(parser)
config_filepath = Path(FIXTURE_DIR).joinpath("sugar_config.yml")
flag_parser.consume_cli_arguments(
test_cli_args=[
"audit",
"--config-path",
str(config_filepath),
]
)
sugar_config = DbtSugarConfig(flag_parser)
sugar_config.load_config()
profile = DbtProfile(
flags=flag_parser,
profile_name="dbt_sugar_test",
target_name=str(),
profiles_dir=Path(datafiles),
)
profile.read_profile()
audit_task = AuditTask(flag_parser, FIXTURE_DIR, sugar_config=sugar_config, dbt_profile=profile)
audit_task.dbt_definitions = {"columnA": "descriptionA", "columnB": "descriptionB"}
audit_task.repository_path = Path("tests/test_dbt_project/")
return audit_task
@pytest.mark.parametrize(
"dbt_definitions, result",
[
pytest.param(
{"columnA": "descriptionA", "columnB": "descriptionB"},
"100.0",
id="all_columns_documented",
),
pytest.param(
{"columnA": COLUMN_NOT_DOCUMENTED, "columnB": COLUMN_NOT_DOCUMENTED},
"0.0",
id="none_columns_documented",
),
pytest.param(
{"columnA": "descriptionA", "columnB": COLUMN_NOT_DOCUMENTED},
"50.0",
id="half_columns_documented",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_project_total_test_coverage(datafiles, dbt_definitions, result):
audit_task = __init_descriptions(datafiles)
audit_task.dbt_definitions = dbt_definitions
assert audit_task.get_project_total_test_coverage() == result
@pytest.mark.parametrize(
"failures, total, result",
[
pytest.param(
0,
0,
"0.0",
id="calculate_failures_with_0_failures_and_total",
),
pytest.param(
8,
10,
"20.0",
id="calculate_failures",
),
pytest.param(
0,
10,
"100.0",
id="calculate_failures_with_0_failures",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_calculate_coverage_percentage(datafiles, failures, total, result):
audit_task = __init_descriptions(datafiles)
assert audit_task.calculate_coverage_percentage(misses=failures, total=total) == result
@pytest.mark.parametrize(
"data, total, result",
[
pytest.param(
[],
"0.0",
{},
id="check_results_with_data_being_empty",
),
pytest.param(
["column_A"],
"10.0",
{"column_A": "", "": "", "Total": "10.0"},
id="check_results_with_one_data_element",
),
pytest.param(
["column_A", "column_B"],
"10.0",
{"column_A": "", "column_B": "", "": "", "Total": "10.0"},
id="check_results_with_more_than_one_data_element",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_print_nicely_the_data(datafiles, data, total, result):
audit_task = __init_descriptions(datafiles)
assert audit_task.print_nicely_the_data(data=data, total=total) == result
@pytest.mark.parametrize(
"dbt_tests, model_name, call_input",
[
pytest.param(
{
"dim_company": [
{"name": "id", "tests": []},
{"name": "name", "tests": []},
{"name": "age", "tests": []},
{"name": "address", "tests": ["not_null"]},
{"name": "salary", "tests": ["unique"]},
],
"stg_customers": [{"name": "customer_id", "tests": ["unique", "not_null"]}],
},
"dim_company",
[
call(
columns=["Untested Columns", "% coverage"],
data={"age": "", "id": "", "name": "", "": "", "Total": "40.0"},
title="Test Coverage",
)
],
id="check_test_coverage_calculation",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_model_test_coverage(datafiles, mocker, dbt_tests, model_name, call_input):
create_table = mocker.patch("dbt_sugar.core.task.audit.AuditTask.create_table")
audit_task = __init_descriptions(datafiles)
audit_task.model_name = model_name
audit_task.dbt_tests = dbt_tests
audit_task.get_model_test_coverage()
create_table.assert_has_calls(call_input)
@pytest.mark.parametrize(
"dbt_tests, call_input",
[
pytest.param(
{
"dim_company": [
{"name": "id", "tests": []},
{"name": "name", "tests": []},
{"name": "age", "tests": []},
{"name": "address", "tests": ["not_null"]},
{"name": "salary", "tests": ["unique"]},
],
"stg_customers": [{"name": "customer_id", "tests": ["unique", "not_null"]}],
},
[
call(
columns=["Model Name", "% coverage"],
data={"dim_company": "40.0", "stg_customers": "100.0", "": "", "Total": "50.0"},
title="Test Coverage",
)
],
id="check_test_coverage_calculation",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_project_test_coverage(datafiles, mocker, dbt_tests, call_input):
create_table = mocker.patch("dbt_sugar.core.task.audit.AuditTask.create_table")
audit_task = __init_descriptions(datafiles)
audit_task.dbt_tests = dbt_tests
audit_task.get_project_test_coverage()
create_table.assert_has_calls(call_input)
@pytest.mark.parametrize(
"model_content, model_name, call_input",
[
pytest.param(
{
"version": 2,
"models": [
{
"name": "dim_company",
"description": "aa.",
"columns": [
{"name": "id", "description": "No description for this column."},
{"name": "name", "description": "No description for this column."},
{"name": "age", "description": "No description for this column."},
{
"name": "address",
"description": "No description for this column.",
"tests": ["not_null"],
},
{"name": "salary", "description": "hey.", "tests": ["unique"]},
],
}
],
},
"dim_company",
[
call(
columns=["Undocumented Columns", "% coverage"],
data={"id": "", "name": "", "age": "", "address": "", "": "", "Total": "20.0"},
title="Documentation Coverage",
)
],
id="check_column_description_coverage_calculation",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_model_column_description_coverage(
datafiles, mocker, model_content, model_name, call_input
):
audit_task = __init_descriptions(datafiles)
audit_task.get_model_column_description_coverage()
create_table = mocker.patch("dbt_sugar.core.task.audit.AuditTask.create_table")
audit_task = __init_descriptions(datafiles)
audit_task.model_content = model_content
audit_task.model_name = model_name
audit_task.get_model_column_description_coverage()
create_table.assert_has_calls(call_input)
| 33.128 | 100 | 0.546245 | 0 | 0 | 0 | 0 | 6,996 | 0.844723 | 0 | 0 | 2,151 | 0.25972 |
2c77779e7a9b8341a00c99ca1a39c874946c475d | 624 | py | Python | src/15-led-compass/plt_mag_calibration.py | Petrus97/discovery | 0d1039a1d3e8eee791a8914399de5a3904102729 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | src/15-led-compass/plt_mag_calibration.py | Petrus97/discovery | 0d1039a1d3e8eee791a8914399de5a3904102729 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | src/15-led-compass/plt_mag_calibration.py | Petrus97/discovery | 0d1039a1d3e8eee791a8914399de5a3904102729 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | import csv
import math
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import sys
# apply plot style
sns.set()
x = []
y = []
with open(sys.argv[1], 'r') as f:
rows = csv.reader(f, delimiter='\t')
for row in rows:
# discard rows that are missing data
if len(row) != 3 or not row[0] or not row[1]:
continue
x.append(int(row[0]))
y.append(int(row[1]))
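# symmetric plot radius: round the largest |x| or |y| sample up to the next multiple of 100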
r = math.ceil(max(max(np.abs(x)), max(np.abs(y))) / 100) * 100
plt.plot(x, y, '.')
plt.xlim(-r, r)
plt.ylim(-r, r)
plt.gca().set_aspect(1)
plt.tight_layout()
plt.savefig('emf.svg')
plt.close()
2c7b2b9359b867f746d77215054dee5fb93437cf | 4,453 | py | Python | altair_transform/utils/_evaljs.py | jacobcolbert/altair-transform | b2c8f64e79c614ff431bd416f89f9c195fbd3f55 | [
"MIT"
] | 3 | 2019-11-13T22:12:57.000Z | 2019-11-13T22:17:01.000Z | altair_transform/utils/_evaljs.py | jacobcolbert/altair-transform | b2c8f64e79c614ff431bd416f89f9c195fbd3f55 | [
"MIT"
] | null | null | null | altair_transform/utils/_evaljs.py | jacobcolbert/altair-transform | b2c8f64e79c614ff431bd416f89f9c195fbd3f55 | [
"MIT"
] | null | null | null | """Functionality to evaluate contents of the ast"""
from functools import singledispatch, wraps
import operator
from typing import Any, Union
from altair_transform.utils import ast, Parser
__all__ = ['evaljs']
def evaljs(expression: Union[str, ast.Expr], namespace: dict = None) -> Any:
"""Evaluate a javascript expression, optionally with a namespace."""
if isinstance(expression, str):
parser = Parser()
expression = parser.parse(expression)
return visit(expression, namespace or {})
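# Illustrative usage (the expression and namespace below are only an example):
#   evaljs("a + 2 * 3", {"a": 4})  # parses the string, then evaluates it against the namespace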
@singledispatch
def visit(obj: Any, namespace: dict) -> Any:
return obj
@visit.register
def _visit_expr(obj: ast.Expr, namespace: dict) -> Any:
return obj.value
@visit.register
def _visit_binop(obj: ast.BinOp, namespace: dict) -> Any:
if obj.op not in BINARY_OPERATORS:
raise NotImplementedError(f"Binary Operator A {obj.op} B")
op = BINARY_OPERATORS[obj.op]
return op(visit(obj.lhs, namespace), visit(obj.rhs, namespace))
@visit.register
def _visit_unop(obj: ast.UnOp, namespace: dict) -> Any:
if obj.op not in UNARY_OPERATORS:
raise NotImplementedError(f"Unary Operator {obj.op}x")
op = UNARY_OPERATORS[obj.op]
return op(visit(obj.rhs, namespace))
@visit.register
def _visit_ternop(obj: ast.TernOp, namespace: dict) -> Any:
if obj.op not in TERNARY_OPERATORS:
raise NotImplementedError(
f"Ternary Operator A {obj.op[0]} B {obj.op[1]} C")
op = TERNARY_OPERATORS[obj.op]
return op(visit(obj.lhs, namespace),
visit(obj.mid, namespace),
visit(obj.rhs, namespace))
@visit.register
def _visit_number(obj: ast.Number, namespace: dict) -> Any:
return obj.value
@visit.register
def _visit_string(obj: ast.String, namespace: dict) -> Any:
return obj.value
@visit.register
def _visit_global(obj: ast.Global, namespace: dict) -> Any:
if obj.name not in namespace:
raise NameError("{0} is not a valid name".format(obj.name))
return namespace[obj.name]
@visit.register
def _visit_name(obj: ast.Name, namespace: dict) -> Any:
return obj.name
@visit.register
def _visit_list(obj: ast.List, namespace: dict) -> Any:
return [visit(entry, namespace) for entry in obj.entries]
@visit.register
def _visit_object(obj: ast.Object, namespace: dict) -> Any:
def _visit(entry):
if isinstance(entry, tuple):
return tuple(visit(e, namespace) for e in entry)
if isinstance(entry, ast.Name):
return (visit(entry, namespace),
visit(ast.Global(entry.name), namespace))
return dict(_visit(entry) for entry in obj.entries)
@visit.register
def _visit_attr(obj: ast.Attr, namespace: dict) -> Any:
obj_ = visit(obj.obj, namespace)
attr = visit(obj.attr, namespace)
if isinstance(obj_, dict):
return obj_[attr]
return getattr(obj_, attr)
@visit.register
def _visit_item(obj: ast.Item, namespace: dict) -> Any:
obj_ = visit(obj.obj, namespace)
item = visit(obj.item, namespace)
if isinstance(obj_, list) and isinstance(item, float):
item = int(item)
return obj_[item]
@visit.register
def _visit_func(obj: ast.Func, namespace: dict) -> Any:
func = visit(obj.func, namespace)
args = [visit(arg, namespace) for arg in obj.args]
return func(*args)
def int_inputs(func):
@wraps(func)
def wrapper(*args):
return float(func(*map(int, args)))
return wrapper
@int_inputs
def zerofill_rshift(lhs: int, rhs: int) -> int:
if lhs < 0:
lhs = lhs + 0x100000000
return lhs >> rhs
# TODO: do implicit type conversions ugh...
UNARY_OPERATORS = {
'~': int_inputs(operator.inv),
'-': operator.neg,
'+': operator.pos,
'!': operator.not_,
}
BINARY_OPERATORS = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"**": operator.pow,
"%": operator.mod,
"&": int_inputs(operator.and_),
"|": int_inputs(operator.or_),
"^": int_inputs(operator.xor),
"<<": int_inputs(operator.lshift),
">>": int_inputs(operator.rshift),
">>>": zerofill_rshift,
"<": operator.lt,
"<=": operator.le,
">": operator.gt,
">=": operator.ge,
"==": operator.eq,
"===": operator.eq,
"!=": operator.ne,
"!==": operator.ne,
"&&": lambda a, b: a and b,
"||": lambda a, b: a or b,
}
TERNARY_OPERATORS = {
("?", ":"): lambda a, b, c: b if a else c
}
| 26.040936 | 76 | 0.644509 | 0 | 0 | 0 | 0 | 2,970 | 0.666966 | 0 | 0 | 401 | 0.090052 |
2c7b9d2be0c8aa29b6e9f7fc1a79c1f1342d98ef | 453 | py | Python | stix/test/common/names_test.py | threatstream/stix11 | 267dbf390abc0897c691e256396cd1888f423494 | [
"BSD-3-Clause"
] | 1 | 2019-07-25T17:19:48.000Z | 2019-07-25T17:19:48.000Z | stix/test/common/names_test.py | threatstream/stix11 | 267dbf390abc0897c691e256396cd1888f423494 | [
"BSD-3-Clause"
] | 1 | 2020-04-15T20:45:55.000Z | 2020-04-15T20:45:55.000Z | stix/test/common/names_test.py | threatstream/stix11 | 267dbf390abc0897c691e256396cd1888f423494 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from stix.test import EntityTestCase
from stix.common import Names
class NamesTests(EntityTestCase, unittest.TestCase):
klass = Names
_full_dict = [
"foo",
"bar",
{'value': 'User Data Loss', 'xsi:type': 'stixVocabs:IncidentEffectVocab-1.0'},
]
if __name__ == '__main__':
unittest.main()
| 21.571429 | 86 | 0.675497 | 212 | 0.467991 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.421634 |
2c7ba2c92a4eb8e4a79941d83d6a59aa5150b61d | 94 | py | Python | server.py | yyc0309/watsonwork-github-issues | 3377ff0fa87e5323165c7b2db9e9a3fc5da6ca50 | [
"Apache-2.0"
] | null | null | null | server.py | yyc0309/watsonwork-github-issues | 3377ff0fa87e5323165c7b2db9e9a3fc5da6ca50 | [
"Apache-2.0"
] | 7 | 2018-03-05T21:14:04.000Z | 2018-03-09T23:16:37.000Z | server.py | yyc0309/watsonwork-github-issues | 3377ff0fa87e5323165c7b2db9e9a3fc5da6ca50 | [
"Apache-2.0"
] | null | null | null | from github_app import app
app.run(host='0.0.0.0', port=int(app.config['PORT']), debug=True)
| 23.5 | 65 | 0.702128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.159574 |
2c7d5fed6f3d1c89d2afce10eef18d6e78236d68 | 4,855 | py | Python | old_lambda/lambda_function/operations.py | jdkandersson/cloudformation-kubernetes | 8bd14379540bd2d122283c74166883e375cb348e | [
"Apache-2.0"
] | null | null | null | old_lambda/lambda_function/operations.py | jdkandersson/cloudformation-kubernetes | 8bd14379540bd2d122283c74166883e375cb348e | [
"Apache-2.0"
] | null | null | null | old_lambda/lambda_function/operations.py | jdkandersson/cloudformation-kubernetes | 8bd14379540bd2d122283c74166883e375cb348e | [
"Apache-2.0"
] | null | null | null | """Kubernetes operations."""
import typing
import kubernetes
from . import exceptions
from . import helpers
class CreateReturn(typing.NamedTuple):
"""
Structure of the create return value.
Attrs:
status: The status of the operation. Is SUCCESS or FAILURE.
reason: If the status is FAILURE, the reason for the failure.
physical_name: If the status is success, the physical name of the created
resource in the form [<namespace>/]<name> where the namespace is included
if the operation is namespaced.
"""
status: str
reason: typing.Optional[str]
physical_name: typing.Optional[str]
def create(*, body: typing.Dict[str, typing.Any]) -> CreateReturn:
"""
Execute create command.
Assume body has at least metadata with a name.
Args:
body: The body to create.
Returns:
Information about the outcome of the operation.
"""
try:
api_version = helpers.get_api_version(body=body)
kind = helpers.get_kind(body=body)
except exceptions.ParentError as exc:
return CreateReturn("FAILURE", str(exc), None)
client_function, namespaced = helpers.get_function(
api_version=api_version, kind=kind, operation="create"
)
# Handling non-namespaced cases
if not namespaced:
try:
response = client_function(body=body)
return CreateReturn("SUCCESS", None, response.metadata.name)
except kubernetes.client.rest.ApiException as exc:
return CreateReturn("FAILURE", str(exc), None)
# Handling namespaced
namespace = helpers.calculate_namespace(body=body)
try:
response = client_function(body=body, namespace=namespace)
return CreateReturn(
"SUCCESS", None, f"{response.metadata.namespace}/{response.metadata.name}"
)
except kubernetes.client.rest.ApiException as exc:
return CreateReturn("FAILURE", str(exc), None)
class ExistsReturn(typing.NamedTuple):
"""
    Structure of the update/delete return value.
Attrs:
status: The status of the operation. Is SUCCESS or FAILURE.
reason: If the status is FAILURE, the reason for the failure.
"""
status: str
reason: typing.Optional[str]
def update(*, body: typing.Dict[str, typing.Any], physical_name: str) -> ExistsReturn:
"""
Execute update command.
Assume body has at least metadata with a name.
Args:
body: The body to update.
physical_name: The namespace (if namespaced) and name of the resource.
Returns:
Information about the outcome of the operation.
"""
try:
api_version = helpers.get_api_version(body=body)
kind = helpers.get_kind(body=body)
except exceptions.ParentError as exc:
return ExistsReturn("FAILURE", str(exc))
client_function, namespaced = helpers.get_function(
api_version=api_version, kind=kind, operation="update"
)
# Handling non-namespaced cases
if not namespaced:
try:
client_function(body=body, name=physical_name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
# Handling namespaced
namespace, name = physical_name.split("/")
try:
client_function(body=body, namespace=namespace, name=name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
def delete(*, body: typing.Dict[str, typing.Any], physical_name: str) -> ExistsReturn:
"""
Execute delete command.
Assume body has at least metadata with a name.
Args:
body: The body to delete.
physical_name: The namespace (if namespaced) and name of the resource.
Returns:
Information about the outcome of the operation.
"""
try:
api_version = helpers.get_api_version(body=body)
kind = helpers.get_kind(body=body)
except exceptions.ParentError as exc:
return ExistsReturn("FAILURE", str(exc))
client_function, namespaced = helpers.get_function(
api_version=api_version, kind=kind, operation="delete"
)
# Handling non-namespaced cases
if not namespaced:
try:
client_function(name=physical_name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
# Handling namespaced
namespace, name = physical_name.split("/")
try:
client_function(namespace=namespace, name=name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
| 29.969136 | 86 | 0.664264 | 846 | 0.174253 | 0 | 0 | 0 | 0 | 0 | 0 | 1,805 | 0.371782 |
2c7e0f2d39a195aca2df95f3b0bcc74ec5c6e19b | 660 | py | Python | opennem/exporter/aws.py | tourdownunder/opennem | deec3e2079db9d9d84171010fd0c239170d1e7ce | [
"MIT"
] | null | null | null | opennem/exporter/aws.py | tourdownunder/opennem | deec3e2079db9d9d84171010fd0c239170d1e7ce | [
"MIT"
] | 1 | 2020-09-06T04:17:59.000Z | 2020-09-06T04:17:59.000Z | opennem/exporter/aws.py | tourdownunder/opennem | deec3e2079db9d9d84171010fd0c239170d1e7ce | [
"MIT"
] | null | null | null | import logging
import os
from smart_open import open
logger = logging.getLogger(__name__)
S3_EXPORT_DEFAULT_BUCKET = "s3://data.opennem.org.au/v3/"
UPLOAD_ARGS = {
"ContentType": "application/json",
}
def write_to_s3(file_path: str, data: str) -> int:
"""
Write data to an s3 path
"""
s3_save_path = os.path.join(S3_EXPORT_DEFAULT_BUCKET, file_path)
write_count = 0
with open(
s3_save_path,
"w",
transport_params=dict(multipart_upload_kwargs=UPLOAD_ARGS),
) as fh:
write_count = fh.write(data)
logger.info("Wrote {} to {}".format(len(data), s3_save_path))
return write_count
| 20.625 | 68 | 0.663636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.187879 |
2c7f7b1f3c0b5c1795a18f512daba60361ae64d1 | 2,549 | py | Python | Samples/NLPSample.py | Klangoo/MagnetApiClient.Python | adf36c0e8b094a282827801b1ccf0aaf56165b3f | [
"MIT"
] | null | null | null | Samples/NLPSample.py | Klangoo/MagnetApiClient.Python | adf36c0e8b094a282827801b1ccf0aaf56165b3f | [
"MIT"
] | null | null | null | Samples/NLPSample.py | Klangoo/MagnetApiClient.Python | adf36c0e8b094a282827801b1ccf0aaf56165b3f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -- coding: UTF-8 --
"""
Magnet API NLP Sample
Copyright 2018, Klangoo Inc.
"""
from klangooclient.MagnetAPIClient import MagnetAPIClient
ENDPOINT = 'https://nlp.klangoo.com/Service.svc'
CALK = 'enter your calk here'
SECRET_KEY = 'enter your secret key here'
client = MagnetAPIClient(ENDPOINT, CALK, SECRET_KEY)
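# Each helper below POSTs the same sample sentence to one Magnet API web method
# (ProcessDocument, GetSummary, GetEntities, GetCategories, GetKeyTopics) and prints the JSON response.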
def test_process_document():
request = { 'text' : 'The United States of America (USA), commonly known as the United States (U.S.) or America, is a federal republic composed of 50 states, a federal district, five major self-governing territories, and various possessions.',
'lang' : 'en', 'format' : 'json' }
json = client.callwebmethod('ProcessDocument', request, 'POST')
print('\nProcess Document:')
print(json)
def test_get_summary():
request = { 'text' : 'The United States of America (USA), commonly known as the United States (U.S.) or America, is a federal republic composed of 50 states, a federal district, five major self-governing territories, and various possessions.',
'lang' : 'en', 'format' : 'json' }
json = client.callwebmethod('GetSummary', request, 'POST')
print('\nGet Summary:')
print(json)
def test_get_entities():
request = { 'text' : 'The United States of America (USA), commonly known as the United States (U.S.) or America, is a federal republic composed of 50 states, a federal district, five major self-governing territories, and various possessions.',
'lang' : 'en', 'format' : 'json' }
json = client.callwebmethod('GetEntities', request, 'POST')
print('\nGet Entities:')
print(json)
def test_get_categories():
request = { 'text' : 'The United States of America (USA), commonly known as the United States (U.S.) or America, is a federal republic composed of 50 states, a federal district, five major self-governing territories, and various possessions.',
'lang' : 'en', 'format' : 'json' }
json = client.callwebmethod('GetCategories', request, 'POST')
print('\nGet Categories:')
print(json)
def test_get_key_topics():
request = { 'text' : 'The United States of America (USA), commonly known as the United States (U.S.) or America, is a federal republic composed of 50 states, a federal district, five major self-governing territories, and various possessions.',
'lang' : 'en', 'format' : 'json' }
json = client.callwebmethod('GetKeyTopics', request, 'POST')
print('\nGet Key Topics:')
print(json)
if __name__ == "__main__":
test_process_document()
test_get_summary()
test_get_entities()
test_get_categories()
test_get_key_topics() | 44.719298 | 245 | 0.721852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,651 | 0.647705 |
2c80a3d4d74ea16474f3364d5a8e43e993d0be8b | 952 | py | Python | 15-3SUM/solution.py | alfmunny/leetcode | e35d2164c7e6e66410309fe1667ceab5a7689bef | [
"MIT"
] | null | null | null | 15-3SUM/solution.py | alfmunny/leetcode | e35d2164c7e6e66410309fe1667ceab5a7689bef | [
"MIT"
] | null | null | null | 15-3SUM/solution.py | alfmunny/leetcode | e35d2164c7e6e66410309fe1667ceab5a7689bef | [
"MIT"
] | null | null | null | class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
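        # Sort, fix nums[i] as the smallest member of a candidate triplet, then use two
        # pointers (left, right) to find pairs summing to -nums[i]; duplicates are skipped
        # so each triplet is reported once. O(n^2) overall after the sort.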
if len(nums) < 3:
return []
ans = []
nums.sort()
for i in range(0, len(nums)-2):
if nums[i] > 0:
break
if i > 0 and nums[i-1] == nums[i]:
continue
left, right = i+1, len(nums)-1
while right > left:
s = nums[left] + nums[right] + nums[i]
if s == 0:
ans.append([nums[i], nums[left], nums[right]])
left += 1
right -= 1
while right > left and nums[left] == nums[left-1]:
left += 1
while right > left and nums[right] == nums[right+1]:
right -= 1
elif s < 0:
left += 1
else:
right -= 1
return ans
| 32.827586 | 72 | 0.366597 | 951 | 0.99895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2c81da921e001da3b682a7f055c4752416bfa10f | 38,211 | py | Python | bin/validate.py | sooshie/security-content | 3007fd2ac4743041f0e37151b17780ca8f094bbf | [
"Apache-2.0"
] | null | null | null | bin/validate.py | sooshie/security-content | 3007fd2ac4743041f0e37151b17780ca8f094bbf | [
"Apache-2.0"
] | null | null | null | bin/validate.py | sooshie/security-content | 3007fd2ac4743041f0e37151b17780ca8f094bbf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
'''
Validates manifest files under the security-content repo for correctness.
'''
import glob
import json
import jsonschema
import yaml
import sys
import argparse
from os import path
def validate_detection_contentv2(detection, DETECTION_UUIDS, errors, macros, lookups):
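    '''Content checks for a version 2 detection manifest; appends problems to the errors list and returns it.'''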
if detection['id'] == '':
errors.append('ERROR: Blank ID')
if detection['id'] in DETECTION_UUIDS:
errors.append('ERROR: Duplicate UUID found: %s' % detection['id'])
else:
DETECTION_UUIDS.append(detection['id'])
if detection['name'].endswith(" "):
errors.append(
"ERROR: Detection name has trailing spaces: '%s'" %
detection['name'])
try:
detection['description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if 'how_to_implement' in detection:
try:
detection['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in detection:
try:
detection['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: eli5 not ascii")
if 'known_false_positives' in detection:
try:
detection['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
# modded to pass validation for uba detections - not yet fleshed out
if 'splunk' in detection['detect']:
# do a regex match here instead of key values
# if (detection['detect']['splunk']['correlation_rule']['search'].find('tstats') != -1) or \
# (detection['detect']['splunk']['correlation_rule']['search'].find('datamodel') != -1):
if (detection['detect']['splunk']['correlation_rule']['search'].find('datamodel') != -1):
if 'data_models' not in detection['data_metadata']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' field is not set")
if not detection['data_metadata']['data_models']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' is empty")
# do a regex match here instead of key values
if (detection['detect']['splunk']['correlation_rule']['search'].find('sourcetype') != -1):
if 'data_sourcetypes' not in detection['data_metadata']:
errors.append("ERROR: The Splunk search specifies a sourcetype but 'data_sourcetypes' field is not set")
elif not detection['data_metadata']['data_sourcetypes']:
errors.append("ERROR: The Splunk search specifies a sourcetype but 'data_sourcetypes' is empty")
if 'macros' in detection['detect']['splunk']['correlation_rule']:
for macro in detection['detect']['splunk']['correlation_rule']['macros']:
if macro not in macros:
errors.append("ERROR: The Splunk search specifies a macro \"{}\" but there is no macro manifest for it".format(macro))
if 'lookups' in detection['detect']['splunk']['correlation_rule']:
for lookup in detection['detect']['splunk']['correlation_rule']['lookups']:
if lookup not in lookups:
errors.append("ERROR: The Splunk search specifies a lookup \"{}\" but there is no lookup manifest for it".format(lookup))
if 'notable' in detection['detect']['splunk']['correlation_rule']:
if ('drilldown_search' in detection['detect']['splunk']['correlation_rule']['notable']) ^ \
('drilldown_name' in detection['detect']['splunk']['correlation_rule']['notable']):
errors.append("ERROR: Both drilldown_search and drilldown_name must be defined")
elif 'uba' in detection['detect']:
if (detection['detect']['uba']['correlation_rule']['search'].find('tstats') != -1) or \
                (detection['detect']['uba']['correlation_rule']['search'].find('datamodel') != -1):
if 'data_models' not in detection['data_metadata']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' field is not set")
if not detection['data_metadata']['data_models']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' is empty")
# do a regex match here instead of key values
if (detection['detect']['uba']['correlation_rule']['search'].find('sourcetype') != -1):
if 'data_sourcetypes' not in detection['data_metadata']:
errors.append("ERROR: The Splunk search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
if not detection['data_metadata']['data_sourcetypes']:
errors.append("ERROR: The Splunk search specifies a sourcetype but \
'data_sourcetypes' is empty")
# do a regex match here instead of key values
return errors
def validate_investigation_contentv2(investigation, investigation_uuids, errors, macros, lookups):
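    '''Content checks for a version 2 investigation manifest; appends problems to the errors list and returns it.'''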
if investigation['id'] == '':
errors.append('ERROR: Blank ID')
if investigation['id'] in investigation_uuids:
errors.append('ERROR: Duplicate UUID found: %s' % investigation['id'])
else:
investigation_uuids.append(investigation['id'])
if investigation['name'].endswith(" "):
errors.append(
"ERROR: Investigation name has trailing spaces: '%s'" %
investigation['name'])
try:
investigation['description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if 'how_to_implement' in investigation:
try:
investigation['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in investigation:
try:
investigation['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: eli5 not ascii")
if 'known_false_positives' in investigation:
try:
investigation['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
if 'splunk' in investigation['investigate']:
# do a regex match here instead of key values
if (investigation['investigate']['splunk']['search'].find('tstats') != -1) or \
(investigation['investigate']['splunk']['search'].find('datamodel') != -1):
if 'data_models' not in investigation['data_metadata']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' field is not set")
if not investigation['data_metadata']['data_models']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' is empty")
# do a regex match here instead of key values
if (investigation['investigate']['splunk']['search'].find('sourcetype') != -1):
if 'data_sourcetypes' not in investigation['data_metadata']:
errors.append("ERROR: The Splunk search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
if not investigation['data_metadata']['data_sourcetypes']:
errors.append("ERROR: The Splunk search specifies a sourcetype but \
'data_sourcetypes' is empty")
if 'macros' in investigation['investigate']['splunk']:
for macro in investigation['investigate']['splunk']['macros']:
if macro not in macros:
errors.append("ERROR: The Splunk search specifies a macro \"{}\" but there is no macro manifest for it".format(macro))
if 'lookups' in investigation['investigate']['splunk']:
for lookup in investigation['investigate']['splunk']['lookups']:
if lookup not in lookups:
errors.append("ERROR: The Splunk search specifies a lookup \"{}\" but there is no lookup manifest for it".format(lookup))
return errors
def validate_baselines_contentv2(baseline, baselines_uuids, errors, macros, lookups):
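    '''Content checks for a version 2 baseline manifest; appends problems to the errors list and returns it.'''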
if baseline['id'] == '':
errors.append('ERROR: Blank ID')
if baseline['id'] in baselines_uuids:
errors.append('ERROR: Duplicate UUID found: %s' % baseline['id'])
else:
baselines_uuids.append(baseline['id'])
if baseline['name'].endswith(" "):
errors.append(
"ERROR: Investigation name has trailing spaces: '%s'" %
baseline['name'])
try:
baseline['description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if 'how_to_implement' in baseline:
try:
baseline['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in baseline:
try:
baseline['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: eli5 not ascii")
if 'known_false_positives' in baseline:
try:
baseline['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
if 'splunk' in baseline['baseline']:
# do a regex match here instead of key values
if (baseline['baseline']['splunk']['search'].find('tstats') != -1) or \
(baseline['baseline']['splunk']['search'].find('datamodel') != -1):
if 'data_models' not in baseline['data_metadata']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' field is not set")
if not baseline['data_metadata']['data_models']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' is empty")
# do a regex match here instead of key values
if (baseline['baseline']['splunk']['search'].find('sourcetype') != -1):
if 'data_sourcetypes' not in baseline['data_metadata']:
errors.append("ERROR: The Splunk search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
if not baseline['data_metadata']['data_sourcetypes']:
errors.append("ERROR: The Splunk search specifies a sourcetype but \
'data_sourcetypes' is empty")
if 'macros' in baseline['baseline']['splunk']:
for macro in baseline['baseline']['splunk']['macros']:
if macro not in macros:
errors.append("ERROR: The Splunk search specifies a macro \"{}\" but there is no macro manifest for it".format(macro))
if 'lookups' in baseline['baseline']['splunk']:
for lookup in baseline['baseline']['splunk']['lookups']:
if lookup not in lookups:
errors.append("ERROR: The Splunk search specifies a lookup \"{}\" but there is no lookup manifest for it".format(lookup))
return errors
def validate_detection_contentv1(detection, DETECTION_UUIDS, errors):
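    '''Content checks for a version 1 detection manifest; appends problems to the errors list and returns it.'''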
try:
detection['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if detection['search_name'].endswith(" "):
errors.append(
"ERROR: Detection name has trailing spaces: '%s'" %
detection['search_name'])
if detection['search_id'] == '':
errors.append('ERROR: Blank ID')
if detection['search_id'] in DETECTION_UUIDS:
errors.append('ERROR: Duplicate UUID found: %s' % detection['search_id'])
else:
DETECTION_UUIDS.append(detection['search_id'])
if '| tstats' in detection['search'] or 'datamodel' in detection['search']:
if 'data_models' not in detection['data_metadata']:
errors.append(
"ERROR: The search uses a data model but 'data_models' \
field is not set")
        if 'data_models' in detection['data_metadata'] and not \
detection['data_metadata']['data_models']:
errors.append(
"ERROR: The search uses a data model but 'data_models' is empty")
if 'sourcetype' in detection['search']:
if 'data_sourcetypes' not in detection['data_metadata']:
errors.append(
"ERROR: The search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
        if 'data_sourcetypes' in detection['data_metadata'] and not \
detection['data_metadata']['data_sourcetypes']:
errors.append(
"ERROR: The search specifies a sourcetype but \
'data_sourcetypes' is empty")
try:
detection['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: search_description not ascii")
if 'how_to_implement' in detection:
try:
detection['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in detection:
try:
detection['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("eli5 not ascii")
if 'known_false_positives' in detection:
try:
detection['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
if 'correlation_rule' in detection and 'notable' in \
detection['correlation_rule']:
try:
detection['correlation_rule']['notable']['rule_title'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: rule_title not ascii")
try:
detection['correlation_rule']['notable']['rule_description'].encode(
'ascii')
except UnicodeEncodeError:
errors.append("ERROR: rule_description not ascii")
return errors
def validate_investigation_contentv1(investigation, investigation_uuids, errors):
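    '''Content checks for a version 1 investigation manifest; appends problems to the errors list and returns it.'''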
try:
investigation['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if investigation['search_name'].endswith(" "):
errors.append(
"ERROR: Investigation name has trailing spaces: '%s'" %
investigation['search_name'])
if investigation['search_id'] == '':
errors.append('ERROR: Blank ID')
if investigation['search_id'] in investigation_uuids:
errors.append('ERROR: Duplicate UUID found: %s' % investigation['search_id'])
else:
investigation_uuids.append(investigation['search_id'])
if '| tstats' in investigation['search'] or 'datamodel' in investigation['search']:
if 'data_models' not in investigation['data_metadata']:
errors.append(
"ERROR: The search uses a data model but 'data_models' \
field is not set")
        if 'data_models' in investigation['data_metadata'] and not \
investigation['data_metadata']['data_models']:
errors.append(
"ERROR: The search uses a data model but 'data_models' is empty")
if 'sourcetype' in investigation['search']:
if 'data_sourcetypes' not in investigation['data_metadata']:
errors.append(
"ERROR: The search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
        if 'data_sourcetypes' in investigation['data_metadata'] and not \
investigation['data_metadata']['data_sourcetypes']:
errors.append(
"ERROR: The search specifies a sourcetype but \
'data_sourcetypes' is empty")
try:
investigation['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: search_description not ascii")
if 'how_to_implement' in investigation:
try:
investigation['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in investigation:
try:
investigation['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("eli5 not ascii")
if 'known_false_positives' in investigation:
try:
investigation['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
return errors
def validate_baselines_contentv1(baseline, baselines_uuids, errors):
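    '''Content checks for a version 1 baseline manifest; appends problems to the errors list and returns it.'''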
try:
baseline['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if baseline['search_name'].endswith(" "):
errors.append(
"ERROR: Baseline name has trailing spaces: '%s'" %
baseline['search_name'])
if baseline['search_id'] == '':
errors.append('ERROR: Blank ID')
if baseline['search_id'] in baselines_uuids:
errors.append('ERROR: Duplicate UUID found: %s' % baseline['search_id'])
else:
baselines_uuids.append(baseline['search_id'])
if '| tstats' in baseline['search'] or 'datamodel' in baseline['search']:
if 'data_models' not in baseline['data_metadata']:
errors.append(
"ERROR: The search uses a data model but 'data_models' \
field is not set")
        if 'data_models' in baseline['data_metadata'] and not \
baseline['data_metadata']['data_models']:
errors.append(
"ERROR: The search uses a data model but 'data_models' is empty")
if 'sourcetype' in baseline['search']:
if 'data_sourcetypes' not in baseline['data_metadata']:
errors.append(
"ERROR: The search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
        if 'data_sourcetypes' in baseline['data_metadata'] and not \
baseline['data_metadata']['data_sourcetypes']:
errors.append(
"ERROR: The search specifies a sourcetype but \
'data_sourcetypes' is empty")
try:
baseline['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: search_description not ascii")
if 'how_to_implement' in baseline:
try:
baseline['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in baseline:
try:
baseline['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("eli5 not ascii")
if 'known_false_positives' in baseline:
try:
baseline['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
return errors
def validate_investigation_content(investigation, investigation_uuids, macros, lookups):
'''Validate that the content of a investigation manifest is correct'''
errors = []
# run v1 content validation
if investigation["spec_version"] == 1:
errors = validate_investigation_contentv1(investigation, investigation_uuids, errors)
if investigation["spec_version"] == 2:
errors = validate_investigation_contentv2(investigation, investigation_uuids, errors, macros, lookups)
return errors
def validate_detection_content(detection, DETECTION_UUIDS, macros, lookups):
'''Validate that the content of a detection manifest is correct'''
errors = []
# run v1 content validation
if detection["spec_version"] == 1:
errors = validate_detection_contentv1(detection, DETECTION_UUIDS, errors)
if detection["spec_version"] == 2:
errors = validate_detection_contentv2(detection, DETECTION_UUIDS, errors, macros, lookups)
return errors
def validate_story_content(story, STORY_UUIDS):
''' Validate that the content of a story manifest is correct'''
errors = []
if story['id'] == '':
errors.append('ERROR: Blank ID')
if story['id'] in STORY_UUIDS:
errors.append('ERROR: Duplicate UUID found: %s' % story['id'])
else:
STORY_UUIDS.append(story['id'])
try:
story['description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
try:
story['narrative'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: narrative not ascii")
return errors
def validate_baselines_content(baseline, baselines_uuids, macros, lookups):
'''Validate that the content of a baseline manifest is correct'''
errors = []
# run v1 content validation
if baseline["spec_version"] == 1:
errors = validate_baselines_contentv1(baseline, baselines_uuids, errors)
if baseline["spec_version"] == 2:
errors = validate_baselines_contentv2(baseline, baselines_uuids, errors, macros, lookups)
return errors
def validate_investigation(REPO_PATH, verbose, macros, lookups):
''' Validates Investigation'''
INVESTIGATION_UUIDS = []
    # retrieve
v1_schema_file_investigative = path.join(path.expanduser(REPO_PATH), 'spec/v1/investigative_search.json.spec')
try:
v1_schema_investigative = json.loads(open(v1_schema_file_investigative, 'rb').read())
except IOError:
print "ERROR: reading version 1 investigations schema file {0}".format(v1_schema_file_investigative)
v1_schema_file_contexual = path.join(path.expanduser(REPO_PATH), 'spec/v1/contextual_search.json.spec')
try:
v1_schema_contexual = json.loads(open(v1_schema_file_contexual, 'rb').read())
except IOError:
print "ERROR: reading version 1 investigations schema file {0}".format(v1_schema_file_contexual)
v2_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/investigations.spec.json')
try:
v2_schema = json.loads(open(v2_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 2 investigations schema file {0}".format(v2_schema_file)
error = False
manifest_files = path.join(path.expanduser(REPO_PATH), "investigations/*.yml")
for manifest_file in glob.glob(manifest_files):
if verbose:
print "processing investigation {0}".format(manifest_file)
# read in each investigation
with open(manifest_file, 'r') as stream:
try:
investigation = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(manifest_file)
error = True
continue
        # validate v1 and v2 investigation manifests against spec, for both investigative and old contextual searches
if investigation['spec_version'] == 1 and investigation['search_type'] == "contextual":
try:
jsonschema.validate(instance=investigation, schema=v1_schema_contexual)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
elif investigation['spec_version'] == 1 and investigation['search_type'] == "investigative":
try:
jsonschema.validate(instance=investigation, schema=v1_schema_investigative)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
elif investigation['spec_version'] == 2:
try:
jsonschema.validate(instance=investigation, schema=v2_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
else:
print "ERROR: Story {0} does not contain a spec_version which is required".format(manifest_file)
error = True
continue
        # now let's validate the content
investigation_errors = validate_investigation_content(investigation, INVESTIGATION_UUIDS, macros, lookups)
if investigation_errors:
error = True
for err in investigation_errors:
print "{0} at:\n\t {1}".format(err, manifest_file)
return error
def validate_detection(REPO_PATH, verbose, macros, lookups):
''' Validates Detections'''
DETECTION_UUIDS = []
    # retrieve schema files
v1_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v1/detection_search.json.spec')
try:
v1_schema = json.loads(open(v1_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 1 detection schema file {0}".format(v1_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v1_schema_file)
v2_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/detections.spec.json')
try:
v2_schema = json.loads(open(v2_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 2 detection schema file {0}".format(v2_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v2_schema_file)
error = False
manifest_files = path.join(path.expanduser(REPO_PATH), "detections/*.yml")
for manifest_file in glob.glob(manifest_files):
if verbose:
print "processing detection {0}".format(manifest_file)
# read in each detection
with open(manifest_file, 'r') as stream:
try:
detection = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(manifest_file)
error = True
continue
        # validate v1 and v2 detections against spec
if detection['spec_version'] == 1:
try:
jsonschema.validate(instance=detection, schema=v1_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
elif detection['spec_version'] == 2:
try:
jsonschema.validate(instance=detection, schema=v2_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
else:
print "ERROR: Story {0} does not contain a spec_version which is required".format(manifest_file)
error = True
continue
        # now let's validate the content
detection_errors = validate_detection_content(detection, DETECTION_UUIDS, macros, lookups)
if detection_errors:
error = True
for err in detection_errors:
print "{0} at:\n\t {1}".format(err, manifest_file)
return error
def validate_story(REPO_PATH, verbose):
''' Validates Stories'''
STORY_UUIDS = []
    # retrieve schema files
v1_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v1/analytic_story.json.spec')
try:
v1_schema = json.loads(open(v1_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 1 story schema file {0}".format(v1_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v1_schema_file)
v2_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/story.spec.json')
try:
v2_schema = json.loads(open(v2_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 2 story schema file {0}".format(v2_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v2_schema_file)
error = False
story_manifest_files = path.join(path.expanduser(REPO_PATH), "stories/*.yml")
for story_manifest_file in glob.glob(story_manifest_files):
if verbose:
print "processing story {0}".format(story_manifest_file)
# read in each story
with open(story_manifest_file, 'r') as stream:
try:
story = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(story_manifest_file)
error = True
continue
# validate v1 and v2 stories against spec
if story['spec_version'] == 1:
try:
jsonschema.validate(instance=story, schema=v1_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), story_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
elif story['spec_version'] == 2:
try:
jsonschema.validate(instance=story, schema=v2_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), story_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
else:
print "ERROR: Story {0} does not contain a spec_version which is required".format(story_manifest_file)
error = True
continue
        # now let's validate the content
story_errors = validate_story_content(story, STORY_UUIDS)
if story_errors:
error = True
for err in story_errors:
print "{0} at:\n\t {1}".format(err, story_manifest_file)
return error
def validate_baselines(REPO_PATH, verbose, macros, lookups):
''' Validates Baselines'''
BASELINE_UUIDS = []
    # retrieve schema files
v1_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v1/support_search.json.spec')
try:
v1_schema = json.loads(open(v1_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 1 baseline schema file {0}".format(v1_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v1_schema_file)
v2_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/baselines.spec.json')
try:
v2_schema = json.loads(open(v2_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 2 baseline schema file {0}".format(v2_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v2_schema_file)
error = False
baselines_manifest_files = path.join(path.expanduser(REPO_PATH), "baselines/*.yml")
for baselines_manifest_file in glob.glob(baselines_manifest_files):
if verbose:
print "processing baseline {0}".format(baselines_manifest_file)
# read in each baseline
with open(baselines_manifest_file, 'r') as stream:
try:
baseline = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(baselines_manifest_file)
error = True
continue
        # validate v1 and v2 baselines against spec
if baseline['spec_version'] == 1:
try:
jsonschema.validate(instance=baseline, schema=v1_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), baselines_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
elif baseline['spec_version'] == 2:
try:
jsonschema.validate(instance=baseline, schema=v2_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), baselines_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
else:
print "ERROR: Baseline {0} does not contain a spec_version which is required".format(baselines_manifest_file)
error = True
continue
        # now let's validate the content
baselines_errors = validate_baselines_content(baseline, BASELINE_UUIDS, macros, lookups)
if baselines_errors:
error = True
for err in baselines_errors:
print "{0} at:\n\t {1}".format(err, baselines_manifest_file)
return error
def validate_macros(REPO_PATH, verbose):
''' Validates Macros'''
error = False
schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/macros.spec.json')
schema = json.loads(open(schema_file, 'rb').read())
macro_manifests = {}
macros_manifest_files = path.join(path.expanduser(REPO_PATH), "macros/*.yml")
for macros_manifest_file in glob.glob(macros_manifest_files):
if verbose:
print "processing macro {0}".format(macros_manifest_file)
# read in each macro
with open(macros_manifest_file, 'r') as stream:
try:
macro = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(macros_manifest_file)
error = True
continue
try:
jsonschema.validate(instance=macro, schema=schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), macros_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
macro_manifests[macro['name']] = macro
return error, macro_manifests
def validate_lookups(REPO_PATH, verbose):
''' Validates Lookups'''
error = False
schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/lookups.spec.json')
schema = json.loads(open(schema_file, 'rb').read())
lookup_manifests = {}
lookups_manifest_files = path.join(path.expanduser(REPO_PATH), "lookups/*.yml")
for lookups_manifest_file in glob.glob(lookups_manifest_files):
if verbose:
print "processing lookup {0}".format(lookups_manifest_file)
# read in each lookup
with open(lookups_manifest_file, 'r') as stream:
try:
lookup = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(lookups_manifest_file)
error = True
continue
try:
jsonschema.validate(instance=lookup, schema=schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), lookups_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
if 'filename' in lookup:
lookup_csv_file = path.join(path.expanduser(REPO_PATH), "lookups/%s" % lookup['filename'])
if not path.isfile(lookup_csv_file):
print "ERROR: filename {} does not exist".format(lookup['filename'])
print lookup_csv_file
print "\t{}".format(lookups_manifest_file)
error = True
lookup_manifests[lookup['name']] = lookup
return error, lookup_manifests
if __name__ == "__main__":
# grab arguments
parser = argparse.ArgumentParser(description="validates security content manifest files", epilog="""
Validates security manifest for correctness, adhering to spec and other common items.
VALIDATE DOES NOT PROCESS RESPONSES SPEC for the moment.""")
parser.add_argument("-p", "--path", required=True, help="path to security-security content repo")
parser.add_argument("-v", "--verbose", required=False, action='store_true', help="prints verbose output")
# parse them
args = parser.parse_args()
REPO_PATH = args.path
verbose = args.verbose
macros_error, macros = validate_macros(REPO_PATH, verbose)
lookups_error, lookups = validate_lookups(REPO_PATH, verbose)
story_error = validate_story(REPO_PATH, verbose)
detection_error = validate_detection(REPO_PATH, verbose, macros, lookups)
investigation_error = validate_investigation(REPO_PATH, verbose, macros, lookups)
baseline_error = validate_baselines(REPO_PATH, verbose, macros, lookups)
if story_error:
sys.exit("Errors found")
elif detection_error:
sys.exit("Errors found")
elif investigation_error:
sys.exit("Errors found")
elif baseline_error:
sys.exit("Errors found")
elif macros_error:
sys.exit("Errors found")
elif lookups_error:
sys.exit("Errors found")
else:
print "No Errors found"
| 39.886221 | 141 | 0.630211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13,305 | 0.348198 |
2c86d57953ceb081ab010190a1fd6e152927560a | 8,829 | py | Python | py/sandbox.python.py | schaabs/sandbox | ee8abb2a8220ca841b9b5a2579c25d100a43eb4f | [
"MIT"
] | null | null | null | py/sandbox.python.py | schaabs/sandbox | ee8abb2a8220ca841b9b5a2579c25d100a43eb4f | [
"MIT"
] | 2 | 2018-02-01T19:58:53.000Z | 2018-02-23T00:50:18.000Z | py/sandbox.python.py | schaabs/sandbox | ee8abb2a8220ca841b9b5a2579c25d100a43eb4f | [
"MIT"
] | 1 | 2020-12-16T06:35:51.000Z | 2020-12-16T06:35:51.000Z | import hashlib
import shutil
import io
import gzip
import platform
import os
import struct
import json
import mmap
class ElfConst:
CLASS_32 = 1
CLASS_64 = 2
DATA_LE = 1
DATA_BE = 2
TYPE_RELOC = 1
TYPE_EXEC = 2
TYPE_SHARED = 3
TYPE_CORE = 4
class Layout:
ElfIdent = b'=4sBBBBBxxxxxxx'
ElfFileHeader32BE = b'>HHIIIIIHHHHHH'
ElfFileHeader32LE = b'<HHIIIIIHHHHHH'
ElfFileHeader64BE = b'>HHIQQQIHHHHHH'
ElfFileHeader64LE = b'<HHIQQQIHHHHHH'
ElfProgramHeader32BE = b'>IIIIIIII'
ElfProgramHeader32LE = b'<IIIIIIII'
ElfProgramHeader64BE = b'>IIQQQQQQ'
ElfProgramHeader64LE = b'<IIQQQQQQ'
ElfNoteHeader32BE = b'>III'
ElfNoteHeader32LE = b'<III'
ElfNoteHeader64BE = b'>III'
ElfNoteHeader64LE = b'<III'
class ExplicitLayout:
layout = None
def size(self):
if not self.layout:
return 0
return struct.calcsize(self.layout)
def _struct_unpack_from(self, file, offset=0):
file.seek(offset)
        bytestr = file.read(self.size())
return struct.unpack(self.layout, bytestr)
class ElfIdent(ExplicitLayout):
magic = None
elfClass = None
elfData = None
fileVersion = None
fileAbi = None
abiVersion = None
def __init__(self):
self.layout = Layout.ElfIdent
def unpack_from(self, file, offset=0):
print 'offset=' + hex(offset) + ' size=' + hex(self.size())
(self.magic, self.elfClass, self.elfData, self.fileVersion, self.fileAbi, self.abiVersion) = self._struct_unpack_from(file, offset)
print self
def is_valid(self):
#if the magic string doesn't match the expected '\x7fELF' return false
return self.magic == '\x7fELF'
def __str__(self):
dict = {
'magic': self.magic,
'elfClass': hex(self.elfClass),
'elfData': hex(self.elfData),
'fileVersion': hex(self.fileVersion),
'fileAbi': hex(self.fileAbi),
'abiVersion': hex(self.abiVersion)
}
return json.dumps(dict)
class ElfFileHeader(ExplicitLayout):
type = None
machine = None
version = None
entry = None
phoff = None
shoff = None
flags = None
ehsize = None
phentsize = None
phnum = None
shentsize = None
shnum = None
shstrndx = None
def __init__(self, elfident):
if elfident.elfClass == ElfConst.CLASS_32:
if elfident.elfData == ElfConst.DATA_BE:
self.layout = Layout.ElfFileHeader32BE
elif elfident.elfData == ElfConst.DATA_LE:
self.layout = Layout.ElfFileHeader32LE
elif elfident.elfClass == ElfConst.CLASS_64:
if elfident.elfData == ElfConst.DATA_BE:
self.layout = Layout.ElfFileHeader64BE
elif elfident.elfData == ElfConst.DATA_LE:
self.layout = Layout.ElfFileHeader64LE
# returns the data at the specified offset as an ElfFileHeader
def unpack_from(self, file, offset):
print 'offset=' + hex(offset) + ' size=' + hex(self.size())
(self.type, self.machine, self.version, self.entry,
self.phoff, self.shoff, self.flags, self.ehsize,
self.phentsize, self.phnum, self.shentsize, self.shnum,
self.shstrndx) = self._struct_unpack_from(file, offset)
print self
def __str__(self):
dict = {
'type': hex(self.type),
'machine': hex(self.machine),
'version': hex(self.version),
'entry': hex(self.entry),
'phoff': hex(self.phoff),
'shoff': hex(self.shoff),
'flags': hex(self.flags),
'ehsize': hex(self.ehsize),
'phentsize': hex(self.phentsize),
'phnum': hex(self.phnum),
'shentsize': hex(self.shentsize),
'shnum': hex(self.shnum),
'shstrndx': hex(self.shstrndx)
}
return json.dumps(dict)
class ElfProgramHeader(ExplicitLayout):
type = None
offset = None
vaddr = None
paddr = None
filesz = None
memsz = None
flags = None
align = None
def __init__(self, elfident):
self._elfident = elfident
if elfident.elfClass == ElfConst.CLASS_32:
if elfident.elfData == ElfConst.DATA_BE:
self.layout = Layout.ElfProgramHeader32BE
elif elfident.elfData == ElfConst.DATA_LE:
self.layout = Layout.ElfProgramHeader32LE
elif elfident.elfClass == ElfConst.CLASS_64:
if elfident.elfData == ElfConst.DATA_BE:
self.layout = Layout.ElfProgramHeader64BE
elif elfident.elfData == ElfConst.DATA_LE:
self.layout = Layout.ElfProgramHeader64LE
def unpack_from(self, file, offset):
print 'offset=' + hex(offset) + ' size=' + hex(self.size())
if self._elfident.elfClass == ElfConst.CLASS_32:
(self.type, self.offset, self.vaddr, self.paddr,
self.filesz, self.memsz, self.flags, self.align) = self._struct_unpack_from(file, offset)
else:
(self.type, self.flags, self.offset, self.vaddr,
self.paddr, self.filesz, self.memsz, self.align) = self._struct_unpack_from(file, offset)
print self
def __str__(self):
str = ''.join([
'(type=', hex(self.type),
' offset=', hex(self.offset),
' vaddr=', hex(self.vaddr),
' paddr=', hex(self.paddr),
' filesz=', hex(self.filesz),
' memsz=', hex(self.memsz),
' flags=', hex(self.flags),
' align', hex(self.align), ')'
])
return str
class ElfNote:
noteHeader = None
name = None
descr = None
class ElfNoteHeader(ExplicitLayout):
namesz = None
descsz = None
type = None
def __init__(self, elfident):
if elfident.elfClass == ElfConst.CLASS_32:
if elfident.elfData == ElfConst.DATA_BE:
self.layout = Layout.ElfNoteHeader32BE
elif elfident.elfData == ElfConst.DATA_LE:
self.layout = Layout.ElfNoteHeader32LE
elif elfident.elfClass == ElfConst.CLASS_64:
if elfident.elfData == ElfConst.DATA_BE:
self.layout = Layout.ElfNoteHeader64BE
elif elfident.elfData == ElfConst.DATA_LE:
self.layout = Layout.ElfNoteHeader64LE
def unpack_from(self, file, offset):
(self.namesz, self.descsz, self.type) = self._struct_unpack_from(file, offset)
def __str__(self):
dict = {
'namesz': hex(self.namesz),
'descsz': hex(self.descsz),
'type': hex(self.type)
}
return json.dumps(dict)
class ElfFile:
ident = None
fileHeader = None
programHeaders = [ ]
notes = [ ]
@staticmethod
def unpack_from(file, offset=0):
elffile = ElfFile()
elffile.ident = ElfIdent()
elffile.ident.unpack_from(file, offset)
if not elffile.ident.is_valid():
return None
elffile.fileHeader = ElfFileHeader(elffile.ident)
elffile.fileHeader.unpack_from(file, offset + elffile.ident.size())
elffile._unpack_program_headers(file, offset)
return elffile
def _unpack_program_headers(self, file, offset):
for i in range(0, self.fileHeader.phnum):
ph = ElfProgramHeader(self.ident)
print offset + self.fileHeader.phoff + (i * self.fileHeader.phentsize)
ph.unpack_from(file, offset + self.fileHeader.phoff + (i * self.fileHeader.phentsize))
self.programHeaders.append(ph)
            if ph.type == 4:
                # PT_NOTE segment: collect its notes (see _unpack_notes below)
                self._unpack_notes(file, ph.offset, ph.offset + ph.filesz)
def __str__(self):
filestr = 'ident:\n' + str(self.ident) + '\nfileHeader:\n' + str(self.fileHeader) + '\nprogramHeaders:\n' + '\n'.join(str(ph) for ph in self.programHeaders)
return filestr
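    def _unpack_notes(self, file, start, end):
        # NOTE: this helper is called from _unpack_program_headers but was not
        # defined in the original file; the body below is an assumed minimal
        # sketch of standard PT_NOTE parsing (name and descriptor fields padded
        # to 4-byte boundaries), not the author's implementation.
        offset = start
        while offset < end:
            note = ElfNote()
            note.noteHeader = ElfNoteHeader(self.ident)
            note.noteHeader.unpack_from(file, offset)
            offset += note.noteHeader.size()
            file.seek(offset)
            note.name = file.read(note.noteHeader.namesz)
            offset += (note.noteHeader.namesz + 3) & ~3
            file.seek(offset)
            note.descr = file.read(note.noteHeader.descsz)
            offset += (note.noteHeader.descsz + 3) & ~3
            self.notes.append(note)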
if __name__ == '__main__':
with open('libcoreclr.so', 'rb') as corefile:
print ''
print ''
print(ElfFile.unpack_from(corefile, 0))
| 30.030612 | 164 | 0.557028 | 8,469 | 0.959225 | 0 | 0 | 444 | 0.050289 | 0 | 0 | 704 | 0.079737 |
2c88f77b721f16bdbc0e12c2be60eceb519ef175 | 800 | py | Python | log_redaction_cli.py | kalaboster/strings | a0c7160af0715599721afce92e739283a556f80c | [
"Apache-2.0"
] | 1 | 2019-09-25T04:34:25.000Z | 2019-09-25T04:34:25.000Z | log_redaction_cli.py | kalaboster/strings | a0c7160af0715599721afce92e739283a556f80c | [
"Apache-2.0"
] | null | null | null | log_redaction_cli.py | kalaboster/strings | a0c7160af0715599721afce92e739283a556f80c | [
"Apache-2.0"
] | null | null | null | """log_redaction_cli
Usage:
log_redaction_cli.py --tarfile <tarfile> --working-dir <working-dir> --output-dir <output-dir>
log_redaction_cli.py (-h | --help)
log_redaction_cli.py --version
Options:
-h --help Pass in a string: example command: python log_redaction_cli.py --tarfile "test/files/test_output.tar.gz" --working-dir "/home/kalab/github/stringer/test/files" --output-dir log_redataction_example
--version v version
"""
from docopt import docopt
import stringer.utils.log_redaction_utils as log_redact
if __name__ == '__main__':
arguments = docopt(__doc__, version='0.0.9')
perm_list = log_redact.process_gz(file=arguments.get("<tarfile>"),working_dir=arguments.get("<working-dir>"), output_gz_dir=arguments.get("<output-dir>"))
print(str(perm_list))
| 33.333333 | 214 | 0.7325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 509 | 0.63625 |
2c89750a4b91a2c0339c7248c5dedfa016626923 | 15,323 | py | Python | seq2seq.py | CAMeL-Lab/gender-reinflection | 006de318a326c8ea67d610adb30d3e0a8d6e59db | [
"MIT"
] | 4 | 2020-12-02T12:44:26.000Z | 2021-07-19T01:42:33.000Z | seq2seq.py | CAMeL-Lab/gender-reinflection | 006de318a326c8ea67d610adb30d3e0a8d6e59db | [
"MIT"
] | null | null | null | seq2seq.py | CAMeL-Lab/gender-reinflection | 006de318a326c8ea67d610adb30d3e0a8d6e59db | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import numpy as np
import torch.nn.functional as F
from attention import AdditiveAttention
class Encoder(nn.Module):
"""Encoder bi-GRU"""
def __init__(self, input_dim, char_embed_dim,
encoder_hidd_dim,
decoder_hidd_dim,
num_layers,
morph_embeddings=None,
fasttext_embeddings=None,
char_padding_idx=0,
word_padding_idx=0,
dropout=0):
super(Encoder, self).__init__()
morph_embeddings_dim = 0
self.morph_embedding_layer = None
fasttext_embeddings_dim = 0
self.fasttext_embedding_layer = None
self.char_embedding_layer = nn.Embedding(input_dim,
char_embed_dim,
padding_idx=char_padding_idx)
if morph_embeddings is not None:
self.morph_embedding_layer = nn.Embedding.from_pretrained(morph_embeddings,
padding_idx=word_padding_idx)
morph_embeddings_dim = morph_embeddings.shape[1]
if fasttext_embeddings is not None:
self.fasttext_embedding_layer = nn.Embedding.from_pretrained(fasttext_embeddings)
fasttext_embeddings_dim = fasttext_embeddings.shape[1]
self.rnn = nn.GRU(input_size=char_embed_dim + morph_embeddings_dim + fasttext_embeddings_dim,
hidden_size=encoder_hidd_dim,
num_layers=num_layers,
batch_first=True,
bidirectional=True,
dropout=dropout if num_layers > 1 else 0.0)
self.linear_map = nn.Linear(encoder_hidd_dim * 2, decoder_hidd_dim)
def forward(self, char_src_seqs, word_src_seqs, src_seqs_lengths):
embedded_seqs = self.char_embedding_layer(char_src_seqs)
# embedded_seqs shape: [batch_size, max_src_seq_len, char_embed_dim]
# Add morph embeddings to the char embeddings if needed
if self.morph_embedding_layer is not None:
embedded_word_seqs_morph = self.morph_embedding_layer(word_src_seqs)
# embedded_word_seqs_morph shape: [batch_size, max_src_seq_len, morph_embeddings_dim]
embedded_seqs = torch.cat((embedded_seqs, embedded_word_seqs_morph), dim=2)
# embedded_seqs shape: [batch_size, max_src_seq_len, char_embed_dim + morph_embeddings_dim]
# Add fasttext embeddings to the char embeddings if needed
if self.fasttext_embedding_layer is not None:
embedded_word_seqs_ft = self.fasttext_embedding_layer(word_src_seqs)
# embedded_word_seqs_ft shape: [batch_size, max_src_seq_len, fasttext_embeddings_dim]
embedded_seqs = torch.cat((embedded_seqs, embedded_word_seqs_ft), dim=2)
# embedded_seqs shape: [batch_size, max_src_seq_len, char_embed_dim + fasttext_embeddings_dim]
# packing the embedded_seqs
packed_embedded_seqs = pack_padded_sequence(embedded_seqs, src_seqs_lengths, batch_first=True)
output, hidd = self.rnn(packed_embedded_seqs)
# hidd shape: [num_layers * num_dirs, batch_size, encoder_hidd_dim]
# concatenating the forward and backward vectors for each layer
hidd = torch.cat([hidd[0:hidd.size(0):2], hidd[1:hidd.size(0):2]], dim=2)
# hidd shape: [num layers, batch_size, num_directions * encoder_hidd_dim]
# mapping the encode hidd state to the decoder hidd dim space
hidd = torch.tanh(self.linear_map(hidd))
# unpacking the output
output, lengths = pad_packed_sequence(output, batch_first=True)
# output shape: [batch_size, src_seqs_length, num_dirs * encoder_hidd_dim]
return output, hidd
class Decoder(nn.Module):
"""Decoder GRU
Things to note:
- The input to the decoder rnn at each time step is the
concatenation of the embedded token and the context vector
- The context vector will have a size of batch_size, encoder_hidd_dim * 2
- The prediction layer input is the concatenation of
the context vector and the h_t of the decoder
"""
def __init__(self, input_dim, char_embed_dim,
decoder_hidd_dim, num_layers,
output_dim,
encoder_hidd_dim,
padding_idx=0,
embed_trg_gender=False,
gender_embeddings=None,
gender_input_dim=0,
gender_embed_dim=0,
dropout=0):
super(Decoder, self).__init__()
self.attention = AdditiveAttention(encoder_hidd_dim=encoder_hidd_dim,
decoder_hidd_dim=decoder_hidd_dim)
self.gender_embedding_layer = None
if embed_trg_gender:
if gender_embeddings is None:
self.gender_embedding_layer = nn.Embedding(gender_input_dim, gender_embed_dim)
else:
self.gender_embedding_layer = nn.Embedding.from_pretrained(gender_embeddings)
self.char_embedding_layer = nn.Embedding(input_dim,
char_embed_dim,
padding_idx=padding_idx)
# the input to the rnn is the context_vector + embedded token --> embed_dim + hidd_dim
self.rnn = nn.GRU(input_size=char_embed_dim + encoder_hidd_dim * 2,
hidden_size=decoder_hidd_dim,
num_layers=num_layers,
batch_first=True,
dropout=dropout if num_layers > 1 else 0.0)
# the input to the classifier is h_t + context_vector + gender_embed_dim? --> hidd_dim * 2
self.classification_layer = nn.Linear(encoder_hidd_dim * 2
+ decoder_hidd_dim * num_layers
+ gender_embed_dim + char_embed_dim, output_dim)
self.dropout_layer = nn.Dropout(dropout)
def forward(self, trg_seqs, encoder_outputs, decoder_h_t, context_vectors,
attention_mask, trg_gender=None):
# trg_seqs shape: [batch_size]
batch_size = trg_seqs.shape[0]
trg_seqs = trg_seqs.unsqueeze(1)
# trg_seqs shape: [batch_size, 1]
# Step 1: embedding the target seqs
embedded_seqs = self.char_embedding_layer(trg_seqs)
# embedded_seqs shape: [batch_size, 1, embed_dim]
# context_vectors shape: [batch_size, encoder_hidd_dim * 2]
# changing shape to: [batch_size, 1, encoder_hidd_dim * 2]
context_vectors = context_vectors.unsqueeze(1)
# concatenating the embedded trg sequence with the context_vectors
rnn_input = torch.cat((embedded_seqs, context_vectors), dim=2)
# rnn_input shape: [batch_size, 1, embed_dim + encoder_hidd_dim * 2]
# Step 2: feeding the input to the rnn and updating the decoder_h_t
decoder_output, decoder_h_t = self.rnn(rnn_input, decoder_h_t)
# decoder output shape: [batch_size, 1, num_dirs * hidd_dim]
# decoder_h_t shape: [num_layers * num_dirs, batch_size, hidd_dim]
# Step 3: updating the context vectors through attention
context_vectors, atten_scores = self.attention(keys=encoder_outputs,
query=decoder_h_t,
mask=attention_mask)
# Step 4: get the prediction vector
# embed trg gender info if needed
if self.gender_embedding_layer is not None:
embedded_trg_gender = self.gender_embedding_layer(trg_gender)
# embedded_trg_gender shape: [batch_size, gender_embed_dim]
# concatenating decoder_h_t, context_vectors, and the
# embedded_trg_gender to create a prediction vector
if self.rnn.num_layers == 1:
assert decoder_output.squeeze(1).eq(decoder_h_t.view(decoder_h_t.shape[1], -1)).all().item()
predictions_vector = torch.cat((decoder_h_t.view(decoder_h_t.shape[1], -1),
context_vectors, embedded_trg_gender,
embedded_seqs.squeeze(1)), dim=1)
# predictions_vector: [batch_size, hidd_dim + encoder_hidd_dim * 2 + gender_embed_dim]
else:
# concatenating decoder_h_t with context_vectors to
# create a prediction vector
predictions_vector = torch.cat((decoder_h_t.view(decoder_h_t.shape[1], -1),
context_vectors, embedded_seqs.squeeze(1)), dim=1)
# predictions_vector: [batch_size, hidd_dim + encoder_hidd_dim * 2]
# Step 5: feeding the prediction vector to the fc layer
# to a make a prediction
# apply dropout if needed
predictions_vector = self.dropout_layer(predictions_vector)
prediction = self.classification_layer(predictions_vector)
# prediction shape: [batch_size, output_dim]
return prediction, decoder_h_t, atten_scores, context_vectors
class Seq2Seq(nn.Module):
"""Seq2Seq model"""
def __init__(self, encoder_input_dim, encoder_embed_dim,
encoder_hidd_dim, encoder_num_layers,
decoder_input_dim, decoder_embed_dim,
decoder_hidd_dim, decoder_num_layers,
decoder_output_dim,
morph_embeddings=None, fasttext_embeddings=None,
gender_embeddings=None,
embed_trg_gender=False, gender_input_dim=0,
gender_embed_dim=0, char_src_padding_idx=0,
word_src_padding_idx=0, trg_padding_idx=0,
dropout=0, trg_sos_idx=2):
super(Seq2Seq, self).__init__()
self.encoder = Encoder(input_dim=encoder_input_dim,
char_embed_dim=encoder_embed_dim,
encoder_hidd_dim=encoder_hidd_dim,
decoder_hidd_dim=decoder_hidd_dim,
num_layers=encoder_num_layers,
morph_embeddings=morph_embeddings,
fasttext_embeddings=fasttext_embeddings,
char_padding_idx=char_src_padding_idx,
word_padding_idx=word_src_padding_idx,
dropout=dropout)
self.decoder = Decoder(input_dim=decoder_input_dim,
char_embed_dim=decoder_embed_dim,
decoder_hidd_dim=decoder_hidd_dim,
num_layers=decoder_num_layers,
encoder_hidd_dim=encoder_hidd_dim,
output_dim=decoder_input_dim,
padding_idx=trg_padding_idx,
embed_trg_gender=embed_trg_gender,
gender_input_dim=gender_input_dim,
gender_embed_dim=gender_embed_dim,
gender_embeddings=gender_embeddings,
dropout=dropout)
self.char_src_padding_idx = char_src_padding_idx
self.trg_sos_idx = trg_sos_idx
self.sampling_temperature = 3
def create_mask(self, src_seqs, src_padding_idx):
mask = (src_seqs != src_padding_idx)
return mask
def forward(self, char_src_seqs, word_src_seqs, src_seqs_lengths, trg_seqs,
trg_gender=None, teacher_forcing_prob=0.3):
# trg_seqs shape: [batch_size, trg_seqs_length]
# reshaping to: [trg_seqs_length, batch_size]
trg_seqs = trg_seqs.permute(1, 0)
trg_seqs_length, batch_size = trg_seqs.shape
# passing the src to the encoder
encoder_outputs, encoder_hidd = self.encoder(char_src_seqs, word_src_seqs, src_seqs_lengths)
# creating attention masks
attention_mask = self.create_mask(char_src_seqs, self.char_src_padding_idx)
predictions = []
decoder_attention_scores = []
# initializing the trg_seqs to <s> token
y_t = torch.ones(batch_size, dtype=torch.long) * self.trg_sos_idx
# intializing the context_vectors to zero
context_vectors = torch.zeros(batch_size, self.encoder.rnn.hidden_size * 2)
# context_vectors shape: [batch_size, encoder_hidd_dim * 2]
# initializing the hidden state of the decoder to the encoder hidden state
decoder_h_t = encoder_hidd
# decoder_h_t shape: [batch_size, decoder_hidd_dim]
# moving y_t and context_vectors to the right device
y_t = y_t.to(encoder_hidd.device)
context_vectors = context_vectors.to(encoder_hidd.device)
for i in range(0, trg_seqs_length):
teacher_forcing = np.random.random() < teacher_forcing_prob
# if teacher_forcing, use ground truth target tokens
# as an input to the decoder
if teacher_forcing:
y_t = trg_seqs[i]
# do a single decoder step
prediction, decoder_h_t, atten_scores, context_vectors = self.decoder(trg_seqs=y_t,
trg_gender=trg_gender,
encoder_outputs=encoder_outputs,
decoder_h_t=decoder_h_t,
context_vectors=context_vectors,
attention_mask=attention_mask)
# If not teacher force, use the maximum
# prediction as an input to the decoder in
# the next time step
if not teacher_forcing:
# we multiply the predictions with a sampling_temperature
# to make the probablities peakier, so we can be confident about the
# maximum prediction
pred_output_probs = F.softmax(prediction * self.sampling_temperature, dim=1)
y_t = torch.argmax(pred_output_probs, dim=1)
predictions.append(prediction)
decoder_attention_scores.append(atten_scores)
predictions = torch.stack(predictions)
# predictions shape: [trg_seq_len, batch_size, output_dim]
predictions = predictions.permute(1, 0, 2)
# predictions shape: [batch_size, trg_seq_len, output_dim]
decoder_attention_scores = torch.stack(decoder_attention_scores)
# attention_scores_total shape: [trg_seq_len, batch_size, src_seq_len]
decoder_attention_scores = decoder_attention_scores.permute(1, 0, 2)
# attention_scores_total shape: [batch_size, trg_seq_len, src_seq_len]
return predictions, decoder_attention_scores
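# Hedged usage sketch -- not part of the original file. All dimensions, vocab
# sizes and the random tensors below are illustrative assumptions, and it
# assumes the local `attention` module (AdditiveAttention) is importable.
if __name__ == "__main__":
    model = Seq2Seq(encoder_input_dim=120, encoder_embed_dim=32,
                    encoder_hidd_dim=64, encoder_num_layers=1,
                    decoder_input_dim=120, decoder_embed_dim=32,
                    decoder_hidd_dim=128, decoder_num_layers=1,
                    decoder_output_dim=120,
                    embed_trg_gender=True, gender_input_dim=4,
                    gender_embed_dim=10)
    char_src = torch.randint(3, 120, (2, 15))        # [batch, src_len] char ids
    word_src = torch.zeros(2, 15, dtype=torch.long)  # word-level ids (unused without morph/fasttext embeddings)
    lengths = torch.tensor([15, 15])                 # per-example source lengths
    trg = torch.randint(3, 120, (2, 12))             # [batch, trg_len] target char ids
    trg_gender = torch.tensor([0, 1])                # target gender ids
    preds, attn = model(char_src, word_src, lengths, trg, trg_gender=trg_gender)
    print(preds.shape)                               # [batch, trg_len, decoder_input_dim]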
| 47.003067 | 114 | 0.607714 | 15,116 | 0.986491 | 0 | 0 | 0 | 0 | 0 | 0 | 3,867 | 0.252366 |
2c8a508e2824287924c65736a4d078672526b373 | 2,319 | py | Python | cowkit/qcow_header.py | Junzki/cowkit | ae1f6915322b0a5b7a67b7c50111265221e443f8 | [
"BSD-3-Clause"
] | null | null | null | cowkit/qcow_header.py | Junzki/cowkit | ae1f6915322b0a5b7a67b7c50111265221e443f8 | [
"BSD-3-Clause"
] | null | null | null | cowkit/qcow_header.py | Junzki/cowkit | ae1f6915322b0a5b7a67b7c50111265221e443f8 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding:utf-8 -*-
"""
https://people.gnome.org/~markmc/qcow-image-format.html
"""
import dataclasses
import struct
from typing import List
QCOW_HEADER_SIZE = 72
QCOW_MAGIC = b'QFI\xfb'
QCOW_HEADER_GROUP_META = 8
QCOW_HEADER_GROUP_BACKING_FILE = 12
QCOW_HEADER_GROUP_SIZE = 16
QCOW_HEADER_GROUP_L1 = 12
QCOW_HEADER_GROUP_REFCOUNT = 12
QCOW_HEADER_GROUP_SNAPSHOT = 12
QCOW_HEADER_SEG = (
QCOW_HEADER_GROUP_META,
QCOW_HEADER_GROUP_BACKING_FILE,
QCOW_HEADER_GROUP_SIZE,
QCOW_HEADER_GROUP_L1,
QCOW_HEADER_GROUP_REFCOUNT,
QCOW_HEADER_GROUP_SNAPSHOT
)
@dataclasses.dataclass
class QCowHeader:
magic: bytes = QCOW_MAGIC
version: int = 1
backing_file_offset: int = 0
backing_file_size: int = 0
cluster_bits: int = 0
size: int = 0
crypt_method: int = 0
l1_size: int = 0
l1_table_offset: int = 0
refcount_table_offset: int = 0
refcount_table_clusters: int = 0
nb_snapshots: int = 0
snapshots_offset: int = 0
def qcow_guess_type(header: bytes) -> bool:
magic = header[:len(QCOW_MAGIC)]
return QCOW_MAGIC == magic
def qcow_split_header(header: bytes) -> List[bytes]:
slices = list()
ptr = 0
for seg in QCOW_HEADER_SEG:
next_ = ptr + seg
s = header[ptr:next_]
ptr = next_
slices.append(s)
return slices
def qcow_parse_header(header: bytes) -> QCowHeader:
if not qcow_guess_type(header):
raise ValueError("Not a QCOW Image")
slices = qcow_split_header(header)
meta_, backing_, size_, l1_, refcount_, snapshot_ = slices
_, version_ = struct.unpack('>II', meta_)
backing_file_offset, backing_file_size = struct.unpack('>QI', backing_)
cluster_bits, size, crypt_method = struct.unpack('>IQI', size_)
l1_size, l1_table_offset = struct.unpack('>IQ', l1_)
refcount_table_offset, refcount_table_clusters = struct.unpack('>QI', refcount_)
nb_snapshots, snapshots_offset = struct.unpack('>IQ', snapshot_)
return QCowHeader(QCOW_MAGIC, version_,
backing_file_offset, backing_file_size,
cluster_bits, size, crypt_method,
l1_size, l1_table_offset,
refcount_table_offset, refcount_table_clusters,
nb_snapshots, snapshots_offset)
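# Hedged usage sketch -- not part of the original module. The image path below
# is an illustrative assumption; any qcow file's first QCOW_HEADER_SIZE bytes
# are enough for qcow_parse_header.
if __name__ == "__main__":
    with open("disk.qcow", "rb") as image:
        header_bytes = image.read(QCOW_HEADER_SIZE)
    print(qcow_parse_header(header_bytes))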
| 25.483516 | 84 | 0.68564 | 385 | 0.16602 | 0 | 0 | 408 | 0.175938 | 0 | 0 | 145 | 0.062527 |
2c8bdc178f22c281aadc681cca47bedaa07c3f51 | 1,901 | py | Python | faddr/cli.py | kido5217/faddr | 116c789ac3985cc3f461203e249d8043dcb73428 | [
"MIT"
] | 1 | 2022-03-10T17:52:13.000Z | 2022-03-10T17:52:13.000Z | faddr/cli.py | kido5217/faddr | 116c789ac3985cc3f461203e249d8043dcb73428 | [
"MIT"
] | 45 | 2021-08-08T15:23:42.000Z | 2022-03-28T20:23:57.000Z | faddr/cli.py | kido5217/faddr | 116c789ac3985cc3f461203e249d8043dcb73428 | [
"MIT"
] | 1 | 2021-10-22T00:46:35.000Z | 2021-10-22T00:46:35.000Z | """CLI entry points of faddr."""
import argparse
import pathlib
import sys
from faddr import logger
from faddr.config import load_config
from faddr.rancid import RancidDir
from faddr.database import Database
def parse_args_db():
"""Parsing CMD keys."""
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
parser.add_argument(
"-c",
"--confguration-file",
help="Faddr file configuration location",
)
parser.add_argument(
"-r",
"--rancid-dir",
help="Rancid basedir location",
)
parser.add_argument(
"-g",
"--rancid-groups",
help="Rancid groups to parse, separated by coma(,)",
)
parser.add_argument(
"-d",
"--database-dir",
help="Database dir location",
)
parser.add_argument(
"-f",
"--database-file",
help="Database file name",
)
args = parser.parse_args()
return vars(args)
def faddr_db():
"""Parsing devices' config files and writing data to database."""
args = parse_args_db()
logger.debug(f"Arguments from CMD: {args}")
config = load_config(cmd_args=args)
rancid = RancidDir(config.rancid.dir)
database = Database(
pathlib.Path(config.database.dir) / pathlib.Path(config.database.file)
)
if not rancid.is_valid():
error = (
f'"{config.rancid.dir}" is not a valid rancid BASEDIR '
"or was not properly initialised with rancid-csv utility"
)
logger.error(error)
sys.exit(1)
# Get groups list found in rancid's base dir
groups = ("group1", "group2")
logger.debug(f"Found rancid groups: {groups}")
for group in groups:
logger.debug(f"Parsing devices in group {group}")
data = rancid.parse_configs(group)
if len(data) > 0:
database.insert(data)
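# Hedged entry-point sketch -- not part of the original module. faddr_db()
# appears intended as the CLI entry point; this guard simply lets the module
# be executed directly as well.
if __name__ == "__main__":
    faddr_db()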
| 23.7625 | 78 | 0.611783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 642 | 0.337717 |
2c8eec45cea80bea499216987a3e5392f86cf4b2 | 3,100 | py | Python | features/FeatureExtract.py | wangqiuli0102/Class-Aware-Multi-Window-Adversarial-Lung-Nodule-Synthesis | 91a88ae2b9012f2bfb915d6f67ac613796b9b006 | [
"MIT"
] | null | null | null | features/FeatureExtract.py | wangqiuli0102/Class-Aware-Multi-Window-Adversarial-Lung-Nodule-Synthesis | 91a88ae2b9012f2bfb915d6f67ac613796b9b006 | [
"MIT"
] | null | null | null | features/FeatureExtract.py | wangqiuli0102/Class-Aware-Multi-Window-Adversarial-Lung-Nodule-Synthesis | 91a88ae2b9012f2bfb915d6f67ac613796b9b006 | [
"MIT"
] | null | null | null | '''
Created by Wang Qiuli
2020/5/23
'''
import cv2
import numpy as np
import matplotlib.pyplot as plt
import slic
import rgb_gr
from PIL import Image
from skimage import io, color
from skimage import data, filters
def truncate_hu(image_array, max = 400, min = -900):
image = image_array.copy()
image[image > max] = max
image[image < min] = min
return image
def hist(img):
'''
    return histogram values
1 * 128
'''
img = truncate_hu(img)
hist = cv2.calcHist([img],[0],None,[128],[-900,400])
# print(hist.shape)
# plt.subplot(121)
# plt.imshow(img,'gray')
# plt.xticks([])
# plt.yticks([])
# plt.title("Original")
# plt.subplot(122)
# plt.hist(img.ravel(),128,[-900,400])
# plt.show()
return hist
def gray2rgb(rgb,imggray):
R = rgb[:,:,0]
G = rgb[:,:,1]
B = ((imggray) - 0.299 * R - 0.587 * G) / 0.114
grayRgb = np.zeros((rgb.shape))
grayRgb[:, :, 2] = B
grayRgb[:, :, 0] = R
grayRgb[:, :, 1] = G
return grayRgb
def super_pixel(img):
'''
return super_pixel images
img w * h
'''
img = truncate_hu(img)
# io.imsave('ori.png', img)
img = np.expand_dims(img, 2)
# # print(img.shape)
rgb = np.concatenate((img, img, img), 2)
# io.imsave('ori2.png', rgb)
obj = slic.SLICProcessor(rgb, 4096, 5)
res = obj.iterate_10times()
return res
def standard_deviation(img):
hist_value = hist(img)
std = np.std(hist_value)
# print(std)
return std
def edge_detection(img):
'''
edge detection
'''
img = truncate_hu(img)
# io.imsave('ori.png', img)
# img = np.expand_dims(img, 2)
# # # print(img.shape)
# rgb = np.concatenate((img, img, img), 2)
# gray= cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
y = cv2.Sobel(img, cv2.CV_16S, 0, 1)
z = cv2.Sobel(img, cv2.CV_16S, 1, 1)
return x
# io.imsave('canny1.png', x)
# io.imsave('canny2.png', y)
# io.imsave('canny3.png', z)
def gabor(img):
filt_real, filt_imag = filters.gabor(img,frequency=0.6)
# io.imsave('filt_imag.png', filt_imag)
return filt_imag
def threshold_void(img):
void = truncate_hu(img, -600, -900)
# io.imsave('void.png', void)
return void
def normalization(image_array):
image_array = image_array + 900
max = 1300
min = 0
    image_array = (image_array-min)/(max-min)  # scale to [0, 1]; keep this as float array math, or an error will occur
# avg = image_array.mean()
# image_array = image_array-avg
image_array = image_array * 255
    return image_array  # a bug lurked here: an array must be returned; applying the function directly didn't work
def toGrey(img):
'''
get grey-level images
0-256
'''
img = truncate_hu(img)
img_nor = normalization(img)
# io.imsave('img_nor.png', img_nor)
return img_nor
def OTSU(img):
gray = toGrey(img)
    # cv2's Otsu thresholding expects an 8-bit single-channel image, so cast
    # (rather than reinterpret) the normalized grey image before thresholding.
    gray = gray.astype(np.uint8)
retval, dst = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)
io.imsave('OTSU.png', dst)
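# Hedged usage sketch -- not part of the original file. The synthetic HU slice
# below (its value range and 512x512 size) is an illustrative assumption.
if __name__ == '__main__':
    slice_hu = np.random.randint(-1000, 400, (512, 512)).astype(np.float32)
    print(hist(slice_hu).shape)          # (128, 1) histogram over the truncated HU range
    print(standard_deviation(slice_hu))  # spread of that histogram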
| 21.985816 | 105 | 0.604194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,064 | 0.343226 |
2c8f7f3827a6fb08393ec353db2c4a501d7810bc | 2,713 | py | Python | examples/plotting/plotting_real_indexed_by_date.py | krzpiesiewicz/timeseries | 4f72de240256bc0d42cf3a24d0f7b0fd902c525f | [
"MIT"
] | 1 | 2021-08-20T10:17:18.000Z | 2021-08-20T10:17:18.000Z | examples/plotting/plotting_real_indexed_by_date.py | krzpiesiewicz/timeseries | 4f72de240256bc0d42cf3a24d0f7b0fd902c525f | [
"MIT"
] | null | null | null | examples/plotting/plotting_real_indexed_by_date.py | krzpiesiewicz/timeseries | 4f72de240256bc0d42cf3a24d0f7b0fd902c525f | [
"MIT"
] | null | null | null | import pandas as pd
from matplotlib import dates as mdates
from timeseries import plot_ts
from timeseries.plotting import ax_settings
def main():
# .csv available to download at:
# https://www.investing.com/currencies/gbp-usd-historical-data
gpd_usd_data = pd.read_csv("../data/GBP_USD Historical Data_monthly.csv")
gpd_usd_data["Change %"] = gpd_usd_data["Change %"].apply(
lambda s: float(s[:-1]))
gpd_usd_data["Date"] = pd.to_datetime(gpd_usd_data.Date, format="%b %y")
gpd_usd_data = gpd_usd_data.reindex(
index=gpd_usd_data.index[::-1]).set_index(
gpd_usd_data.index
)
gpd_usd_data
plot_ts(
gpd_usd_data["Change %"],
index_values=gpd_usd_data.Date,
name="GPB in USD",
title="GPB/USD Exchange Rates",
).show()
plot_ts(
[gpd_usd_data.Price, gpd_usd_data["Change %"]],
index=range(100),
index_values=gpd_usd_data.Date[range(100)],
title="GPB/USD Exchange Rates",
round_dates="Y",
color="darkred",
major_xticks_loc=mdates.YearLocator(base=1),
date_fmt=mdates.DateFormatter("%Y"),
).show()
plot_ts(
gpd_usd_data["Change %"],
index=gpd_usd_data.Date,
name="GPB in USD",
title="GPB/USD Exchange Rates",
engine="plotly",
# legend_pos="bottom",
legend_pos="top",
).show()
plot_ts(
[gpd_usd_data.Price, gpd_usd_data["Change %"]],
index=range(200),
index_values=gpd_usd_data.Date[range(200)],
title="GPB/USD Exchange Rates",
).show()
plot_ts(
[gpd_usd_data.Price, gpd_usd_data["Change %"]],
index=gpd_usd_data.Date[range(200)],
title="GPB/USD Exchange Rates",
engine="plotly",
).show()
plot_ts(
gpd_usd_data[["Price", "Change %"]],
index=gpd_usd_data.Date[range(100)],
title="GPB/USD Exchange Rates",
name="GPB in USD",
color="darkred",
engine="plotly",
showlegend=True,
).show()
# COVID
# .csv available to download at:
# https://ourworldindata.org/explorers/coronavirus-data-explorer
covid_data = pd.read_csv("../data/covid-data.csv")
covid_data["date"] = pd.to_datetime(covid_data["date"], format="%Y-%m-%d")
covid_data.set_index("date", inplace=True)
covid_data.sort_index(ascending=True, inplace=True)
loc = "Argentina"
ts = covid_data[covid_data.location == loc]["new_cases"]
ts = ts[~ts.isnull()]
ts = ts[~(ts == 0)]
fig = plot_ts(ts, title=f"Covid-19 {loc}", color="tab:blue")
ax_settings(fig=fig, yscale="log")
fig.show()
if __name__ == "__main__":
main()
| 29.172043 | 78 | 0.613712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 718 | 0.264652 |
2c8fca97374d20495bf9555941161fa0c29cbc21 | 12,120 | py | Python | tests/tck/conftest.py | tom-chensf/nebula-graph | 0f2f0d02879bfd2421815a26158e8fa030f19b62 | [
"Apache-2.0"
] | null | null | null | tests/tck/conftest.py | tom-chensf/nebula-graph | 0f2f0d02879bfd2421815a26158e8fa030f19b62 | [
"Apache-2.0"
] | null | null | null | tests/tck/conftest.py | tom-chensf/nebula-graph | 0f2f0d02879bfd2421815a26158e8fa030f19b62 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import functools
import os
import time
import pytest
import io
import csv
import re
from nebula2.common.ttypes import Value
from nebula2.graph.ttypes import ErrorCode
from pytest_bdd import given, parsers, then, when
from tests.common.dataset_printer import DataSetPrinter
from tests.common.comparator import DataSetComparator
from tests.common.plan_differ import PlanDiffer
from tests.common.configs import DATA_DIR
from tests.common.types import SpaceDesc
from tests.common.utils import (
create_space,
load_csv_data,
space_generator,
check_resp,
response,
)
from tests.tck.utils.table import dataset, table
from tests.tck.utils.nbv import murmurhash2
parse = functools.partial(parsers.parse)
rparse = functools.partial(parsers.re)
example_pattern = re.compile(r"<(\w+)>")
def normalize_outline_scenario(request, name):
for group in example_pattern.findall(name):
fixval = request.getfixturevalue(group)
name = name.replace(f"<{group}>", fixval)
return name
@pytest.fixture
def graph_spaces():
return dict(result_set=None)
@given(parse('a graph with space named "{space}"'))
def preload_space(
request,
space,
load_nba_data,
load_nba_int_vid_data,
load_student_data,
session,
graph_spaces,
):
space = normalize_outline_scenario(request, space)
if space == "nba":
graph_spaces["space_desc"] = load_nba_data
elif space == "nba_int_vid":
graph_spaces["space_desc"] = load_nba_int_vid_data
elif space == "student":
graph_spaces["space_desc"] = load_student_data
else:
raise ValueError(f"Invalid space name given: {space}")
stmt = f'USE {space};'
response(session, stmt)
@given("an empty graph")
def empty_graph(session, graph_spaces):
pass
@given(parse("having executed:\n{query}"))
def having_executed(query, session, request):
ngql = " ".join(query.splitlines())
ngql = normalize_outline_scenario(request, ngql)
response(session, ngql)
@given(parse("create a space with following options:\n{options}"))
def new_space(request, options, session, graph_spaces):
lines = csv.reader(io.StringIO(options), delimiter="|")
opts = {
line[1].strip(): normalize_outline_scenario(request, line[2].strip())
for line in lines
}
name = "EmptyGraph_" + space_generator()
space_desc = SpaceDesc(
name=opts.get("name", name),
partition_num=int(opts.get("partition_num", 7)),
replica_factor=int(opts.get("replica_factor", 1)),
vid_type=opts.get("vid_type", "FIXED_STRING(30)"),
charset=opts.get("charset", "utf8"),
collate=opts.get("collate", "utf8_bin"),
)
create_space(space_desc, session)
graph_spaces["space_desc"] = space_desc
graph_spaces["drop_space"] = True
@given(parse('load "{data}" csv data to a new space'))
def import_csv_data(request, data, graph_spaces, session, pytestconfig):
data_dir = os.path.join(DATA_DIR, normalize_outline_scenario(request, data))
space_desc = load_csv_data(
pytestconfig,
session,
data_dir,
"I" + space_generator(),
)
assert space_desc is not None
graph_spaces["space_desc"] = space_desc
graph_spaces["drop_space"] = True
def exec_query(request, ngql, session, graph_spaces):
ngql = normalize_outline_scenario(request, ngql)
graph_spaces['result_set'] = session.execute(ngql)
graph_spaces['ngql'] = ngql
@when(parse("executing query:\n{query}"))
def executing_query(query, graph_spaces, session, request):
ngql = " ".join(query.splitlines())
exec_query(request, ngql, session, graph_spaces)
@when(parse("profiling query:\n{query}"))
def profiling_query(query, graph_spaces, session, request):
ngql = "PROFILE {" + " ".join(query.splitlines()) + "}"
exec_query(request, ngql, session, graph_spaces)
@given(parse("wait {secs:d} seconds"))
@when(parse("wait {secs:d} seconds"))
@then(parse("wait {secs:d} seconds"))
def wait(secs):
time.sleep(secs)
def line_number(steps, result):
for step in steps:
res_lines = result.split('\n')
if all(l in r for (l, r) in zip(res_lines, step.lines)):
return step.line_number
return -1
# IN literal `1, 2, 3...'
def parse_list(s: str):
numbers = s.split(',')
numbers_list = []
for num in numbers:
numbers_list.append(int(num))
return numbers_list
def hash_columns(ds, hashed_columns):
if len(hashed_columns) == 0:
return ds
for col in hashed_columns:
        assert col < len(ds.column_names), "The hashed column should be in range."
for row in ds.rows:
for col in hashed_columns:
if row.values[col].getType() != Value.NVAL and row.values[col].getType() != Value.__EMPTY__:
row.values[col] = Value(iVal = murmurhash2(row.values[col]))
return ds
def cmp_dataset(
request,
graph_spaces,
result,
order: bool,
strict: bool,
included=False,
hashed_columns = [],
) -> None:
rs = graph_spaces['result_set']
ngql = graph_spaces['ngql']
check_resp(rs, ngql)
space_desc = graph_spaces.get('space_desc', None)
vid_fn = None
if space_desc is not None:
vid_fn = murmurhash2 if space_desc.vid_type == 'int' else None
ds = dataset(
table(result, lambda x: normalize_outline_scenario(request, x)),
graph_spaces.get("variables", {}),
)
ds = hash_columns(ds, hashed_columns)
dscmp = DataSetComparator(strict=strict,
order=order,
included=included,
decode_type=rs._decode_type,
vid_fn=vid_fn)
def dsp(ds):
printer = DataSetPrinter(rs._decode_type, vid_fn=vid_fn)
return printer.ds_to_string(ds)
def rowp(ds, i):
if i is None or i < 0:
return ""
assert i < len(ds.rows), f"{i} out of range {len(ds.rows)}"
row = ds.rows[i].values
printer = DataSetPrinter(rs._decode_type, vid_fn=vid_fn)
ss = printer.list_to_string(row, delimiter='|')
return f'{i}: |' + ss + '|'
if rs._data_set_wrapper is None:
assert not ds.column_names and not ds.rows, f"Expected result must be empty table: ||"
rds = rs._data_set_wrapper._data_set
res, i = dscmp(rds, ds)
if not res:
scen = request.function.__scenario__
feature = scen.feature.rel_filename
location = f"{feature}:{line_number(scen._steps, result)}"
msg = [
f"Fail to exec: {ngql}",
f"Response: {dsp(rds)}",
f"Expected: {dsp(ds)}",
f"NotFoundRow: {rowp(ds, i)}",
f"Location: {location}",
f"Space: {str(space_desc)}",
f"vid_fn: {vid_fn}",
]
assert res, "\n".join(msg)
@then(parse("define some list variables:\n{text}"))
def define_list_var_alias(text, graph_spaces):
tbl = table(text)
graph_spaces["variables"] = {
column: "[" + ",".join(row[i] for row in tbl['rows'] if row[i]) + "]"
for i, column in enumerate(tbl['column_names'])
}
@then(parse("the result should be, in order:\n{result}"))
def result_should_be_in_order(request, result, graph_spaces):
cmp_dataset(request, graph_spaces, result, order=True, strict=True)
@then(parse("the result should be, in order, and the columns {hashed_columns} should be hashed:\n{result}"))
def result_should_be_in_order_and_hash(request, result, graph_spaces, hashed_columns):
cmp_dataset(request, graph_spaces, result, order=True, strict=True, hashed_columns=parse_list(hashed_columns))
@then(parse("the result should be, in order, with relax comparison:\n{result}"))
def result_should_be_in_order_relax_cmp(request, result, graph_spaces):
cmp_dataset(request, graph_spaces, result, order=True, strict=False)
@then(parse("the result should be, in order, with relax comparison, and the columns {hashed_columns} should be hashed:\n{result}"))
def result_should_be_in_order_relax_cmp_and_hash(request, result, graph_spaces, hashed_columns):
cmp_dataset(request, graph_spaces, result, order=True, strict=False, hashed_columns=parse_list(hashed_columns))
@then(parse("the result should be, in any order:\n{result}"))
def result_should_be(request, result, graph_spaces):
cmp_dataset(request, graph_spaces, result, order=False, strict=True)
@then(parse("the result should be, in any order, and the columns {hashed_columns} should be hashed:\n{result}"))
def result_should_be_and_hash(request, result, graph_spaces, hashed_columns):
cmp_dataset(request, graph_spaces, result, order=False, strict=True, hashed_columns=parse_list(hashed_columns))
@then(parse("the result should be, in any order, with relax comparison:\n{result}"))
def result_should_be_relax_cmp(request, result, graph_spaces):
cmp_dataset(request, graph_spaces, result, order=False, strict=False)
@then(parse("the result should be, in any order, with relax comparison, and the columns {hashed_columns} should be hashed:\n{result}"))
def result_should_be_relax_cmp_and_hash(request, result, graph_spaces, hashed_columns):
cmp_dataset(request, graph_spaces, result, order=False, strict=False, hashed_columns=parse_list(hashed_columns))
@then(parse("the result should include:\n{result}"))
def result_should_include(request, result, graph_spaces):
cmp_dataset(request,
graph_spaces,
result,
order=False,
strict=True,
included=True)
@then(parse("the result should include, and the columns {hashed_columns} should be hashed:\n{result}"))
def result_should_include_and_hash(request, result, graph_spaces, hashed_columns):
cmp_dataset(request,
graph_spaces,
result,
order=False,
strict=True,
included=True,
hashed_columns=parse_list(hashed_columns))
@then("no side effects")
def no_side_effects():
pass
@then("the execution should be successful")
def execution_should_be_succ(graph_spaces):
rs = graph_spaces["result_set"]
stmt = graph_spaces["ngql"]
check_resp(rs, stmt)
@then(rparse(r"a (?P<err_type>\w+) should be raised at (?P<time>runtime|compile time)(?P<sym>:|.)(?P<msg>.*)"))
def raised_type_error(err_type, time, sym, msg, graph_spaces):
res = graph_spaces["result_set"]
ngql = graph_spaces['ngql']
assert not res.is_succeeded(), f"Response should be failed: ngql:{ngql}"
err_type = err_type.strip()
msg = msg.strip()
res_msg = res.error_msg()
if res.error_code() == ErrorCode.E_EXECUTION_ERROR:
assert err_type == "ExecutionError", f'Error code mismatch, ngql:{ngql}"'
expect_msg = "{}".format(msg)
else:
expect_msg = "{}: {}".format(err_type, msg)
m = res_msg.startswith(expect_msg)
assert m, f'Could not find "{expect_msg}" in "{res_msg}" when execute query: "{ngql}"'
@then("drop the used space")
def drop_used_space(session, graph_spaces):
drop_space = graph_spaces.get("drop_space", False)
if not drop_space:
return
space_desc = graph_spaces.get("space_desc", None)
if space_desc is not None:
stmt = space_desc.drop_stmt()
response(session, stmt)
@then(parse("the execution plan should be:\n{plan}"))
def check_plan(plan, graph_spaces):
resp = graph_spaces["result_set"]
expect = table(plan)
column_names = expect.get('column_names', [])
idx = column_names.index('dependencies')
rows = expect.get("rows", [])
for i, row in enumerate(rows):
row[idx] = [
int(cell.strip()) for cell in row[idx].split(",") if len(cell) > 0
]
rows[i] = row
differ = PlanDiffer(resp.plan_desc(), expect)
assert differ.diff(), differ.err_msg()
| 34.927954 | 135 | 0.670792 | 0 | 0 | 0 | 0 | 7,779 | 0.641832 | 0 | 0 | 2,605 | 0.214934 |
2c8fefcc1cd7b12e6c4a468980052f0c5a0a2783 | 1,469 | py | Python | evaluation.py | rdedo099/HonoursProject2021 | 94c61218371587fd4dd9dacaa5e8f0ce7f44875d | [
"MIT"
] | null | null | null | evaluation.py | rdedo099/HonoursProject2021 | 94c61218371587fd4dd9dacaa5e8f0ce7f44875d | [
"MIT"
] | null | null | null | evaluation.py | rdedo099/HonoursProject2021 | 94c61218371587fd4dd9dacaa5e8f0ce7f44875d | [
"MIT"
] | null | null | null | from sklearn import metrics
from prettytable import PrettyTable
def evaluate_clustering(name, X, true_labels, pred_labels):
homogeneity = metrics.homogeneity_score(true_labels, pred_labels)
completeness = metrics.completeness_score(true_labels, pred_labels)
v_measure = metrics.v_measure_score(true_labels, pred_labels)
adj_rand_score = metrics.adjusted_rand_score(true_labels, pred_labels)
norm_mutual_score = metrics.normalized_mutual_info_score(true_labels, pred_labels)
fowlkes_m = metrics.fowlkes_mallows_score(true_labels, pred_labels)
#silhouette = metrics.silhouette_score(X, pred_labels, metric='euclidean')
return [name, "{:.2f}".format(homogeneity), "{:.2f}".format(completeness), "{:.2f}".format(v_measure), "{:.2f}".format(adj_rand_score), "{:.2f}".format(norm_mutual_score), "{:.2f}".format(fowlkes_m)]
def tabulate_results(results):
t = PrettyTable(['Name', 'Homogeneity', 'Completeness', 'V Measure', 'Adj Rand Score', 'Norm Mutual Score', 'Fowlkes Mallows'])
for result in results:
t.add_row(result)
print(t)
def tab_results(header, results):
t = PrettyTable(header)
for result in results:
t.add_row(result)
print(t)
def evaluate_vmeasure(true_labels, pred_labels):
return metrics.v_measure_score(true_labels, pred_labels)
def evaluate_fm(true_labels, pred_labels):
return metrics.fowlkes_mallows_score(true_labels, pred_labels) | 39.702703 | 203 | 0.733833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.1484 |
2c90e3fa15866252a877bac9622e4131af1c6c34 | 1,585 | py | Python | db_model.py | jose-marquez89/cryptovest | e0f04e3cbaa01f2de969ed17462052b23328fff8 | [
"MIT"
] | null | null | null | db_model.py | jose-marquez89/cryptovest | e0f04e3cbaa01f2de969ed17462052b23328fff8 | [
"MIT"
] | null | null | null | db_model.py | jose-marquez89/cryptovest | e0f04e3cbaa01f2de969ed17462052b23328fff8 | [
"MIT"
] | null | null | null | import os
import pymysql
from dotenv import load_dotenv
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, BigInteger, String, Float, ForeignKey


def load_engine():
    """Create the database engine"""
    load_dotenv()
    DB_UNAME = os.environ["DB_UNAME"]
    DB_PWORD = os.environ["DB_PWORD"]
    DB_HOST = os.environ["DB_HOST"]
    DB_NAME = os.environ["DB_NAME"]
    engine = create_engine(f'mysql+pymysql://{DB_UNAME}:{DB_PWORD}@{DB_HOST}/{DB_NAME}')
    return engine


Base = declarative_base()


class User(Base):
    __tablename__ = 'user'

    id = Column(BigInteger, primary_key=True, nullable=False)
    name = Column(String(32), unique=True)
    password = Column(String(32))
    transaction = relationship("Ledger")

    def __repr__(self):
        return "<User(name={}, password={})>".format(self.name, self.password)


class Ledger(Base):
    __tablename__ = 'ledger'
    # TODO: add standardized columns based on coinbase csv

    id = Column(BigInteger, primary_key=True, nullable=False)
    source = Column(String(20))
    asset = Column(String(20))
    txn_type = Column(String(20))
    amount = Column(Float)
    price_at_txn = Column(Float)
    user_id = Column(BigInteger, ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        return "<Ledger(asset={}, amount={}, user_id={})>".format(self.asset, self.amount, self.user_id)
if __name__ == "__main__":
    engine = load_engine()
    Base.metadata.create_all(engine)
| 28.303571 | 103 | 0.694006 | 860 | 0.542587 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.187382 |
2c9245c4a0d3d6fcf05085c489277adf12ebc45d | 1,982 | py | Python | P2_studies/theta_plus/Analysis/Mapping/match_mcl_to_leiden.py | chackoge/ERNIE_Plus | 7e480c47a69fc2f736ac7fb55ece35dbff919938 | ["MIT"] | 6 | 2017-09-26T23:45:52.000Z | 2021-10-18T22:58:38.000Z | P2_studies/theta_plus/Analysis/Mapping/match_mcl_to_leiden.py | NETESOLUTIONS/ERNIE | 454518f28b39a6f37ad8dde4f3be15d4dccc6f61 | ["MIT"] | null | null | null | P2_studies/theta_plus/Analysis/Mapping/match_mcl_to_leiden.py | NETESOLUTIONS/ERNIE | 454518f28b39a6f37ad8dde4f3be15d4dccc6f61 | ["MIT"] | 9 | 2017-11-22T13:42:32.000Z | 2021-05-16T17:58:03.000Z |
import pandas as pd
import mapping_module as mm
import multiprocessing as mp
from sqlalchemy import create_engine
from sys import argv
user_name = argv[1]
password = argv[2]
data_type = argv[3]
start_year = int(argv[4])
end_year = int(argv[5])
leiden_input = argv[6] #quality_func_Res --> CPM_R001
schema = argv[7]
rootdir = argv[8] # "/erniedev_data3/theta_plus/Leiden/"
sql_scheme = 'postgresql://' + user_name + ':' + password + '@localhost:5432/ernie'
engine = create_engine(sql_scheme)
data_name = data_type + str(start_year) + '_' + str(end_year)
# Read from Postgres
mcl_name = data_name + '_cluster_scp_list_unshuffled'
mcl = pd.read_sql_table(table_name= mcl_name, schema=schema, con=engine)
# # Read directly
# mcl_name = data_name + '_cluster_scp_list_unshuffled.csv'
# mcl = pd.read_csv(mcl_name)
leiden_name = data_name + '_cluster_scp_list_leiden_' + leiden_input + '.csv'
leiden = pd.read_csv(leiden_name)
mcl_grouped = mcl.groupby(by='cluster_no',
                          as_index=False).agg('count').sort_values(by='cluster_no', ascending=True)
# To match clusters between size 30 and 350 only:
mcl_grouped = mcl_grouped[(mcl_grouped['scp'] >= 30) & (mcl_grouped['scp'] <= 350)]
mcl_cluster_list = mcl_grouped['cluster_no'].tolist()
print("Running...")
p = mp.Pool(6)
final_df = pd.DataFrame()
for mcl_cluster_no in mcl_cluster_list:
    match_dict = p.starmap(mm.match_mcl_to_leiden, [(mcl_cluster_no, mcl, leiden)])
    match_df = pd.DataFrame.from_dict(match_dict)
    final_df = final_df.append(match_df, ignore_index=True)
save_name = rootdir + '/' + data_name + '_match_to_leiden_' + leiden_input + '.csv'
final_df.to_csv(save_name, index = None, header=True, encoding='utf-8')
# In case the connection times out:
engine = create_engine(sql_scheme)
save_name_sql = data_name + '_match_to_leiden_' + leiden_input
final_df.to_sql(save_name_sql, con=engine, schema=schema, index=False, if_exists='fail')
print("")
print("All Completed.") | 33.59322 | 99 | 0.734107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.265893 |
2c92b91a8b5d0fb491d1a97154a0a3c157ee8485 | 7,454 | py | Python | MeanMotionResonance/NbodySimulationUtilities.py | shadden/mean_motion_resonance | 53ab8d83cca22ad6aef8d57cd0aaa06d60b45699 | ["MIT"] | 3 | 2020-04-16T22:23:36.000Z | 2020-04-17T03:01:29.000Z | MeanMotionResonance/NbodySimulationUtilities.py | shadden/mean_motion_resonance | 53ab8d83cca22ad6aef8d57cd0aaa06d60b45699 | ["MIT"] | null | null | null | MeanMotionResonance/NbodySimulationUtilities.py | shadden/mean_motion_resonance | 53ab8d83cca22ad6aef8d57cd0aaa06d60b45699 | ["MIT"] | null | null | null |
import numpy as np
import rebound as rb
import reboundx as rbx


def set_timestep(sim, dtfactor):
    ps = sim.particles[1:]
    tperi = np.min([p.P * (1-p.e)**1.5 / np.sqrt(1+p.e) for p in ps])
    dt = tperi * dtfactor
    sim.dt = dt


def set_min_distance(sim, rhillfactor):
    ps = sim.particles[1:]
    mstar = sim.particles[0].m
    rhill = np.min([p.rhill for p in ps if p.m > 0])
    mindist = rhillfactor * rhill
    sim.exit_min_distance = mindist


def get_simarchive_integration_results(sa):
    """
    Read a simulation archive and store orbital elements
    as arrays in a dictionary.

    Arguments
    ---------
    sa : rebound.SimulationArchive
        The simulation archive to read. Can also be a reboundx simulation archive.

    Returns
    -------
    sim_results : dict
        Dictionary containing time and orbital elements at each
        snapshot of the simulation archive.
    """
    if type(sa) == rb.simulationarchive.SimulationArchive:
        return _get_rebound_simarchive_integration_results(sa)
    elif type(sa) == rbx.simulationarchive.SimulationArchive:
        return _get_reboundx_simarchive_integration_results(sa)
    raise TypeError("{} is not a rebound or reboundx simulation archive!".format(sa))
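
# Illustrative usage (not part of the original module): "archive.bin" is a
# hypothetical snapshot file produced by a rebound integration.
#
#     sa = rb.SimulationArchive("archive.bin")
#     results = get_simarchive_integration_results(sa)
#     print(results['time'].shape, results['a'].shape)  # (N,) and (Npl, N)
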
def _get_rebound_simarchive_integration_results(sa):
    N = len(sa)
    sim0 = sa[0]
    Npl = sim0.N_real - 1
    shape = (Npl, N)
    sim_results = {
        'time': np.zeros(N),
        'P': np.zeros(shape),
        'e': np.zeros(shape),
        'l': np.zeros(shape),
        'inc': np.zeros(shape),
        'pomega': np.zeros(shape),
        'omega': np.zeros(shape),
        'Omega': np.zeros(shape),
        'a': np.zeros(shape),
        'Energy': np.zeros(N)
    }
    for i, sim in enumerate(sa):
        sim_results['time'][i] = sim.t
        orbits = sim.calculate_orbits(jacobi_masses=True)
        sim_results['Energy'][i] = sim.calculate_energy()
        for j, orbit in enumerate(orbits):
            sim_results['P'][j, i] = orbit.P
            sim_results['e'][j, i] = orbit.e
            sim_results['l'][j, i] = orbit.l
            sim_results['pomega'][j, i] = orbit.pomega
            sim_results['a'][j, i] = orbit.a
            sim_results['omega'][j, i] = orbit.omega
            sim_results['Omega'][j, i] = orbit.Omega
            sim_results['inc'][j, i] = orbit.inc
    return sim_results
def _get_reboundx_simarchive_integration_results(sa):
    N = len(sa)
    sim0, _ = sa[0]
    Npl = sim0.N_real - 1
    shape = (Npl, N)
    sim_results = {
        'time': np.zeros(N),
        'P': np.zeros(shape),
        'e': np.zeros(shape),
        'l': np.zeros(shape),
        'inc': np.zeros(shape),
        'pomega': np.zeros(shape),
        'omega': np.zeros(shape),
        'Omega': np.zeros(shape),
        'a': np.zeros(shape),
        'Energy': np.zeros(N)
    }
    for i, sim_extra in enumerate(sa):
        sim, extra = sim_extra
        sim_results['time'][i] = sim.t
        orbits = sim.calculate_orbits(jacobi_masses=True)
        sim_results['Energy'][i] = sim.calculate_energy()
        for j, orbit in enumerate(orbits):
            sim_results['P'][j, i] = orbit.P
            sim_results['e'][j, i] = orbit.e
            sim_results['l'][j, i] = orbit.l
            sim_results['pomega'][j, i] = orbit.pomega
            sim_results['a'][j, i] = orbit.a
            sim_results['omega'][j, i] = orbit.omega
            sim_results['Omega'][j, i] = orbit.Omega
            sim_results['inc'][j, i] = orbit.inc
    return sim_results
def get_canonical_heliocentric_orbits(sim):
    """
    Compute orbital elements in canonical
    heliocentric coordinates.

    Arguments:
    ----------
    sim : rb.Simulation
        Simulation for which to compute orbits.

    Returns
    -------
    list of rebound.Orbits
        Orbits of particles in canonical heliocentric
        coordinates.
    """
    star = sim.particles[0]
    orbits = []
    fictitious_star = rb.Particle(m=star.m)
    for planet in sim.particles[1:]:
        # Heliocentric position
        r = np.array(planet.xyz) - np.array(star.xyz)
        # Barycentric momentum
        rtilde = planet.m * np.array(planet.vxyz)
        # Mapping from a (coordinate, momentum) pair to
        # orbital elements requires that the 'velocity'
        # is defined as the canonical momentum divided
        # by the 'mu' appearing in the definition of the
        # Delaunay variables (see, e.g., pg. 34 of
        # Morbidelli 2002).
        #
        # For Laskar & Robutel (1995)'s definition
        # of canonical action-angle pairs, this is
        # the reduced mass.
        #
        # For democratic heliocentric elements,
        # 'mu' is simply the planet mass.
        mu = planet.m * star.m / (planet.m + star.m)
        v_for_orbit = rtilde / mu
        fictitious_particle = rb.Particle(
            m=planet.m,
            x=r[0],
            y=r[1],
            z=r[2],
            vx=v_for_orbit[0],
            vy=v_for_orbit[1],
            vz=v_for_orbit[2]
        )
        orbit = fictitious_particle.calculate_orbit(primary=fictitious_star, G=sim.G)
        orbits.append(orbit)
    return orbits
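
# Illustrative usage (not part of the original module): build a toy two-planet
# system and inspect its canonical heliocentric elements; all values are arbitrary.
#
#     sim = rb.Simulation()
#     sim.add(m=1.0)
#     sim.add(m=1e-4, a=1.0)
#     sim.add(m=1e-4, a=1.55)
#     for orbit in get_canonical_heliocentric_orbits(sim):
#         print(orbit.a, orbit.e)
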
def add_canonical_heliocentric_elements_particle(m, elements, sim):
    """
    Add a new particle to a rebound simulation
    by specifying its mass and canonical heliocentric
    orbital elements.

    Arguments
    ---------
    m : float
        Mass of particle to add.
    elements : dict
        Dictionary of orbital elements for particle.
        Dictionary must contain keys:
            a, e, inc, lmbda, omega, Omega
    sim : rebound.Simulation
        Simulation to add particle to.
    """
    _sim = sim.copy()
    star = _sim.particles[0]
    _sim.add(
        primary=star,
        m=m,
        a=elements['a'],
        e=elements['e'],
        inc=elements['inc'],
        l=elements['lmbda'],
        omega=elements['omega'],
        Omega=elements['Omega']
    )
    _p = _sim.particles[-1]
    p = _p.copy()
    f = star.m / (p.m + star.m)
    p.vx = f * (_p.vx - star.vx)
    p.vy = f * (_p.vy - star.vy)
    p.vz = f * (_p.vz - star.vz)
    sim.add(p)
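
# Illustrative usage (not part of the original module): the dictionary must supply
# the keys named in the docstring; the numbers below are arbitrary.
#
#     elements = {'a': 1.0, 'e': 0.05, 'inc': 0.01,
#                 'lmbda': 0.3, 'omega': 1.2, 'Omega': 2.1}
#     add_canonical_heliocentric_elements_particle(1e-4, elements, sim)
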
def _compute_transformation_angles(sim):
    Gtot_vec = sim.calculate_angular_momentum()
    Gtot_vec = np.array(Gtot_vec)
    Gtot = np.sqrt(Gtot_vec @ Gtot_vec)
    Ghat = Gtot_vec / Gtot
    Ghat_z = Ghat[-1]
    Ghat_perp = np.sqrt(1 - Ghat_z**2)
    theta1 = np.pi/2 - np.arctan2(Ghat[1], Ghat[0])
    theta2 = np.pi/2 - np.arctan2(Ghat_z, Ghat_perp)
    return theta1, theta2


def npEulerAnglesTransform(xyz, Omega, I, omega):
    x, y, z = xyz
    s1, c1 = np.sin(omega), np.cos(omega)
    x1 = c1 * x - s1 * y
    y1 = s1 * x + c1 * y
    z1 = z
    s2, c2 = np.sin(I), np.cos(I)
    x2 = x1
    y2 = c2 * y1 - s2 * z1
    z2 = s2 * y1 + c2 * z1
    s3, c3 = np.sin(Omega), np.cos(Omega)
    x3 = c3 * x2 - s3 * y2
    y3 = s3 * x2 + c3 * y2
    z3 = z2
    return np.array([x3, y3, z3])
def align_simulation(sim):
    """
    Change particle positions and velocities
    of a rebound simulation so that the z-axis
    corresponds with the direction of the angular
    momentum.

    Arguments
    ---------
    sim : rebound.Simulation
    """
    theta1, theta2 = _compute_transformation_angles(sim)
    for p in sim.particles:
        p.x, p.y, p.z = npEulerAnglesTransform(p.xyz, 0, theta2, theta1)
        p.vx, p.vy, p.vz = npEulerAnglesTransform(p.vxyz, 0, theta2, theta1)
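
# Illustrative check (not part of the original module): after aligning, the total
# angular momentum should point along +z up to floating-point error.
#
#     align_simulation(sim)
#     Lx, Ly, Lz = sim.calculate_angular_momentum()
#     assert abs(Lx) < 1e-12 and abs(Ly) < 1e-12
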
| 30.801653 | 86 | 0.581433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,132 | 0.286021 |
2c946430ac7cc2f5fb807e83c2c1fb3114304f71 | 3,432 | py | Python | spyder/plugins/editor/panels/indentationguides.py | suokunlong/spyder | 2d5d450fdcef232fb7f38e7fefc27f0e7f704c9a | ["MIT"] | 3 | 2019-09-27T21:00:00.000Z | 2021-03-07T23:28:32.000Z | spyder/plugins/editor/panels/indentationguides.py | jastema/spyder | 0ef48ea227c53f57556cd8002087dc404b0108b0 | ["MIT"] | 3 | 2021-10-06T22:49:31.000Z | 2022-02-27T12:28:12.000Z | spyder/plugins/editor/panels/indentationguides.py | jastema/spyder | 0ef48ea227c53f57556cd8002087dc404b0108b0 | ["MIT"] | 2 | 2021-04-30T01:18:22.000Z | 2021-09-19T06:31:42.000Z |
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
This module contains the indentation guide panel.
"""
# Third party imports
from qtpy.QtCore import Qt
from qtpy.QtGui import QPainter, QColor
from intervaltree import IntervalTree
# Local imports
from spyder.plugins.editor.utils.editor import TextBlockHelper
from spyder.api.panel import Panel
class IndentationGuide(Panel):
    """Indentation guides to easily identify nested blocks."""

    # --- Qt Overrides
    # -----------------------------------------------------------------
    def __init__(self, editor):
        """Initialize IndentationGuide panel.

        i_width(int): indentation width in characters.
        """
        Panel.__init__(self, editor)
        self.color = Qt.darkGray
        self.i_width = 4
        self.bar_offset = 0
        horizontal_scrollbar = editor.horizontalScrollBar()
        horizontal_scrollbar.valueChanged.connect(self.update_bar_position)
        horizontal_scrollbar.sliderReleased.connect(self.update)

    def update_bar_position(self, value):
        self.bar_offset = value

    def paintEvent(self, event):
        """Override Qt method."""
        painter = QPainter(self)
        color = QColor(self.color)
        color.setAlphaF(.5)
        painter.setPen(color)
        offset = self.editor.document().documentMargin() + \
            self.editor.contentOffset().x()

        folding_panel = self.editor.panels.get('FoldingPanel')
        folding_regions = folding_panel.folding_regions
        folding_status = folding_panel.folding_status
        leading_whitespaces = self.editor.leading_whitespaces

        for line_number in folding_regions:
            post_update = False
            end_line = folding_regions[line_number]
            start_block = self.editor.document().findBlockByNumber(
                line_number)
            end_block = self.editor.document().findBlockByNumber(end_line - 1)
            top = int(self.editor.blockBoundingGeometry(
                start_block).translated(self.editor.contentOffset()).top())
            bottom = int(self.editor.blockBoundingGeometry(
                end_block).translated(self.editor.contentOffset()).bottom())
            total_whitespace = leading_whitespaces.get(max(line_number - 1, 0))
            end_whitespace = leading_whitespaces.get(end_line - 1)

            if end_whitespace and end_whitespace != total_whitespace:
                x = (self.editor.fontMetrics().width(total_whitespace * '9') +
                     self.bar_offset + offset)
                painter.drawLine(x, top, x, bottom)

    # --- Other methods
    # -----------------------------------------------------------------
    def set_enabled(self, state):
        """Toggle edge line visibility."""
        self._enabled = state
        self.setVisible(state)
        # We need to request folding when toggling state so the lines
        # are computed when handling the folding response.
        self.editor.request_folding()

    def update_color(self):
        """Set color using syntax highlighter color for comments."""
        self.color = self.editor.highlighter.get_color_name('comment')

    def set_indentation_width(self, indentation_width):
        """Set indentation width to be used to draw indent guides."""
        self.i_width = indentation_width
| 38.561798 | 79 | 0.638695 | 2,975 | 0.866589 | 0 | 0 | 0 | 0 | 0 | 0 | 889 | 0.258957 |
2c962f2954bff9d6229947f6de4e0917ddfe1361 | 576 | py | Python | lib/DuplicatePairDetector.py | hapsby/deepAPIRevisited | 826c0893dd828380d13e58ac9739a49525e7f001 | ["MIT"] | null | null | null | lib/DuplicatePairDetector.py | hapsby/deepAPIRevisited | 826c0893dd828380d13e58ac9739a49525e7f001 | ["MIT"] | null | null | null | lib/DuplicatePairDetector.py | hapsby/deepAPIRevisited | 826c0893dd828380d13e58ac9739a49525e7f001 | ["MIT"] | null | null | null |
import hashlib
class DuplicatePairDetector:
    def __init__(self):
        self.hashes = set()

    def add_if_new(self, description, api_calls):
        hash_binary = self.get_hash_binary(description, api_calls)
        if hash_binary in self.hashes:
            return False
        self.hashes.add(hash_binary)
        return True

    def get_hash_binary(self, description, api_calls):
        hasher = hashlib.md5(description.encode('utf-8'))
        for api_call in api_calls:
            hasher.update(api_call.encode('utf-8'))
        return hasher.digest()[0:5]
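
# Illustrative usage (not part of the original file): the first occurrence of a
# (description, api_calls) pair is reported as new, an exact repeat is rejected.
#
#     detector = DuplicatePairDetector()
#     detector.add_if_new("read file", ["FileReader.new", "FileReader.read"])  # True
#     detector.add_if_new("read file", ["FileReader.new", "FileReader.read"])  # False
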
| 26.181818 | 66 | 0.651042 | 557 | 0.967014 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.024306 |
2c964b266d17c7b782f7971364713668a723fd0e | 328 | py | Python | beginner/sum-of-all-numbers_ChingLingYeung.py | garvitsharma05/hacktoberithms | 25aea28f362de22414569d67436a670bea5a3aeb | ["MIT"] | 16 | 2018-10-05T07:35:06.000Z | 2021-10-02T12:12:52.000Z | beginner/sum-of-all-numbers_ChingLingYeung.py | garvitsharma05/hacktoberithms | 25aea28f362de22414569d67436a670bea5a3aeb | ["MIT"] | 50 | 2018-10-04T00:04:24.000Z | 2019-10-25T16:29:58.000Z | beginner/sum-of-all-numbers_ChingLingYeung.py | garvitsharma05/hacktoberithms | 25aea28f362de22414569d67436a670bea5a3aeb | ["MIT"] | 115 | 2018-10-04T02:42:18.000Z | 2021-01-27T17:34:21.000Z |
def sum_all(ls):
    sum = 0
    if(len(ls) != 2):
        print("Invalid input")
    else:
        ls.sort()
        start = ls[0]
        end = ls[1]
        if(start == end):
            # A range whose endpoints are equal contains a single number.
            sum = start
        else:
            for i in range(start, end+1):
                sum += i
    return sum
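
# Worked example (not in the original file): sum_all([1, 4]) returns 1 + 2 + 3 + 4 = 10,
# and sum_all([4, 1]) gives the same result because the input is sorted first.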
| 15.619048 | 41 | 0.368902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.045732 |
2c96a2b2cfda5dc84f1a1edca8acac30d65af637 | 1,713 | py | Python | examples/regression/survival_prediction/extract_patch_feature.py | Starrah/THU-SuperMoon | 1e6b8ccc207f789fb8426806251cc3d4e1cca35a | ["MIT"] | 64 | 2020-06-06T03:30:37.000Z | 2022-03-22T06:57:00.000Z | examples/regression/survival_prediction/extract_patch_feature.py | iMoonLab/HyperG_package | 9394cb5ca31d28df715039ba6e8b4dd852192f71 | ["MIT"] | 2 | 2021-01-19T22:01:33.000Z | 2022-03-08T05:37:22.000Z | examples/regression/survival_prediction/extract_patch_feature.py | iMoonLab/HyperG_package | 9394cb5ca31d28df715039ba6e8b4dd852192f71 | ["MIT"] | 12 | 2020-06-18T06:35:57.000Z | 2022-03-28T09:23:08.000Z |
import openslide
import torch
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

from SuperMoon.models import ResNetFeature


def extract_ft(slide_dir: str, patch_coors, depth=34, batch_size=16):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    slide = openslide.open_slide(slide_dir)
    model_ft = ResNetFeature(depth=depth, pooling=True, pretrained=True)
    model_ft = model_ft.to(device)
    model_ft.eval()

    dataset = Patches(slide, patch_coors)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4)
    fts = []
    with tqdm(total=len(dataset)) as pbar:
        for _patches in dataloader:
            _patches = _patches.to(device)
            with torch.no_grad():
                _fts = model_ft(_patches)
            fts.append(_fts)
            pbar.update(_patches.size(0))
    fts = torch.cat(fts, dim=0)
    assert fts.size(0) == len(patch_coors)
    return fts


class Patches(Dataset):
    def __init__(self, slide: openslide, patch_coors) -> None:
        super().__init__()
        self.slide = slide
        self.patch_coors = patch_coors
        self.transform = transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

    def __getitem__(self, idx: int):
        coor = self.patch_coors[idx]
        img = self.slide.read_region((coor[0], coor[1]), 0, (coor[2], coor[3])).convert('RGB')
        return self.transform(img)

    def __len__(self) -> int:
        return len(self.patch_coors)
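

# Illustrative usage (not part of the original module): patch coordinates are
# (x, y, width, height) tuples at slide level 0; the slide path and coordinates
# below are hypothetical.
#
#     coors = [(0, 0, 512, 512), (512, 0, 512, 512)]
#     features = extract_ft("slide_001.svs", coors, depth=34, batch_size=16)
#     print(features.shape)  # (len(coors), feature_dim)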
| 31.145455 | 94 | 0.635727 | 702 | 0.409807 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.010508 |