Schema (`⌀` marks a nullable column):

| Column | Dtype | Range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 → 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 → 248 |
| max_stars_repo_name | string | length 5 → 125 |
| max_stars_repo_head_hexsha | string | length 40 → 78 |
| max_stars_repo_licenses | list | length 1 → 10 |
| max_stars_count | int64 ⌀ | 1 → 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 → 248 |
| max_issues_repo_name | string | length 5 → 125 |
| max_issues_repo_head_hexsha | string | length 40 → 78 |
| max_issues_repo_licenses | list | length 1 → 10 |
| max_issues_count | int64 ⌀ | 1 → 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 → 248 |
| max_forks_repo_name | string | length 5 → 125 |
| max_forks_repo_head_hexsha | string | length 40 → 78 |
| max_forks_repo_licenses | list | length 1 → 10 |
| max_forks_count | int64 ⌀ | 1 → 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 5 → 2.06M |
| avg_line_length | float64 | 1 → 1.02M |
| max_line_length | int64 | 3 → 1.03M |
| alphanum_fraction | float64 | 0 → 1 |
| count_classes | int64 | 0 → 1.6M |
| score_classes | float64 | 0 → 1 |
| count_generators | int64 | 0 → 651k |
| score_generators | float64 | 0 → 1 |
| count_decorators | int64 | 0 → 990k |
| score_decorators | float64 | 0 → 1 |
| count_async_functions | int64 | 0 → 235k |
| score_async_functions | float64 | 0 → 1 |
| count_documentation | int64 | 0 → 1.04M |
| score_documentation | float64 | 0 → 1 |

Each record below is shown as a metadata table, the file `content`, and a stats table. Within a record the stars/issues/forks views share the same path, repo, and head commit unless listed separately, so they are collapsed into single rows; stats cells with two numbers are count / score pairs.

---

| Field | Value |
|---|---|
| hexsha | ac28cc037b37abb77ef41622f5d43a997fc33033 |
| size · ext · lang | 690 · bzl · Python |
| repo @ head | dna2fork/gvisor @ eefa817cfdb04ff07e7069396f21bd6ba2c89957 |
| path | runsc/test/build_defs.bzl |
| licenses | ["Apache-2.0"] |
| stars | 2 (2020-07-24T04:06:44.000Z → 2021-06-28T00:49:20.000Z) |
| issues | null |
| forks | 1 (2020-10-07T12:33:19.000Z → 2020-10-07T12:33:19.000Z) |
"""Defines a rule for runsc test targets."""
load("@io_bazel_rules_go//go:def.bzl", _go_test = "go_test")
# runtime_test is a macro that will create targets to run the given test target
# with different runtime options.
def runtime_test(**kwargs):
"""Runs the given test target with different runtime options."""
name = kwargs["name"]
_go_test(**kwargs)
kwargs["name"] = name + "_hostnet"
kwargs["args"] = ["--runtime-type=hostnet"]
_go_test(**kwargs)
kwargs["name"] = name + "_kvm"
kwargs["args"] = ["--runtime-type=kvm"]
_go_test(**kwargs)
kwargs["name"] = name + "_overlay"
kwargs["args"] = ["--runtime-type=overlay"]
_go_test(**kwargs)

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 34.5 | 79 | 0.649275 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 397 / 0.575362 |
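
For context on how a macro like `runtime_test` is consumed: a single call in a BUILD file fans out into one `go_test` per runtime. The sketch below is hypothetical (the target name and source file are made up) and assumes standard `go_test` attributes, since the macro forwards all kwargs to `_go_test` unchanged.

```python
# Hypothetical BUILD file usage (Starlark). This one call defines four targets:
# :integration_test plus _hostnet, _kvm, and _overlay variants, each run with a
# different --runtime-type argument.
load("//runsc/test:build_defs.bzl", "runtime_test")

runtime_test(
    name = "integration_test",       # assumed target name
    srcs = ["integration_test.go"],  # assumed source file
    tags = ["manual"],               # any go_test attribute passes through
)
```

Note that the macro mutates and reuses one `kwargs` dict, so every variant after the first inherits the previous overwrite; that is safe here only because each branch fully replaces `kwargs["name"]` and `kwargs["args"]`.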

---

| Field | Value |
|---|---|
| hexsha | ac2bbb2fda911605c3e823751d84b99cb9f30d2f |
| size · ext · lang | 918 · py · Python |
| repo @ head | tianjiashuo/akg @ a9cbf642063fb1086a93e8bc6be6feb145689817 |
| path | python/akg/ms/utils.py |
| licenses | ["Apache-2.0"] |
| stars | 286 (2020-06-23T06:40:44.000Z → 2022-03-30T01:27:49.000Z) |
| issues | 10 (2020-07-31T03:26:59.000Z → 2021-12-27T15:00:54.000Z) |
| forks | 30 (2020-07-17T01:04:14.000Z → 2021-12-27T14:05:19.000Z) |

```python
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""utils"""

# input format begin
DEFAULT = "DefaultFormat"
NCHW = "NCHW"
NHWC = "NHWC"
HWCN = "HWCN"
NC1HWC0 = "NC1HWC0"
FRAC_Z = "FracZ"
# input format end

# fusion type begin
ELEMWISE = "ELEMWISE"
CONVLUTION = "CONVLUTION"
COMMREDUCE = "COMMREDUCE"
SEGMENT = "SEGMENT"
OPAQUE = "OPAQUE"
# fusion type end

BINDS = "binds"
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 27.818182 | 74 | 0.740741 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 772 / 0.840959 |

---

| Field | Value |
|---|---|
| hexsha | ac2c508dc7ed127c37e761fe6253d817c522f603 |
| size · ext · lang | 5,254 · py · Python |
| repo @ head | 4dn-dcic/foursight-core @ 2e5ea594d38d04ad58f63ee42e5fb4b920bfb63c |
| path | foursight_core/sqs_utils.py |
| licenses | ["MIT"] |
| stars | null |
| issues | 3 (2021-08-11T07:09:24.000Z → 2022-02-16T18:58:45.000Z) |
| forks | null |

```python
from datetime import datetime

import boto3
import json

from .stage import Stage


class SQS(object):
    """
    class SQS is a collection of utils related to Foursight queues
    """

    def __init__(self, foursight_prefix):
        self.stage = Stage(foursight_prefix)

    def invoke_check_runner(self, runner_input):
        """
        Simple function to invoke the next check_runner lambda with runner_input
        (dict containing {'sqs_url': <str>})
        """
        client = boto3.client('lambda')
        # InvocationType='Event' makes the invocation asynchronous;
        # fall back to a synchronous invoke because async invokes can be problematic
        try:
            response = client.invoke(
                FunctionName=self.stage.get_runner_name(),
                InvocationType='Event',
                Payload=json.dumps(runner_input)
            )
        except Exception:
            response = client.invoke(
                FunctionName=self.stage.get_runner_name(),
                Payload=json.dumps(runner_input)
            )
        return response

    def delete_message_and_propogate(self, runner_input, receipt, propogate=True):
        """
        Delete the message with given receipt from sqs queue and invoke the next
        lambda runner.

        Args:
            runner_input (dict): runner info, should minimally have 'sqs_url'
            receipt (str): SQS message receipt
            propogate (bool): if True (default), invoke another check runner lambda

        Returns:
            None
        """
        sqs_url = runner_input.get('sqs_url')
        if not sqs_url or not receipt:
            return
        client = boto3.client('sqs')
        client.delete_message(
            QueueUrl=sqs_url,
            ReceiptHandle=receipt
        )
        if propogate is True:
            self.invoke_check_runner(runner_input)

    def recover_message_and_propogate(self, runner_input, receipt, propogate=True):
        """
        Recover the message with given receipt to sqs queue and invoke the next
        lambda runner.

        Changing message VisibilityTimeout to 15 seconds means the message will be
        available to the queue in that much time. This is a slight lag to allow
        dependencies to process.
        NOTE: VisibilityTimeout should be less than WaitTimeSeconds in run_check_runner

        Args:
            runner_input (dict): runner info, should minimally have 'sqs_url'
            receipt (str): SQS message receipt
            propogate (bool): if True (default), invoke another check runner lambda

        Returns:
            None
        """
        sqs_url = runner_input.get('sqs_url')
        if not sqs_url or not receipt:
            return
        client = boto3.client('sqs')
        client.change_message_visibility(
            QueueUrl=sqs_url,
            ReceiptHandle=receipt,
            VisibilityTimeout=15
        )
        if propogate is True:
            self.invoke_check_runner(runner_input)

    def get_sqs_queue(self):
        """
        Returns boto3 sqs resource
        """
        queue_name = self.stage.get_queue_name()
        sqs = boto3.resource('sqs')
        try:
            queue = sqs.get_queue_by_name(QueueName=queue_name)
        except Exception:
            queue = sqs.create_queue(
                QueueName=queue_name,
                Attributes={
                    'VisibilityTimeout': '900',
                    'MessageRetentionPeriod': '3600'
                }
            )
        return queue

    @classmethod
    def send_sqs_messages(cls, queue, environ, check_vals, uuid=None):
        """
        Send messages to SQS queue. Check_vals are entries within a check_group.
        Optionally, provide a uuid that will be queued as the uuid for the run; if
        not provided, datetime.utcnow is used

        Args:
            queue: boto3 sqs resource (from get_sqs_queue)
            environ (str): foursight environment name
            check_vals (list): list of formatted check vals, like those from
                check_utils.CheckHandler().get_check_schedule
            uuid (str): optional string uuid

        Returns:
            str: uuid of queued messages
        """
        # uuid used as the MessageGroupId
        if not uuid:
            uuid = datetime.utcnow().isoformat()
        # append environ and uuid as first elements to all check_vals
        proc_vals = [[environ, uuid] + val for val in check_vals]
        for val in proc_vals:
            queue.send_message(MessageBody=json.dumps(val))
        return uuid

    @classmethod
    def get_sqs_attributes(cls, sqs_url):
        """
        Returns a dict of the desired attributes from the queue with given url
        """
        backup = {
            'ApproximateNumberOfMessages': 'ERROR',
            'ApproximateNumberOfMessagesNotVisible': 'ERROR'
        }
        client = boto3.client('sqs')
        try:
            result = client.get_queue_attributes(
                QueueUrl=sqs_url,
                AttributeNames=[
                    'ApproximateNumberOfMessages',
                    'ApproximateNumberOfMessagesNotVisible'
                ]
            )
        except Exception:
            return backup
        return result.get('Attributes', backup)
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 33.896774 | 87 | 0.592882 | 5,171 / 0.984203 | 0 / 0 | 1,756 / 0.334222 | 0 / 0 | 2,453 / 0.466882 |
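
To make the flow concrete, here is a minimal usage sketch, assuming a deployed Foursight stack: the prefix and environment name are placeholders, and `Stage.get_queue_name()` / `Stage.get_runner_name()` must resolve to real AWS resources for any of this to run.

```python
# Minimal sketch (assumes AWS credentials and a deployed Foursight stack;
# 'foursight-myprefix' and 'data' are placeholder values).
from foursight_core.sqs_utils import SQS

sqs = SQS('foursight-myprefix')
queue = sqs.get_sqs_queue()  # fetched by name, or created on first use

# Each check_val becomes one SQS message of the form [environ, uuid, *check_val].
run_uuid = SQS.send_sqs_messages(queue, 'data', [['check_a'], ['check_b']])

# Start the first check-runner lambda and point it at the queue.
sqs.invoke_check_runner({'sqs_url': queue.url})
print(run_uuid, SQS.get_sqs_attributes(queue.url))
```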

---

| Field | Value |
|---|---|
| hexsha | ac2c93e2be1b6adb27d68e1973a9207d9ea0da74 |
| size · ext · lang | 1,476 · py · Python |
| repo @ head | sabraha2/shadho @ c17109a4526961113933d1189f0ca98eb8119ac1 |
| path | setup.py |
| licenses | ["MIT"] |
| stars | null |
| issues | null |
| forks | null |

```python
#!/usr/bin/env python
import os

from setuptools import setup
from setuptools.command.install import install

LONG_DESCRIPTION = ""
with open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r') as f:
    LONG_DESCRIPTION = f.read()

setup(
    name='shadho',
    version='0.4.3.post2',
    description='Hyperparameter optimizer with distributed hardware at heart',
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    url='https://github.com/jeffkinnison/shadho',
    author='Jeff Kinnison',
    author_email='jkinniso@nd.edu',
    python_requires='>=3.5',
    packages=['shadho',
              'shadho.installers',
              'shadho.managers',
              'shadho.workers'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Operating System :: POSIX',
        'Operating System :: Unix',
    ],
    keywords='machine_learning hyperparameters distributed_computing',
    install_requires=[
        'numpy',
        'scipy',
        'scikit-learn',
        'pyrameter'
    ],
    tests_require=['pytest'],
    include_package_data=True,
)
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 31.404255 | 78 | 0.630759 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 754 / 0.51084 |

---

| Field | Value |
|---|---|
| hexsha | ac2e50e2c0ed0f0cb69c0d20141ee9476a2e3b0c |
| size · ext · lang | 20,837 · py · Python |
| repo @ head | jennyfothergill/foyer @ 60a738da43fbb18c7207821662688361a1e6017d |
| path | foyer/tests/test_gmso_forcefield.py |
| licenses | ["MIT"] |
| stars | 62 (2017-02-14T19:38:31.000Z → 2022-02-25T16:09:54.000Z) |
| issues | 384 (2017-02-10T05:56:41.000Z → 2022-03-30T21:47:22.000Z) |
| forks | 65 (2017-02-24T16:43:20.000Z → 2022-01-06T21:01:28.000Z) |

```python
import difflib
import glob
import os

import gmso
import mbuild as mb
import pytest
from pkg_resources import resource_filename

from foyer.exceptions import FoyerError
from foyer.general_forcefield import Forcefield
from foyer.tests.base_test import BaseTest
from foyer.tests.utils import get_fn, register_mock_request

FF_DIR = resource_filename("foyer", "forcefields")
FORCEFIELDS = glob.glob(os.path.join(FF_DIR, "xml/*.xml"))

RESPONSE_BIB_ETHANE_JA962170 = """@article{Jorgensen_1996,
doi = {10.1021/ja9621760},
url = {https://doi.org/10.1021%2Fja9621760},
year = 1996,
month = {jan},
publisher = {American Chemical Society ({ACS})},
volume = {118},
number = {45},
pages = {11225--11236},
author = {William L. Jorgensen and David S. Maxwell and Julian Tirado-Rives},
title = {Development and Testing of the {OPLS} All-Atom Force Field on Conformational Energetics and Properties of Organic Liquids},
journal = {Journal of the American Chemical Society}
}"""

RESPONSE_BIB_ETHANE_JP0484579 = """@article{Jorgensen_2004,
doi = {10.1021/jp0484579},
url = {https://doi.org/10.1021%2Fjp0484579},
year = 2004,
month = {oct},
publisher = {American Chemical Society ({ACS})},
volume = {108},
number = {41},
pages = {16264--16270},
author = {William L. Jorgensen and Jakob P. Ulmschneider and Julian Tirado-Rives},
title = {Free Energies of Hydration from a Generalized Born Model and an All-Atom Force Field},
journal = {The Journal of Physical Chemistry B}
}"""


class TestGeneralForcefield(BaseTest):
    @pytest.fixture(scope="session")
    def oplsaa(self):
        return Forcefield(name="oplsaa", strict=False)

    @pytest.mark.parametrize("ff_file", FORCEFIELDS)
    def test_load_files(self, ff_file):
        ff1 = Forcefield(forcefield_files=ff_file, strict=False)
        assert len(ff1.ff.atom_types) > 0

        ff2 = Forcefield(forcefield_files=ff_file, strict=False)
        assert len(ff1.ff.atom_types) == len(ff2.ff.atom_types)

    """ Relies on https://github.com/mosdef-hub/gmso/pull/526
    def test_duplicate_type_definitions():
        with pytest.raises(ValueError):
            ff4 = Forcefield(name='oplsaa', forcefield_files=FORCEFIELDS, strict=False)
    """

    def test_missing_type_definitions(self):
        with pytest.raises(FoyerError):
            FF = Forcefield()
            ethane = mb.load(get_fn("ethane.mol2"), backend="parmed")
            FF.apply(ethane, assert_improper_params=False)

    def test_unsupported_backend(self):
        with pytest.raises(FoyerError, match=r"Backend not supported"):
            FF = Forcefield(name="oplsaa", backend="void")

    def test_from_gmso(self, oplsaa):
        mol2 = mb.load(get_fn("ethane.mol2"), backend="parmed")
        top = gmso.external.from_mbuild(mol2)
        ethane = oplsaa.apply(top, assert_improper_params=False)
        assert (
            sum((1 for at in ethane.sites if at.atom_type.name == "opls_135"))
            == 2
        )
        assert (
            sum((1 for at in ethane.sites if at.atom_type.name == "opls_140"))
            == 6
        )
        assert len(ethane.bonds) == 7
        assert all(x.bond_type for x in ethane.bonds)
        assert len(ethane.angles) == 12
        assert all(x.angle_type for x in ethane.angles)
        assert len(ethane.dihedrals) == 9
        assert all(x.dihedral_type for x in ethane.dihedrals)

    """
    Skip test for box information until mbuild box overhaul PR is completed
    mol2 = mb.load(get_fn('ethane.mol2'), backend='parmed')
    mol2.box_vectors = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
    oplsaa = Forcefield(name='oplsaa', strict=False)
    ethane = oplsaa.apply(mol2, assert_improper_params=False)
    assert ethane.box_vectors == mol2.box_vectors
    """

    def test_from_mbuild(self, oplsaa):
        mol2 = mb.load(get_fn("ethane.mol2"), backend="parmed")
        ethane = oplsaa.apply(mol2, assert_improper_params=False)
        assert (
            sum((1 for at in ethane.sites if at.atom_type.name == "opls_135"))
            == 2
        )
        assert (
            sum((1 for at in ethane.sites if at.atom_type.name == "opls_140"))
            == 6
        )
        assert len(ethane.bonds) == 7
        assert all(x.bond_type for x in ethane.bonds)
        assert len(ethane.angles) == 12
        assert all(x.angle_type for x in ethane.angles)
        assert len(ethane.dihedrals) == 9
        assert all(x.dihedral_type for x in ethane.dihedrals)

    @pytest.mark.parametrize("mixing_rule", ["lorentz", "geometric"])
    def test_comb_rule(self, mixing_rule, oplsaa):
        mol2 = mb.load(get_fn("ethane.mol2"))
        ethane = oplsaa.apply(
            mol2, combining_rule=mixing_rule, assert_improper_params=False
        )
        assert ethane.combining_rule == mixing_rule

    def test_write_refs(self, requests_mock, oplsaa):
        register_mock_request(
            mocker=requests_mock,
            url="http://api.crossref.org/",
            path="works/10.1021/ja9621760/transform/application/x-bibtex",
            headers={"accept": "application/x-bibtex"},
            text=RESPONSE_BIB_ETHANE_JA962170,
        )
        mol2 = mb.load(get_fn("ethane.mol2"), backend="parmed")
        ethane = oplsaa.apply(
            mol2, references_file="ethane.bib", assert_improper_params=False
        )
        assert os.path.isfile("ethane.bib")
        with open(get_fn("ethane.bib")) as file1:
            with open("ethane.bib") as file2:
                diff = list(
                    difflib.unified_diff(
                        file1.readlines(), file2.readlines(), n=0
                    )
                )
        assert not diff

    def test_write_refs_multiple(self, requests_mock):
        register_mock_request(
            mocker=requests_mock,
            url="http://api.crossref.org/",
            path="works/10.1021/ja9621760/transform/application/x-bibtex",
            headers={"accept": "application/x-bibtex"},
            text=RESPONSE_BIB_ETHANE_JA962170,
        )
        register_mock_request(
            mocker=requests_mock,
            url="http://api.crossref.org/",
            path="works/10.1021/jp0484579/transform/application/x-bibtex",
            headers={"accept": "application/x-bibtex"},
            text=RESPONSE_BIB_ETHANE_JP0484579,
        )
        mol2 = mb.load(get_fn("ethane.mol2"))
        oplsaa = Forcefield(
            forcefield_files=get_fn("refs-multi.xml"), strict=False
        )
        ethane = oplsaa.apply(
            mol2,
            references_file="ethane-multi.bib",
            assert_improper_params=False,
        )
        assert os.path.isfile("ethane-multi.bib")
        with open(get_fn("ethane-multi.bib")) as file1:
            with open("ethane-multi.bib") as file2:
                diff = list(
                    difflib.unified_diff(
                        file1.readlines(), file2.readlines(), n=0
                    )
                )
        assert not diff

    def test_write_bad_ref(self, requests_mock):
        register_mock_request(
            mocker=requests_mock,
            url="http://api.crossref.org/",
            path="works/10.1021/garbage_bad_44444444jjjj/transform/application/x-bibtex",
            headers={"accept": "application/x-bibtex"},
            status_code=404,
        )
        mol2 = mb.load(get_fn("ethane.mol2"), backend="parmed")
        oplsaa = Forcefield(
            forcefield_files=get_fn("refs-bad.xml"), strict=False
        )
        with pytest.warns(UserWarning):
            ethane = oplsaa.apply(
                mol2, references_file="ethane.bib", assert_improper_params=False
            )

    """
    These XML files missed the whole nonbonded force section
    def test_from_mbuild_customtype():
        mol2 = mb.load(get_fn('ethane_customtype.pdb'))
        customtype_ff = Forcefield(forcefield_files=get_fn('validate_customtypes.xml'), strict=False)
        ethane = customtype_ff.apply(mol2, assert_improper_params=False)
        assert sum((1 for at in ethane.sites if at.atom_type.name == 'C3')) == 2
        assert sum((1 for at in ethane.sites if at.atom_type.name == 'Hb')) == 6
        assert len(ethane.bonds) == 7
        assert all(x.bond_type for x in ethane.bonds)
        assert len(ethane.angles) == 12
        assert all(x.angle_type for x in ethane.angles)
        assert len(ethane.dihedrals) == 9
        assert all(x.dihedral_type for x in ethane.dihedrals)

    def test_improper_dihedral():
        untyped_benzene = mb.load(get_fn('benzene.mol2'), backend='parmed')
        ff_improper = Forcefield(forcefield_files=get_fn('improper_dihedral.xml'), strict=False)
        benzene = ff_improper.apply(untyped_benzene, assert_dihedral_params=False, assert_improper_params=False)
        assert len(benzene.dihedrals) == 18
        assert len([dih for dih in benzene.dihedrals if dih.improper]) == 6
        assert len([dih for dih in benzene.dihedrals if not dih.improper]) == 12
    """

    def test_urey_bradley(self):
        system = mb.Compound()
        first = mb.Particle(name="_CTL2", pos=[-1, 0, 0])
        second = mb.Particle(name="_CL", pos=[0, 0, 0])
        third = mb.Particle(name="_OBL", pos=[1, 0, 0])
        fourth = mb.Particle(name="_OHL", pos=[0, 1, 0])
        system.add([first, second, third, fourth])
        system.add_bond((first, second))
        system.add_bond((second, third))
        system.add_bond((second, fourth))
        ff = Forcefield(
            forcefield_files=[get_fn("charmm36_cooh.xml")], strict=False
        )
        struc = ff.apply(
            system,
            assert_angle_params=False,
            assert_dihedral_params=False,
            assert_improper_params=False,
        )
        assert len(struc.angles) == 3
        assert len(struc.angle_types) == 3  # 1 harmonic, 2 Urey Bradley

    def test_charmm_improper(self):
        system = mb.Compound()
        first = mb.Particle(name="_CTL2", pos=[-1, 0, 0])
        second = mb.Particle(name="_CL", pos=[0, 0, 0])
        third = mb.Particle(name="_OBL", pos=[1, 0, 0])
        fourth = mb.Particle(name="_OHL", pos=[0, 1, 0])
        system.add([first, second, third, fourth])
        system.add_bond((first, second))
        system.add_bond((second, third))
        system.add_bond((second, fourth))
        ff = Forcefield(
            forcefield_files=[get_fn("charmm36_cooh.xml")], strict=False
        )
        struc = ff.apply(
            system,
            assert_angle_params=False,
            assert_dihedral_params=False,
            assert_improper_params=False,
        )
        assert len(struc.impropers) == 1
        assert len(struc.dihedrals) == 0

    ''' To be implemented -> Lookup connection types with mixed atomtype-atomclass
    def test_topology_precedence():
        """Test to see if topology precedence is properly adhered to.

        This test uses a force field file where bond, angle, and dihedral
        parameters are present with different counts of `type` definitions.
        It checks that:
        1. The parameters with the higher number of `type` definitions
           are assigned (because they are given the highest precedence)
        2. That if multiple definitions exist with the same number of
           `type` definitions, that the convention from OpenMM is followed
           whereby the definition that occurs earliest in the XML is
           assigned.
        """
        ethane = mb.load(get_fn('ethane.mol2'), backend='parmed')
        ff = Forcefield(forcefield_files=get_fn('ethane-topo-precedence.xml'), strict=False)
        typed_ethane = ff.apply(ethane, assert_improper_params=False)
        # Need to work on the units of these tests
        assert len([bond for bond in typed_ethane.bonds
                    if round(float(bond.bond_type.parameters['r_eq'].value), 3) == 0.115]) == 6
        assert len([bond for bond in typed_ethane.bonds
                    if round(float(bond.bond_type.parameters['r_eq'].value), 2) == 0.16]) == 1
        assert len([angle for angle in typed_ethane.angles
                    if round(float(angle.angle_type.parameters['theta_eq'].value), 3) == 120.321]) == 6
        assert len([angle for angle in typed_ethane.angles
                    if round(float(angle.angle_type.parameters['theta_eq'].value), 3) == 97.403]) == 6
        assert len([rb for rb in typed_ethane.dihedral
                    if round(float(rb.dihedral_type.parameters['c0'].value), 3) == 0.287]) == 9
    '''

    @pytest.mark.parametrize(
        "ff_filename,kwargs",
        [
            ("ethane-angle-typo.xml", {"assert_angle_params": False}),
            ("ethane-dihedral-typo.xml", {"assert_dihedral_params": False}),
        ],
    )
    def test_missing_topo_params(self, ff_filename, kwargs):
        """Test that the user is notified if not all topology parameters are found."""
        ethane = mb.load(get_fn("ethane.mol2"))
        oplsaa_with_typo = Forcefield(
            forcefield_files=get_fn(ff_filename), strict=False
        )
        with pytest.raises(Exception):
            ethane = oplsaa_with_typo.apply(
                ethane, assert_improper_params=False
            )
        with pytest.warns(UserWarning):
            ethane = oplsaa_with_typo.apply(
                ethane, assert_improper_params=False, **kwargs
            )

    def test_assert_bonds(self):
        ff = Forcefield(name="trappe-ua", strict=False)
        derponium = mb.Compound()
        at1 = mb.Particle(name="H")
        at2 = mb.Particle(name="O")
        at3 = mb.Particle(name="_CH4")
        derponium.add([at1, at2, at3])
        derponium.add_bond((at1, at2))
        derponium.add_bond((at2, at3))
        with pytest.raises(Exception):
            ff.apply(derponium, assert_improper_params=False)
        thing = ff.apply(
            derponium,
            assert_bond_params=False,
            assert_angle_params=False,
            assert_improper_params=False,
        )
        assert any(b.bond_type is None for b in thing.bonds)

    def test_apply_subfuncs(self, oplsaa):
        mol2 = mb.load(get_fn("ethane.mol2"), backend="parmed")
        ethane = oplsaa.apply(mol2, assert_improper_params=False)
        typemap = oplsaa._run_atomtyping(mol2, use_residue_map=False)
        ethane2 = oplsaa._parametrize(
            mol2, typemap=typemap, assert_improper_params=False
        )
        assert ethane.box == ethane2.box
        assert (ethane.positions == ethane2.positions).all()
        for a1, a2 in zip(ethane.sites, ethane2.sites):
            assert a1.name == a2.name
            assert ethane.get_index(a1) == ethane2.get_index(a2)
            assert a1.atom_type == a2.atom_type
        for b1, b2 in zip(ethane.bonds, ethane2.bonds):
            assert (
                b1.connection_members[0].atom_type
                == b2.connection_members[0].atom_type
            )
            assert (
                b1.connection_members[1].atom_type
                == b2.connection_members[1].atom_type
            )
            assert b1.bond_type == b2.bond_type

    def test_non_zero_charge(self, oplsaa):
        compound = mb.load("C1=CC=C2C(=C1)C(C3=CC=CC=C3O2)C(=O)O", smiles=True)
        with pytest.warns(UserWarning):
            oplsaa.apply(
                compound,
                assert_dihedral_params=False,
                assert_improper_params=False,
            )

    """
    @pytest.mark.parametrize("filename", ['ethane.mol2', 'benzene.mol2'])
    def test_write_xml(filename):
        mol = mb.load(get_fn(filename), backend='parmed')
        oplsaa = Forcefield(name='oplsaa', strict=False)
        typed = oplsaa.apply(mol, assert_improper_params=False)

        typed.write_foyer(filename='opls-snippet.xml', forcefield=oplsaa, unique=True)
        oplsaa_partial = Forcefield('opls-snippet.xml', strict=False)
        typed_by_partial = oplsaa_partial.apply(mol, assert_improper_params=False)

        for i in range(len(typed.sites)):
            atype1 = typed.sites[i].atom_type
            atype2 = typed_by_partial.sites[i].atom_type
            assert atype1.expression == atype2.expression
            assert atype1.parameters == atype2.parameters

        for i in range(len(typed.bonds)):
            btype1 = typed.bonds[i].bond_type
            btype2 = typed_by_partial.bonds[i].bond_type
            assert btype1.expression == btype2.expression
            assert btype1.parameters == btype2.parameters

        # Do it again but with an XML including periodic dihedrals
        mol = mb.load(get_fn(filename), backend='parmed')
        oplsaa = Forcefield(get_fn('oplsaa-periodic.xml'), strict=False)
        typed = oplsaa.apply(mol, assert_improper_params=False)

        typed.write_foyer(filename='opls-snippet.xml', forcefield=oplsaa, unique=True)
        oplsaa_partial = Forcefield('opls-snippet.xml', strict=False)
        typed_by_partial = oplsaa_partial.apply(mol, assert_improper_params=False)

        for i in range(len(typed.sites)):
            atype1 = typed.sites[i].atom_type
            atype2 = typed_by_partial.sites[i].atom_type
            assert atype1.expression == atype2.expression
            assert atype1.parameters == atype2.parameters

        for i in range(len(typed.bonds)):
            btype1 = typed.bonds[i].bond_type
            btype2 = typed_by_partial.bonds[i].bond_type
            assert btype1.expression == btype2.expression
            assert btype1.parameters == btype2.parameters

    @pytest.mark.parametrize("filename", ['ethane.mol2', 'benzene.mol2'])
    def test_write_xml_multiple_periodictorsions(filename):
        cmpd = mb.load(get_fn(filename), backend='parmed')
        ff = Forcefield(forcefield_files=get_fn('oplsaa_multiperiodicitytorsion.xml'), strict=False)
        typed_struc = ff.apply(cmpd, assert_dihedral_params=False, assert_improper_params=False)

        typed_struc.write_foyer(filename='multi-periodictorsions.xml', forcefield=ff, unique=True)
        partial_ff = Forcefield(forcefield_files='multi-periodictorsions.xml', strict=False)
        typed_by_partial = partial_ff.apply(cmpd, assert_dihedral_params=False, assert_improper_params=False)
        assert len(typed_struc.bonds) == len(typed_by_partial.bonds)
        assert len(typed_struc.angles) == len(typed_by_partial.angles)
        assert len(typed_struc.dihedrals) == len(typed_by_partial.dihedrals)

        root = ET.parse('multi-periodictorsions.xml')
        periodic_element = root.find('PeriodicTorsionForce')
        assert 'periodicity2' in periodic_element[0].attrib
        assert 'k2' in periodic_element[0].attrib
        assert 'phase2' in periodic_element[0].attrib

    @pytest.mark.parametrize("filename", ['ethane.mol2', 'benzene.mol2'])
    def test_load_xml(filename):
        mol = mb.load(get_fn(filename), backend='parmed')
        if filename == 'ethane.mol2':
            ff = Forcefield(get_fn('ethane-multiple.xml'), strict=False)
        else:
            ff = Forcefield(name='oplsaa', strict=False)
        typed = ff.apply(mol, assert_improper_params=False)
        typed.write_foyer(filename='snippet.xml', forcefield=ff, unique=True)

        generated_ff = Forcefield('snippet.xml', strict=False)

    def test_write_xml_overrides():
        # Test xml_writer new overrides and comments features
        mol = mb.load(get_fn('styrene.mol2'), backend='parmed')
        oplsaa = Forcefield(name='oplsaa', strict=False)
        typed = oplsaa.apply(mol, assert_dihedral_params=False, assert_improper_params=False)
        typed.write_foyer(filename='opls-styrene.xml', forcefield=oplsaa, unique=True)
        styrene = ET.parse('opls-styrene.xml')
        atom_types = styrene.getroot().find('AtomTypes').findall('Type')
        for item in atom_types:
            attributes = item.attrib
            if attributes['name'] == 'opls_145':
                assert attributes['overrides'] == 'opls_142'
                assert str(item.xpath('comment()')) in {'[<!--Note: original overrides="opls_141,opls_142"-->]',
                                                        '[<!--Note: original overrides="opls_142,opls_141"-->]'}
            elif attributes['name'] == 'opls_146':
                assert attributes['overrides'] == 'opls_144'
                assert str(item.xpath('comment()')) == '[<!--Note: original overrides="opls_144"-->]'

    def test_load_metadata():
        lj_ff = Forcefield(get_fn('lj.xml'), strict=False)
        assert lj_ff.version == '0.4.1'
        assert lj_ff.name == 'LJ'

        lj_ff = Forcefield(forcefield_files=[get_fn('lj.xml'), get_fn('lj2.xml')])
        assert lj_ff.version == ['0.4.1', '4.8.2']
        assert lj_ff.name == ['LJ', 'JL']
    """
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 41.757515 | 133 | 0.625474 | 19,351 / 0.928685 | 0 / 0 | 1,616 / 0.077554 | 0 / 0 | 11,419 / 0.548016 |
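
Distilled from the tests above, a minimal sketch of the typing workflow; it assumes foyer, gmso, and mbuild are installed and reuses the `ethane.mol2` fixture that ships with foyer's test suite.

```python
import mbuild as mb
from foyer.general_forcefield import Forcefield
from foyer.tests.utils import get_fn

# Load a small molecule and parametrize it with the bundled OPLS-AA forcefield.
oplsaa = Forcefield(name="oplsaa", strict=False)
ethane = mb.load(get_fn("ethane.mol2"), backend="parmed")
typed = oplsaa.apply(ethane, assert_improper_params=False)

# The typed topology exposes parametrized sites and connections; per the tests
# above, ethane should have 7 bonds, 12 angles, and 9 dihedrals.
print(len(typed.bonds), len(typed.angles), len(typed.dihedrals))
```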

---

| Field | Value |
|---|---|
| hexsha | ac2e60dace04a2bf9deac82a5edb304e62120e11 |
| size · ext · lang | 2,315 · py · Python |
| repo @ head | RevanthRyo/Alize @ 60f4153c0c4b665e60c02bc90f99f833bf3173c8 |
| path | visualize/usecases/get_user_info.py |
| licenses | ["Unlicense"] |
| stars | 160 (2018-05-08T09:12:35.000Z → 2021-11-08T14:45:18.000Z) |
| issues | 15 (2018-05-08T09:13:53.000Z → 2022-03-11T23:20:39.000Z) |
| forks | 12 (2018-05-08T16:19:11.000Z → 2021-11-08T14:45:58.000Z) |

```python
import requests
from django.conf import settings

from visualize.utils.api import Client


class GetUserInfo(object):
    """
    GetUserInfo :
    params : username
    response :
    {
        "login": "torvalds",
        "id": 1024025,
        "avatar_url": "https://avatars0.githubusercontent.com/u/1024025?v=4",
        "gravatar_id": "",
        "url": "https://api.github.com/users/torvalds",
        "html_url": "https://github.com/torvalds",
        "followers_url": "https://api.github.com/users/torvalds/followers",
        "following_url": "https://api.github.com/users/torvalds/following{/other_user}",
        "gists_url": "https://api.github.com/users/torvalds/gists{/gist_id}",
        "starred_url": "https://api.github.com/users/torvalds/starred{/owner}{/repo}",
        "subscriptions_url": "https://api.github.com/users/torvalds/subscriptions",
        "organizations_url": "https://api.github.com/users/torvalds/orgs",
        "repos_url": "https://api.github.com/users/torvalds/repos",
        "events_url": "https://api.github.com/users/torvalds/events{/privacy}",
        "received_events_url": "https://api.github.com/users/torvalds/received_events",
        "type": "User",
        "site_admin": false,
        "name": "Linus Torvalds",
        "company": "Linux Foundation",
        "blog": "",
        "location": "Portland, OR",
        "email": null,
        "hireable": null,
        "bio": null,
        "public_repos": 6,
        "public_gists": 0,
        "followers": 72049,
        "following": 0,
        "created_at": "2011-09-03T15:26:22Z",
        "updated_at": "2017-11-14T16:54:03Z"
    }
    """

    def _extract_infos(self, data):
        return {
            "id": data["id"],
            "name": data["name"],
            "username": data["login"],
            "html_url": data["html_url"],
            "url": data["url"],
            "avatar": data["avatar_url"],
            "total_repos": data["public_repos"],
            "followers": data["followers"],
            "following": data["following"],
            "created_at": data["created_at"],
            "company": data["company"],
            "bio": data["bio"],
            "email": data["email"],
            "location": data["location"],
        }

    def validate(self, username):
        if not username:
            raise Exception("Invalid username")

    def execute(self, username):
        self.validate(username)
        api_response = Client().user_info(url_params={"username": username})
        if "message" in api_response:
            return False
        response = self._extract_infos(api_response)
        return response
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 31.283784 | 84 | 0.651404 | 2,224 / 0.960691 | 0 / 0 | 0 / 0 | 0 / 0 | 1,671 / 0.721814 |
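
A short usage sketch: `execute` validates the username, calls the wrapped GitHub API via `Client`, and returns either the trimmed profile dict or `False` when the API answers with an error `message`. The username below is the one from the docstring.

```python
from visualize.usecases.get_user_info import GetUserInfo

info = GetUserInfo().execute("torvalds")
if info:
    # Only the fields picked out by _extract_infos are present.
    print(info["name"], info["followers"])
else:
    print("API returned an error message (unknown user, rate limit, ...)")
```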

---

| Field | Value |
|---|---|
| hexsha | ac2ed7f7134d3ec9fcd2d668ed386c6b314f071b |
| size · ext · lang | 1,306 · py · Python |
| repo @ head | sabhishekpratap5/sonarcubeTest2 @ 9bd8105e457f6feb8c38fa94b335e54783fca99e |
| path | FusionIIIT/applications/counselling_cell/migrations/0002_auto_20210501_1036.py |
| licenses | ["bzip2-1.0.6"] |
| stars | 1 (2021-08-05T10:31:35.000Z → 2021-08-05T10:31:35.000Z) |
| issues | 1 (2021-05-05T09:50:22.000Z → 2021-05-05T09:50:22.000Z) |
| forks | 4 (2021-03-16T08:11:42.000Z → 2021-05-06T11:03:44.000Z) |

```python
# Generated by Django 3.1.5 on 2021-05-01 10:36

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('globals', '0003_auto_20191024_1242'),
        ('counselling_cell', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='counsellingfaq',
            old_name='counseliing_category',
            new_name='counselling_category',
        ),
        migrations.RenameField(
            model_name='studentmeetingrequest',
            old_name='requested_faculty_invitie',
            new_name='requested_faculty_invitee',
        ),
        migrations.RenameField(
            model_name='studentmeetingrequest',
            old_name='requested_student_invitie',
            new_name='requested_student_invitee',
        ),
        migrations.AlterField(
            model_name='counsellingissuecategory',
            name='category_id',
            field=models.CharField(max_length=40, unique=True),
        ),
        migrations.AlterField(
            model_name='counsellingmeeting',
            name='meeting_host',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='globals.extrainfo'),
        ),
    ]
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 31.853659 | 128 | 0.624043 | 1,180 / 0.903522 | 0 / 0 | 0 / 0 | 0 / 0 | 419 / 0.320827 |

---

| Field | Value |
|---|---|
| hexsha | ac2f7b7236d2c24e643b4339626cb713bad31a0c |
| size · ext · lang | 2,650 · py · Python |
| repo @ head | KodeInWork/openue_exp @ 31b46999188bf73160af84c0e6f5d3e75c0c52ea |
| path | config.py |
| licenses | ["MIT"] |
| stars | null |
| issues | null |
| forks | null |

```python
import os


class config():
    input_dir = ''
    max_len = '128'
    pretrain_model_dir = ''
    home_dir = os.getcwd() + '/'
    data_dir = home_dir + 'raw_data/ske/'
    tf_serving_addr = '127.0.0.1:8501'
    bert_vocab_dir = home_dir + 'pretrained_model/chinese_wwm_ext_L-12_H-768_A-12/vocab.txt'
    bert_config_dir = home_dir + 'pretrained_model/chinese_wwm_ext_L-12_H-768_A-12/bert_config.json'
    class_model_dir = 'output/predicate_classification_model/epochs6/model.ckpt-6000'
    seq_model_dir = 'output/sequnce_labeling_model/epochs9/model.ckpt-85304'
    middle_out_dir = './output/predicate_infer_out'
    out_dir = './output/sequnce_infer_out/epochs9/ckpt22000'

    # id 0 --> [Padding]
    token_label = ["[Padding]", "[category]", "[##WordPiece]", "[CLS]", "[SEP]",
                   "B-SUB", "I-SUB", "B-OBJ", "I-OBJ", "O"]

    # class_label = ['所需检查', '推荐用药', '疾病症状', '治疗方式']
    class_label = ['丈夫', '上映时间', '专业代码', '主持人', '主演', '主角', '人口数量', '作曲',
                   '作者', '作词', '修业年限', '出品公司', '出版社', '出生地', '出生日期', '创始人',
                   '制片人', '占地面积', '号', '嘉宾', '国籍', '妻子', '字', '官方语言', '导演',
                   '总部地点', '成立日期', '所在城市', '所属专辑', '改编自', '朝代', '歌手', '母亲',
                   '毕业院校', '民族', '气候', '注册资本', '海拔', '父亲', '目', '祖籍', '简称',
                   '编剧', '董事长', '身高', '连载网站', '邮政编码', '面积', '首都']

    schema = {
        '父亲': [('人物', '人物')],
        '妻子': [('人物', '人物')],
        '母亲': [('人物', '人物')],
        '丈夫': [('人物', '人物')],
        '祖籍': [('地点', '人物')],
        '总部地点': [('地点', '企业')],
        '出生地': [('地点', '人物')],
        '目': [('目', '生物')],
        '面积': [('Number', '行政区')],
        '简称': [('Text', '机构')],
        '上映时间': [('Date', '影视作品')],
        '所属专辑': [('音乐专辑', '歌曲')],
        '注册资本': [('Number', '企业')],
        '首都': [('城市', '国家')],
        '导演': [('人物', '影视作品')],
        '字': [('Text', '历史人物')],
        '身高': [('Number', '人物')],
        '出品公司': [('企业', '影视作品')],
        '修业年限': [('Number', '学科专业')],
        '出生日期': [('Date', '人物')],
        '制片人': [('人物', '影视作品')],
        '编剧': [('人物', '影视作品')],
        '国籍': [('国家', '人物')],
        '海拔': [('Number', '地点')],
        '连载网站': [('网站', '网络小说')],
        '朝代': [('Text', '历史人物')],
        '民族': [('Text', '人物')],
        '号': [('Text', '历史人物')],
        '出版社': [('出版社', '书籍')],
        '主持人': [('人物', '电视综艺')],
        '专业代码': [('Text', '学科专业')],
        '歌手': [('人物', '歌曲')],
        '作词': [('人物', '歌曲')],
        '主角': [('人物', '网络小说')],
        '董事长': [('人物', '企业')],
        '成立日期': [('Date', '机构'), ('Date', '企业')],
        '毕业院校': [('学校', '人物')],
        '占地面积': [('Number', '机构')],
        '官方语言': [('语言', '国家')],
        '邮政编码': [('Text', '行政区')],
        '人口数量': [('Number', '行政区')],
        '所在城市': [('城市', '景点')],
        '作者': [('人物', '图书作品')],
        '作曲': [('人物', '歌曲')],
        '气候': [('气候', '行政区')],
        '嘉宾': [('人物', '电视综艺')],
        '主演': [('人物', '影视作品')],
        '改编自': [('作品', '影视作品')],
        '创始人': [('人物', '企业')]}
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 38.970588 | 347 | 0.441132 | 3,615 / 0.996966 | 0 / 0 | 0 / 0 | 0 / 0 | 2,446 / 0.674573 |
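
The `schema` dict maps each predicate to its admissible (object type, subject type) pairs, which makes a simple type check on extracted triples possible. A sketch, assuming `config.py` is on the import path and the tuple order inferred from entries like `'出生地': [('地点', '人物')]`:

```python
# Sketch: reject extractions whose argument types don't match the schema.
# The tuple order (object type, subject type) is an assumption and should be
# double-checked against the data that produced these pairs.
from config import config

def type_compatible(predicate, object_type, subject_type):
    return (object_type, subject_type) in config.schema.get(predicate, [])

print(type_compatible('作者', '人物', '图书作品'))  # True
print(type_compatible('作者', '人物', '歌曲'))      # False: song predicates use '作曲'/'作词'
```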

---

| Field | Value |
|---|---|
| hexsha | ac3000c2861cfc54577cebc22127160c6d8e95fb |
| size · ext · lang | 86 · py · Python |
| path | ipfsApi/__init__.py |
| head | 841595f7d2b07db511eef34aa046163ee4a3020e |
| licenses | ["MIT"] |
| repo (stars view) | dardevelin/python-ipfs-api |
| stars | 1 (2018-08-14T02:10:35.000Z → 2018-08-14T02:10:35.000Z) |
| repo (issues, forks views) | ipfs-client-libraries/python-ipfs-api |
| issues | null |
| forks | 1 (2022-01-28T13:37:31.000Z → 2022-01-28T13:37:31.000Z) |

```python
from __future__ import absolute_import

from .client import *

__version__ = '0.2.2'
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 12.285714 | 38 | 0.755814 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 7 / 0.081395 |

---

| Field | Value |
|---|---|
| hexsha | ac31a080b5043ec20246c62e223e567243898b56 |
| size · ext · lang | 440 · py · Python |
| repo @ head | leprikon-cz/leprikon @ b1bec36fb6bcf0220bffccca53b6f200f9e95910 |
| path | leprikon/models/useragreement.py |
| licenses | ["BSD-3-Clause"] |
| stars | 4 (2018-10-29T17:46:09.000Z → 2021-12-16T08:57:48.000Z) |
| issues | 68 (2016-07-11T07:48:54.000Z → 2022-03-18T01:32:06.000Z) |
| forks | 2 (2016-07-12T20:39:53.000Z → 2020-10-10T03:14:42.000Z) |

```python
from django.db import models
from django.utils.translation import ugettext_lazy as _

from ..conf import settings


class UserAgreement(models.Model):
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        primary_key=True,
        verbose_name=_("user"),
        related_name="agreement",
        on_delete=models.CASCADE,
    )
    granted = models.DateTimeField(_("time of agreement"), editable=False, auto_now=True)
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 27.5 | 89 | 0.706818 | 323 / 0.734091 | 0 / 0 | 0 / 0 | 0 / 0 | 36 / 0.081818 |
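
A brief usage sketch, assuming a Django view where the user is authenticated; note that `auto_now=True` means `granted` is refreshed on every save, so the row effectively stores the latest agreement time.

```python
from leprikon.models.useragreement import UserAgreement

# Record (or re-confirm) the agreement for the current user; `request.user`
# is a placeholder for an authenticated user instance.
agreement, created = UserAgreement.objects.get_or_create(user=request.user)
print("newly granted" if created else "already on record", agreement.granted)
```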

---

| Field | Value |
|---|---|
| hexsha | ac3349d8016af9897cd5989665c953a2b04c0963 |
| size · ext · lang | 2,076 · py · Python |
| repo @ head | RossBarnie/fair-api-beta-utilities @ 4642d2d0cf5f601c55b343e788f79824acf227ee |
| path | fair-api-datasets-update.py |
| licenses | ["MIT"] |
| stars | 3 (2020-09-04T08:21:09.000Z → 2021-03-04T00:19:57.000Z) |
| issues | 9 (2020-09-02T11:23:04.000Z → 2021-09-27T15:12:56.000Z) |
| forks | 2 (2021-03-25T16:48:53.000Z → 2021-05-19T10:00:16.000Z) |

```python
import json
import sys
import os

import requests

from datasets.diff_helper import DiffHelper
from common.constants import BASE_HEADERS, FAIR_API_ENDPOINT, SSL_VERIFY, FAIR_URL, DRY_RUN


def dataset_url(code):
    return f"{FAIR_API_ENDPOINT}{code}"


def get_request(dataset_code):
    resp = requests.get(
        dataset_url(dataset_code), headers=BASE_HEADERS, verify=SSL_VERIFY
    )
    if resp.status_code != 200:
        data = resp.json()
        print(f'\nFailed to get dataset: Status code: {resp.status_code}, Error message: {data["error"]["message"]}')
        exit(1)
    return resp


def patch_request(data):
    dataset_code = data['catalogue']['id']
    resp = get_request(dataset_code)
    original = resp.json()
    diff = DiffHelper.dataset_diff(original, data)
    print(f'\nPATCH {dataset_url(dataset_code)} --data {json.dumps(diff, indent=2)}')
    if DRY_RUN:
        return  # In dry-run mode we do nothing past this point
    print('Sending request...')
    response = requests.patch(
        dataset_url(dataset_code),
        headers=BASE_HEADERS,
        json=diff,
        verify=SSL_VERIFY
    )
    data = response.json()
    if response.status_code != 200:
        print(f'Failed to patch dataset: Status code: {response.status_code}, Error message: {data["error"]["message"]}')
        exit(1)
    if len(data) != 1:
        print(f'Patched dataset: {data["code"]}')
        print(f'View on the web at: {FAIR_URL}#/data/datasets/{data["code"]}')
    else:
        print(f'Expected 1 dataset in response - received {(data)}')


# Script must be run with at least 1 argument
if len(sys.argv) < 2:
    print(f'Usage: {sys.argv[0]} <path to dataset definition json file> <--dry-run>')
    exit(1)

# First argument must be a path to a file
definition_file = sys.argv[1]
if not os.path.isfile(definition_file):
    print(f'Provided path "{definition_file}" does not seem to be a file, ensure the path is correct and try again')
    exit(1)

with open(definition_file) as fh:
    payload = fh.read()
    data = json.loads(payload)
    patch_request(data)
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 34.032787 | 121 | 0.67341 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 807 / 0.388728 |
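
For reference, a sketch of an input file the script would accept: `patch_request` only requires `data['catalogue']['id']` to locate the dataset, so every other field here is hypothetical; how `--dry-run` maps onto `DRY_RUN` is handled inside `common.constants`, which is not shown in this record.

```python
import json

# Hypothetical dataset definition; only catalogue.id is required by the script.
definition = {
    "catalogue": {
        "id": "MY-DATASET-CODE",          # placeholder dataset code
        "title": "Updated dataset title"  # placeholder field to patch
    }
}
with open("dataset.json", "w") as fh:
    json.dump(definition, fh, indent=2)

# Then preview the PATCH without sending it:
#   python fair-api-datasets-update.py dataset.json --dry-run
```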

---

| Field | Value |
|---|---|
| hexsha | ac343909a3ad8704870bfb000e6780fc9350783a |
| size · ext · lang | 18,207 · py · Python |
| repo @ head | sheecegardezi/sifra @ a7af896159ea7db231e23aeab187b7493887a080 |
| path | scripts/convert_excel_files_to_json.py |
| licenses | ["Apache-2.0"] |
| stars | null |
| issues | null |
| forks | null |

```python
import os
import json
from collections import OrderedDict

import pandas as pd


def write_json_to_file(json_data, json_file_path):
    parsed = json.loads(json_data)
    parsed = json.dumps(parsed, indent=4, sort_keys=True)
    obj = open(json_file_path, 'w')
    obj.write(parsed)
    obj.close()


# replace " with ' if they occur within brackets
# eg {"key":"["Key":"value"]"} => {"key":"['Key':'value']"}
def standardize_json_string(json_string):
    inside_brackets_flag = False
    standard_json_string = ""
    for i in range(0, len(json_string)):
        if json_string[i] == '[':
            inside_brackets_flag = True
        if json_string[i] == ']':
            inside_brackets_flag = False
        if inside_brackets_flag:
            if json_string[i] == '\"':
                standard_json_string += "\'"
            else:
                standard_json_string += json_string[i]
        else:
            standard_json_string += json_string[i]
    # Note: json object can't have python lists as keys
    # standard_json_string \
    #     = standard_json_string.replace("\"[", "[").replace("]\"", "]")
    return standard_json_string


def update_json_structure(main_json_obj):
    system_meta = main_json_obj["system_meta"]
    sysout_setup = main_json_obj["sysout_setup"]
    sysinp_setup = main_json_obj["sysinp_setup"]
    node_conn_df = main_json_obj["node_conn_df"]
    component_list = main_json_obj["component_list"]
    damage_state_df = main_json_obj["damage_state_df"]
    fragility_data = main_json_obj["fragility_data"]

    new_json_structure = OrderedDict()
    new_json_structure["system_meta"] = system_meta
    new_json_structure["sysout_setup"] = sysout_setup
    new_json_structure["sysinp_setup"] = sysinp_setup
    new_json_structure["node_conn_df"] = node_conn_df
    new_json_structure["component_list"] = OrderedDict()

    for component in component_list:
        new_json_structure["component_list"][component] = OrderedDict()
        new_json_structure["component_list"][component]["component_class"] \
            = component_list[component]["component_class"]
        new_json_structure["component_list"][component]["component_type"] \
            = component_list[component]["component_type"]
        new_json_structure["component_list"][component]["cost_fraction"] \
            = component_list[component]["cost_fraction"]
        new_json_structure["component_list"][component]["node_cluster"] \
            = component_list[component]["node_cluster"]
        new_json_structure["component_list"][component]["node_type"] \
            = component_list[component]["node_type"]
        new_json_structure["component_list"][component]["operating_capacity"] \
            = component_list[component]["op_capacity"]
        new_json_structure["component_list"][component]["longitude"] \
            = component_list[component]["pos_x"]
        new_json_structure["component_list"][component]["latitude"] \
            = component_list[component]["pos_y"]

        new_json_structure["component_list"][component] \
            ["damages_states_constructor"] = OrderedDict()
        new_json_structure["component_list"][component] \
            ["damages_states_constructor"]["0"] = OrderedDict()
        new_json_structure["component_list"][component] \
            ["damages_states_constructor"]["0"]["damage_state_name"] \
            = "DS0 None"
        new_json_structure["component_list"][component] \
            ["damages_states_constructor"]["0"]["functionality"] \
            = 1.0
        new_json_structure["component_list"][component] \
            ["damages_states_constructor"]["0"]["damage_ratio"] \
            = 0.0

        new_json_structure["component_list"][component] \
            ["damages_states_constructor"]["0"] \
            ["response_function_constructor"] = OrderedDict()
        new_json_structure["component_list"][component] \
            ["damages_states_constructor"]["0"] \
            ["response_function_constructor"]["function_name"] \
            = "Level0Response"
        new_json_structure["component_list"][component] \
            ["damages_states_constructor"]["0"] \
            ["response_function_constructor"]["damage_state_definition"] \
            = "Not Available."

        new_json_structure["component_list"][component] \
            ["damages_states_constructor"]["0"] \
            ["recovery_function_constructor"] = OrderedDict()
        new_json_structure["component_list"][component] \
            ["damages_states_constructor"]["0"] \
            ["recovery_function_constructor"]["function_name"] \
            = "Level0Response"
        new_json_structure["component_list"][component] \
            ["damages_states_constructor"]["0"] \
            ["recovery_function_constructor"]["recovery_state_definition"] \
            = "Not Available."

        counter = 0
        for key in fragility_data.keys():
            component_type = eval(key)[1]
            damage_state = eval(key)[2]

            if component_type == component_list[component]["component_type"]:
                damage_states_in_component = [
                    new_json_structure["component_list"][component]
                    ["damages_states_constructor"][ds]["damage_state_name"]
                    for ds in
                    new_json_structure["component_list"][component]
                    ["damages_states_constructor"]
                ]

                if damage_state not in damage_states_in_component:
                    counter = counter + 1
                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] = OrderedDict()
                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] \
                        ["damage_state_name"] = damage_state
                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] \
                        ["functionality"] = fragility_data[key]["functionality"]
                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] \
                        ["damage_ratio"] = fragility_data[key]["damage_ratio"]

                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] \
                        ["response_function_constructor"] = OrderedDict()

                    if fragility_data[key]["is_piecewise"] == "no":
                        # -----------------------------------------------------
                        # <BEGIN> Non-piecewise damage function
                        new_json_structure["component_list"][component] \
                            ["damages_states_constructor"][counter] \
                            ["response_function_constructor"] \
                            ["function_name"] = fragility_data[key]["damage_function"]
                        new_json_structure["component_list"][component] \
                            ["damages_states_constructor"][counter] \
                            ["response_function_constructor"] \
                            ["median"] = fragility_data[key]["median"]
                        new_json_structure["component_list"][component] \
                            ["damages_states_constructor"][counter] \
                            ["response_function_constructor"] \
                            ["beta"] = fragility_data[key]["beta"]
                        new_json_structure["component_list"][component] \
                            ["damages_states_constructor"][counter] \
                            ["response_function_constructor"] \
                            ["location"] = fragility_data[key]["location"]
                        new_json_structure["component_list"][component] \
                            ["damages_states_constructor"][counter] \
                            ["response_function_constructor"] \
                            ["fragility_source"] = fragility_data[key]["fragility_source"]
                        new_json_structure["component_list"][component] \
                            ["damages_states_constructor"][counter] \
                            ["response_function_constructor"] \
                            ["minimum"] = fragility_data[key]["minimum"]
                        if key in damage_state_df.keys():
                            new_json_structure["component_list"][component] \
                                ["damages_states_constructor"][counter] \
                                ["response_function_constructor"] \
                                ["damage_state_definition"] \
                                = damage_state_df[str(eval(key).pop(0))]
                        else:
                            new_json_structure["component_list"][component] \
                                ["damages_states_constructor"][counter] \
                                ["response_function_constructor"] \
                                ["damage_state_definition"] = "Not Available."
                        # <END> Non-piecewise damage function
                        # ---------------------------------------------------------
                    # <BEGIN> Piecewise defined damage function
                    else:
                        new_json_structure["component_list"][component] \
                            ["damages_states_constructor"][counter] \
                            ["response_function_constructor"] \
                            ["function_name"] = "PiecewiseFunction"
                        new_json_structure["component_list"][component] \
                            ["damages_states_constructor"][counter] \
                            ["response_function_constructor"] \
                            ["piecewise_function_constructor"] = []

                        tempDic = OrderedDict()
                        tempDic["function_name"] = fragility_data[key]["damage_function"]
                        tempDic["median"] = fragility_data[key]["median"]
                        tempDic["beta"] = fragility_data[key]["beta"]
                        tempDic["location"] = fragility_data[key]["location"]
                        tempDic["fragility_source"] = fragility_data[key]["fragility_source"]
                        tempDic["minimum"] = fragility_data[key]["minimum"]
                        if key in damage_state_df.keys():
                            tempDic["damage_state_definition"] \
                                = damage_state_df[str(eval(key).pop(0))]
                        else:
                            tempDic["damage_state_definition"] = "Not Available."
                        new_json_structure["component_list"][component] \
                            ["damages_states_constructor"][counter] \
                            ["response_function_constructor"] \
                            ["piecewise_function_constructor"].append(tempDic)
                    # <END> Piecewise defined damage function
                    # ---------------------------------------------------------

                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] \
                        ["recovery_function_constructor"] = OrderedDict()
                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] \
                        ["recovery_function_constructor"] \
                        ["function_name"] = fragility_data[key]["recovery_function"]
                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] \
                        ["recovery_function_constructor"] \
                        ["norm_mean"] = fragility_data[key]["recovery_mean"]
                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] \
                        ["recovery_function_constructor"] \
                        ["norm_stddev"] = fragility_data[key]["recovery_std"]
                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] \
                        ["recovery_function_constructor"] \
                        ["recovery_state_definition"] = "Not Available."
                else:
                    tempDic = OrderedDict()
                    tempDic["function_name"] = fragility_data[key]["damage_function"]
                    tempDic["median"] = fragility_data[key]["median"]
                    tempDic["beta"] = fragility_data[key]["beta"]
                    tempDic["location"] = fragility_data[key]["location"]
                    tempDic["fragility_source"] = fragility_data[key]["fragility_source"]
                    tempDic["minimum"] = fragility_data[key]["minimum"]
                    if key in damage_state_df.keys():
                        tempDic["damage_state_definition"] \
                            = damage_state_df[str(eval(key).pop(0))]
                    else:
                        tempDic["damage_state_definition"] = "Not Available."
                    new_json_structure["component_list"][component] \
                        ["damages_states_constructor"][counter] \
                        ["response_function_constructor"] \
                        ["piecewise_function_constructor"].append(tempDic)

    return new_json_structure


def read_excel_to_json(excel_file_path):
    system_meta = pd.read_excel(
        excel_file_path, sheet_name='system_meta',
        index_col=0, header=0,
        skiprows=0, skipinitialspace=True)
    system_meta = system_meta.to_json(orient='index')
    system_meta = standardize_json_string(system_meta)

    component_list = pd.read_excel(
        excel_file_path, sheet_name='component_list',
        index_col=0, header=0,
        skiprows=0, skipinitialspace=True)
    component_list = component_list.to_json(orient='index')
    component_list = standardize_json_string(component_list)

    node_conn_df = pd.read_excel(
        excel_file_path, sheet_name='component_connections',
        index_col=None, header=0,
        skiprows=0, skipinitialspace=True)
    node_conn_df = node_conn_df.to_json(orient='index')
    node_conn_df = standardize_json_string(node_conn_df)

    sysinp_setup = pd.read_excel(
        excel_file_path, sheet_name='supply_setup',
        index_col=0, header=0,
        skiprows=0, skipinitialspace=True)
    sysinp_setup = sysinp_setup.to_json(orient='index')
    sysinp_setup = standardize_json_string(sysinp_setup)

    sysout_setup = pd.read_excel(
        excel_file_path, sheet_name='output_setup',
        index_col=0, header=0,
        skiprows=0, skipinitialspace=True)
    sysout_setup = sysout_setup.sort_values(by=['priority'], ascending=True)
    sysout_setup = sysout_setup.to_json(orient='index')
    sysout_setup = standardize_json_string(sysout_setup)

    fragility_data = pd.read_excel(
        excel_file_path, sheet_name='comp_type_dmg_algo',
        index_col=[0, 1, 2], header=0,
        skiprows=0, skipinitialspace=True)
    fragility_data = fragility_data.to_json(orient='index')
    fragility_data = standardize_json_string(fragility_data)

    damage_state_df = pd.read_excel(
        excel_file_path, sheet_name='damage_state_def',
        index_col=[0, 1], header=0,
        skiprows=0, skipinitialspace=True)
    damage_state_df = damage_state_df.to_json(orient='index')
    damage_state_df = standardize_json_string(damage_state_df)

    sys_model_json = '{ ' \
        '"system_meta": ' + system_meta + ',' \
        '"component_list": ' + component_list + ',' \
        '"node_conn_df": ' + node_conn_df + ',' \
        '"sysinp_setup": ' + sysinp_setup + ',' \
        '"sysout_setup": ' + sysout_setup + ',' \
        '"fragility_data": ' + fragility_data + ',' \
        '"damage_state_df": ' + damage_state_df + \
        ' }'

    return sys_model_json


def main():
    # get list of all the excel files
    model_file_paths = []
    for root, dir_names, file_names in os.walk(os.path.dirname(os.getcwd())):
        for file_name in file_names:
            if file_name.endswith('.xlsx'):
                if 'models' in root:
                    excel_file_path = os.path.join(root, file_name)
                    model_file_paths.append(excel_file_path)

    for excel_file_path in model_file_paths:
        parent_folder_name = os.path.dirname(excel_file_path)
        file_name = os.path.splitext(os.path.basename(excel_file_path))[0]
        json_obj = json.loads(read_excel_to_json(excel_file_path),
                              object_pairs_hook=OrderedDict)
        new_json_structure_obj = update_json_structure(json_obj)
        parsed = json.dumps(new_json_structure_obj, indent=4, sort_keys=True)
        json_file_path = os.path.join(parent_folder_name, file_name + '.json')
        write_json_to_file(parsed, json_file_path)


if __name__ == "__main__":
    main()
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 43.766827 | 79 | 0.543417 | 0 / 0 | 0 / 0 | 0 / 0 | 0 / 0 | 5,040 / 0.276817 |
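
To illustrate `standardize_json_string`, which the comment in the file describes: double quotes that fall between `[` and `]` are rewritten as single quotes, so the tuple-style index keys emitted by `pandas.DataFrame.to_json(orient='index')` survive as one parseable JSON string value. A tiny check, assuming the function above is in scope:

```python
# Mirrors the example in the source comment:
# {"key":"["Key":"value"]"}  =>  {"key":"['Key':'value']"}
s = '{"key":"["Key":"value"]"}'
print(standardize_json_string(s))  # {"key":"['Key':'value']"}
```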

---

| Field | Value |
|---|---|
| hexsha | ac36030e8e89e493e372409b81e3e6f1ab9b3e03 |
| size · ext · lang | 1,173 · py · Python |
| repo @ head | cyy1229/QUANTAXIS @ 320eff53dfa2cde8032a5e066499f4da0b5064a2 |
| path | QUANTAXIS/example/DataFetcher.py |
| licenses | ["MIT"] |
| stars | null |
| issues | null |
| forks | null |

```python
from QUANTAXIS import QA_fetch_stock_day_adv, QA_fetch_stock_list_adv, QA_fetch_stock_day_full_adv, QA_Setting
import pandas as pd

QASETTING = QA_Setting()
DATABASE = QASETTING.client.quantaxis


# def getAllTradeCal():
#     return pd.DataFrame(DATABASE.trade_date.find({"is_open": 1}))


class MongoDataLoader:
    def __init__(self):
        pass

    def load_stock_day(self,
                       code,
                       start='all',
                       end=None
                       ):
        QA_fetch_stock_day_adv(code, start, end)

    def load_stock_list(self):
        return QA_fetch_stock_list_adv()

    def load_trade_cal(self):
        return pd.DataFrame(DATABASE.trade_date.find({"is_open": 1}))

    def load_stock_day_full(self, date):
        return QA_fetch_stock_day_full_adv(date)

    '''Load tushare daily-bar data for the given date range'''
    def load_tushare_stock_day(self, end, start='20150101'):
        return pd.DataFrame(DATABASE.tushare_stock_day.find({"trade_date": {
            "$lte": end,
            "$gte": start
        }}))


if __name__ == '__main__':
    print(MongoDataLoader().load_tushare_stock_day(end='20210630'))
```

| avg_line_length | max_line_length | alphanum_fraction | classes | generators | decorators | async functions | documentation |
|---|---|---|---|---|---|---|---|
| 27.27907 | 110 | 0.627451 | 808 / 0.675021 | 0 / 0 | 0 / 0 | 0 / 0 | 207 / 0.172932 |

---

| Field | Value |
|---|---|
| hexsha | ac36a8837c04b4a05f6772531a3bc4400bce36fc |
| size · ext · lang | 2,934 · py · Python |
| repo @ head | damslab/reproducibility @ f7804b2513859f7e6f14fa7842d81003d0758bf8 |
| path | temp-uplift-submission/sparkml/adult_spark.py |
| licenses | ["Apache-2.0"] |
| stars | 4 (2021-12-10T17:20:26.000Z → 2021-12-27T14:38:40.000Z) |
| issues | null |
| forks | null |
import sys
import time
import numpy as np
import scipy as sp
import pandas as pd
import math
import warnings
from pyspark.sql import SparkSession
from pyspark import StorageLevel
from pyspark.ml.feature import Normalizer
from pyspark.ml.feature import StringIndexer, StandardScaler
from pyspark.ml.feature import OneHotEncoder, VectorAssembler
from pyspark.ml.feature import QuantileDiscretizer
from pyspark.sql.types import StringType, DoubleType
from pyspark.ml import Pipeline
def readNprep(spark):
# Read and isolate target and training data
adult = spark.read.options(inferSchema='True', delimiter=',') \
.csv("file:/home/aphani/datasets/adult.data")
adult.persist(StorageLevel.MEMORY_ONLY)
print(adult.printSchema())
print(adult.show(5))
return adult
def transform(adult):
# Seperate out the numeric, categorical inputs
numeric_df = adult.select([f.name for f in adult.schema.fields if isinstance(f.dataType, DoubleType)])
cat_df = adult.select([f.name for f in adult.schema.fields if isinstance(f.dataType, StringType)])
# Bin the numerical columns
outCols = ['{0}_bin'.format(out) for out in numeric_df.columns]
binner = QuantileDiscretizer(inputCols=numeric_df.columns, outputCols=outCols, numBuckets=5)
# Recode and dummycode the categorical features
outCols = ['{0}_rc'.format(out) for out in cat_df.columns]
indexer = StringIndexer(inputCols=cat_df.columns, outputCols=outCols)
inCols = binner.getOutputCols() + indexer.getOutputCols()
outCols = ['{0}_dc'.format(out) for out in inCols]
one_hot = OneHotEncoder(dropLast=False, inputCols=inCols, outputCols=outCols)
# Make a pipeline and apply
pipe = Pipeline(stages=[binner, indexer, one_hot])
encoded = pipe.fit(adult).transform(adult)
return encoded
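# A minimal, self-contained sketch of the same discretize -> index -> one-hot
# pattern on a tiny in-memory frame. The column names below are illustrative,
# not taken from the adult dataset, and the multi-column QuantileDiscretizer /
# StringIndexer APIs used here (and above) require pyspark >= 3.0:
#   df = spark.createDataFrame(
#       [(1.0, 'a'), (2.0, 'b'), (3.0, 'a'), (4.0, 'b')], ['num', 'cat'])
#   binner = QuantileDiscretizer(inputCols=['num'], outputCols=['num_bin'],
#                                numBuckets=2)
#   indexer = StringIndexer(inputCols=['cat'], outputCols=['cat_rc'])
#   one_hot = OneHotEncoder(dropLast=False, inputCols=['num_bin', 'cat_rc'],
#                           outputCols=['num_bin_dc', 'cat_rc_dc'])
#   Pipeline(stages=[binner, indexer, one_hot]).fit(df).transform(df).show()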
spark = SparkSession\
.builder\
.master("local[*]")\
.config("spark.driver.memory", "110g")\
.config("spark.kryoserializer.buffer.max", "1024m")\
.appName("CriteoBySparkML")\
.getOrCreate()
spark.sparkContext.setLogLevel('ERROR')
X = readNprep(spark)
# The 1st call may read the dataset. Don't count the 1st call
t1 = time.time()
X_prep1 = transform(X)
print("Elapsed time for transformations via sparkml = %s sec" % (time.time() - t1))
# Average of three timed calls
totTime = 0
for _ in range(3):
    t1 = time.time()
    X_prep = transform(X)
    totTime = totTime + ((time.time() - t1) * 1000)  # millisec
    print("Elapsed time for transformations via sparkml = %s sec" % (time.time() - t1))
print("Average elapsed time = %s millisec" % (totTime / 3))
| 36.675
| 106
| 0.720177
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 732
| 0.249489
|
ac373cedd278bb0dc68d30fdfd250e87a1a074b4
| 800
|
py
|
Python
|
populate/entities/SmellEmission.py
|
Odeuropa/knowledge-graph
|
c9c10cb984e79760b202325fe4314c8706de26fa
|
[
"Apache-2.0"
] | null | null | null |
populate/entities/SmellEmission.py
|
Odeuropa/knowledge-graph
|
c9c10cb984e79760b202325fe4314c8706de26fa
|
[
"Apache-2.0"
] | null | null | null |
populate/entities/SmellEmission.py
|
Odeuropa/knowledge-graph
|
c9c10cb984e79760b202325fe4314c8706de26fa
|
[
"Apache-2.0"
] | null | null | null |
import re
from .Entity import Entity
from .SmellSource import SmellSource
from .ontologies import ODEUROPA
class SmellEmission(Entity):
def __init__(self, seed, smell, source='', carrier='', lang='en'):
super().__init__(seed)
self.setclass(ODEUROPA.L12_Smell_Emission)
self.add(ODEUROPA.F1_generated, smell)
for i, x in enumerate(source.split(' | ')):
if not x:
break
s = re.sub(r'^of ', '', x).strip()
self.add(ODEUROPA.F3_had_source, SmellSource(seed + str(i), s, lang))
for i, x in enumerate(carrier.split(' | ')):
if not x:
break
c = re.sub(r'^of ', '', x).strip()
self.add(ODEUROPA.F4_had_carrier, SmellSource(seed + str(i) + 'c', c, lang))
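# A tiny self-contained check of the source-string normalisation used above
# (the sample strings are illustrative):
#   import re
#   for raw in ['of rose', 'musk']:
#       print(re.sub(r'^of ', '', raw).strip())   # -> 'rose', then 'musk'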
| 30.769231
| 88
| 0.57125
| 689
| 0.86125
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.04875
|
ac37bd5c4fb9186e20a9020b619a9482fbce7644
| 489
|
py
|
Python
|
constants.py
|
duongntbk/ToyMachineLearning
|
0a9cb02ddaebc3e4064d7ecb04bc654a08f4b1ee
|
[
"MIT"
] | null | null | null |
constants.py
|
duongntbk/ToyMachineLearning
|
0a9cb02ddaebc3e4064d7ecb04bc654a08f4b1ee
|
[
"MIT"
] | null | null | null |
constants.py
|
duongntbk/ToyMachineLearning
|
0a9cb02ddaebc3e4064d7ecb04bc654a08f4b1ee
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
MNIST_DATASET_PATH = 'raw_data/mnist.pkl.gz'
TEST_FOLDER = 'test/'
TRAIN_FOLDER = 'train/'
MODEL_FILE_PATH = 'model/recognizer.pickle'
LABEL_ENCODER_FILE_PATH = 'model/label_encoder.pickle'
# Help messages
DEMO_HELP_MSG = '\n' + \
'Input parameter is incorrect\n' + \
'Display help: \'python demo.py -h\''
TRAINER_HELP_MSG = '\n' + \
'Input parameter is incorrect\n' + \
'Display help: \'python extractor.py -h\''
| 30.5625
| 58
| 0.619632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 273
| 0.558282
|
ac3856d097cb7252fe1c11137bb19dafaaf4bd31
| 435
|
py
|
Python
|
python3/cut_the_sticks.py
|
ahavrylyuk/hackerrank
|
a8be83c8166a05f6f91bdd86cca3d4c544428b4b
|
[
"MIT"
] | null | null | null |
python3/cut_the_sticks.py
|
ahavrylyuk/hackerrank
|
a8be83c8166a05f6f91bdd86cca3d4c544428b4b
|
[
"MIT"
] | null | null | null |
python3/cut_the_sticks.py
|
ahavrylyuk/hackerrank
|
a8be83c8166a05f6f91bdd86cca3d4c544428b4b
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
def cut_the_sticks(a):
cuts = []
while len(a) > 0:
cutter = a.pop()
if cutter == 0:
continue
for i in range(len(a)):
a[i] -= cutter
cuts.append(len(a) + 1)
return cuts
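# Worked example: cut_the_sticks(sorted([5, 4, 4, 2, 2, 8], reverse=True))
# returns [6, 4, 2, 1] -- six sticks are cut first, then four, two, and one.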
if __name__ == '__main__':
_ = input()
value = map(int, input().split(' '))
res = cut_the_sticks(sorted(value, reverse=True))
for v in res:
print(v)
| 20.714286
| 53
| 0.508046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 35
| 0.08046
|
ac3afa06674bd280c370406d538274f60a4acaa0
| 2,330
|
py
|
Python
|
ex01_search.py
|
tbaptista/pacman
|
f30213e1104b794996204fa0a4ac90c583f8a2e4
|
[
"Apache-2.0"
] | 1
|
2019-01-10T05:37:10.000Z
|
2019-01-10T05:37:10.000Z
|
ex01_search.py
|
tbaptista/pacman
|
f30213e1104b794996204fa0a4ac90c583f8a2e4
|
[
"Apache-2.0"
] | null | null | null |
ex01_search.py
|
tbaptista/pacman
|
f30213e1104b794996204fa0a4ac90c583f8a2e4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# -----------------------------------------------------------------------------
# Copyright (c) 2015 Tiago Baptista
# All rights reserved.
# -----------------------------------------------------------------------------
"""
Path-finding exercise using the pac-man game. Using the mouse, choose a target
location for the pac-man agent. Given this target the agent should compute the
path to that location.
"""
from __future__ import division
__docformat__ = 'restructuredtext'
__author__ = 'Tiago Baptista'
__version__ = '1.0'
import pacman
import pyafai
from pyglet.window import mouse
class SearchAgent(pacman.PacmanAgent):
def __init__(self, x, y, cell):
super(SearchAgent, self).__init__(x, y, cell)
self._target = None
self._path = []
@property
def target(self):
return self._target
@target.setter
def target(self, value):
self._target = value
# We are changing destination, so invalidate current path
if value is not None and self._path:
self._path = []
def _think(self, delta):
# If a target has been set
if self._target is not None:
self._path = [] # TODO: execute the search algorithm
self._target = None
# If we have a non empty path
if self._path:
# Execute the next action on the path
next_action = self._path.pop(0)
return [self._actions[next_action]]
class SearchDisplay(pacman.PacmanDisplay):
def on_mouse_release(self, x, y, button, modifiers):
super(SearchDisplay, self).on_mouse_release(x, y, button, modifiers)
if button == mouse.LEFT:
x1, y1 = self.world.get_cell(x, y)
# send agent to x1, y1
if isinstance(self.world.player, SearchAgent):
self.world.player.target = (x1, y1)
elif button == mouse.RIGHT:
x1, y1 = self.world.get_cell(x, y)
print("Cell: ({}, {})".format(x1, y1))
print("Valid neighbours:", self.world.graph.get_connections((x1, y1)))
def setup():
world = pacman.PacmanWorld(20, 'levels/pacman.txt')
display = SearchDisplay(world)
# create pacman agent
world.spawn_player(SearchAgent)
if __name__ == '__main__':
setup()
pyafai.run()
| 28.072289
| 82
| 0.584979
| 1,492
| 0.640343
| 0
| 0
| 271
| 0.116309
| 0
| 0
| 750
| 0.321888
|
ac3bc1d1f68c8f2adb204c9c5f0374180c3d4c1e
| 3,867
|
py
|
Python
|
site_search/tests/test_permissions.py
|
AccentDesign/djangocms-site-search
|
90ed1e5ab5fe96be8f1a4a74994f18164a7363aa
|
[
"MIT"
] | 1
|
2019-06-06T12:56:30.000Z
|
2019-06-06T12:56:30.000Z
|
site_search/tests/test_permissions.py
|
AccentDesign/djangocms-site-search
|
90ed1e5ab5fe96be8f1a4a74994f18164a7363aa
|
[
"MIT"
] | null | null | null |
site_search/tests/test_permissions.py
|
AccentDesign/djangocms-site-search
|
90ed1e5ab5fe96be8f1a4a74994f18164a7363aa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.test import TestCase
from cms.api import assign_user_to_page, create_page
from ..helpers import get_request
from ..views import SearchResultsView
class PermissionsTestCase(TestCase):
def setUp(self):
self.view = SearchResultsView()
self.request = get_request('en')
self.request.GET = self.request.GET.copy()
self.request.GET['q'] = 'test_page'
self.view.request = self.request
self.user = User.objects.create_user(
username='jacob', email='jacob@…', password='top_secret')
self.other_user = User.objects.create_user(
username='fred', email='fred@…', password='top_secret')
def _create_page(self, **data):
return create_page(
title='test_page',
reverse_id='testpage',
template='test.html',
language='en',
**data
)
####################################################################
# login_required #
####################################################################
def test_not_included_when_login_required_and_user_anonymous(self):
page = self._create_page(login_required=True)
page.publish('en')
self.assertEqual(len(self.view.get_queryset()), 0)
def test_included_when_login_required_when_user_logged_in(self):
self.view.request.user = self.user
page = self._create_page(login_required=True)
page.publish('en')
self.assertEqual(len(self.view.get_queryset()), 1)
####################################################################
# page permissions #
####################################################################
def test_included_when_perm_set_and_this_user_included(self):
self.view.request.user = self.user
page = self._create_page(login_required=True)
page.publish('en')
assign_user_to_page(page, self.user, can_view=True)
self.assertEqual(len(self.view.get_queryset()), 1)
def test_not_included_when_perm_set_and_this_user_not_included(self):
self.view.request.user = self.user
page = self._create_page(login_required=True)
page.publish('en')
assign_user_to_page(page, self.other_user, can_view=True)
self.assertEqual(len(self.view.get_queryset()), 0)
def test_included_when_no_perm_set(self):
self.view.request.user = self.user
page = self._create_page(login_required=True)
page.publish('en')
self.assertEqual(len(self.view.get_queryset()), 1)
####################################################################
# ensure perms still valid when login_required was not ticked #
####################################################################
def test_included_when_perm_set_and_this_user_included_2(self):
self.view.request.user = self.user
page = self._create_page(login_required=False)
page.publish('en')
assign_user_to_page(page, self.user, can_view=True)
self.assertEqual(len(self.view.get_queryset()), 1)
def test_not_included_when_perm_set_and_this_user_not_included_2(self):
self.view.request.user = self.user
page = self._create_page(login_required=False)
page.publish('en')
assign_user_to_page(page, self.other_user, can_view=True)
self.assertEqual(len(self.view.get_queryset()), 0)
def test_included_when_no_perm_set_2(self):
self.view.request.user = self.user
page = self._create_page(login_required=False)
page.publish('en')
self.assertEqual(len(self.view.get_queryset()), 1)
| 40.28125
| 75
| 0.58495
| 3,599
| 0.929734
| 0
| 0
| 0
| 0
| 0
| 0
| 779
| 0.20124
|
ac3bc471644b6e8784c772369a7f273ad6a22e32
| 12,179
|
py
|
Python
|
FSO_Comm_Demo.py
|
MansourM61/FSO-Comm-GnuRadio-Module
|
44bfefaa95fb9af19f9817029f663892b0f84417
|
[
"MIT"
] | 6
|
2019-10-31T10:02:49.000Z
|
2022-03-03T21:42:19.000Z
|
FSO_Comm_Demo.py
|
MansourM61/FSO-Comm-GnuRadio-Module
|
44bfefaa95fb9af19f9817029f663892b0f84417
|
[
"MIT"
] | null | null | null |
FSO_Comm_Demo.py
|
MansourM61/FSO-Comm-GnuRadio-Module
|
44bfefaa95fb9af19f9817029f663892b0f84417
|
[
"MIT"
] | 2
|
2022-01-03T07:59:44.000Z
|
2022-01-30T11:25:21.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: FSO Communication Block Modules Test
# Author: M Mansour Abadi
# Description: Modules from FSO_Comm are used in a simple FSO communication link including various channel effects.
# Generated: Tue Oct 29 17:50:38 2019
##################################################
from distutils.version import StrictVersion
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt5 import Qt, QtCore
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import FSO_Comm
import numpy
import sip
import sys
from gnuradio import qtgui
class FSO_Comm_Demo(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "FSO Communication Block Modules Test")
Qt.QWidget.__init__(self)
self.setWindowTitle("FSO Communication Block Modules Test")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "FSO_Comm_Demo")
if StrictVersion(Qt.qVersion()) < StrictVersion("5.0.0"):
self.restoreGeometry(self.settings.value("geometry").toByteArray())
else:
self.restoreGeometry(self.settings.value("geometry", type=QtCore.QByteArray))
##################################################
# Variables
##################################################
self.wavelength = wavelength = 850e-9
self.vis = vis = 1000
self.samp_rate = samp_rate = 32000
self.link_len = link_len = 100
self.jitter = jitter = 0.05
self.Z_0 = Z_0 = 50
self.Tx_Dia = Tx_Dia = 3e-3
self.Theta_0 = Theta_0 = 0.05
self.T_0 = T_0 = 50e-3
self.Sample_Offset = Sample_Offset = 0
self.Rx_Dia = Rx_Dia = 50e-3
self.Resp = Resp = 0.7
self.P_n = P_n = 1e-6
self.P_0 = P_0 = 1e-3
self.Gain = Gain = 1e3
self.Ext_Ratio = Ext_Ratio = 10
self.Cn2 = Cn2 = 5e-12
self.Chunck_Size = Chunck_Size = 200
self.BW = BW = 2.5e3
##################################################
# Blocks
##################################################
self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
1024*20, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.10)
self.qtgui_time_sink_x_0.set_y_axis(-0.1e-3, 2.5e-3)
self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(False)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0.enable_control_panel(False)
self.qtgui_time_sink_x_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_time_sink_x_0_win)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_char*1, samp_rate,True)
self.blocks_repeat_0 = blocks.repeat(gr.sizeof_float*1, 4)
self.blocks_char_to_float_0 = blocks.char_to_float(1, 1)
self.analog_random_source_x_0 = blocks.vector_source_b(map(int, numpy.random.randint(0, 2, 1000)), True)
self.FSO_Comm_Turbulence_ff_0 = FSO_Comm.Turbulence_ff(Cn2, wavelength, link_len, Rx_Dia, T_0, samp_rate)
self.FSO_Comm_Pointing_Errors_ff_0 = FSO_Comm.Pointing_Errors_ff(jitter, link_len, Tx_Dia, Theta_0, Rx_Dia, T_0, samp_rate)
self.FSO_Comm_Optical_Receiver_ff_0 = FSO_Comm.Optical_Receiver_ff(Resp, Gain, Z_0, P_n)
self.FSO_Comm_Laser_ff_0 = FSO_Comm.Laser_ff(P_0, wavelength, Ext_Ratio)
self.FSO_Comm_Geometric_Loss_ff_0 = FSO_Comm.Geometric_Loss_ff(Tx_Dia, Theta_0, link_len, Rx_Dia)
self.FSO_Comm_FogSmoke_Loss_ff_0 = FSO_Comm.FogSmoke_Loss_ff(wavelength, link_len, vis)
##################################################
# Connections
##################################################
self.connect((self.FSO_Comm_FogSmoke_Loss_ff_0, 0), (self.FSO_Comm_Optical_Receiver_ff_0, 0))
self.connect((self.FSO_Comm_Geometric_Loss_ff_0, 0), (self.FSO_Comm_Turbulence_ff_0, 0))
self.connect((self.FSO_Comm_Laser_ff_0, 0), (self.FSO_Comm_Geometric_Loss_ff_0, 0))
self.connect((self.FSO_Comm_Optical_Receiver_ff_0, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.FSO_Comm_Pointing_Errors_ff_0, 0), (self.FSO_Comm_FogSmoke_Loss_ff_0, 0))
self.connect((self.FSO_Comm_Turbulence_ff_0, 0), (self.FSO_Comm_Pointing_Errors_ff_0, 0))
self.connect((self.analog_random_source_x_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_char_to_float_0, 0), (self.blocks_repeat_0, 0))
self.connect((self.blocks_repeat_0, 0), (self.FSO_Comm_Laser_ff_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.blocks_char_to_float_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "FSO_Comm_Demo")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_wavelength(self):
return self.wavelength
def set_wavelength(self, wavelength):
self.wavelength = wavelength
self.FSO_Comm_Turbulence_ff_0.set_Wavelen(self.wavelength)
self.FSO_Comm_Laser_ff_0.set_Wavelen(self.wavelength)
self.FSO_Comm_FogSmoke_Loss_ff_0.set_Wavelen(self.wavelength)
def get_vis(self):
return self.vis
def set_vis(self, vis):
self.vis = vis
self.FSO_Comm_FogSmoke_Loss_ff_0.set_Vis(self.vis)
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
self.FSO_Comm_Turbulence_ff_0.set_SampRate(self.samp_rate)
self.FSO_Comm_Pointing_Errors_ff_0.set_SampRate(self.samp_rate)
def get_link_len(self):
return self.link_len
def set_link_len(self, link_len):
self.link_len = link_len
self.FSO_Comm_Turbulence_ff_0.set_LinkLen(self.link_len)
self.FSO_Comm_Pointing_Errors_ff_0.set_LinkLen(self.link_len)
self.FSO_Comm_Geometric_Loss_ff_0.set_LinkLen(self.link_len)
self.FSO_Comm_FogSmoke_Loss_ff_0.set_LinkLen(self.link_len)
def get_jitter(self):
return self.jitter
def set_jitter(self, jitter):
self.jitter = jitter
self.FSO_Comm_Pointing_Errors_ff_0.set_Jitter(self.jitter)
def get_Z_0(self):
return self.Z_0
def set_Z_0(self, Z_0):
self.Z_0 = Z_0
self.FSO_Comm_Optical_Receiver_ff_0.set_Imp(self.Z_0)
def get_Tx_Dia(self):
return self.Tx_Dia
def set_Tx_Dia(self, Tx_Dia):
self.Tx_Dia = Tx_Dia
self.FSO_Comm_Pointing_Errors_ff_0.set_Tx_Dia(self.Tx_Dia)
self.FSO_Comm_Geometric_Loss_ff_0.set_Tx_Dia(self.Tx_Dia)
def get_Theta_0(self):
return self.Theta_0
def set_Theta_0(self, Theta_0):
self.Theta_0 = Theta_0
self.FSO_Comm_Pointing_Errors_ff_0.set_Tx_Theta(self.Theta_0)
self.FSO_Comm_Geometric_Loss_ff_0.set_Tx_DivAng(self.Theta_0)
def get_T_0(self):
return self.T_0
def set_T_0(self, T_0):
self.T_0 = T_0
self.FSO_Comm_Turbulence_ff_0.set_TempCorr(self.T_0)
self.FSO_Comm_Pointing_Errors_ff_0.set_TempCorr(self.T_0)
def get_Sample_Offset(self):
return self.Sample_Offset
def set_Sample_Offset(self, Sample_Offset):
self.Sample_Offset = Sample_Offset
def get_Rx_Dia(self):
return self.Rx_Dia
def set_Rx_Dia(self, Rx_Dia):
self.Rx_Dia = Rx_Dia
self.FSO_Comm_Turbulence_ff_0.set_Rx_Dia(self.Rx_Dia)
self.FSO_Comm_Pointing_Errors_ff_0.set_Rx_Dia(self.Rx_Dia)
self.FSO_Comm_Geometric_Loss_ff_0.set_Rx_Dia(self.Rx_Dia)
def get_Resp(self):
return self.Resp
def set_Resp(self, Resp):
self.Resp = Resp
self.FSO_Comm_Optical_Receiver_ff_0.set_Resp(self.Resp)
def get_P_n(self):
return self.P_n
def set_P_n(self, P_n):
self.P_n = P_n
self.FSO_Comm_Optical_Receiver_ff_0.set_P_n(self.P_n)
def get_P_0(self):
return self.P_0
def set_P_0(self, P_0):
self.P_0 = P_0
self.FSO_Comm_Laser_ff_0.set_P_avg(self.P_0)
def get_Gain(self):
return self.Gain
def set_Gain(self, Gain):
self.Gain = Gain
self.FSO_Comm_Optical_Receiver_ff_0.set_G_TIA(self.Gain)
def get_Ext_Ratio(self):
return self.Ext_Ratio
def set_Ext_Ratio(self, Ext_Ratio):
self.Ext_Ratio = Ext_Ratio
self.FSO_Comm_Laser_ff_0.set_ExtRatio(self.Ext_Ratio)
def get_Cn2(self):
return self.Cn2
def set_Cn2(self, Cn2):
self.Cn2 = Cn2
def get_Chunck_Size(self):
return self.Chunck_Size
def set_Chunck_Size(self, Chunck_Size):
self.Chunck_Size = Chunck_Size
def get_BW(self):
return self.BW
def set_BW(self, BW):
self.BW = BW
def main(top_block_cls=FSO_Comm_Demo, options=None):
if StrictVersion("4.5.0") <= StrictVersion(Qt.qVersion()) < StrictVersion("5.0.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.aboutToQuit.connect(quitting)
qapp.exec_()
if __name__ == '__main__':
main()
| 36.247024
| 131
| 0.642417
| 10,598
| 0.870186
| 0
| 0
| 0
| 0
| 0
| 0
| 1,175
| 0.096478
|
ac3ca086610d59e10a3cca75b54708abf363a598
| 6,194
|
py
|
Python
|
Script/WDI_writer_functions.py
|
Riemer1818/Cattlelyst_wikibase_2021
|
1f3e3199391844206e6621e63756461bf984bf36
|
[
"MIT"
] | null | null | null |
Script/WDI_writer_functions.py
|
Riemer1818/Cattlelyst_wikibase_2021
|
1f3e3199391844206e6621e63756461bf984bf36
|
[
"MIT"
] | null | null | null |
Script/WDI_writer_functions.py
|
Riemer1818/Cattlelyst_wikibase_2021
|
1f3e3199391844206e6621e63756461bf984bf36
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from typing import Optional
from wikidataintegrator import wdi_core, wdi_login
import logging
import pickle
__author__ = "Riemer van der Vliet"
__copyright__ = "Copyright 2020, Laboratory of Systems and Synthetic Biology"
__credits__ = ["Riemer van der Vliet", "Jasper Koehorst"]
__license__ = "GPL"
__version__ = "2.0.0"
__maintainer__ = "Riemer van der Vliet"
__email__ = "riemer.vandervliet@wur.nl"
__status__ = "Development"
"""
Helper functions used by the WDI writer script.
"""
def get_properties(endpoint_url: str) -> dict:
"""Finds properties on the endpoint url and returns the IDs
:param endpoint_url: Wikibase SPARQL endpoint
:return: Property lookup dictionary of key property string and value property ID of Wikibase
"""
# placeholder for dictionary
property_lookup = {}
# creates query
query = """SELECT ?property ?label WHERE {
?property a wikibase:Property .
?property rdfs:label ?label .
FILTER (LANG(?label) = "en" )}
"""
# gets results
results = wdi_core.WDItemEngine.execute_sparql_query(query=query, endpoint=endpoint_url)
    # iterate over the results
for result in results["results"]["bindings"]:
label = result["label"]["value"].split("/")[-1]
property_lookup[label] = result["property"]["value"].split("/")[-1]
return property_lookup
def get_items(items: list, endpoint_url: str) -> dict:
"""Gets the IDs for each of the items in the item list. First tries to find it in the pickle file.
:param items: list of items of which IDs need to be traced
:param endpoint_url: Wikibase SPARQL endpoint
:return: item_lookup dictionary of key item string and value item ID of Wikibase
"""
if os.path.isfile("../Parts/item_lookup.pickle"):
with open('../Parts/item_lookup.pickle', 'rb') as handle:
item_lookup = pickle.load(handle)
else:
item_lookup = {}
for item_x in items:
logging.info("Retrieving item " + item_x)
if item_x in item_lookup: continue
item_lookup[item_x] = get_item_by_name(item_x, endpoint_url)
with open('../Parts/item_lookup.pickle', 'wb') as handle:
pickle.dump(item_lookup, handle, protocol=pickle.DEFAULT_PROTOCOL)
return item_lookup
def get_item_by_name(label: str, endpoint_url: str) -> Optional[str]:
"""Finds items on the endpoint url and returns the IDs
:param label: Item label
:param endpoint_url: Wikibase SPARQL endpoint
:return: string of Wikibase ID or None
"""
# set query
query = """
SELECT DISTINCT ?item WHERE {
VALUES ?label { \"""" + label + """\"@en }
?item rdfs:label ?label .
}"""
# get results
try:
results = wdi_core.WDItemEngine.execute_sparql_query(query, endpoint=endpoint_url)
except:
print("Query failed: ")
raise Exception("Query failed")
# parse and return results
for result in results["results"]["bindings"]:
return result["item"]["value"].split("/")[-1]
return None
def prepare(items: list, endpoint_url: str) -> list:
"""Returns a list of lists of items ID and property IDs
:param items: list of items of which IDs need to be traced
:param endpoint_url: Wikibase SPARQL endpoint
:return: list of item dictionary and of property dictionary
"""
return [get_items(items, endpoint_url), get_properties(endpoint_url)]
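# Illustrative call (the endpoint URL is an assumption, not taken from this
# repository; a reachable Wikibase SPARQL endpoint is required):
#   print(get_item_by_name('water', 'https://query.wikidata.org/sparql'))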
| 30.512315
| 102
| 0.66516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,380
| 0.545689
|
ac3d7ed801511882792da9a883bfb2bc98512cd5
| 3,449
|
py
|
Python
|
kitt/callbacks.py
|
David-Ciz/kitt
|
44a6faf7fcf1bc6f3db082debeab0ef4dfcc9c4a
|
[
"MIT"
] | null | null | null |
kitt/callbacks.py
|
David-Ciz/kitt
|
44a6faf7fcf1bc6f3db082debeab0ef4dfcc9c4a
|
[
"MIT"
] | null | null | null |
kitt/callbacks.py
|
David-Ciz/kitt
|
44a6faf7fcf1bc6f3db082debeab0ef4dfcc9c4a
|
[
"MIT"
] | null | null | null |
import heapq
import logging
import os
from tensorflow.keras.callbacks import Callback
class ModelCheckpoint(Callback):
def __init__(
self,
filepath: str,
monitor: str,
mode: str = "max",
save_every_n_epochs: int = None,
save_n_best=1,
save_optimizer=False,
):
"""
:param filepath: Filepath where to save the model. Can contain "epoch" and "<monitor>"
formatting placeholders.
:param monitor: What metric to observe.
:param mode: One of {"min", "max"}. Whether to consider the monitored metric to improve
if it gets lower or higher.
        :param save_n_best: Keep the best N models seen so far.
:param save_every_n_epochs: Save the model every N epochs.
:param save_optimizer: Include optimizer state in the saved model checkpoint.
"""
super().__init__()
self.filepath = str(filepath)
self.monitor = monitor
self.save_n_best = save_n_best or 0
self.save_every_n_epochs = save_every_n_epochs
self.epochs_since_save = 0
self.save_optimizer = save_optimizer
assert self.save_every_n_epochs is None or self.save_every_n_epochs > 0
assert self.save_n_best >= 0
if mode == "max":
self.metric_map_fn = lambda x: x
elif mode == "min":
self.metric_map_fn = lambda x: -x
else:
raise Exception(f"Unknown mode {mode}")
# Invariants
# self.best_queue[0] is the worst saved model
# self.best_queue[-1] is the best saved model
self.best_queue = []
def on_epoch_end(self, epoch, logs=None):
self.epochs_since_save += 1
metric_value = logs[self.monitor]
path = self.get_filepath(epoch + 1, logs=logs)
saved = False
if self.save_every_n_epochs:
if self.epochs_since_save % self.save_every_n_epochs == 0:
self.epochs_since_save = 0
self.save_model(path)
saved = True
if self.save_n_best > 0 and self.is_better(metric_value):
self.push_better(epoch, metric_value, path, saved)
if not saved:
self.save_model(path)
def on_train_end(self, logs=None):
directory = os.path.dirname(self.filepath)
self.save_model(os.path.join(directory, "final.hdf5"))
def is_better(self, metric_value: float):
if len(self.best_queue) < self.save_n_best:
return True
value = self.metric_map_fn(metric_value)
return value > self.best_queue[0][0]
def push_better(self, epoch: int, metric_value: float, path: str, pin=False):
value = self.metric_map_fn(metric_value)
heapq.heappush(self.best_queue, (value, epoch, path, pin))
if len(self.best_queue) > self.save_n_best:
_, _, previous_path, is_pinned = heapq.heappop(self.best_queue)
if not is_pinned:
try:
os.unlink(previous_path)
except IOError as e:
logging.error(
f"Could not remove previously stored model path {previous_path}: {e}"
)
def save_model(self, path: str):
self.model.save(path, overwrite=True, include_optimizer=self.save_optimizer)
def get_filepath(self, epoch, logs):
return self.filepath.format(epoch=epoch, **logs)
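    # Illustrative wiring into a Keras training loop (the model and dataset
    # names below are placeholders, not part of this module):
    #   checkpoint = ModelCheckpoint(
    #       filepath="checkpoints/model-{epoch:02d}-{val_accuracy:.3f}.hdf5",
    #       monitor="val_accuracy", mode="max", save_n_best=3)
    #   model.fit(train_ds, validation_data=val_ds, epochs=10,
    #             callbacks=[checkpoint])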
| 35.556701
| 95
| 0.612351
| 3,359
| 0.973905
| 0
| 0
| 0
| 0
| 0
| 0
| 749
| 0.217164
|
ac3e07e4760f9a790f20faf0e15bb7e637cec1a9
| 3,851
|
py
|
Python
|
migrations/versions/2018_03_30_1f4385bac8f9_change_activity_picture_paths_to_file_.py
|
tch1bo/viaduct
|
bfd37b0a8408b2dd66fb01138163b80ce97699ff
|
[
"MIT"
] | 11
|
2015-04-23T21:57:56.000Z
|
2019-04-28T12:48:58.000Z
|
migrations/versions/2018_03_30_1f4385bac8f9_change_activity_picture_paths_to_file_.py
|
tch1bo/viaduct
|
bfd37b0a8408b2dd66fb01138163b80ce97699ff
|
[
"MIT"
] | 1
|
2016-10-05T14:10:58.000Z
|
2016-10-05T14:12:23.000Z
|
migrations/versions/2018_03_30_1f4385bac8f9_change_activity_picture_paths_to_file_.py
|
tch1bo/viaduct
|
bfd37b0a8408b2dd66fb01138163b80ce97699ff
|
[
"MIT"
] | 3
|
2016-10-05T14:00:42.000Z
|
2019-01-16T14:33:43.000Z
|
"""Change activity picture paths to file ids.
Revision ID: 1f4385bac8f9
Revises: c8cd32037cde
Create Date: 2018-03-30 16:01:56.532893
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
import os
import re
from app import hashfs
from app.models.base_model import BaseEntity
from app.enums import FileCategory
# revision identifiers, used by Alembic.
revision = '1f4385bac8f9'
down_revision = 'c8cd32037cde'
Base = declarative_base()
db = sa
db.Model = Base
db.relationship = relationship
filename_regex = re.compile(r'(.+)\.([^\s.]+)')
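# Illustrative match (hypothetical filename): filename_regex.match('party_2018.JPG')
# yields groups ('party_2018', 'JPG'), so the stored extension becomes 'jpg'.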
class File(db.Model, BaseEntity):
__tablename__ = 'file'
hash = db.Column(db.String(200), nullable=False)
extension = db.Column(db.String(20), nullable=False)
category = db.Column(db.Enum(FileCategory, name='file_category'),
nullable=False)
display_name = db.Column(db.String(200))
class IntermediateActivity(db.Model, BaseEntity):
__tablename__ = 'activity'
picture = db.Column(db.String(255))
picture_file_id = db.Column(db.Integer, db.ForeignKey('file.id'))
picture_file = db.relationship(File, foreign_keys=[picture_file_id],
lazy='joined')
def migrate_files():
picture_dir = 'app/static/activity_pictures/'
activities = db.session.query(IntermediateActivity).all()
total = len(activities)
stepsize = 10
for i, activity in enumerate(activities):
if (i + 1) % stepsize == 0:
print("{}/{}".format(i + 1, total))
if activity.picture is None:
continue
path = os.path.join(picture_dir, activity.picture)
if not os.path.isfile(path):
print("File does not exist:", path)
activity.picture_file = None
continue
with open(path, 'rb') as file_reader:
address = hashfs.put(file_reader)
f = File()
f.category = FileCategory.ACTIVITY_PICTURE
f.hash = address.id
m = filename_regex.match(activity.picture)
if m is not None:
f.extension = m.group(2).lower()
else:
f.extension = ""
activity.picture_file = f
db.session.add(f)
db.session.commit()
def create_session():
connection = op.get_bind()
session_maker = sa.orm.sessionmaker()
session = session_maker(bind=connection)
db.session = session
def upgrade():
create_session()
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('activity', sa.Column('picture_file_id', sa.Integer(), nullable=True))
op.create_foreign_key(op.f('fk_activity_picture_file_id_file'), 'activity', 'file', ['picture_file_id'], ['id'])
# Change ACTIVITY_PICTURES -> ACTIVITY_PICTURE
op.alter_column('file', 'category',
existing_type=mysql.ENUM('UPLOADS', 'EXAMINATION', 'ACTIVITY_PICTURE', 'ALV_DOCUMENT', 'COMPANY_LOGO', 'USER_AVATAR'),
nullable=False)
try:
migrate_files()
except:
op.drop_constraint(op.f('fk_activity_picture_file_id_file'), 'activity', type_='foreignkey')
op.drop_column('activity', 'picture_file_id')
raise
op.drop_column('activity', 'picture')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
raise Exception("Undoing this migration is impossible")
# op.add_column('activity', sa.Column('picture', mysql.VARCHAR(length=255), nullable=True))
# op.drop_constraint(op.f('fk_activity_picture_file_id_file'), 'activity', type_='foreignkey')
# op.drop_column('activity', 'picture_file_id')
# ### end Alembic commands ###
# vim: ft=python
| 27.905797
| 138
| 0.65879
| 643
| 0.16697
| 0
| 0
| 0
| 0
| 0
| 0
| 1,150
| 0.298624
|
ac3e3b4c92edc26c0c53ec7942b32aed32b778c8
| 1,165
|
py
|
Python
|
fapistrano/signal.py
|
liwushuo/fapistrano
|
2a31aad01a04d7ea9108dc6f95aee9a53290459f
|
[
"MIT"
] | 18
|
2016-03-25T09:40:20.000Z
|
2022-02-23T02:09:50.000Z
|
fapistrano/signal.py
|
liwushuo/fapistrano
|
2a31aad01a04d7ea9108dc6f95aee9a53290459f
|
[
"MIT"
] | null | null | null |
fapistrano/signal.py
|
liwushuo/fapistrano
|
2a31aad01a04d7ea9108dc6f95aee9a53290459f
|
[
"MIT"
] | 3
|
2016-03-22T07:41:15.000Z
|
2021-02-25T04:27:53.000Z
|
# -*- coding: utf-8 -*-
from functools import wraps
from .utils import run_function
class Signal(object):
def __init__(self, name, doc=''):
self.name = name
self.doc = doc
self.receivers = {}
class Namespace(dict):
def signal(self, name, doc=None):
try:
return self[name]
except KeyError:
return self.setdefault(name, Signal(name, doc))
namespace = Namespace()
def clear():
namespace.clear()
def emit(event, **data):
if event not in namespace:
return
for id, func in namespace[event].receivers.items():
run_function(func, **data)
def register(event, function):
assert callable(function), 'Function must be callable.'
namespace.signal(event).receivers[id(function)] = function
def listen(event):
def decorator(f):
@wraps(f)
def deco(*args, **kwargs):
register(event, f)
return f(*args, **kwargs)
return deco
return decorator
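# Note: `listen` registers its target lazily -- registration happens on the
# wrapped function's first call, not at decoration time. An illustrative
# (hypothetical) flow, assuming run_function forwards the keyword data:
#   @listen('deploy.finished')
#   def on_finish(**data): ...
#   on_finish()                        # first call registers the receiver
#   emit('deploy.finished', ok=True)   # now reaches on_finish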
if __name__ == '__main__':
def handle_hello(**data):
print 'received data:', data
register('hello', handle_hello)
emit('hello', keyword='world')
| 22.843137
| 62
| 0.611159
| 325
| 0.27897
| 0
| 0
| 113
| 0.096996
| 0
| 0
| 100
| 0.085837
|
ac3e6405e38364554897a98ee0697c92ce3335ab
| 463
|
py
|
Python
|
Pyon exercicios/Exercicios/011.py
|
alefbispo/Exercicios-do-curso-de-Python
|
16cd569ab16542135b834ac8d0cfb0ae84836d53
|
[
"MIT"
] | null | null | null |
Pyon exercicios/Exercicios/011.py
|
alefbispo/Exercicios-do-curso-de-Python
|
16cd569ab16542135b834ac8d0cfb0ae84836d53
|
[
"MIT"
] | null | null | null |
Pyon exercicios/Exercicios/011.py
|
alefbispo/Exercicios-do-curso-de-Python
|
16cd569ab16542135b834ac8d0cfb0ae84836d53
|
[
"MIT"
] | null | null | null |
# ask for the height and width of a wall and report how many litres of paint
# are needed, knowing that each litre of paint covers 2 m2
altura = float(input('What is the height of the wall? '))
largura = float(input('What is the width of the wall? '))
area = altura * largura
tinta = area / 2
print('Your {}x{} wall has an area of {} m²; you will need {:.2f} litres of paint!'.format(altura, largura, area, tinta))
| 42.090909
| 172
| 0.708423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 310
| 0.668103
|
ac3fd84e905bc1166a7d4dcb6bd2d1a33b2c8e12
| 148
|
py
|
Python
|
textutils/pages/views.py
|
sohanur-shanto/Django-Play-With-Text
|
e81177c22e409a584daebd8a826e2aaee14fb59c
|
[
"BSD-3-Clause-Attribution"
] | 2
|
2021-04-09T12:54:26.000Z
|
2021-04-10T07:36:22.000Z
|
textutils/pages/views.py
|
sohanur-shanto/Django-Play-With-Text
|
e81177c22e409a584daebd8a826e2aaee14fb59c
|
[
"BSD-3-Clause-Attribution"
] | null | null | null |
textutils/pages/views.py
|
sohanur-shanto/Django-Play-With-Text
|
e81177c22e409a584daebd8a826e2aaee14fb59c
|
[
"BSD-3-Clause-Attribution"
] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
def funwithmath(request):
return render (request, 'funwithmath.html')
| 24.666667
| 47
| 0.797297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 0.121622
|
ac40152e1997a785e6185c250577f8e42a50d310
| 1,155
|
py
|
Python
|
main.py
|
fthome/repetier_ui
|
49be402ac479f5e066ae27eaa2a8a02473bb8045
|
[
"MIT"
] | null | null | null |
main.py
|
fthome/repetier_ui
|
49be402ac479f5e066ae27eaa2a8a02473bb8045
|
[
"MIT"
] | null | null | null |
main.py
|
fthome/repetier_ui
|
49be402ac479f5e066ae27eaa2a8a02473bb8045
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from repetier_ui import *
import time
import set_ifttt
from FUTIL.my_logging import *
my_logging(console_level = DEBUG, logfile_level = INFO)
HD = repetier_printer (repetier_api(api_key='142a8eed-7d86-4bea-96bc-cfcf5b3ca742'),'HD')
sys.path.insert(0,'/home/pi')
import iftt_key
ifttt0 = set_ifttt.ifttt(iftt_key.key)
def wake_up():
ifttt0.send_cmd("HD_on")
UI = repetier_ui(debug=False, wake_up=wake_up)  # debug = True: gcode is not actually sent
UI.add_action(22,repetier_file_action("extract.gcode",HD))
UI.add_action(27,repetier_file_action("extrude_100_vite.gcode",HD))
UI.add_action(17,repetier_file_action("extrude_50.gcode",HD))
UI.add_action(10,repetier_file_action("goto_z_max.gcode",HD, only_if_has_axis = True))
UI.add_action(19,repetier_file_action("stop_all.gcode",HD))
UI.add_action(18,repetier_file_action("pause.gcode", HD, only_if_printing = True))  # filament presence detection
UI.add_successive_actions(26,repetier_file_action("pause.gcode",HD), repetier_action_action("continueJob",HD))
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
print('interrupted!')
finally:
UI.close()
| 31.216216
| 110
| 0.768831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.25
|
ac40231ed6f638e7905b68291dba05edd0eca13c
| 5,195
|
py
|
Python
|
cows/model/wms.py
|
cedadev/cows
|
db9ed729c886b271ce85355b97e39243081e8246
|
[
"BSD-2-Clause"
] | 2
|
2018-05-09T16:12:43.000Z
|
2018-08-21T17:10:22.000Z
|
cows/model/wms.py
|
cedadev/cows
|
db9ed729c886b271ce85355b97e39243081e8246
|
[
"BSD-2-Clause"
] | null | null | null |
cows/model/wms.py
|
cedadev/cows
|
db9ed729c886b271ce85355b97e39243081e8246
|
[
"BSD-2-Clause"
] | null | null | null |
# BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
# Copyright (C) 2007 STFC & NERC (Science and Technology Facilities Council).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later.
# http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
"""
Extends cows classes where necessary for implementing WMS 1.3.0
:author: Stephen Pascoe
"""
from cows.model.contents import DatasetSummary
from cows.model.domain import Domain
class WmsDatasetSummary(DatasetSummary):
"""
We don't know how WMS will map Layer attributes onto an extension of DatasetSummary
so a pragmatic approach is taken here.
:ivar CRSs:
:type CRSs: iterable of str
:ivar styles: Style descriptors, default ['']
:type styles: iterable of Style objects
:ivar dimensions:
:type dimensions: dictionary mapping dimension names to Dimension objects
:ivar attribution:
:type attribution: None or Attribution object
:ivar authorityURLs:
:type authorityURLs: iterable of AuthorityUrl objects
:ivar dataURLs:
:type dataURLs: dictionary mapping layer names to DataUrl objects
:ivar featureListURLs:
    :type featureListURLs: iterable of FeatureListURL objects
:ivar minScaleDenominator:
:type minScaleDenominator: None or double
:ivar maxScaleDenominator:
:type maxScaleDenominator: None or double
:ivar queryable:
:type queryable: Boolean
"""
def __init__(self, CRSs=[], styles=[''], dimensions={}, attribution=None, authorityURLs=[],
dataURLs=[], featureListURLs=[], metadataURLs=[], children=None,
minScaleDenominator=None, maxScaleDenominator=None,
queryable=False, **kw):
super(WmsDatasetSummary, self).__init__(**kw)
self.CRSs = CRSs
self.styles = styles
self.dimensions = dimensions
self.attribution = attribution
self.authorityURLs = authorityURLs
self.dataURLs = dataURLs
self.featureListURLs = featureListURLs
self.minScaleDenominator = minScaleDenominator
self.maxScaleDenominator = maxScaleDenominator
self.queryable = queryable
self.metadataURLs = metadataURLs
if children is None:
self.children = []
else:
self.children = children
class Style(object):
"""
:ivar name:
:type name: str
:ivar title:
:type title: str
:ivar abstract:
:type abstract: None or str
:ivar legendURLs:
    :type legendURLs: iterable of LegendURL objects
:ivar styleSheetURL:
:type styleSheetURL: None or FormattedURL object
:ivar styleURL:
:type styleURL: None or FormattedURL object
"""
def __init__(self, name, title, abstract=None, legendURLs=[], styleSheetURL=None,
styleURL=None):
self.name = name
self.title = title
self.abstract = abstract
self.legendURLs = legendURLs
self.styleSheetURL = styleSheetURL
self.styleURL = styleURL
class FormattedURL(object):
"""
:ivar format:
:type format: str
:ivar onlineResource:
:type onlineResource: OnlineResource object
"""
def __init__(self, format, onlineResource):
self.format = format
self.onlineResource = onlineResource
class LegendURL(FormattedURL):
"""
:ivar width:
:type width: None or int
:ivar height:
:type height: None or int
"""
def __init__(self, width, height, **kw):
super(LegendURL, self).__init__(**kw)
self.width = width
self.height = height
class Dimension(Domain):
"""
Use Domain attributes where possible.
:ivar multipleValues:
:type multipleValues: boolean (default False)
:ivar nearestValue:
:type nearestValue: boolean (default False)
:ivar current:
:type current: boolean (default False)
:ivar unitSymbol: Unit symbol could probably be taken from the Domain
structure but it isn't clear where (the meaning or dataType
attributes?).
:type unitSymbol: str
"""
def __init__(self, multipleValues=False, nearestValue=False,
current=False, unitSymbol=None, **kw):
super(Dimension, self).__init__(**kw)
self.multipleValues = multipleValues
self.nearestValue = nearestValue
self.current = current
self.unitSymbol = unitSymbol
class DataURL(FormattedURL):
"""
:ivar width:
:type width: None or int
:ivar height:
:type height: None or int
"""
def __init__(self, width=None, height=None, **kw):
super(DataURL, self).__init__(**kw)
self.width = width
self.height = height
class MetadataURL(FormattedURL):
"""
:ivar metadataType:
:type metadataType: None or str
"""
def __init__(self, metadataType=None, **kw):
super(MetadataURL, self).__init__(**kw)
self.type = metadataType
#
#!TODO: Other objects referenced by WmsDatasetSummary
#
| 30.739645
| 95
| 0.663523
| 4,502
| 0.866603
| 0
| 0
| 0
| 0
| 0
| 0
| 2,734
| 0.526275
|
ac42226253879560e3b7ec3fc4e10b477bc7a82f
| 2,942
|
py
|
Python
|
utilities/prev_projects/DATAFIT/VIEW/WINDOWSVIEW/GraphView.py
|
Saldenisov/pyconlyse
|
1de301b4a4c15ee0bd19034aa8d5da1beacfd124
|
[
"MIT"
] | null | null | null |
utilities/prev_projects/DATAFIT/VIEW/WINDOWSVIEW/GraphView.py
|
Saldenisov/pyconlyse
|
1de301b4a4c15ee0bd19034aa8d5da1beacfd124
|
[
"MIT"
] | null | null | null |
utilities/prev_projects/DATAFIT/VIEW/WINDOWSVIEW/GraphView.py
|
Saldenisov/pyconlyse
|
1de301b4a4c15ee0bd19034aa8d5da1beacfd124
|
[
"MIT"
] | null | null | null |
from PyQt5.Qt import QMainWindow
from prev_projects.DATAFIT.UTILITY.OBSERVER import GraphObserver
from prev_projects.DATAFIT.UTILITY.META import Meta
from prev_projects.DATAFIT.VIEW.UI import Ui_GraphWindow
class GraphView(QMainWindow, GraphObserver, metaclass=Meta):
"""
    Represents the graphical view of experimental data.
"""
def __init__(self, in_controller, in_parameters=None, parent=None):
super(QMainWindow, self).__init__(parent)
self.controller = in_controller
self.model = self.controller.model
self.ui = Ui_GraphWindow()
self.ui.setupUi(self, in_parameters)
self.model.addObserver(self)
#
self.ui.datacanvas.mpl_connect('key_press_event', self.controller.key_pressed)
self.ui.datacanvas.mpl_connect('key_release_event', self.controller.key_released)
self.ui.datacanvas.mpl_connect('button_press_event', self.controller.mouse_pressed)
self.ui.datacanvas.mpl_connect('motion_notify_event', self.controller.mouse_moved)
self.ui.kinetics_slider.ValueChanged.connect(self.controller.slider_moved_Y)
self.ui.kineticscanvas.mpl_connect('key_press_event',self.controller.key_pressed_kinetics)
self.ui.kineticscanvas.mpl_connect('pick_event', self.controller.on_pick_kinetics)
self.ui.spectracanvas.mpl_connect('pick_event', self.controller.on_pick_spectrum)
self.ui.spectrum_slider.ValueChanged.connect(self.controller.slider_moved_X)
self.ui.data_colorbar_slider.ValueChanged.connect(self.controller.slider_colorbar_moved)
self.ui.spectracanvas.mpl_connect('key_press_event',self.controller.key_pressed_spectra)
self.ui.button_Fit.clicked.connect(self.controller.fit_clicked)
self.ui.list_fits.itemDoubleClicked.connect(self.controller.lfits_clicked)
self.ui.button_Delete.clicked.connect(self.controller.delete_clicked)
self.ui.button_Save.clicked.connect(self.controller.save_clicked)
def closeEvent(self, event):
self.controller.quit_clicked(event)
def cursorsChanged(self, who=None):
"""
        Called when cursor positions change.
"""
cursors = self.model.cursors
if not who:
self.ui.datacanvas.update_figure()
self.ui.spectracanvas.update_figure()
self.ui.kineticscanvas.update_figure()
self.ui.kinetics_slider.setStart(int(cursors['y1']))
self.ui.kinetics_slider.setEnd(int(cursors['y2']))
self.ui.kinetics_slider.update_Sliderpos()
self.ui.spectrum_slider.setStart(int(cursors['x1']))
self.ui.spectrum_slider.setEnd(int(cursors['x2']))
self.ui.spectrum_slider.update_Sliderpos()
elif who == 'kinetics':
self.ui.kineticscanvas.update_figure()
elif who == 'spectra':
self.ui.spectracanvas.update_figure()
| 40.30137
| 98
| 0.71516
| 2,731
| 0.92828
| 0
| 0
| 0
| 0
| 0
| 0
| 308
| 0.104691
|
ac435b3434ef327610b43e5ed8a12c8f4b36a43d
| 1,205
|
py
|
Python
|
src/normalizer.py
|
lucassouzamatos/water-potability-ai
|
6714e894b4575a58e35cc6e1cac699f0f5f1e9bc
|
[
"MIT"
] | null | null | null |
src/normalizer.py
|
lucassouzamatos/water-potability-ai
|
6714e894b4575a58e35cc6e1cac699f0f5f1e9bc
|
[
"MIT"
] | null | null | null |
src/normalizer.py
|
lucassouzamatos/water-potability-ai
|
6714e894b4575a58e35cc6e1cac699f0f5f1e9bc
|
[
"MIT"
] | null | null | null |
import pandas as pd
class Normalizer:
    csv_data = 'dataset/water_potability.csv'  # path to the source dataset
def __init__(self) -> None:
self.dataset = pd.read_csv(self.csv_data)
self.__normalize_data__()
self.__separate__()
    '''
    Convert every column to numeric and fill missing values with the mean
    of the corresponding Potability class.
    '''
def __normalize_data__(self) -> None:
self.dataset = self.dataset.apply(pd.to_numeric)
self.dataset['ph'] = self.dataset['ph'].fillna(self.dataset.groupby('Potability')['ph'].transform('mean'))
self.dataset['Sulfate'] = self.dataset['Sulfate'].fillna(self.dataset.groupby('Potability')['Sulfate'].transform('mean'))
self.dataset['Trihalomethanes'] = self.dataset['Trihalomethanes'].fillna(self.dataset.groupby('Potability')['Trihalomethanes'].transform('mean'))
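        # The fillna + groupby/transform pattern above replaces each missing
        # value with the mean of its Potability class. Standalone sketch with
        # illustrative values:
        #   df = pd.DataFrame({'Potability': [0, 0, 1, 1],
        #                      'ph': [7.0, None, 5.0, None]})
        #   df['ph'] = df['ph'].fillna(
        #       df.groupby('Potability')['ph'].transform('mean'))
        #   # missing ph -> 7.0 for class 0 and 5.0 for class 1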
    '''
    Split the dataset into potable and non-potable subsets.
    '''
def __separate__(self):
self.dataset_potable = self.dataset.loc[self.dataset['Potability'] == 1]
self.dataset_unpotable = self.dataset.loc[self.dataset['Potability'] == 0]
self.dataset_potable = self.dataset_potable.reset_index()
self.dataset_unpotable = self.dataset_unpotable.reset_index()
if __name__ == '__main__':
normalizer = Normalizer()
| 40.166667
| 149
| 0.712863
| 1,128
| 0.9361
| 0
| 0
| 0
| 0
| 0
| 0
| 340
| 0.282158
|
ac44378df9c1e3bccd01c971db20ecdd0d460d5a
| 541
|
py
|
Python
|
CoProcessing/PythonCatalyst/Testing/Cxx/outputcheck.py
|
brown-ccv/paraview-scalable
|
64b221a540737d2ac94a120039bd8d1e661bdc8f
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2019-09-27T08:04:34.000Z
|
2019-10-16T22:30:54.000Z
|
CoProcessing/PythonCatalyst/Testing/Cxx/outputcheck.py
|
sakjain92/paraview
|
f3af0cd9f6750e24ad038eac573b870c88d6b7dd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
CoProcessing/PythonCatalyst/Testing/Cxx/outputcheck.py
|
sakjain92/paraview
|
f3af0cd9f6750e24ad038eac573b870c88d6b7dd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-03-13T03:35:01.000Z
|
2021-03-13T03:35:01.000Z
|
import sys
if len(sys.argv) != 2:
print("command is 'python <vtk file>'")
sys.exit(1)
from paraview.simple import *
proxy = OpenDataFile(sys.argv[1])
r = proxy.GetClientSideObject()
r.Update()
g = r.GetOutput()
if g.GetNumberOfPoints() != 441 or g.GetNumberOfCells() != 800:
print('Output grid is incorrect. The number of points is %d '\
'but should be 441 and the number of cells is %d ' \
'but should be 800.' % (g.GetNumberOfPoints(), g.GetNumberOfCells()))
sys.exit(1)
else:
print("All's good!!!!")
| 25.761905
| 77
| 0.64695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 173
| 0.319778
|
ac455afb457dd1a64725218027c80809e20d17f1
| 3,594
|
py
|
Python
|
mrl/g_models/generative_base.py
|
DarkMatterAI/mrl
|
e000c3570d4461c3054c882697cce55217ede552
|
[
"MIT"
] | 4
|
2021-11-16T09:29:55.000Z
|
2021-12-27T17:55:32.000Z
|
mrl/g_models/generative_base.py
|
DarkMatterAI/mrl
|
e000c3570d4461c3054c882697cce55217ede552
|
[
"MIT"
] | null | null | null |
mrl/g_models/generative_base.py
|
DarkMatterAI/mrl
|
e000c3570d4461c3054c882697cce55217ede552
|
[
"MIT"
] | 3
|
2021-11-16T09:41:41.000Z
|
2021-12-27T17:55:33.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/09_generative_models.generative_base.ipynb (unless otherwise specified).
__all__ = ['GenerativeModel', 'beam_search']
# Cell
from ..imports import *
from ..torch_imports import *
from ..torch_core import *
from ..layers import *
# Cell
class GenerativeModel(nn.Module):
'''
GenerativeModel - base generative model class
'''
def __init__(self):
super().__init__()
def forward(self, x):
raise NotImplementedError
def x_to_latent(self, x):
'''
x_to_latent - convert `x` to a latent vector
Inputs:
- `x`: `x` comes from a Dataloader. The specific
form of `x` depends on the dataloader used
Returns:
If the model in question makes use of latent vectors
for sampling or reconstruction, the function should
return a batch of latent vectors. If latent vectors
are not compatible, the function should return None
'''
raise NotImplementedError
def sample(self, **sample_kwargs):
'''
        sample - sample items from the model
'''
raise NotImplementedError
def sample_no_grad(self, **sample_kwargs):
'no grad wrapper for sample'
with torch.no_grad():
return self.sample(**sample_kwargs)
def get_rl_tensors(self):
'''
get_rl_tensors - generate tensors needed in the training loop
'''
raise NotImplementedError
# Cell
def beam_search(model, seed_ints, k, beam_size, sl, temperature, pad_idx=None):
'''
beam_search - perform beam search using `model`
Inputs:
- `model nn.Module`: model
- `seed_ints torch.Longtensor`: seed sequence
- `k int`: top k beam sampling
- `beam_size int`: maximum number of beams to retain
- `sl int`: max sequence length
- `temperature float`: sample temperature
- `pad_idx Optional[int]`: pad index if applicable
'''
# currently only works for LSTM_LM. TODO: work for all generative models
current_device = next(model.parameters()).device
if seed_ints.ndim==1:
seed_ints = seed_ints.unsqueeze(0)
preds = seed_ints.repeat(k,1)
preds = to_device(preds, current_device)
idxs = preds[:,-1].unsqueeze(-1)
lps = idxs.new_zeros((k, 1)).float()
    hiddens = None  # recurrent state; assumed to be initialised by the model when None
    with torch.no_grad():
        for i in range(sl):
            x, hiddens, encoded = model._forward(idxs, hiddens)
x.div_(temperature)
log_probs = F.log_softmax(x, -1)
values, indices = log_probs.topk(k, dim=-1)
lps = torch.cat([lps.unsqueeze(-1).repeat(1,1,values.shape[-1]), -values], 1)
current_sl = lps.shape[1]
lps = lps.permute(0,2,1).reshape(-1,current_sl)
preds = torch.cat([preds[:,None].expand(preds.size(0), k , preds.size(1)),
indices.squeeze(1)[:,:,None].expand(preds.size(0), k, 1),], dim=2)
preds = preds.view(-1, preds.size(2))
scores = lps.sum(-1)
indices_idx = torch.arange(0,preds.size(0))[:,None].expand(preds.size(0), k).contiguous().view(-1)
sort_idx = scores.argsort()[:beam_size]
preds = preds[sort_idx]
lps = lps[sort_idx]
idxs = preds[:,-1].unsqueeze(-1)
hiddens = [(i[0][:, indices_idx[sort_idx], :],
i[1][:, indices_idx[sort_idx], :]) for i in hiddens]
if pad_idx is not None:
if (preds[:,-1]==pad_idx).all():
break
return preds, -lps
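# Illustrative call (model, seed and vocabulary are placeholders; per the note
# above, only LSTM_LM-style models are currently supported):
#   seqs, neg_logps = beam_search(model, seed, k=5, beam_size=10,
#                                 sl=64, temperature=1.0, pad_idx=0)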
| 28.299213
| 120
| 0.598219
| 1,206
| 0.335559
| 0
| 0
| 0
| 0
| 0
| 0
| 1,323
| 0.368114
|
ac45e6abbbb88f1ae1939bcf71db99437c006d19
| 154
|
py
|
Python
|
desafios/des003/des003_p03.py
|
brenoedl0/python
|
92ee4ea141584e0bd140449c093f871c2140b1a5
|
[
"MIT"
] | null | null | null |
desafios/des003/des003_p03.py
|
brenoedl0/python
|
92ee4ea141584e0bd140449c093f871c2140b1a5
|
[
"MIT"
] | null | null | null |
desafios/des003/des003_p03.py
|
brenoedl0/python
|
92ee4ea141584e0bd140449c093f871c2140b1a5
|
[
"MIT"
] | null | null | null |
nota1 = float(input('grade 1: '))
nota2 = float(input('grade 2: '))
media = (nota1 + nota2) / 2
print('The average of grade 1 and grade 2 is {}'.format(media))
| 30.8
| 61
| 0.62987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.393548
|
ac460ebb2a0293670e0c132534dbc9da8b9efb13
| 5,588
|
py
|
Python
|
rio_tiler/mosaic/methods/defaults.py
|
vincentsarago/rio-tiler
|
21022a0766009a64acf0038dc6adae33d9831a41
|
[
"BSD-3-Clause"
] | 77
|
2017-10-12T18:17:14.000Z
|
2019-01-17T15:39:24.000Z
|
rio_tiler/mosaic/methods/defaults.py
|
vincentsarago/rio-tiler
|
21022a0766009a64acf0038dc6adae33d9831a41
|
[
"BSD-3-Clause"
] | 40
|
2017-10-17T08:31:51.000Z
|
2019-01-11T22:00:44.000Z
|
rio_tiler/mosaic/methods/defaults.py
|
vincentsarago/rio-tiler
|
21022a0766009a64acf0038dc6adae33d9831a41
|
[
"BSD-3-Clause"
] | 23
|
2017-10-13T21:41:08.000Z
|
2019-01-09T06:08:27.000Z
|
"""rio_tiler.mosaic.methods.defaults: default mosaic filling methods."""
import numpy
from .base import MosaicMethodBase
class FirstMethod(MosaicMethodBase):
"""Feed the mosaic tile with the first pixel available."""
def __init__(self):
"""Overwrite base and init First method."""
super(FirstMethod, self).__init__()
self.exit_when_filled = True
def feed(self, tile):
"""Add data to tile."""
if self.tile is None:
self.tile = tile
pidex = self.tile.mask & ~tile.mask
mask = numpy.where(pidex, tile.mask, self.tile.mask)
self.tile = numpy.ma.where(pidex, tile, self.tile)
self.tile.mask = mask
class HighestMethod(MosaicMethodBase):
"""Feed the mosaic tile with the highest pixel values."""
def feed(self, tile):
"""Add data to tile."""
if self.tile is None:
self.tile = tile
pidex = (
numpy.bitwise_and(tile.data > self.tile.data, ~tile.mask) | self.tile.mask
)
mask = numpy.where(pidex, tile.mask, self.tile.mask)
self.tile = numpy.ma.where(pidex, tile, self.tile)
self.tile.mask = mask
class LowestMethod(MosaicMethodBase):
"""Feed the mosaic tile with the lowest pixel values."""
def feed(self, tile):
"""Add data to tile."""
if self.tile is None:
self.tile = tile
pidex = (
numpy.bitwise_and(tile.data < self.tile.data, ~tile.mask) | self.tile.mask
)
mask = numpy.where(pidex, tile.mask, self.tile.mask)
self.tile = numpy.ma.where(pidex, tile, self.tile)
self.tile.mask = mask
class MeanMethod(MosaicMethodBase):
"""Stack the tiles and return the Mean pixel value."""
def __init__(self, enforce_data_type=True):
"""Overwrite base and init Mean method."""
super(MeanMethod, self).__init__()
self.enforce_data_type = enforce_data_type
self.tile = []
@property
def data(self):
"""Return data and mask."""
if self.tile:
tile = numpy.ma.mean(numpy.ma.stack(self.tile, axis=0), axis=0)
if self.enforce_data_type:
tile = tile.astype(self.tile[0].dtype)
return tile.data, (~tile.mask[0] * 255).astype(tile.dtype)
else:
return None, None
def feed(self, tile):
"""Add data to tile."""
self.tile.append(tile)
class MedianMethod(MosaicMethodBase):
"""Stack the tiles and return the Median pixel value."""
def __init__(self, enforce_data_type=True):
"""Overwrite base and init Median method."""
super(MedianMethod, self).__init__()
self.enforce_data_type = enforce_data_type
self.tile = []
@property
def data(self):
"""Return data and mask."""
if self.tile:
tile = numpy.ma.median(numpy.ma.stack(self.tile, axis=0), axis=0)
if self.enforce_data_type:
tile = tile.astype(self.tile[0].dtype)
return tile.data, (~tile.mask[0] * 255).astype(tile.dtype)
else:
return None, None
def feed(self, tile):
"""Create a stack of tile."""
self.tile.append(tile)
class StdevMethod(MosaicMethodBase):
"""Stack the tiles and return the Standard Deviation value."""
def __init__(self, enforce_data_type=True):
"""Overwrite base and init Stdev method."""
super(StdevMethod, self).__init__()
self.tile = []
@property
def data(self):
"""Return data and mask."""
if self.tile:
tile = numpy.ma.std(numpy.ma.stack(self.tile, axis=0), axis=0)
return tile.data, (~tile.mask[0] * 255).astype(tile.dtype)
else:
return None, None
def feed(self, tile):
"""Add data to tile."""
self.tile.append(tile)
class LastBandHigh(MosaicMethodBase):
"""Feed the mosaic tile using the last band as decision factor."""
@property
def data(self):
"""Return data and mask."""
if self.tile is not None:
return (
self.tile.data[:-1],
(~self.tile.mask[0] * 255).astype(self.tile.dtype),
)
else:
return None, None
def feed(self, tile: numpy.ma.MaskedArray):
"""Add data to tile."""
if self.tile is None:
self.tile = tile
return
pidex = (
numpy.bitwise_and(tile.data[-1] > self.tile.data[-1], ~tile.mask)
| self.tile.mask
)
mask = numpy.where(pidex, tile.mask, self.tile.mask)
self.tile = numpy.ma.where(pidex, tile, self.tile)
self.tile.mask = mask
class LastBandLow(MosaicMethodBase):
"""Feed the mosaic tile using the last band as decision factor."""
@property
def data(self):
"""Return data and mask."""
if self.tile is not None:
return (
self.tile.data[:-1],
(~self.tile.mask[0] * 255).astype(self.tile.dtype),
)
else:
return None, None
def feed(self, tile: numpy.ma.MaskedArray):
"""Add data to tile."""
if self.tile is None:
self.tile = tile
return
pidex = (
numpy.bitwise_and(tile.data[-1] < self.tile.data[-1], ~tile.mask)
| self.tile.mask
)
mask = numpy.where(pidex, tile.mask, self.tile.mask)
self.tile = numpy.ma.where(pidex, tile, self.tile)
self.tile.mask = mask
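# A minimal usage sketch (assumes the base class exposes the `data` property
# used by the stats methods above); the two 1-band 2x2 masked "tiles" are
# synthetic (left commented):
# t1 = numpy.ma.MaskedArray(numpy.full((1, 2, 2), 1, dtype="uint8"),
#                           mask=numpy.array([[[False, True], [False, False]]]))
# t2 = numpy.ma.MaskedArray(numpy.full((1, 2, 2), 9, dtype="uint8"),
#                           mask=numpy.zeros((1, 2, 2), dtype=bool))
# method = FirstMethod()
# method.feed(t1)
# method.feed(t2)  # only fills the pixel t1 was missing
# data, mask = method.data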
| 29.256545
| 86
| 0.573193
| 5,441
| 0.973694
| 0
| 0
| 1,589
| 0.284359
| 0
| 0
| 1,044
| 0.186829
|
ac4628419c7ff8fb2c36d8b816ed31e520537c50
| 13,442
|
py
|
Python
|
apps/render_data_.py
|
ckxz/PIFu
|
d1cf528652ba538368ec114ddafcbea5c73d7e3d
|
[
"MIT"
] | null | null | null |
apps/render_data_.py
|
ckxz/PIFu
|
d1cf528652ba538368ec114ddafcbea5c73d7e3d
|
[
"MIT"
] | null | null | null |
apps/render_data_.py
|
ckxz/PIFu
|
d1cf528652ba538368ec114ddafcbea5c73d7e3d
|
[
"MIT"
] | null | null | null |
import os, sys
import random
import argparse
from pathlib import Path
from tqdm import tqdm
import cv2
import math
import pyexr
import shutil
import numpy as np
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from lib.renderer.camera import Camera
from lib.renderer.mesh import load_obj_mesh, compute_tangent, compute_normal, load_obj_mesh_mtl
# from data.config import raw_dataset, render_dataset, archive_dataset, model_list, zip_path
def make_rotate(rx, ry, rz):
sinX = np.sin(rx)
sinY = np.sin(ry)
sinZ = np.sin(rz)
cosX = np.cos(rx)
cosY = np.cos(ry)
cosZ = np.cos(rz)
Rx = np.zeros((3, 3))
Rx[0, 0] = 1.0
Rx[1, 1] = cosX
Rx[1, 2] = -sinX
Rx[2, 1] = sinX
Rx[2, 2] = cosX
Ry = np.zeros((3, 3))
Ry[0, 0] = cosY
Ry[0, 2] = sinY
Ry[1, 1] = 1.0
Ry[2, 0] = -sinY
Ry[2, 2] = cosY
Rz = np.zeros((3, 3))
Rz[0, 0] = cosZ
Rz[0, 1] = -sinZ
Rz[1, 0] = sinZ
Rz[1, 1] = cosZ
Rz[2, 2] = 1.0
R = np.matmul(np.matmul(Rz, Ry), Rx)
return R
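# A quick sanity check of the convention above (R = Rz @ Ry @ Rx, angles in
# radians): a 90-degree yaw maps +x onto -z (left commented):
# R = make_rotate(0, math.radians(90), 0)
# assert np.allclose(R.dot(np.array([1.0, 0.0, 0.0])), [0.0, 0.0, -1.0])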
def rotateSH(SH, R):
SHn = SH
# 1st order
SHn[1] = R[1, 1] * SH[1] - R[1, 2] * SH[2] + R[1, 0] * SH[3]
SHn[2] = -R[2, 1] * SH[1] + R[2, 2] * SH[2] - R[2, 0] * SH[3]
SHn[3] = R[0, 1] * SH[1] - R[0, 2] * SH[2] + R[0, 0] * SH[3]
# 2nd order
SHn[4:, 0] = rotateBand2(SH[4:, 0], R)
SHn[4:, 1] = rotateBand2(SH[4:, 1], R)
SHn[4:, 2] = rotateBand2(SH[4:, 2], R)
return SHn
def rotateBand2(x, R):
s_c3 = 0.94617469575
s_c4 = -0.31539156525
s_c5 = 0.54627421529
s_c_scale = 1.0 / 0.91529123286551084
s_c_scale_inv = 0.91529123286551084
s_rc2 = 1.5853309190550713 * s_c_scale
s_c4_div_c3 = s_c4 / s_c3
s_c4_div_c3_x2 = (s_c4 / s_c3) * 2.0
s_scale_dst2 = s_c3 * s_c_scale_inv
s_scale_dst4 = s_c5 * s_c_scale_inv
sh0 = x[3] + x[4] + x[4] - x[1]
sh1 = x[0] + s_rc2 * x[2] + x[3] + x[4]
sh2 = x[0]
sh3 = -x[3]
sh4 = -x[1]
r2x = R[0][0] + R[0][1]
r2y = R[1][0] + R[1][1]
r2z = R[2][0] + R[2][1]
r3x = R[0][0] + R[0][2]
r3y = R[1][0] + R[1][2]
r3z = R[2][0] + R[2][2]
r4x = R[0][1] + R[0][2]
r4y = R[1][1] + R[1][2]
r4z = R[2][1] + R[2][2]
sh0_x = sh0 * R[0][0]
sh0_y = sh0 * R[1][0]
d0 = sh0_x * R[1][0]
d1 = sh0_y * R[2][0]
d2 = sh0 * (R[2][0] * R[2][0] + s_c4_div_c3)
d3 = sh0_x * R[2][0]
d4 = sh0_x * R[0][0] - sh0_y * R[1][0]
sh1_x = sh1 * R[0][2]
sh1_y = sh1 * R[1][2]
d0 += sh1_x * R[1][2]
d1 += sh1_y * R[2][2]
d2 += sh1 * (R[2][2] * R[2][2] + s_c4_div_c3)
d3 += sh1_x * R[2][2]
d4 += sh1_x * R[0][2] - sh1_y * R[1][2]
sh2_x = sh2 * r2x
sh2_y = sh2 * r2y
d0 += sh2_x * r2y
d1 += sh2_y * r2z
d2 += sh2 * (r2z * r2z + s_c4_div_c3_x2)
d3 += sh2_x * r2z
d4 += sh2_x * r2x - sh2_y * r2y
sh3_x = sh3 * r3x
sh3_y = sh3 * r3y
d0 += sh3_x * r3y
d1 += sh3_y * r3z
d2 += sh3 * (r3z * r3z + s_c4_div_c3_x2)
d3 += sh3_x * r3z
d4 += sh3_x * r3x - sh3_y * r3y
sh4_x = sh4 * r4x
sh4_y = sh4 * r4y
d0 += sh4_x * r4y
d1 += sh4_y * r4z
d2 += sh4 * (r4z * r4z + s_c4_div_c3_x2)
d3 += sh4_x * r4z
d4 += sh4_x * r4x - sh4_y * r4y
dst = x
dst[0] = d0
dst[1] = -d1
dst[2] = d2 * s_scale_dst2
dst[3] = -d3
dst[4] = d4 * s_scale_dst4
return dst
def render_prt_ortho(out_path, obj_uv_filespath, prep_filespath, shs, rndr, rndr_uv, im_size, angl_step=4, n_light=1,
pitch=[0]):
    # use the function parameter (not the module-level global) to build paths
    basename = obj_uv_filespath.split('/')[-1]
    geo_path = Path(os.path.join(out_path, 'GEO', 'OBJ', basename))
    param_path = Path(os.path.join(out_path, 'PARAM', basename))
    render_path = Path(os.path.join(out_path, 'RENDER', basename))
    mask_path = Path(os.path.join(out_path, 'MASK', basename))
    uv_render_path = Path(os.path.join(out_path, 'UV_RENDER', basename))
    uv_mask_path = Path(os.path.join(out_path, 'UV_MASK', basename))
    uv_pos_path = Path(os.path.join(out_path, 'UV_POS', basename))
    uv_normal_path = Path(os.path.join(out_path, 'UV_NORMAL', basename))
    if os.path.exists(os.path.join(geo_path, basename + '.obj')) and \
            os.path.exists(os.path.join(param_path, '359_0_00.npy')) and \
            os.path.exists(os.path.join(render_path, '359_0_00.jpg')) and \
            os.path.exists(os.path.join(mask_path, '359_0_00.png')) and \
            os.path.exists(os.path.join(uv_render_path, '359_0_00.jpg')) and \
            os.path.exists(os.path.join(uv_mask_path, '00.png')) and \
            os.path.exists(os.path.join(uv_pos_path, '00.exr')) and \
            os.path.exists(os.path.join(uv_normal_path, '00.png')):
print('Files exist, stepping out.')
return
else:
os.makedirs(geo_path, exist_ok=True)
os.makedirs(param_path, exist_ok=True)
os.makedirs(render_path, exist_ok=True)
os.makedirs(mask_path, exist_ok=True)
os.makedirs(uv_render_path, exist_ok=True)
os.makedirs(uv_mask_path, exist_ok=True)
os.makedirs(uv_pos_path, exist_ok=True)
os.makedirs(uv_normal_path, exist_ok=True)
cam = Camera(width=im_size, height=im_size)
cam.ortho_ratio = 0.4 * (512 / im_size)
cam.near = -100
cam.far = 100
cam.sanity_check()
# set path for obj, prt
mesh_file = obj_uv_filespath + '.obj'
# mesh_file = '/content/drive/My Drive/untitled.obj'
if not os.path.exists(mesh_file):
print('ERROR: obj file does not exist!!', mesh_file)
return
shutil.copy(mesh_file, os.path.join(geo_path, mesh_file.split('/')[-1]))
#with open ('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/PIFu/mesh.txt', 'w') as f:
# f.write('Mesh copied.')
text_file = obj_uv_filespath + '.png'
# text_file = '/content/drive/My Drive/PIFuHD/decimated_dataset/0/Bust/"The Younger Memnon", Colossal bust of Ramesses II.png'
if not os.path.exists(text_file):
print('ERROR: dif file does not exist!!', text_file)
return
prt_file = prep_filespath + 'bounce.txt'
print(prt_file)
# prt_file = '/content/drive/My Drive/PIFuHD/preprocessing/prt_util_decimated/0/Bust/"The Younger Memnon", Colossal bust of Ramesses II__bounce.txt'
if not os.path.exists(prt_file):
print('ERROR: prt file does not exist!!!', prt_file)
return
face_prt_file = prep_filespath + 'face.npy'
# face_prt_file = '/content/drive/My Drive/PIFuHD/preprocessing/prt_util_decimated/0/Bust/"The Younger Memnon", Colossal bust of Ramesses II__face.npy'
if not os.path.exists(face_prt_file):
print('ERROR: face prt file does not exist!!!', prt_file)
return
texture_image = cv2.imread(text_file)
texture_image = cv2.cvtColor(texture_image, cv2.COLOR_BGR2RGB)
vertices, faces, normals, faces_normals, textures, face_textures = load_obj_mesh(mesh_file, with_normal=True,
with_texture=True)
print(
f'vertices: {vertices.shape}, faces: {faces.shape}, normals: {normals.shape}, face_normals: {faces_normals.shape}, textures: {textures.shape}, face_textures: {face_textures.shape}')
# vertices, faces, normals, face_normals = load_obj_mesh(mesh_file, with_normal=True, with_texture=False)
vmin = vertices.min(0)
vmax = vertices.max(0)
up_axis = 1 if (vmax - vmin).argmax() == 1 else 2
vmed = np.median(vertices, 0)
vmed[up_axis] = 0.5 * (vmax[up_axis] + vmin[up_axis])
y_scale = 180 / (vmax[up_axis] - vmin[up_axis])
rndr.set_norm_mat(y_scale, vmed)
rndr_uv.set_norm_mat(y_scale, vmed)
tan, bitan = compute_tangent(vertices, faces, normals, textures, face_textures)
# tan, bitan = compute_tangent_(vertices, faces, normals)
prt = np.loadtxt(prt_file)
face_prt = np.load(face_prt_file)
rndr.set_mesh(vertices, faces, normals, faces_normals, textures, face_textures, prt, face_prt, tan, bitan)
rndr.set_albedo(texture_image)
rndr_uv.set_mesh(vertices, faces, normals, faces_normals, textures, face_textures, prt, face_prt, tan, bitan)
rndr_uv.set_albedo(texture_image)
if not os.path.exists(os.path.join(out_path, 'val.txt')):
f = open(os.path.join(out_path, 'val.txt'), 'w')
f.close()
# copy obj file
    cmd = 'cp %s %s' % (mesh_file, os.path.join(out_path, 'GEO', 'OBJ', obj_uv_filespath.split('/')[-1]))
print(cmd)
os.system(cmd)
for p in pitch:
for y in tqdm(range(0, 360, angl_step)):
R = np.matmul(make_rotate(math.radians(p), 0, 0), make_rotate(0, math.radians(y), 0))
if up_axis == 2:
R = np.matmul(R, make_rotate(math.radians(-90), 0, 0))
rndr.rot_matrix = R
rndr_uv.rot_matrix = R
rndr.set_camera(cam)
rndr_uv.set_camera(cam)
for j in range(n_light):
sh_id = random.randint(0, shs.shape[0] - 1)
sh = shs[sh_id]
sh_angle = 0.2 * np.pi * (random.random() - 0.5)
sh = rotateSH(sh, make_rotate(0, sh_angle, 0).T)
dic = {'sh': sh, 'ortho_ratio': cam.ortho_ratio, 'scale': y_scale, 'center': vmed, 'R': R}
rndr.set_sh(sh)
rndr.analytic = False
rndr.use_inverse_depth = False
rndr.display()
out_all_f = rndr.get_color(0)
out_mask = out_all_f[:, :, 3]
out_all_f = cv2.cvtColor(out_all_f, cv2.COLOR_RGBA2BGR)
np.save(os.path.join(param_path, '%d_%d_%02d.npy' % (y, p, j)), dic, allow_pickle=True)
cv2.imwrite(os.path.join(render_path, '%d_%d_%02d.jpg' % (y, p, j)),
255.0 * out_all_f)
cv2.imwrite(os.path.join(mask_path, '%d_%d_%02d.png' % (y, p, j)),
255.0 * out_mask)
rndr_uv.set_sh(sh)
rndr_uv.analytic = False
rndr_uv.use_inverse_depth = False
rndr_uv.display()
uv_color = rndr_uv.get_color(0)
uv_color = cv2.cvtColor(uv_color, cv2.COLOR_RGBA2BGR)
cv2.imwrite(os.path.join(uv_render_path, '%d_%d_%02d.jpg' % (y, p, j)),
255.0 * uv_color)
if y == 0 and j == 0 and p == pitch[0]:
uv_pos = rndr_uv.get_color(1)
uv_mask = uv_pos[:, :, 3]
cv2.imwrite(os.path.join(uv_mask_path, '00.png'), 255.0 * uv_mask)
data = {'default': uv_pos[:, :, :3]} # default is a reserved name
pyexr.write(os.path.join(uv_pos_path, '00.exr'), data)
uv_nml = rndr_uv.get_color(2)
uv_nml = cv2.cvtColor(uv_nml, cv2.COLOR_RGBA2BGR)
cv2.imwrite(os.path.join(uv_normal_path, '00.png'), 255.0 * uv_nml)
# RUN
#wtight_bust = [x[:-1] for x in open('/Volumes/CKXZ 1/@City/363, FP/Dataset(s)/decimated_obj-dataset/watertight_BUSTS.txt', 'r').readlines() if '.obj' in x] # Local
#wtight_statue = [x[:-1] for x in open('/Volumes/CKXZ 1/@City/363, FP/Dataset(s)/decimated_obj-dataset/watertight_STATUES.txt', 'r').readlines() if '.obj' in x] # Local
wtight_bust = [x[:-1] for x in open('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/decimated_reoriented_vt/watertight_BUST.txt', 'r').readlines() if '.obj' in x] # Camber
wtight_statue = [x[:-1] for x in open('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/decimated_reoriented_vt/watertight_STATUE.txt', 'r').readlines() if '.obj' in x] # Camber
#file_src = '/Volumes/CKXZ 1/@City/363, FP/Dataset(s)/decimated_obj-dataset_vt' # Local
file_src = '/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/decimated_reoriented_vt' # Camber
#prep_src = '/Volumes/CKXZ 1/@City/363, FP/AISculpture/PIFuHD/DS-Related/preprocessd_data/prt_util' # Local
prep_src = '/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/prt_util_reoriented' # Camber
#dst = '/Volumes/CKXZ 1/@City/363, FP/AISculpture/PIFuHD/DS-Related/preprocessd_data/output_tryitlocal' # Local
dst = '/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/pp_output_reoriented' # Camber
#env_sh = '/Users/ckxz/Desktop/@City/363, FP/PIFu/env_sh.npy' # Local
env_sh = '/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/PIFu/env_sh.npy'
folders = sorted([x for x in os.listdir(file_src) if not x.startswith('.') and not x.endswith(('.txt', '.zip'))], key=int)
shs = np.load(env_sh)
from lib.renderer.gl.init_gl import initialize_GL_context
initialize_GL_context(width=512, height=512, egl=True)
from lib.renderer.gl.prt_render import PRTRender
rndr = PRTRender(width=512, height=512, ms_rate=1, egl=True)
rndr_uv = PRTRender(width=512, height=512, uv_mode=True, egl=True)
#ccount = 0
#fcount = 0
#ftcount = 0
for folder in folders:
#if not os.path.exists(os.path.join(dst, folder)):
# os.mkdir(os.path.join(dst, folder))
reps = [x for x in os.listdir(f'{file_src}/{folder}') if not x.startswith('.')]
for rep in reps:
if not os.path.exists(os.path.join(dst, rep)):
os.mkdir(os.path.join(dst, rep))
files = [x for x in os.listdir(os.path.join(file_src, folder, rep)) if not x.startswith('.') and not x.endswith(('.mtl', '.png'))]
for fname in files:
if os.path.join(folder, rep, fname) not in wtight_bust and os.path.join(folder, rep, fname) not in wtight_statue:
#ccount += 1
#with open('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/ccount.txt', 'w') as f:
# f.write(str(ccount))
continue
else:
#fcount += 1
#with open('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/fcount.txt', 'w') as f:
# f.write(str(fcount))
objnuv_filepath = os.path.join(file_src, folder, rep, fname[:-4])
print(objnuv_filepath.split('/')[-1])
prep_filespath = os.path.join(prep_src, folder, rep, fname[:-4] + '__')
dst_path = os.path.join(dst, rep)
render_prt_ortho(dst_path, objnuv_filepath, prep_filespath, shs, rndr, rndr_uv, 512, 1, 1, pitch=[0])
#ftcount += 1
#with open('/home/enterprise.internal.city.ac.uk/adbb120/pifuhd/data/ftcount.txt', 'w') as f:
# f.write(str(ftcount))
| 35.096606
| 185
| 0.668725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,678
| 0.27362
|
ac474bcb1cc36e8c400164e2a77001ca5f025265
| 498
|
py
|
Python
|
venv/lib/python3.8/site-packages/webargs/__init__.py
|
mrunix1998/booking-flights-system
|
4eab3d845c4ba6742bd550604fe69b7f101c8da4
|
[
"MIT"
] | 1
|
2022-03-28T16:37:17.000Z
|
2022-03-28T16:37:17.000Z
|
venv/venv/lib/python3.8/site-packages/webargs/__init__.py
|
mrunix1998/booking-flights-system
|
4eab3d845c4ba6742bd550604fe69b7f101c8da4
|
[
"MIT"
] | null | null | null |
venv/venv/lib/python3.8/site-packages/webargs/__init__.py
|
mrunix1998/booking-flights-system
|
4eab3d845c4ba6742bd550604fe69b7f101c8da4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from distutils.version import LooseVersion
from marshmallow.utils import missing
# Make marshmallow's validation functions importable from webargs
from marshmallow import validate
from webargs.core import dict2schema, ValidationError
from webargs import fields
__version__ = "5.3.2"
__version_info__ = tuple(LooseVersion(__version__).version)
__author__ = "Steven Loria"
__license__ = "MIT"
__all__ = ("dict2schema", "ValidationError", "fields", "missing", "validate")
| 27.666667
| 77
| 0.783133
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 171
| 0.343373
|
ac483bee0ecf390755efd9546940d7a56a66bf85
| 483
|
py
|
Python
|
scripts/imageio_remove_bin-script.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | 1
|
2021-11-25T02:14:23.000Z
|
2021-11-25T02:14:23.000Z
|
scripts/imageio_remove_bin-script.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | null | null | null |
scripts/imageio_remove_bin-script.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | null | null | null |
#!C:\Users\stpny\Downloads\grasp_public-master\grasp_public-master\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'imageio==2.5.0','console_scripts','imageio_remove_bin'
__requires__ = 'imageio==2.5.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('imageio==2.5.0', 'console_scripts', 'imageio_remove_bin')()
)
| 37.153846
| 86
| 0.689441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 278
| 0.575569
|
ac4880579ecf2bb75288ce5118717d81f57be27a
| 1,351
|
py
|
Python
|
mw2fcitx/build_dict.py
|
outloudvi/mw2fcitx
|
a4fbbcd5e8068ee1f08714f0e18b46c8b289a42c
|
[
"Unlicense"
] | 67
|
2020-08-13T13:58:03.000Z
|
2022-03-29T11:33:51.000Z
|
mw2fcitx/build_dict.py
|
outloudvi/fcitx5-pinyin-moegirl
|
c62d3f7d049143a4d8726f408bdd345f53ff3347
|
[
"Unlicense"
] | 5
|
2020-11-16T01:48:32.000Z
|
2022-02-18T08:04:32.000Z
|
mw2fcitx/build_dict.py
|
outloudvi/fcitx5-pinyin-moegirl
|
c62d3f7d049143a4d8726f408bdd345f53ff3347
|
[
"Unlicense"
] | 3
|
2020-10-08T15:44:30.000Z
|
2022-03-23T12:40:11.000Z
|
import logging
import sys
from .pipeline import MWFPipeline
def build(config):
config["source"] = config["source"] or {}
config["tweaks"] = config["tweaks"] or []
config["converter"] = config["converter"] or {}
config["generator"] = config["generator"] or []
pipeline = MWFPipeline(config["source"].get("api_path"))
if config["source"].get("api_path") is not None:
pipeline.fetch_titles(**config["source"].get("kwargs"))
if config["source"].get("file_path") is not None:
title_file_path = config["source"].get("file_path")
if title_file_path is None:
logging.error("No api_path or file_path provided. Stop.")
sys.exit(1)
if isinstance(title_file_path, str):
title_file_path = [title_file_path]
for i in title_file_path:
pipeline.load_titles_from_file(i,
**config["source"].get("kwargs"))
pipeline.convert_to_words(config["tweaks"])
pipeline.export_words(config["converter"].get("use"),
**config["converter"].get("kwargs"))
generators = config["generator"]
if not isinstance(generators, list):
generators = [generators]
for gen in generators:
pipeline.generate_dict(gen.get("use"), **gen.get("kwargs"))
return pipeline.dict
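# A hypothetical config sketch inferred from the lookups above; the key
# names match what build() reads, the values are illustrative only:
# config = {
#     "source": {"api_path": "https://zh.moegirl.org.cn/api.php",
#                "file_path": None,
#                "kwargs": {}},
#     "tweaks": [],
#     "converter": {"use": "opencc", "kwargs": {}},
#     "generator": [{"use": "rime", "kwargs": {"output": "moegirl.dict.yml"}}],
# }
# build(config)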
| 39.735294
| 76
| 0.61658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 291
| 0.215396
|
ac495d1405722c44232ce6b138bdc896307b81e8
| 21,345
|
py
|
Python
|
pythonweb/user/views.py
|
onwebbe/rasiberryPiWebManager
|
14ff9f14f3f873457666fa1669fae715148538c9
|
[
"Apache-2.0"
] | null | null | null |
pythonweb/user/views.py
|
onwebbe/rasiberryPiWebManager
|
14ff9f14f3f873457666fa1669fae715148538c9
|
[
"Apache-2.0"
] | 7
|
2020-09-07T07:51:28.000Z
|
2022-02-26T17:54:49.000Z
|
pythonweb/user/views.py
|
onwebbe/rasiberryPiWebManager
|
14ff9f14f3f873457666fa1669fae715148538c9
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
import json
# Create your views here.
def info(request):
userInfo = {
'id': '4291d7da9005377ec9aec4a71ea837f',
'name': '天野远子',
'username': 'admin',
'password': '',
'avatar': '/avatar2.jpg',
'status': 1,
'telephone': '',
'lastLoginIp': '27.154.74.117',
'lastLoginTime': 1534837621348,
'creatorId': 'admin',
'createTime': 1497160610259,
'merchantCode': 'TLif2btpzg079h15bk',
'deleted': 0,
'roleId': 'admin',
'role': {}
}
roleObj = {
'permissions': [],
'id': 'admin',
'name': '管理员',
'describe': '拥有所有权限',
'status': 1,
'creatorId': 'system',
'createTime': 1497160610259,
'deleted': 0,
'permissions': [{
'roleId': 'admin',
'permissionId': 'dashboard',
'permissionName': '仪表盘',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"query","defaultCheck":false,"describe":"查询"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'query',
'describe': '查询',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'exception',
'permissionName': '异常页面权限',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"query","defaultCheck":false,"describe":"查询"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'query',
'describe': '查询',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'result',
'permissionName': '结果权限',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"query","defaultCheck":false,"describe":"查询"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'query',
'describe': '查询',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'profile',
'permissionName': '详细页权限',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"query","defaultCheck":false,"describe":"查询"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'query',
'describe': '查询',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'table',
'permissionName': '表格权限',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"import","defaultCheck":false,"describe":"导入"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'import',
'describe': '导入',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'form',
'permissionName': '表单权限',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"query","defaultCheck":false,"describe":"查询"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'query',
'describe': '查询',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'order',
'permissionName': '订单管理',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"query","defaultCheck":false,"describe":"查询"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'query',
'describe': '查询',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'permission',
'permissionName': '权限管理',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'role',
'permissionName': '角色管理',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'table',
'permissionName': '桌子管理',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"query","defaultCheck":false,"describe":"查询"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'query',
'describe': '查询',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'user',
'permissionName': '用户管理',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"import","defaultCheck":false,"describe":"导入"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"},{"action":"export","defaultCheck":false,"describe":"导出"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'import',
'describe': '导入',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}, {
'action': 'export',
'describe': '导出',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}],
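        # NOTE: the 'permissions' key below repeats the one above; in a
        # Python dict literal the last duplicate key wins, so only the
        # following list actually survives.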
'permissions': [{
'roleId': 'admin',
'permissionId': 'support',
'permissionName': '超级模块',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"import","defaultCheck":false,"describe":"导入"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"},{"action":"export","defaultCheck":false,"describe":"导出"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'import',
'describe': '导入',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}, {
'action': 'export',
'describe': '导出',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}, {
'roleId': 'admin',
'permissionId': 'pioverview',
'permissionName': 'pioverview',
'actions': '[{"action":"add","defaultCheck":false,"describe":"新增"},{"action":"query","defaultCheck":false,"describe":"查询"},{"action":"get","defaultCheck":false,"describe":"详情"},{"action":"update","defaultCheck":false,"describe":"修改"},{"action":"delete","defaultCheck":false,"describe":"删除"}]',
'actionEntitySet': [{
'action': 'add',
'describe': '新增',
'defaultCheck': False
}, {
'action': 'query',
'describe': '查询',
'defaultCheck': False
}, {
'action': 'get',
'describe': '详情',
'defaultCheck': False
}, {
'action': 'update',
'describe': '修改',
'defaultCheck': False
}, {
'action': 'delete',
'describe': '删除',
'defaultCheck': False
}],
'actionList': None,
'dataAccess': None
}]
}
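    # NOTE: this reassignment discards the userInfo dict built above; only
    # the role object is returned to the client.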
userInfo = {
'result': {
'role': roleObj
}
}
return HttpResponse(json.dumps(userInfo, indent=4))
def nav(request):
nav = [
# // dashboard
{
'name': 'dashboard',
'parentId': -1,
'id': 1,
'meta': {
'icon': 'dashboard',
'title': '仪表盘',
'show': True
},
'component': 'RouteView',
'redirect': '/dashboard/workplace'
},
{
'name': 'workplace',
'parentId': 1,
'id': 7,
'meta': {
'title': '工作台',
'show': True
},
'component': 'Workplace'
},
{
'name': 'monitor',
'path': 'https://www.baidu.com/',
'parentId': 1,
'id': 3,
'meta': {
'title': '监控页(外部)',
'target': '_blank',
'show': True
}
},
{
'name': 'analysis',
'parentId': 1,
'id': 2,
'meta': {
'title': '分析页',
'show': True
},
'component': 'Analysis'
},
{
'name': 'tests',
'parentId': 1,
'id': 8,
'meta': {
'title': '测试功能',
'show': True
},
'component': 'TestWork'
},
# //pi overview
{
'name': 'pioverview',
'parentId': -1,
'id': 100,
'meta': {
'icon': 'dashboard',
'title': 'Pi Overview',
'show': True
},
'component': 'RouteView',
'redirect': '/pioverview/gpioOverview'
},
{
'name': 'gpioOverview',
'parentId': 100,
'id': 6,
'meta': {
'title': 'GPIO Overview'
},
'component': 'PiGPIOStatus'
},
{
'name': 'workingOverview',
'parentId': 100,
'id': 7,
'meta': {
                'title': 'Working Overview'
},
'component': 'PiWorkingStatus'
},
# // form
{
'name': 'form',
'parentId': -1,
'id': 10,
'meta': {
'icon': 'form',
'title': '表单页'
},
'redirect': '/form/base-form',
'component': 'PageView'
},
{
'name': 'basic-form',
'parentId': 10,
'id': 6,
'meta': {
'title': '基础表单'
},
'component': 'BasicForm'
},
{
'name': 'step-form',
'parentId': 10,
'id': 5,
'meta': {
'title': '分步表单'
},
'component': 'StepForm'
},
{
'name': 'advanced-form',
'parentId': 10,
'id': 4,
'meta': {
'title': '高级表单'
},
'component': 'AdvanceForm'
},
# // list
{
'name': 'list',
'parentId': -1,
'id': 10010,
'meta': {
'icon': 'table',
'title': '列表页',
'show': True
},
'redirect': '/list/table-list',
'component': 'PageView'
},
{
'name': 'table-list',
'parentId': 10010,
'id': 10011,
'path': '/list/table-list/:pageNo([1-9]\\d*)?',
'meta': {
'title': '查询表格',
'show': True
},
'component': 'TableList'
},
{
'name': 'basic-list',
'parentId': 10010,
'id': 10012,
'meta': {
'title': '标准列表',
'show': True
},
'component': 'StandardList'
},
{
'name': 'card',
'parentId': 10010,
'id': 10013,
'meta': {
'title': '卡片列表',
'show': True
},
'component': 'CardList'
},
{
'name': 'search',
'parentId': 10010,
'id': 10014,
'meta': {
'title': '搜索列表',
'show': True
},
'redirect': '/list/search/article',
'component': 'SearchLayout'
},
{
'name': 'article',
'parentId': 10014,
'id': 10015,
'meta': {
'title': '搜索列表(文章)',
'show': True
},
'component': 'SearchArticles'
},
{
'name': 'project',
'parentId': 10014,
'id': 10016,
'meta': {
'title': '搜索列表(项目)',
'show': True
},
'component': 'SearchProjects'
},
{
'name': 'application',
'parentId': 10014,
'id': 10017,
'meta': {
'title': '搜索列表(应用)',
'show': True
},
'component': 'SearchApplications'
},
# // profile
{
'name': 'profile',
'parentId': -1,
'id': 10018,
'meta': {
'title': '详情页',
'icon': 'profile',
'show': True
},
'redirect': '/profile/basic',
'component': 'RouteView'
},
{
'name': 'basic',
'parentId': 10018,
'id': 10019,
'meta': {
'title': '基础详情页',
'show': True
},
'component': 'ProfileBasic'
},
{
'name': 'advanced',
'parentId': 10018,
'id': 10020,
'meta': {
'title': '高级详情页',
'show': True
},
'component': 'ProfileAdvanced'
},
# // result
{
'name': 'result',
'parentId': -1,
'id': 10021,
'meta': {
'title': '结果页',
'icon': 'check-circle-o',
'show': True
},
'redirect': '/result/success',
'component': 'PageView'
},
{
'name': 'success',
'parentId': 10021,
'id': 10022,
'meta': {
'title': '成功',
'hiddenHeaderContent': True,
'show': True
},
'component': 'ResultSuccess'
},
{
'name': 'fail',
'parentId': 10021,
'id': 10023,
'meta': {
'title': '失败',
'hiddenHeaderContent': True,
'show': True
},
'component': 'ResultFail'
},
# // Exception
{
'name': 'exception',
'parentId': -1,
'id': 10024,
'meta': {
'title': '异常页',
'icon': 'warning',
'show': True
},
'redirect': '/exception/403',
'component': 'RouteView'
},
{
'name': '403',
'parentId': 10024,
'id': 10025,
'meta': {
'title': '403',
'show': True
},
'component': 'Exception403'
},
{
'name': '404',
'parentId': 10024,
'id': 10026,
'meta': {
'title': '404',
'show': True
},
'component': 'Exception404'
},
{
'name': '500',
'parentId': 10024,
'id': 10027,
'meta': {
'title': '500',
'show': True
},
'component': 'Exception500'
},
# // account
{
'name': 'account',
'parentId': -1,
'id': 10028,
'meta': {
'title': '个人页',
'icon': 'user',
'show': True
},
'redirect': '/account/center',
'component': 'RouteView'
},
{
'name': 'center',
'parentId': 10028,
'id': 10029,
'meta': {
'title': '个人中心',
'show': True
},
'component': 'AccountCenter'
},
        # // special third-level menu
{
'name': 'settings',
'parentId': 10028,
'id': 10030,
'meta': {
'title': '个人设置',
'hideHeader': True,
'hideChildren': True,
'show': True
},
'redirect': '/account/settings/base',
'component': 'AccountSettings'
},
{
'name': 'BaseSettings',
'path': '/account/settings/base',
'parentId': 10030,
'id': 10031,
'meta': {
'title': '基本设置',
'show': False
},
'component': 'BaseSettings'
},
{
'name': 'SecuritySettings',
'path': '/account/settings/security',
'parentId': 10030,
'id': 10032,
'meta': {
'title': '安全设置',
'show': False
},
'component': 'SecuritySettings'
},
{
'name': 'CustomSettings',
'path': '/account/settings/custom',
'parentId': 10030,
'id': 10033,
'meta': {
'title': '个性化设置',
'show': False
},
'component': 'CustomSettings'
},
{
'name': 'BindingSettings',
'path': '/account/settings/binding',
'parentId': 10030,
'id': 10034,
'meta': {
'title': '账户绑定',
'show': False
},
'component': 'BindingSettings'
},
{
'name': 'NotificationSettings',
'path': '/account/settings/notification',
'parentId': 10030,
'id': 10034,
'meta': {
'title': '新消息通知',
'show': False
},
'component': 'NotificationSettings'
}
]
navResult = {
'result': nav
}
return HttpResponse(json.dumps(navResult, indent=4))
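# A minimal urls.py sketch (not part of this file) showing how these views
# might be routed; the URL paths and names are assumptions (left commented):
# from django.urls import path
# from user import views
#
# urlpatterns = [
#     path('api/user/info', views.info, name='user-info'),
#     path('api/user/nav', views.nav, name='user-nav'),
# ]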
| 25.747889
| 357
| 0.469056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12,974
| 0.582761
|
ac4bb093b09ad6b3234a1c157636387e7fbb5f98
| 3,278
|
py
|
Python
|
WHI_long_term_size_distr_including_fresh_emissions_plotting.py
|
annahs/atmos_research
|
b5853c9b12e327492f8f8ba5069bca3fd2e981c8
|
[
"MIT"
] | 2
|
2018-08-17T15:25:26.000Z
|
2019-04-17T16:50:00.000Z
|
WHI_long_term_size_distr_including_fresh_emissions_plotting.py
|
annahs/atmos_research
|
b5853c9b12e327492f8f8ba5069bca3fd2e981c8
|
[
"MIT"
] | null | null | null |
WHI_long_term_size_distr_including_fresh_emissions_plotting.py
|
annahs/atmos_research
|
b5853c9b12e327492f8f8ba5069bca3fd2e981c8
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from matplotlib import dates
import os
import pickle
from datetime import datetime
from pprint import pprint
import sys
import math
import traceback
import time
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/')
file = open('raw size and number distributions by air mass for 69.76nm to 220.11nm.binpickl', 'r')
distr_data = pickle.load(file)
file.close()
modified_distr_data = {}
interval_length = 5.0
fit_bins = []
for x in range(30, 800, 5):
fit_bins.append(x+2)
def lognorm(x_vals, A, w, xc):
return A/(np.sqrt(2*math.pi)*w*x_vals)*np.exp(-(np.log(x_vals/xc))**2/(2*w**2))
for air_mass, distribution_data in distr_data.iteritems():
print air_mass
#distribution_data.pop(70, None)
distr_bins_p = []
mass_distr_values = []
numb_distr_values = []
for bin, distr_values in distribution_data.iteritems(): #normalize
n_mass_val = distr_values[0]/(math.log(bin+interval_length)-math.log(bin)) #dM/dlog(VED)
mass_distr_values.append(n_mass_val)
n_numb_val = distr_values[1]/(math.log(bin+interval_length)-math.log(bin)) #d/dlog(VED)
numb_distr_values.append(n_numb_val)
distr_bins_p.append(bin+interval_length/2.0) #correction for our binning code recording bin starts as keys instead of midpoints
norm_mass_distr_values_p = []
for mass in mass_distr_values:
norm_mass = mass/np.max(mass_distr_values)
norm_mass_distr_values_p.append(norm_mass)
norm_mass_distr_values = np.array(norm_mass_distr_values_p)
norm_numb_distr_values_p = []
for numb in numb_distr_values:
norm_numb = numb/np.max(numb_distr_values)
norm_numb_distr_values_p.append(norm_numb)
norm_numb_distr_values = np.array(norm_numb_distr_values_p)
distr_bins = np.array(distr_bins_p)
fit_failure = False
try:
popt, pcov = curve_fit(lognorm, distr_bins, norm_numb_distr_values)
perr = np.sqrt(np.diag(pcov)) #from docs: To compute one standard deviation errors on the parameters use perr = np.sqrt(np.diag(pcov))
err_variables = [popt[0]-perr[0], popt[1]-perr[1], popt[2]-perr[2]]
except:
print 'fit_failure'
fit_failure = True
fit_y_vals = []
for bin in fit_bins:
if fit_failure == True:
fit_val = np.nan
else:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_y_vals.append(fit_val)
err_fit_y_vals = []
for bin in fit_bins:
if fit_failure == True:
err_fit_val = np.nan
else:
err_fit_val = lognorm(bin, err_variables[0], err_variables[1], err_variables[2])
err_fit_y_vals.append(err_fit_val)
modified_distr_data[air_mass] = [distr_bins,norm_numb_distr_values,fit_bins,fit_y_vals]
pprint(modified_distr_data['GBPS'])
#plotting
fig = plt.figure()
ax1 = fig.add_subplot(111)
colors=['magenta', 'red', 'green', 'cyan', 'blue', 'black']
i=0
for air_mass, distr in modified_distr_data.iteritems():
bins = modified_distr_data[air_mass][0]
data = modified_distr_data[air_mass][1]
fit_bins = modified_distr_data[air_mass][2]
fits = modified_distr_data[air_mass][3]
m_distr = ax1.scatter(bins,data, label = air_mass,color = colors[i])
f_distr = ax1.semilogx(fit_bins,fits,color = colors[i])
ax1.set_xlim(40,500)
ax1.set_ylim(0,1.1)
i+=1
plt.legend()
plt.show()
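# A self-contained sketch of the lognorm fit above on synthetic data; the
# parameter values are arbitrary illustrations (left commented):
# x = np.arange(50, 400, 5, dtype=float)
# y_true = lognorm(x, 1.0, 0.45, 150.0)
# noisy = y_true * (1 + 0.05 * np.random.randn(x.size))
# popt_demo, _ = curve_fit(lognorm, x, noisy, p0=[1.0, 0.5, 120.0])
# print 'A=%.3f w=%.3f xc=%.3f' % tuple(popt_demo)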
| 26.650407
| 137
| 0.742221
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 473
| 0.144295
|
ac4e9a7286b947fb0b00f67815da9872ce954025
| 161
|
py
|
Python
|
Estrutura_Decisao/pair_or_odd.py
|
M3nin0/supreme-broccoli
|
186c1ea3b839ba3139f9301660dec8fbd27a162e
|
[
"Apache-2.0"
] | null | null | null |
Estrutura_Decisao/pair_or_odd.py
|
M3nin0/supreme-broccoli
|
186c1ea3b839ba3139f9301660dec8fbd27a162e
|
[
"Apache-2.0"
] | null | null | null |
Estrutura_Decisao/pair_or_odd.py
|
M3nin0/supreme-broccoli
|
186c1ea3b839ba3139f9301660dec8fbd27a162e
|
[
"Apache-2.0"
] | null | null | null |
num = int(input("Insira um numero para descobrir se este é par ou impar: "))
if num % 2 == 0:
print("Este numero é par")
else:
print("Este numero é impar")
| 17.888889
| 76
| 0.658385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 0.615854
|
ac4ee0a9c265d35fc43a606e8c10147a4a14ebe8
| 342
|
py
|
Python
|
pos_multie_print/config/docs.py
|
ashish-greycube/pos_multie_print
|
f84f36cdf32f53b200c8fe7b9c754e199094d841
|
[
"MIT"
] | null | null | null |
pos_multie_print/config/docs.py
|
ashish-greycube/pos_multie_print
|
f84f36cdf32f53b200c8fe7b9c754e199094d841
|
[
"MIT"
] | null | null | null |
pos_multie_print/config/docs.py
|
ashish-greycube/pos_multie_print
|
f84f36cdf32f53b200c8fe7b9c754e199094d841
|
[
"MIT"
] | null | null | null |
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/pos_multie_print"
# docs_base_url = "https://[org_name].github.io/pos_multie_print"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "POS Multiple Print"
| 28.5
| 68
| 0.739766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 286
| 0.836257
|
ac4f1637d2da63115e2a93b02c3d3a4bb30ba74a
| 56
|
py
|
Python
|
src/import_hook/__init__.py
|
zthxxx/sniputils
|
e67f55dfa0689f1dde6b6e78d76f04022b4d4585
|
[
"MIT"
] | null | null | null |
src/import_hook/__init__.py
|
zthxxx/sniputils
|
e67f55dfa0689f1dde6b6e78d76f04022b4d4585
|
[
"MIT"
] | null | null | null |
src/import_hook/__init__.py
|
zthxxx/sniputils
|
e67f55dfa0689f1dde6b6e78d76f04022b4d4585
|
[
"MIT"
] | null | null | null |
from .import_track import *
from .reimportable import *
| 18.666667
| 27
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ac4f175249ba254c543b5b853dde50f7e4c40661
| 837
|
py
|
Python
|
check_performance.py
|
5laps2go/xbrr
|
4c0824b53bfe971111d60e6c1ff4e36f4f4845a3
|
[
"MIT"
] | null | null | null |
check_performance.py
|
5laps2go/xbrr
|
4c0824b53bfe971111d60e6c1ff4e36f4f4845a3
|
[
"MIT"
] | null | null | null |
check_performance.py
|
5laps2go/xbrr
|
4c0824b53bfe971111d60e6c1ff4e36f4f4845a3
|
[
"MIT"
] | null | null | null |
import os
import shutil
import pyfbi
from xbrr.edinet.client.document_client import DocumentClient
from xbrr.edinet.reader.reader import Reader
from xbrr.edinet.reader.doc import Doc
from xbrr.edinet.reader.aspects.finance import Finance
@pyfbi.target
def check():
_dir = os.path.join(os.path.dirname(__file__), "./data")
    # start from a fresh, empty directory (the original if/else left no
    # directory behind when one already existed)
    if os.path.exists(_dir):
        shutil.rmtree(_dir)
    os.mkdir(_dir)
client = DocumentClient()
xbrl_root = client.get_xbrl("S100G2KL", save_dir=_dir, expand_level="dir")
xbrl_doc = Doc(root_dir=xbrl_root, xbrl_kind="public")
reader = Reader(xbrl_doc)
print("Start Calculation")
bs = reader.extract(Finance).bs()
bs.to_csv("bs.csv", index=False, encoding="shift_jis")
shutil.rmtree(_dir)
with pyfbi.watch():
check()
pyfbi.dump("result")
pyfbi.show()
| 26.15625
| 78
| 0.710872
| 0
| 0
| 0
| 0
| 527
| 0.62963
| 0
| 0
| 77
| 0.091995
|
ac4f82e72b64166dbb545dc5a1c2ec940777bbee
| 1,096
|
py
|
Python
|
pe3.py
|
ChrisCalderon/project-euler
|
96055343fc3ef7653184708fe350018ee751ea17
|
[
"MIT"
] | 1
|
2015-12-16T05:13:30.000Z
|
2015-12-16T05:13:30.000Z
|
pe3.py
|
ChrisCalderon/project-euler
|
96055343fc3ef7653184708fe350018ee751ea17
|
[
"MIT"
] | null | null | null |
pe3.py
|
ChrisCalderon/project-euler
|
96055343fc3ef7653184708fe350018ee751ea17
|
[
"MIT"
] | null | null | null |
PRIMES = [3]
def next_prime():
num = PRIMES[-1] + 2 # odd + 2 is the next odd, don't check evens for primality
is_prime = False
while True:
lim = num**0.5 # don't check for prime factors larger than this
for p in PRIMES:
if p > lim:
is_prime = True
break
elif num%p==0:
is_prime = False
break
else:
continue
if is_prime:
PRIMES.append(num)
return num
else:
num += 2
def largest_prime_factor(n):
largest = 2
while not n&1:
        n >>= 1 # divide out the twos (was `n >> 1`, which discarded the result)
if n%3 == 0:
largest = 3
while n%3==0:
n /= 3
while n > 1:
p = next_prime()
if n%p==0:
largest = p
while n%p==0:
n /= p
return largest
def main():
# testing prime finding
# print 2, 3,
# for i in range(100):
# print next_prime(),
print largest_prime_factor(600851475143)
if __name__ == '__main__':
main()
| 22.833333
| 83
| 0.469891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 220
| 0.20073
|
ac4fb7ef759fec615c1233d88bd6d5b5c8a82c1d
| 144
|
py
|
Python
|
backend/puzzle/apps.py
|
mductran/puzzle
|
c4598f5420dff126fa67db1e0adee1677a8baf8f
|
[
"Apache-2.0"
] | null | null | null |
backend/puzzle/apps.py
|
mductran/puzzle
|
c4598f5420dff126fa67db1e0adee1677a8baf8f
|
[
"Apache-2.0"
] | null | null | null |
backend/puzzle/apps.py
|
mductran/puzzle
|
c4598f5420dff126fa67db1e0adee1677a8baf8f
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class PuzzleConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'puzzle'
| 20.571429
| 56
| 0.756944
| 107
| 0.743056
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.270833
|
ac505c7c29aa070c9931ea9a50fc3af3c4aa490f
| 10,701
|
py
|
Python
|
cryptoquant/api/okex/futures_api.py
|
studyquant/StudyQuant
|
24790634ac320b25361672754558c3797f4fc9e3
|
[
"Apache-2.0"
] | 74
|
2018-08-10T17:05:57.000Z
|
2022-03-26T07:06:02.000Z
|
cryptoquant/api/okex/futures_api.py
|
ezailwoo/studyquant
|
24790634ac320b25361672754558c3797f4fc9e3
|
[
"Apache-2.0"
] | 1
|
2022-03-24T06:42:00.000Z
|
2022-03-24T06:42:00.000Z
|
cryptoquant/api/okex/futures_api.py
|
ezailwoo/studyquant
|
24790634ac320b25361672754558c3797f4fc9e3
|
[
"Apache-2.0"
] | 18
|
2020-09-22T09:03:49.000Z
|
2022-03-31T20:48:54.000Z
|
from .client import Client
from .consts import *
class FutureAPI(Client):
def __init__(self, api_key, api_secret_key, passphrase, use_server_time=False, first=False):
Client.__init__(self, api_key, api_secret_key, passphrase, use_server_time, first)
# query position
def get_position(self):
return self._request_without_params(GET, FUTURE_POSITION)
# query specific position
def get_specific_position(self, instrument_id):
return self._request_without_params(GET, FUTURE_SPECIFIC_POSITION + str(instrument_id) + '/position')
# query accounts info
def get_accounts(self):
return self._request_without_params(GET, FUTURE_ACCOUNTS)
# query coin account info
def get_coin_account(self, underlying):
return self._request_without_params(GET, FUTURE_COIN_ACCOUNT + str(underlying))
# query leverage
def get_leverage(self, underlying):
return self._request_without_params(GET, FUTURE_GET_LEVERAGE + str(underlying) + '/leverage')
# set leverage
def set_leverage(self, underlying, leverage, instrument_id='', direction=''):
params = {'leverage': leverage}
if instrument_id:
params['instrument_id'] = instrument_id
if direction:
params['direction'] = direction
return self._request_with_params(POST, FUTURE_SET_LEVERAGE + str(underlying) + '/leverage', params)
# query ledger
def get_ledger(self, underlying, after='', before='', limit='', type=''):
params = {}
if after:
params['after'] = after
if before:
params['before'] = before
if limit:
params['limit'] = limit
if type:
params['type'] = type
return self._request_with_params(GET, FUTURE_LEDGER + str(underlying) + '/ledger', params, cursor=True)
# take order
# def take_order(self, instrument_id, type, price, size, client_oid='', order_type='0', match_price='0'):
# params = {'client_oid': client_oid, 'instrument_id': instrument_id, 'type': type, 'order_type': order_type, 'price': price, 'size': size, 'match_price': match_price}
# return self._request_with_params(POST, FUTURE_ORDER, params)
    # take order (place an order)
def take_order(self, client_oid,instrument_id, otype,price, size, leverage, order_type,match_price):
params = {'client_oid':client_oid,'instrument_id': instrument_id, 'type': otype, 'price': price, 'size': size, 'leverage': leverage,'order_type':order_type,'match_price':match_price}
return self._request_with_params(POST, FUTURE_ORDER, params)
# take orders
def take_orders(self, instrument_id, orders_data):
params = {'instrument_id': instrument_id, 'orders_data': orders_data}
return self._request_with_params(POST, FUTURE_ORDERS, params)
# revoke order
def revoke_order(self, instrument_id, order_id='', client_oid=''):
if order_id:
return self._request_without_params(POST, FUTURE_REVOKE_ORDER + str(instrument_id) + '/' + str(order_id))
elif client_oid:
return self._request_without_params(POST, FUTURE_REVOKE_ORDER + str(instrument_id) + '/' + str(client_oid))
# revoke orders
def revoke_orders(self, instrument_id, order_ids='', client_oids=''):
params = {}
if order_ids:
params = {'order_ids': order_ids}
elif client_oids:
params = {'client_oids': client_oids}
return self._request_with_params(POST, FUTURE_REVOKE_ORDERS + str(instrument_id), params)
# query order list
def get_order_list(self, state, instrument_id,after='', before='', limit=''):
params = {'state': state}
if after:
params['after'] = after
if before:
params['before'] = before
if limit:
params['limit'] = limit
return self._request_with_params(GET, FUTURE_ORDERS_LIST + str(instrument_id), params, cursor=True)
# query order info
def get_order_info(self, instrument_id, order_id='', client_oid=''):
if order_id:
return self._request_without_params(GET, FUTURE_ORDER_INFO + str(instrument_id) + '/' + str(order_id))
elif client_oid:
return self._request_without_params(GET, FUTURE_ORDER_INFO + str(instrument_id) + '/' + str(client_oid))
# query fills
def get_fills(self, instrument_id, order_id='', after='', before='', limit=''):
params = {'instrument_id': instrument_id}
if order_id:
params['order_id'] = order_id
if after:
params['after'] = after
if before:
params['before'] = before
if limit:
params['limit'] = limit
return self._request_with_params(GET, FUTURE_FILLS, params, cursor=True)
# set margin_mode
def set_margin_mode(self, underlying, margin_mode):
params = {'underlying': underlying, 'margin_mode': margin_mode}
return self._request_with_params(POST, FUTURE_MARGIN_MODE, params)
# close_position
def close_position(self, instrument_id, direction):
params = {'instrument_id': instrument_id, 'direction': direction}
return self._request_with_params(POST, FUTURE_CLOSE_POSITION, params)
# cancel_all
def cancel_all(self, instrument_id, direction):
params = {'instrument_id': instrument_id, 'direction': direction}
return self._request_with_params(POST, FUTURE_CANCEL_ALL, params)
# take order_algo
def take_order_algo(self, instrument_id, type, order_type, size, trigger_price='', algo_price='', callback_rate='', algo_variance='', avg_amount='', price_limit='', sweep_range='', sweep_ratio='', single_limit='', time_interval=''):
params = {'instrument_id': instrument_id, 'type': type, 'order_type': order_type, 'size': size}
        if order_type == '1':  # take-profit/stop-loss parameters (at most 10 concurrent orders)
params['trigger_price'] = trigger_price
params['algo_price'] = algo_price
        elif order_type == '2':  # trail order parameters (at most 10 concurrent orders)
params['callback_rate'] = callback_rate
params['trigger_price'] = trigger_price
        elif order_type == '3':  # iceberg order parameters (at most 6 concurrent orders)
params['algo_variance'] = algo_variance
params['avg_amount'] = avg_amount
params['price_limit'] = price_limit
        elif order_type == '4':  # TWAP (time-weighted) parameters (at most 6 concurrent orders)
params['sweep_range'] = sweep_range
params['sweep_ratio'] = sweep_ratio
params['single_limit'] = single_limit
params['price_limit'] = price_limit
params['time_interval'] = time_interval
return self._request_with_params(POST, FUTURE_ORDER_ALGO, params)
# cancel_algos
def cancel_algos(self, instrument_id, algo_ids, order_type):
params = {'instrument_id': instrument_id, 'algo_ids': algo_ids, 'order_type': order_type}
return self._request_with_params(POST, FUTURE_CANCEL_ALGOS, params)
# get order_algos
def get_order_algos(self, instrument_id, order_type, status='', algo_id='', before='', after='', limit=''):
params = {'order_type': order_type}
if status:
params['status'] = status
elif algo_id:
params['algo_id'] = algo_id
if before:
params['before'] = before
if after:
params['after'] = after
if limit:
params['limit'] = limit
return self._request_with_params(GET, FUTURE_GET_ORDER_ALGOS + str(instrument_id), params)
def get_trade_fee(self):
return self._request_without_params(GET, FUTURE_TRADE_FEE)
# get products info
def get_products(self):
return self._request_without_params(GET, FUTURE_PRODUCTS_INFO)
# get depth
def get_depth(self, instrument_id, size='', depth=''):
params = {'size': size, 'depth': depth}
return self._request_with_params(GET, FUTURE_DEPTH + str(instrument_id) + '/book', params)
# get ticker
def get_ticker(self):
return self._request_without_params(GET, FUTURE_TICKER)
# get specific ticker
def get_specific_ticker(self, instrument_id):
return self._request_without_params(GET, FUTURE_SPECIFIC_TICKER + str(instrument_id) + '/ticker')
# query trades
def get_trades(self, instrument_id, after='', before='', limit=''):
params = {}
if after:
params['after'] = after
if before:
params['before'] = before
if limit:
params['limit'] = limit
return self._request_with_params(GET, FUTURE_TRADES + str(instrument_id) + '/trades', params, cursor=True)
# query k-line
def get_kline(self, instrument_id, granularity='', start='', end=''):
params = {'granularity': granularity, 'start': start, 'end': end}
        # returned in reverse chronological order, i.e. from end time back to start time
return self._request_with_params(GET, FUTURE_KLINE + str(instrument_id) + '/candles', params)
        # for chronological order (start time to end time), reverse instead:
# data = self._request_with_params(GET, FUTURE_KLINE + str(instrument_id) + '/candles', params)
# return list(reversed(data))
# query index
def get_index(self, instrument_id):
return self._request_without_params(GET, FUTURE_INDEX + str(instrument_id) + '/index')
# query rate
def get_rate(self):
return self._request_without_params(GET, FUTURE_RATE)
# query estimate price
def get_estimated_price(self, instrument_id):
return self._request_without_params(GET, FUTURE_ESTIMAT_PRICE + str(instrument_id) + '/estimated_price')
# query the total platform of the platform
def get_holds(self, instrument_id):
return self._request_without_params(GET, FUTURE_HOLDS + str(instrument_id) + '/open_interest')
# query limit price
def get_limit(self, instrument_id):
return self._request_without_params(GET, FUTURE_LIMIT + str(instrument_id) + '/price_limit')
# query limit price
def get_liquidation(self, instrument_id, status, limit='', froms='', to=''):
params = {'status': status}
if limit:
params['limit'] = limit
if froms:
params['from'] = froms
if to:
params['to'] = to
return self._request_with_params(GET, FUTURE_LIQUIDATION + str(instrument_id) + '/liquidation', params)
# query holds amount
def get_holds_amount(self, instrument_id):
return self._request_without_params(GET, HOLD_AMOUNT + str(instrument_id) + '/holds')
# query mark price
def get_mark_price(self, instrument_id):
return self._request_without_params(GET, FUTURE_MARK + str(instrument_id) + '/mark_price')
| 43.149194
| 236
| 0.657882
| 10,837
| 0.995225
| 0
| 0
| 0
| 0
| 0
| 0
| 2,373
| 0.217926
|
ac52168f298fb9c551b44c7fca2f04721962c5e4
| 2,254
|
py
|
Python
|
advent_of_code_2017/day 14/solution.py
|
jvanelteren/advent_of_code
|
3c547645250adb2d95ebac43d5d2111cdf9b09e9
|
[
"MIT"
] | 1
|
2021-12-23T11:24:11.000Z
|
2021-12-23T11:24:11.000Z
|
advent_of_code_2017/day 14/solution.py
|
jvanelteren/advent_of_code
|
3c547645250adb2d95ebac43d5d2111cdf9b09e9
|
[
"MIT"
] | null | null | null |
advent_of_code_2017/day 14/solution.py
|
jvanelteren/advent_of_code
|
3c547645250adb2d95ebac43d5d2111cdf9b09e9
|
[
"MIT"
] | null | null | null |
#%%
# read full assignment
# think algo before implementing
# dont use a dict when you need a list
# assignment is still = and not ==
# dont use itertools when you can use np.roll
# check mathemathical functions if the parentheses are ok
# networkx is awesome
# sometimes while true is better than just too small for loop
# networkx addes nodes when adding edge to nonexistent node
# %%
import os
import re
import numpy as np
try:
os.chdir(os.path.join(os.getcwd(), 'day 14'))
print(os.getcwd())
except:
pass
from functools import reduce
import operator
import networkx as nx
import numpy as np
# f = open('input.txt','r').read().strip()
def gethash(f):
lengths = [ord(l) for l in f]
lengths += [17, 31, 73, 47, 23]
circular = np.arange(256)
skip = 0
start = 0
for r in range(64):
for l in lengths:
circular = np.roll(circular,-start)
circular[:l]=circular[:l][::-1]
circular = np.roll(circular,+start)
start = (start + l + skip)%len(circular)
skip +=1
def densehash(inp):
return (reduce(lambda a,b : operator.xor(a,b),inp))
hashcode = ''
for i in range(16):
hashcode += hex(densehash(circular[i*16:i*16+16]))[2:].zfill(2)
return hashcode
def getbits(inp):
my_hexdata = inp
scale = 16 ## equals to hexadecimal
num_of_bits = 4
return bin(int(my_hexdata, scale))[2:].zfill(num_of_bits)
count= 0
f = 'stpzcrnm'
for r in range(128):
h = gethash('stpzcrnm-'+str(r))
count+=len(''.join([getbits(b) for b in h]).replace('0',''))
count
# %%
count= 0
grid = []
f = 'stpzcrnm'
for r in range(128):
h = gethash('stpzcrnm-'+str(r))
grid.append(list(''.join([getbits(b) for b in h])))
count+=len(''.join([getbits(b) for b in h]).replace('0',''))
# %%
grid = np.array(grid)
print(grid.shape)
G = nx.Graph()
for index,output in np.ndenumerate(grid):
if output == '1':
i,j = index[0], index[1]
G.add_edge((i,j),(i+1,j))
G.add_edge((i,j),(i-1,j))
G.add_edge((i,j),(i,j+1))
G.add_edge((i,j),(i,j-1))
for index,output in np.ndenumerate(grid):
if output == '0':
if G.has_node(index): G.remove_node(index)
nx.number_connected_components(G)
# %%
| 25.325843
| 72
| 0.613576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 528
| 0.23425
|
ac53a111991d6177a1eaab1a5dfb80e80e02f826
| 1,423
|
py
|
Python
|
binyard/general_diff.py
|
giprayogo/binyard
|
c1cfa880cb9907416da2363fa0e4ca2de920543e
|
[
"MIT"
] | null | null | null |
binyard/general_diff.py
|
giprayogo/binyard
|
c1cfa880cb9907416da2363fa0e4ca2de920543e
|
[
"MIT"
] | null | null | null |
binyard/general_diff.py
|
giprayogo/binyard
|
c1cfa880cb9907416da2363fa0e4ca2de920543e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import math
import argparse
from fractions import Fraction
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('--column', '-c', required=True)
parser.add_argument('--error-column', '-e')
args = parser.parse_args()
filename = args.filename
columns = list(map(int, args.column.split(',')))
err_columns = list(map(int, args.error_column.split(','))) if args.error_column else None
with open(filename, 'r') as data_file:
reference_data = {}
for line in data_file.readlines():
if '#' in line:
print(line.rstrip())
continue
else:
split = line.split()
if not any(reference_data.values()):
for column in columns:
reference_data[column] = float(split[column])
if err_columns:
for err_column in err_columns:
reference_data[err_column] = float(split[err_column])
print('ref: ',' '.join(map(str,split)))
else:
for column in columns:
split[column] = float(split[column]) - reference_data[column]
if err_columns:
for err_column in err_columns:
split[err_column] = math.sqrt(float(split[err_column])**2 + reference_data[err_column]**2)
print(' '.join(map(str,split)))
| 36.487179
| 114
| 0.583275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.063247
|
ac53b737b7f7790c24745dd75f232a7019768317
| 7,968
|
py
|
Python
|
challenges/challenge1_test.py
|
jamiejamiebobamie/CS-2.2-Advanced-Recursion-and-Graphs
|
3de50122ed783ee9dee251ae87173286a861f33d
|
[
"MIT"
] | null | null | null |
challenges/challenge1_test.py
|
jamiejamiebobamie/CS-2.2-Advanced-Recursion-and-Graphs
|
3de50122ed783ee9dee251ae87173286a861f33d
|
[
"MIT"
] | 5
|
2019-07-26T05:39:34.000Z
|
2019-08-16T14:59:21.000Z
|
challenges/challenge1_test.py
|
jamiejamiebobamie/CS-2.2-Advanced-Recursion-and-Graphs
|
3de50122ed783ee9dee251ae87173286a861f33d
|
[
"MIT"
] | null | null | null |
import unittest
from challenge1 import *
"""class Test_Read_Graph_Method(unittest.TestCase):
def __init__(self):
super(Test_Read_Graph_Method, self).__init__()
self.filepath = "graph_data.txt"
self.vertices, self.edges = read_graph(self.filepath)
def test_read(self):
self.assertTrue(self.vertices,['1', '2', '3', '4'])
self.assertTrue(self.edges,[(1,2), (1,4), (2,3), (2,4)])
class Test_LLGraph_Methods(unittest.TestCase):
def __init__(self):
super(Test_LLGraph_Methods, self).__init__()
self.filepath = "graph_data.txt"
self.vertices, self.edges = read_graph(self.filepath)
self.LLGraph = LLGraph(self.vertices)
def test_init_(self):
self.assertTrue(self.LLGraph.numberOfVertices, 4)
self.assertTrue(self.LLGraph.get_vertices(), ['1', '2', '3', '4'])
def test_add_edges(self):
self.LLGraph.add_edges(self.edges)
self.assertTrue(self.LLGraph.get_edges(1), [(1, 2, 1), (1, 4, 1)])
self.assertTrue(self.LLGraph.get_edges(2), [(2, 3, 1), (2, 4, 1)])
self.assertTrue(self.LLGraph.get_edges(3), "No out-going edges.")
self.assertTrue(self.LLGraph.get_edges(4), "No out-going edges.")
self.LLGraph.add_edge(1, 3, 5)
self.assertTrue(self.LLGraph.get_edges(1), [(1, 2, 1), (1, 4, 1), (1, 3, 5)])
self.LLGraph.add_edge(4, 3, 2)
self.assertTrue(self.LLGraph.get_edges(4), (4, 3, 2))
self.LLGraph.add_edge(3, 4) # testing default weight of one if weight is not entered
self.assertTrue(self.LLGraph.get_edges(3), (3, 4, 1))
def test_add_vertex(self):
self.LLGraph.add_vertex()
self.assertTrue(self.LLGraph.get_vertices(), ['1', '2', '3', '4', '5'])
self.assertTrue(self.LLGraph.numberOfVertices, 5)
# def test_iter_(self):
# self.assertTrue(self.LLGraph.__iter__(), ['1', [(1, 2, 1), (1, 4, 1), (1, 3, 5)]], ['2', [(2, 3, 1), (2, 4, 1)]], ['3', (3, 4, 1)], ['4', (4, 3, 2)], ['5', 'No out-going edges.']])
def test_get_neighbors_of_a_vertex(self):
self.assertTrue(self.LLGraph.get_neighbors_of_a_vertex(1), [2, 4, 3])
class Test_AM_Graph_Methods(unittest.TestCase):
def __init__(self):
super(Test_AM_Graph_Methods, self).__init__()
self.filepath = "graph_data.txt"
self.vertices, self.edges = read_graph(self.filepath)
self.AMGraph = AMGraph(len(self.vertices))
def test_init_(self):
self.assertTrue(self.AMGraph.numberOfVertices, 4)
self.assertTrue(self.AMGraph.get_vertices(), [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
def test_add_edges(self):
self.AMGraph.add_edges(self.edges)
self.assertTrue(self.AMGraph.get_edges(1), [0, 1, 0, 1])
self.assertTrue(self.AMGraph.get_edges(2), [0, 0, 1, 1])
self.assertTrue(self.AMGraph.get_edges(3), [0, 0, 0, 0])
self.assertTrue(self.AMGraph.get_edges(4), [0, 0, 0, 0])
self.AMGraph.add_edge(1, 3, 5)
self.assertTrue(self.AMGraph.get_edges(1), [0, 1, 5, 1])
self.AMGraph.add_edge(4, 3, 2)
self.assertTrue(self.AMGraph.get_edges(4), [0, 0, 2, 0])
self.AMGraph.add_edge(3, 4) # testing default weight of one if weight is not entered
self.assertTrue(self.AMGraph.get_edges(3), [0, 0, 0, 1])
def test_add_vertex(self):
self.AMGraph.add_vertex()
self.assertTrue(self.AMGraph.get_vertices(), [[0, 1, 5, 1, 0], [0, 0, 1, 1, 0], [0, 0, 0, 1, 0], [0, 0, 2, 0, 0], [0, 0, 0, 0, 0]])
self.assertTrue(self.AMGraph.numberOfVertices, 5)
class Test_Dict_Graph_Methods(unittest.TestCase):
def __init__(self):
super(Test_Dict_Graph_Methods, self).__init__()
self.filepath = "graph_data.txt"
self.vertices, self.edges = read_graph(self.filepath)
self.Graph = Graph(len(self.vertices))
def test_init_(self):
self.assertTrue(self.Graph.numberOfVertices, 4)
self.assertTrue(self.Graph.get_vertices(), ['1', '2', '3', '4'])
def test_add_edges(self):
self.Graph.add_edges(self.edges)
self.assertTrue(self.Graph.get_edges(1), [(1, 2, 1), (1, 4, 1)])
self.assertTrue(self.Graph.get_edges(2), [(2, 3, 1), (2, 4, 1)])
self.assertTrue(self.Graph.get_edges(3), "No out-going edges.")
self.assertTrue(self.Graph.get_edges(4), "No out-going edges.")
self.Graph.add_edge(1, 3, 5)
self.assertTrue(self.Graph.get_edges(1), [(1, 2, 1), (1, 4, 1), (1, 3, 5)])
self.Graph.add_edge(4, 3, 2)
self.assertTrue(self.Graph.get_edges(4), (4, 3, 2))
self.Graph.add_edge(3, 4) # testing default weight of one if weight is not entered
self.assertTrue(self.Graph.get_edges(3), (3, 4, 1))
def test_add_vertex(self):
self.Graph.add_vertex()
self.assertTrue(self.Graph.get_vertices(), ['1', '2', '3', '4', '5'])
self.assertTrue(self.Graph.numberOfVertices, 5)
# def test_iter_(self):
# self.assertTrue(self.LLGraph.__iter__(), ['1', [(1, 2, 1), (1, 4, 1), (1, 3, 5)]], ['2', [(2, 3, 1), (2, 4, 1)]], ['3', (3, 4, 1)], ['4', (4, 3, 2)], ['5', 'No out-going edges.']])
def test_get_neighbors_of_a_vertex(self):
self.assertTrue(self.Graph.get_neighbors_of_a_vertex(1), [2, 4, 3])
if __name__ == '__main__':
unittest.main()"""
from challenge1 import *
filepath = "graph_data.txt"
data = read_graph(filepath)
assert data[0] == ['1', '2', '3', '4']
assert data[1] == [(1,2), (1,4), (2,3), (2,4)]
# linked list implementation
print("\ntesting linked list implementation...")
newGraph = LLGraph(data[0]) # adding the vertices
assert newGraph.numberOfVertices == 4
assert newGraph.get_vertices() == ['1', '2', '3', '4']
newGraph.add_edges(data[1]) # adding edges
assert newGraph.get_edges(1) == [(1, 2, 1), (1, 4, 1)]
assert newGraph.get_edges(2) == [(2, 3, 1), (2, 4, 1)]
assert newGraph.get_edges(3) == newGraph.get_edges(4) == "No out-going edges."
newGraph.add_edge(1, 3, 5)
assert newGraph.get_edges(1) == [(1, 2, 1), (1, 4, 1), (1, 3, 5)]
newGraph.add_edge(4, 3, 2)
assert newGraph.get_edges(4) == (4, 3, 2)
newGraph.add_edge(3, 4)
assert newGraph.get_edges(3) == (3, 4, 1)
assert newGraph.get_vertices() == ['1','2','3','4']
assert newGraph.numberOfVertices == 4
newGraph.add_vertex()
assert newGraph.get_vertices() == ['1','2','3','4','5']
assert newGraph.numberOfVertices == 5
assert newGraph.__iter__() == [['1', [(1, 2, 1), (1, 4, 1), (1, 3, 5)]], ['2', [(2, 3, 1), (2, 4, 1)]], ['3', (3, 4, 1)], ['4', (4, 3, 2)], ['5', 'No out-going edges.']]
assert newGraph.get_neighbors_of_a_vertex(1) == [2, 4, 3]
linkedL = LinkedList()
newGraph.vertices.append(linkedL)
newGraph.numberOfVertices += 1 # hacking my graph to test getVertex method
assert newGraph.get_vertex(newGraph.numberOfVertices) == linkedL
print("all linked-list-graph tests pass")
# adjacency matrix implementation
print("\ntesting adjacenecy matrix implementation...")
newGraph = AMGraph(len(data[0])) # adding the vertices
assert newGraph.numberOfVertices == 4
assert newGraph.vertices == [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
assert newGraph.get_vertices() == [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
newGraph.add_edges(data[1]) # adding edges
assert newGraph.get_edges(1) == [0, 1, 0, 1]
assert newGraph.get_edges(2) == [0, 0, 1, 1]
assert newGraph.get_edges(3) == newGraph.get_edges(4) == [0, 0, 0, 0]
newGraph.add_edge(1, 3, 5)
assert newGraph.get_edges(1) == [0, 1, 5, 1]
newGraph.add_edge(4, 3, 2)
assert newGraph.get_edges(4) == [0, 0, 2, 0]
newGraph.add_edge(3, 4)
assert newGraph.get_edges(3) == [0, 0, 0, 1]
newGraph.add_vertex()
assert newGraph.numberOfVertices == 5
assert newGraph.vertices == [[0, 1, 5, 1, 0], [0, 0, 1, 1, 0], [0, 0, 0, 1, 0], [0, 0, 2, 0, 0], [0, 0, 0, 0, 0]]
assert newGraph.get_vertices() == [[0, 1, 5, 1, 0], [0, 0, 1, 1, 0], [0, 0, 0, 1, 0], [0, 0, 2, 0, 0], [0, 0, 0, 0, 0]]
print("all adjacenecy matrix graph tests pass")
| 41.717277
| 190
| 0.616717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,728
| 0.718876
|
ac53ceb43286b504010da1532640f80e8e04aec8
| 870
|
py
|
Python
|
utils.py
|
twerkmeister/tacotron2
|
404c0758591dab8b72933f010f51c5c2b7490827
|
[
"BSD-3-Clause"
] | null | null | null |
utils.py
|
twerkmeister/tacotron2
|
404c0758591dab8b72933f010f51c5c2b7490827
|
[
"BSD-3-Clause"
] | null | null | null |
utils.py
|
twerkmeister/tacotron2
|
404c0758591dab8b72933f010f51c5c2b7490827
|
[
"BSD-3-Clause"
] | 1
|
2020-04-30T11:21:15.000Z
|
2020-04-30T11:21:15.000Z
|
import numpy as np
from scipy.io.wavfile import read
import torch
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
if torch.cuda.is_available():
ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
else:
ids = torch.arange(0, max_len, out=torch.LongTensor(max_len))
mask = (ids < lengths.unsqueeze(1)).byte()
return mask
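# Illustrative note (added; the example values are an assumption, not from
# the repo): for lengths = torch.LongTensor([2, 3]), ids becomes [0, 1, 2]
# and the returned mask is the (2, 3) byte tensor
#   [[1, 1, 0],
#    [1, 1, 1]]
# i.e. position j of row i is 1 exactly when j < lengths[i].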
def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def load_dataset(filename, separator="|"):
with open(filename, encoding='utf-8') as f:
dataset = [line.strip().split(separator) for line in f]
return dataset
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
| 26.363636
| 74
| 0.687356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.011494
|
ac54c2f02d6419e0bb0da94bc53fab30b66b86a4
| 8,526
|
py
|
Python
|
plugins/_Post_Process/_XAI/lime_tabular_batch.py
|
isabella232/nnc-plugin
|
3bc71266696d0341e5e9a2ff2020980700f28719
|
[
"Apache-2.0"
] | null | null | null |
plugins/_Post_Process/_XAI/lime_tabular_batch.py
|
isabella232/nnc-plugin
|
3bc71266696d0341e5e9a2ff2020980700f28719
|
[
"Apache-2.0"
] | null | null | null |
plugins/_Post_Process/_XAI/lime_tabular_batch.py
|
isabella232/nnc-plugin
|
3bc71266696d0341e5e9a2ff2020980700f28719
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021,2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from scipy import stats
from sklearn.linear_model import Ridge
import csv
import collections
from nnabla import logger
import nnabla.utils.load as load
from nnabla.utils.cli.utility import let_data_to_variable
def func(args):
class ForwardConfig:
pass
# Load model
info = load.load([args.model], prepare_data_iterator=False,
batch_size=args.num_samples)
config = ForwardConfig
config.global_config = info.global_config
config.executors = info.executors.values()
config.networks = []
if len(config.executors) < 1:
logger.critical('Executor is not found in {}.'.format(args.model))
return
executor = list(config.executors)[0]
if len(config.executors) > 1:
logger.log(99, 'Only the first executor {} is used in the LIMETABULAR calculation.'.format(
executor.name))
if executor.network.name in info.networks.keys():
config.networks.append(info.networks[executor.network.name])
else:
logger.critical('Network {} is not found in {}.'.format(
executor.network.name, args.model))
return
# Prepare variable
input_variable, data_name = list(executor.dataset_assign.items())[0]
output_variable = list(executor.output_assign.keys())[0]
# Load csv
with open(args.input, 'r') as f:
reader = csv.reader(f)
_ = next(reader)
samples = np.array([[float(r) for r in row] for row in reader])[:, :-1]
with open(args.train, 'r') as f:
reader = csv.reader(f)
feature_names = next(reader)[:-1]
train = np.array([[float(r) for r in row] for row in reader])[:, :-1]
categorical_features = ''.join(args.categorical.split())
categorical_features = [
int(x) for x in categorical_features.split(',') if x != '']
results = []
for sample in samples:
# discretization
to_discretize = list(
set(range(train.shape[1])) - set(categorical_features))
discrete_train = train.copy()
discrete_sample = sample.copy()
freq = {}
val = {}
quartiles = {}
quartile_boundary = {}
quartile_mean = {}
quartile_stds = {}
for i in range(train.shape[1]):
if i in to_discretize:
column = train[:, i]
quartile = np.unique(np.percentile(column, [25, 50, 75]))
quartiles[i] = quartile
discrete_train[:, i] = np.searchsorted(
quartile, column).astype(int)
discrete_sample[i] = np.searchsorted(
quartile, discrete_sample[i]).astype(int)
count = collections.Counter(discrete_train[:, i])
val[i], f = map(list, zip(*(sorted(count.items()))))
freq[i] = np.array(f) / np.sum(np.array(f))
means = np.zeros(len(quartile) + 1)
stds = np.zeros(len(quartile) + 1)
for key in range(len(quartile) + 1):
tmp = column[discrete_train[:, i] == key]
means[key] = 0 if len(tmp) == 0 else np.mean(tmp)
stds[key] = 1.0e-11 if len(
tmp) == 0 else np.std(tmp) + 1.0e-11
quartile_mean[i] = means
quartile_stds[i] = stds
quartile_boundary[i] = [
np.min(column)] + quartile.tolist() + [np.max(column)]
else:
count = collections.Counter(discrete_train[:, i])
val[i], f = map(list, zip(*(sorted(count.items()))))
freq[i] = np.array(f) / np.sum(np.array(f))
discrete_data = np.zeros((args.num_samples, train.shape[1]))
binary_data = np.zeros((args.num_samples, train.shape[1]))
np.random.seed(0)
for i in range(train.shape[1]):
discrete_data[:, i] = np.random.choice(
val[i], size=args.num_samples, replace=True, p=freq[i]).astype(int)
binary_data[:, i] = (discrete_data[:, i] ==
discrete_sample[i]).astype(int)
discrete_data[0] = discrete_sample
binary_data[0] = np.ones_like(discrete_sample)
continuous_data = discrete_data.copy()
discrete_data = discrete_data.astype(int)
# undiscretization
for i in to_discretize:
mins = np.array(quartile_boundary[i])[discrete_data[1:, i]]
maxs = np.array(quartile_boundary[i])[discrete_data[1:, i] + 1]
means = np.array(quartile_mean[i])[discrete_data[1:, i]]
stds = np.array(quartile_stds[i])[discrete_data[1:, i]]
std_min = (mins - means) / stds
std_max = (maxs - means) / stds
unequal = (mins != maxs)
ret = std_min
ret[np.where(unequal)] = stats.truncnorm.rvs(
std_min[unequal],
std_max[unequal],
loc=means[unequal],
scale=stds[unequal]
)
continuous_data[1:, i] = ret
continuous_data[0] = sample
let_data_to_variable(input_variable.variable_instance, continuous_data,
data_name=data_name, variable_name=input_variable.name)
# Forward
executor.forward_target.forward(clear_buffer=True)
pseudo_label = output_variable.variable_instance.d[:, args.class_index]
# regression
def kernel(x, y):
sigma = np.sqrt(train.shape[1]) * 0.75
d = np.linalg.norm(y - x, axis=1)
return np.sqrt(np.exp(-d * d / sigma**2))
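# Added note on the weighting scheme above: for a perturbed binary vector x
# and the reference y = binary_data[0], d = ||y - x||_2, so d^2 equals the
# number of features that were flipped. The weight sqrt(exp(-d^2 / sigma^2))
# with sigma = 0.75 * sqrt(n_features) therefore decays smoothly with that
# count, letting samples closest to the original instance dominate the
# locally weighted ridge regression below.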
np.random.seed(0)
weights = kernel(binary_data, binary_data[0])
model = Ridge(alpha=1, fit_intercept=True)
model.fit(binary_data, pseudo_label, sample_weight=weights)
results.append(model.coef_)
# Generate output csv
with open(args.output, 'w', newline="\n") as f:
writer = csv.writer(f)
writer.writerow(
['Index'] + ['Importance of ' + x for x in feature_names])
for i, result in enumerate(results):
writer.writerow(
[str(i + 1)] + ['{:.5f}'.format(value) for value in result])
logger.log(99, 'LIME(tabular batch) completed successfully.')
def main():
parser = argparse.ArgumentParser(
description='LIME (tabular batch)\n'
'\n'
'"Why Should I Trust You?": Explaining the Predictions of Any Classifier\n' +
'Marco Tulio Ribeiro, Sameer Singh, Carlos Guestrin\n' +
'Knowledge Discovery and Data Mining, 2016.\n' +
'https://dl.acm.org/doi/abs/10.1145/2939672.2939778\n' +
'', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-m', '--model', help='path to model nnp file (model), default=results.nnp', required=True, default='results.nnp')
parser.add_argument(
'-i', '--input', help='path to input csv file (csv)', required=True)
parser.add_argument(
'-c', '--categorical', help='indexes of categorical features in input csv (comma separated int)', required=False, default='')
parser.add_argument(
'-c2', '--class_index', help='class index (int), default=0', required=True, default=0, type=int)
parser.add_argument(
'-n', '--num_samples', help='number of samples (int), default=1000', required=True, default=1000, type=int)
parser.add_argument(
'-t', '--train', help='path to training dataset csv file (csv)', required=True)
parser.add_argument(
'-o', '--output', help='path to output csv file (csv), default=lime_tabular_batch.csv', required=True, default='lime_tabular_batch.csv')
parser.set_defaults(func=func)
args = parser.parse_args()
args.func(args)
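# Hedged CLI sketch (added; flags follow the argparse definitions above,
# file names are placeholders):
#   python lime_tabular_batch.py -m results.nnp -i input.csv -t train.csv \
#       -c2 0 -n 1000 -o lime_tabular_batch.csv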
if __name__ == '__main__':
main()
| 40.407583
| 144
| 0.59301
| 33
| 0.003871
| 0
| 0
| 0
| 0
| 0
| 0
| 1,664
| 0.195168
|
ac54d54663f6738aa459a24b061f95f72da27abe
| 3,454
|
py
|
Python
|
examples/applications/plot_circuits.py
|
ImportanceOfBeingErnest/networkx
|
eb3a675c5b2b15e33b0a4a35bcee34d6b81ed94d
|
[
"BSD-3-Clause"
] | 1
|
2020-03-06T05:04:14.000Z
|
2020-03-06T05:04:14.000Z
|
examples/applications/plot_circuits.py
|
ImportanceOfBeingErnest/networkx
|
eb3a675c5b2b15e33b0a4a35bcee34d6b81ed94d
|
[
"BSD-3-Clause"
] | 1
|
2019-11-28T21:08:50.000Z
|
2019-11-28T21:08:50.000Z
|
examples/applications/plot_circuits.py
|
ImportanceOfBeingErnest/networkx
|
eb3a675c5b2b15e33b0a4a35bcee34d6b81ed94d
|
[
"BSD-3-Clause"
] | 4
|
2019-07-19T15:06:37.000Z
|
2021-03-17T22:29:04.000Z
|
#!/usr/bin/env python
# circuits.py - convert a Boolean circuit to an equivalent Boolean formula
#
# Copyright 2016 Jeffrey Finkelstein <jeffrey.finkelstein@gmail.com>.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
"""
========
Circuits
========
Convert a Boolean circuit to an equivalent Boolean formula.
A Boolean circuit can be exponentially more expressive than an
equivalent formula in the worst case, since the circuit can reuse
subcircuits multiple times, whereas a formula cannot reuse subformulas
more than once. Thus creating a Boolean formula from a Boolean circuit
in this way may be infeasible if the circuit is large.
"""
from networkx import dag_to_branching
from networkx import DiGraph
from networkx.utils import arbitrary_element
def circuit_to_formula(circuit):
# Convert the circuit to an equivalent formula.
formula = dag_to_branching(circuit)
# Transfer the operator or variable labels for each node from the
# circuit to the formula.
for v in formula:
source = formula.node[v]['source']
formula.node[v]['label'] = circuit.node[source]['label']
return formula
def formula_to_string(formula):
def _to_string(formula, root):
# If there are no children, this is a variable node.
label = formula.node[root]['label']
if not formula[root]:
return label
# Otherwise, this is an operator.
children = formula[root]
# If one child, the label must be a NOT operator.
if len(children) == 1:
child = arbitrary_element(children)
return '{}({})'.format(label, _to_string(formula, child))
# NB "left" and "right" here are a little misleading: there is
# no order on the children of a node. That's okay because the
# Boolean AND and OR operators are symmetric. It just means that
# the order of the operands cannot be predicted and hence the
# function does not necessarily behave the same way on every
# invocation.
left, right = formula[root]
left_subformula = _to_string(formula, left)
right_subformula = _to_string(formula, right)
return '({} {} {})'.format(left_subformula, label, right_subformula)
root = next(v for v, d in formula.in_degree() if d == 0)
return _to_string(formula, root)
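# Added illustration: for the circuit constructed in main() below, one
# possible printed formula is
#   ((x ∨ y) ∧ (y ∨ ¬(z)))
# The variable y appears twice because a formula cannot share a subcircuit
# the way the circuit does; operand order may vary between runs, as the
# comment inside _to_string explains.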
def main():
# Create an example Boolean circuit.
#
# This circuit has a ∧ at the output and two ∨s at the next layer.
# The third layer has a variable x that appears in the left ∨, a
# variable y that appears in both the left and right ∨s, and a
# negation for the variable z that appears as the sole node in the
# fourth layer.
circuit = DiGraph()
# Layer 0
circuit.add_node(0, label='∧')
# Layer 1
circuit.add_node(1, label='∨')
circuit.add_node(2, label='∨')
circuit.add_edge(0, 1)
circuit.add_edge(0, 2)
# Layer 2
circuit.add_node(3, label='x')
circuit.add_node(4, label='y')
circuit.add_node(5, label='¬')
circuit.add_edge(1, 3)
circuit.add_edge(1, 4)
circuit.add_edge(2, 4)
circuit.add_edge(2, 5)
# Layer 3
circuit.add_node(6, label='z')
circuit.add_edge(5, 6)
# Convert the circuit to an equivalent formula.
formula = circuit_to_formula(circuit)
print(formula_to_string(formula))
if __name__ == '__main__':
main()
| 34.19802
| 76
| 0.675738
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,787
| 0.515134
|
ac576d0fc1700d09b6ee7d12ba1be4753eca3284
| 4,669
|
py
|
Python
|
torchtools/tensors/tensor_group.py
|
cjwcommuny/torch-tools
|
a64c0bdd87df065744fb49644f767165d3516b27
|
[
"MIT"
] | null | null | null |
torchtools/tensors/tensor_group.py
|
cjwcommuny/torch-tools
|
a64c0bdd87df065744fb49644f767165d3516b27
|
[
"MIT"
] | null | null | null |
torchtools/tensors/tensor_group.py
|
cjwcommuny/torch-tools
|
a64c0bdd87df065744fb49644f767165d3516b27
|
[
"MIT"
] | null | null | null |
from typing import Dict, Tuple
import torch
from torch import Tensor
from torchtools.tensors.function import unsqueeze
class TensorGroup:
def __init__(self, tensors: Dict[str, Tensor], check_validity: bool=True):
self.tensors = tensors
if check_validity:
assert self.check_tensors_len(), "tensors don't has same length"
def check_tensors_len(self) -> bool:
return len(set(map(lambda x: len(x), self.tensors.values()))) == 1
@property
def columns(self):
return self.tensors.keys()
def __len__(self):
return len(next(iter(self.tensors.values())))
def __getitem__(self, item):
if isinstance(item, str):
return self.tensors[item]
else:
return TensorGroup(
{key: tensor[item] for key, tensor in self.tensors.items()},
check_validity=False
)
def __setitem__(self, key, value):
if isinstance(key, str):
self.tensors[key] = value
else:
for k in self.tensors.keys():
self.tensors[k][key] = value
def min(self, input: str) -> Tuple['TensorGroup', Tensor]:
indices = self.tensors[input].argmin(dim=0, keepdim=True)
return (
TensorGroup(
{key: tensor[indices] for key, tensor in self.tensors.items()},
check_validity=False
),
indices
)
def max(self, input: str) -> Tuple['TensorGroup', Tensor]:
indices = self.tensors[input].argmax(dim=0, keepdim=True)
return (
TensorGroup(
{key: tensor[indices] for key, tensor in self.tensors.items()},
check_validity=False
),
indices
)
def sort(self, input: str, descending: bool=False) -> Tuple['TensorGroup', Tensor]:
indices = self.tensors[input].argsort(dim=0, descending=descending)
return (
TensorGroup(
{key: tensor[indices] for key, tensor in self.tensors.items()},
check_validity=False
),
indices
)
def topk(self, input: str, k: int, largest: bool=True, sorted: bool=True) -> Tuple['TensorGroup', Tensor]:
_, indices = self.tensors[input].topk(k, largest=largest, sorted=sorted)
return (
TensorGroup(
{key: tensor[indices] for key, tensor in self.tensors.items()},
check_validity=False
),
indices
)
@staticmethod
def __mask_reshape_to_broadcast(tensor: Tensor, mask: Tensor) -> Tensor:
result = unsqueeze(mask, dim=1, num=tensor.dim() - 1)
print(f"mask.shape: {result.shape}")
return result
def masked_select(self, mask: Tensor) -> 'TensorGroup':
assert mask.dim() == 1
print(f"mask: {mask.shape}")
print(f"features: {self.tensors['features'].shape}")
print(f"frame_id: {self.tensors['frame_id'].shape}")
return TensorGroup(
{key: tensor.masked_select(self.__mask_reshape_to_broadcast(tensor, mask)) for key, tensor in self.tensors.items()},
check_validity=False
)
def index_select(self, indices: Tensor) -> 'TensorGroup':
assert indices.dim() == 1
return self[indices]
def lt_select(self, input: str, other) -> 'TensorGroup':
"""
NOTE: every tensor must have dim() == 1
"""
mask = torch.lt(self.tensors[input], other)
return self.masked_select(mask)
def le_select(self, input: str, other) -> 'TensorGroup':
"""
NOTE: every tensor must have dim() == 1
"""
mask = torch.le(self.tensors[input], other)
return self.masked_select(mask)
def gt_select(self, input: str, other) -> 'TensorGroup':
"""
NOTE: every tensor must have dim() == 1
"""
mask = torch.gt(self.tensors[input], other)
return self.masked_select(mask)
def ge_select(self, input: str, other) -> 'TensorGroup':
"""
NOTE: every tensor must have dim() == 1
"""
mask = torch.ge(self.tensors[input], other)
return self.masked_select(mask)
def ne_select(self, input: str, other) -> 'TensorGroup':
"""
NOTE: every tensor must have dim() == 1
"""
mask = torch.ne(self.tensors[input], other)
return self.masked_select(mask)
def eq_select(self, input: str, other) -> 'TensorGroup':
"""
NOTE: every tensor must have dim() == 1
"""
mask = torch.eq(self.tensors[input], other)
return self.masked_select(mask)
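# Minimal usage sketch (added; tensor values are illustrative only):
#   group = TensorGroup({'score': torch.tensor([0.1, 0.9, 0.4]),
#                        'idx': torch.tensor([7, 8, 9])})
#   best, i = group.max('score')          # row with the largest score
#   top2, _ = group.topk('score', k=2)    # rows with the 2 largest scores
#   hits = group.gt_select('score', 0.3)  # rows where score > 0.3
# Every tensor in the dict is indexed together, so the columns stay aligned.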
| 32.2
| 128
| 0.572714
| 4,546
| 0.973656
| 0
| 0
| 286
| 0.061255
| 0
| 0
| 663
| 0.142
|
ac57b9751b82e69ea88e5b4020e3f0156b95a4e8
| 5,029
|
py
|
Python
|
merlinservices/urls.py
|
USGS-WiM/merlin_django
|
880a5634736de36fbb48cbfe7f60305a83975dcf
|
[
"CC0-1.0"
] | null | null | null |
merlinservices/urls.py
|
USGS-WiM/merlin_django
|
880a5634736de36fbb48cbfe7f60305a83975dcf
|
[
"CC0-1.0"
] | 28
|
2019-08-20T20:06:32.000Z
|
2021-12-17T23:08:05.000Z
|
merlinservices/urls.py
|
USGS-WiM/merlin_django
|
880a5634736de36fbb48cbfe7f60305a83975dcf
|
[
"CC0-1.0"
] | 2
|
2020-02-21T17:52:18.000Z
|
2020-05-08T09:05:55.000Z
|
from django.urls import path
from django.conf.urls import url, include
from django.views.generic.base import TemplateView
from merlinservices import views
from rest_framework.routers import DefaultRouter
from rest_framework.schemas import get_schema_view
from rest_framework_bulk.routes import BulkRouter
#router = DefaultRouter()
router = BulkRouter()
router.register(r'acids', views.AcidViewSet, 'acids')
router.register(r'analyses', views.AnalysisTypeViewSet, 'analyses')
router.register(r'blankwaters', views.BlankWaterViewSet, 'blankwaters')
router.register(r'bottles', views.BottleViewSet, 'bottles')
router.register(r'bottleprefixes', views.BottlePrefixViewSet, 'bottleprefixes')
router.register(r'bottletypes', views.BottleTypeViewSet, 'bottletypes')
router.register(r'brominations', views.BrominationViewSet, 'brominations')
router.register(r'constituents', views.ConstituentTypeViewSet, 'constituents')
router.register(r'cooperators', views.CooperatorViewSet, 'cooperators')
router.register(r'detectionflags', views.DetectionFlagViewSet, 'detectionflags')
router.register(r'filters', views.FilterTypeViewSet, 'filters')
router.register(r'isotopeflags', views.IsotopeFlagViewSet, 'isotopeflags')
router.register(r'mediums', views.MediumTypeViewSet, 'mediums')
router.register(r'methods', views.MethodTypeViewSet, 'methods')
router.register(r'preservations', views.PreservationTypeViewSet, 'preservations')
router.register(r'projects', views.ProjectViewSet, 'projects')
router.register(r'projectssites', views.ProjectSiteViewSet, 'projectssites')
router.register(r'processings', views.ProcessingTypeViewSet, 'processings')
router.register(r'qualityassurances', views.QualityAssuranceViewSet, 'qualityassurances')
router.register(r'qualityassurancetypes', views.QualityAssuranceTypeViewSet, 'qualityassurancetypes')
router.register(r'results', views.ResultViewSet, 'results')
router.register(r'resultdatafiles', views.ResultDataFileViewSet, 'resultdatafiles')
router.register(r'samples', views.SampleViewSet, 'samples')
router.register(r'samplebottles', views.SampleBottleViewSet, 'samplebottles')
router.register(r'samplebottlebrominations', views.SampleBottleBrominationViewSet, 'samplebottlebrominations')
router.register(r'sites', views.SiteViewSet, 'sites')
router.register(r'units', views.UnitTypeViewSet, 'units')
router.register(r'users', views.UserViewSet, 'users')
router.register(r'fullresults', views.FullResultViewSet, 'fullresults')
router.register(r'fullsamplebottles', views.FullSampleBottleViewSet, 'fullsamplebottles')
router.register(r'bulkacids', views.AcidBulkUpdateViewSet, 'bulkacids')
router.register(r'bulkblankwaters', views.BlankWaterBulkUpdateViewSet, 'bulkblankwaters')
router.register(r'bulkbottles', views.BottleBulkCreateUpdateViewSet, 'bulkbottles')
router.register(r'bulkbottleprefixes', views.BottlePrefixBulkCreateUpdateViewSet, 'bulkbottleprefixes')
router.register(r'bulkbrominations', views.BrominationBulkUpdateViewSet, 'bulkbrominations')
router.register(r'bulkcooperators', views.CooperatorBulkUpdateViewSet, 'bulkcooperators')
router.register(r'bulkprojects', views.ProjectBulkUpdateViewSet, 'bulkprojects')
router.register(r'bulkprojectssites', views.ProjectBulkUpdateViewSet, 'bulkprojectssites')
router.register(r'bulkresults', views.ResultBulkCreateUpdateViewSet, 'bulkresults')
router.register(r'bulksamples', views.SampleBulkCreateUpdateViewSet, 'bulksamples')
router.register(r'bulksamplebottles', views.SampleBottleBulkCreateUpdateViewSet, 'bulksamplebottles')
router.register(r'bulksamplebottlebrominations',
views.SampleBottleBrominationBulkCreateUpdateViewSet, 'bulksamplebottlebrominations')
router.register(r'bulksites', views.SiteBulkUpdateViewSet, 'bulksites')
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('openapi', get_schema_view(title="CBRSServices", description="API for CBRS DMS"), name='openapi-schema'),
path('swagger-ui/', TemplateView.as_view(template_name='swagger-ui.html',
extra_context={'schema_url': 'openapi-schema'}), name='swagger-ui'),
path('redoc/', TemplateView.as_view(template_name='redoc.html',
extra_context={'schema_url': 'openapi-schema'}), name='redoc'),
url(r'^auth/$', views.AuthView.as_view(), name='authenticate'),
url(r'^batchupload', views.BatchUpload.as_view(), name='batchupload'),
url(r'^reportresultscooperator/',
views.ReportResultsCooperator.as_view(), name='reportresultscooperator'),
url(r'^reportresultsnwis/', views.ReportResultsNwis.as_view(), name='reportresultsnwis'),
url(r'^reportsamplesnwis/', views.ReportSamplesNwis.as_view(), name='reportsamplesnwis'),
url(r'^resultcountprojects/',
views.ReportResultsCountProjects.as_view(), name='resultcountprojects'),
url(r'^resultcountnawqa/', views.ReportResultsCountNawqa.as_view(), name='resultcountnawqa'),
]
| 65.311688
| 114
| 0.791211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,785
| 0.354941
|
ac5a48ef46f5c110ca09df0b1e0e5cb2859ebb3d
| 1,134
|
py
|
Python
|
model.py
|
bruchano/StockPricePrediction
|
6fa3a643e9959fbf26ffd95af54981b077ddd33f
|
[
"MIT"
] | null | null | null |
model.py
|
bruchano/StockPricePrediction
|
6fa3a643e9959fbf26ffd95af54981b077ddd33f
|
[
"MIT"
] | null | null | null |
model.py
|
bruchano/StockPricePrediction
|
6fa3a643e9959fbf26ffd95af54981b077ddd33f
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
import datetime as dt
import pandas_datareader
class FullyConnected(torch.nn.Module):
def __init__(self, in_features, out_features, dropout=0.):
super().__init__()
self.fc = torch.nn.Linear(in_features, out_features)
self.norm = torch.nn.LayerNorm(in_features)
self.leakyrelu = torch.nn.LeakyReLU()
self.dropout = torch.nn.Dropout(dropout)
def forward(self, x):
x = self.dropout(x)
x = self.fc(x)
x = self.leakyrelu(x)
return x
class StockPricePredictor(torch.nn.Module):
def __init__(self, n_fc, input_size=60, dropout=0.05):
super().__init__()
self.fc = torch.nn.Sequential()
self.fc.add_module("0", FullyConnected(input_size, n_fc[0], dropout))
for i in range(len(n_fc) - 1):
self.fc.add_module(str(i + 1), FullyConnected(n_fc[i], n_fc[i + 1], dropout))
self.fc.add_module(str(len(n_fc)), FullyConnected(n_fc[-1], 1, dropout))
def forward(self, x):
x = x.reshape(-1, 60)
return self.fc(x)
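# Usage sketch (added; batch size and layer widths are assumptions):
#   model = StockPricePredictor(n_fc=[128, 64])  # 60 -> 128 -> 64 -> 1
#   x = torch.randn(32, 60)   # batch of 32 windows of 60 time steps
#   y = model(x)              # -> shape (32, 1)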
| 32.4
| 90
| 0.614638
| 1,024
| 0.902998
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0.002646
|
ac5ca3d9a1b5567a5c378d005b2800a24b5822f4
| 1,148
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-12-09T08:59:17.000Z
|
2021-12-09T08:59:17.000Z
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle_bfloat import bfloat16
import unittest
class TestBF16DataType(unittest.TestCase):
def test_matmul(self):
a_bf16 = np.random.random((6, 7)).astype(bfloat16)
b_bf16 = np.random.random((7, 8)).astype(bfloat16)
c_bf16 = np.matmul(a_bf16, b_bf16)
a_fp32 = a_bf16.astype(np.float32)
b_fp32 = b_bf16.astype(np.float32)
c_fp32 = np.matmul(a_fp32, b_fp32)
self.assertTrue(np.allclose(c_bf16, c_fp32))
if __name__ == "__main__":
unittest.main()
| 31.888889
| 74
| 0.718641
| 415
| 0.361498
| 0
| 0
| 0
| 0
| 0
| 0
| 607
| 0.528746
|
ac5d5c3626cc5c773bf91a5f517bfdbe0b549607
| 687
|
py
|
Python
|
tests/api/v2/test_datasources.py
|
droessmj/python-sdk
|
42ea2366d08ef5e4d1fa45029480b800352ab765
|
[
"MIT"
] | 2
|
2020-09-08T20:42:05.000Z
|
2020-09-09T14:27:55.000Z
|
tests/api/v2/test_datasources.py
|
droessmj/python-sdk
|
42ea2366d08ef5e4d1fa45029480b800352ab765
|
[
"MIT"
] | null | null | null |
tests/api/v2/test_datasources.py
|
droessmj/python-sdk
|
42ea2366d08ef5e4d1fa45029480b800352ab765
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Test suite for the community-developed Python SDK for interacting with Lacework APIs.
"""
import pytest
from laceworksdk.api.v2.datasources import DatasourcesAPI
from tests.api.test_base_endpoint import BaseEndpoint
# Tests
@pytest.fixture(scope="module")
def api_object(api):
return api.datasources
class TestDatasources(BaseEndpoint):
OBJECT_ID_NAME = "name"
OBJECT_TYPE = DatasourcesAPI
def test_api_get(self, api_object):
response = api_object.get()
assert "data" in response.keys()
def test_api_get_by_type(self, api_object):
self._get_object_classifier_test(api_object, "type", self.OBJECT_ID_NAME)
| 22.9
| 85
| 0.737991
| 347
| 0.505095
| 0
| 0
| 79
| 0.114993
| 0
| 0
| 149
| 0.216885
|
ac5ea208004616e2bfb96c0a007f009fdaeed064
| 2,793
|
py
|
Python
|
src/expand_mnist.py
|
whalsey/misc
|
8649cb070017a2a6c3c1cdd7fd1e37f45b251ef1
|
[
"Unlicense"
] | null | null | null |
src/expand_mnist.py
|
whalsey/misc
|
8649cb070017a2a6c3c1cdd7fd1e37f45b251ef1
|
[
"Unlicense"
] | null | null | null |
src/expand_mnist.py
|
whalsey/misc
|
8649cb070017a2a6c3c1cdd7fd1e37f45b251ef1
|
[
"Unlicense"
] | null | null | null |
"""expand_mnist.py
~~~~~~~~~~~~~~~~~~
Take the 50,000 MNIST training images, and create an expanded set of
250,000 images, by displacing each training image up, down, left and
right, by one pixel. Save the resulting file to
../data/mnist_expanded.pkl.gz.
Note that this program is memory intensive, and may not run on small
systems.
"""
from __future__ import print_function
#### Libraries
# Standard library
import cPickle
import gzip
import os.path
import random
# Third-party libraries
import numpy as np
import scipy.ndimage.interpolation
import matplotlib.pyplot as plt
def sign(a):
return -1 if a < 0 else 1
print("Expanding the MNIST training set")
if os.path.exists("../data/mnist_expanded.pkl.gz"):
print("The expanded training set already exists. Exiting.")
else:
f = gzip.open("../data/mnist.pkl.gz", 'rb')
training_data, validation_data, test_data = cPickle.load(f)
f.close()
expanded_training_pairs = []
j = 0 # counter
# for each image in the training data
for x, y in zip(training_data[0], training_data[1]):
expanded_training_pairs.append((x, y))
image = np.reshape(x, (-1, 28))
j += 1
if j % 1000 == 0: print("Expanding image number ", j)
# create four new images with shifts and rotations
for _ in range(4):
# calculate x shift
shift_x = random.randint(-3, 3)
# calculate y shift
shift_y = random.randint(-3, 3)
new_img = np.roll(image, shift_x, 0)
new_img = np.roll(new_img, shift_y, 1)
# pad the shifted area with 0's
# todo - will add this later *(though it does not seem necessary)
# if sign(shift_x) == 1:
# new_img[:shift_x][:] = np.zeros((shift_x, 28))
# else:
# new_img[28-shift_x:][:] = np.zeros((shift_x, 28))
#
# if sign(shift_y) == 1:
# new_img[:][:shift_y] = np.zeros((28, shift_y))
# else:
# new_img[:][28-shift_y:] = np.zeros((28, shift_y))
# calculate degree of rotation
degree = (random.random() - 0.5) * 90
new_img = scipy.ndimage.interpolation.rotate(new_img, degree, reshape=False)
# plt.imshow(new_img)
#
# plt.pause(0.01)
# plt.clf()
expanded_training_pairs.append((np.reshape(new_img, 784), y))
random.shuffle(expanded_training_pairs)
expanded_training_data = [list(d) for d in zip(*expanded_training_pairs)]
print("Saving expanded data. This may take a few minutes.")
f = gzip.open("../data/mnist_expanded.pkl.gz", "w")
cPickle.dump((expanded_training_data, validation_data, test_data), f)
f.close()
| 29.09375
| 88
| 0.6058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,237
| 0.442893
|
ac6056041eeb8497e63663fa127d721d28fac540
| 17,489
|
py
|
Python
|
randconv/coordinator_factory.py
|
jm-begon/randconv
|
cb7438f5876c18192e8caaf3cafd88e839c26048
|
[
"BSD-3-Clause"
] | 1
|
2016-08-01T08:09:28.000Z
|
2016-08-01T08:09:28.000Z
|
randconv/coordinator_factory.py
|
jm-begon/randconv
|
cb7438f5876c18192e8caaf3cafd88e839c26048
|
[
"BSD-3-Clause"
] | null | null | null |
randconv/coordinator_factory.py
|
jm-begon/randconv
|
cb7438f5876c18192e8caaf3cafd88e839c26048
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
A set of factory function to help create usual cases of coordinator
"""
__author__ = "Begon Jean-Michel <jm.begon@gmail.com>"
__copyright__ = "3-clause BSD License"
__date__ = "20 January 2015"
import math
from .image import *
from .util import (OddUniformGenerator, NumberGenerator,
CustomDiscreteNumberGenerator, GaussianNumberGenerator)
from .feature_extractor import ImageLinearizationExtractor, DepthCompressorILE
from .coordinator import (RandConvCoordinator, PyxitCoordinator)
class Const:
RND_RU = "RND_RU" # -1 (real uniform)
RND_SET = "RND_SET" # -2 (Discrete set with predifined probabilities)
RND_GAUSS = "RND_GAUSS" # (Gaussian distribution)
FGEN_ORDERED = "FGEN_ORDERED" # Ordered combination of others
FGEN_CUSTOM = "FGEN_CUSTOM" # Custom filters
FGEN_ZEROPERT = "FGEN_ZEROPERT" # Perturbation around origin
FGEN_IDPERT = "FGEN_IDPERT" # Perturbation around id filter
FGEN_IDDIST = "FGEN_IDDIST" # Maximum distance around id filter
FGEN_STRAT = "FGEN_STRAT" # Stratified scheme
POOLING_NONE = "POOLING_NONE" # 0
POOLING_AGGREG_MIN = "POOLING_AGGREG_MIN" # 1
POOLING_AGGREG_AVG = "POOLING_AGGREG_AVG" # 2
POOLING_AGGREG_MAX = "POOLING_AGGREG_MAX" # 3
POOLING_CONV_MIN = "POOLING_MW_MIN" # 4
POOLING_CONV_AVG = "POOLING_MW_AVG" # 5
POOLING_CONV_MAX = "POOLING_MW_MAX" # 6
POOLING_MORPH_OPENING = "POOLING_MORPH_OPENING" # 7
POOLING_MORPH_CLOSING = "POOLING_MORPH_CLOSING" # 8
FEATEXT_ALL = "FEATEXTRACT_ALL"
FEATEXT_SPASUB = "FEATEXTRACT_SPASUB"
def pyxit_factory(
nb_subwindows=10,
sw_min_size_ratio=0.5, sw_max_size_ratio=1.,
sw_target_width=16, sw_target_height=16,
fixed_size=False,
sw_interpolation=SubWindowExtractor.INTERPOLATION_BILINEAR,
n_jobs=-1, verbosity=10, temp_folder=None,
random=True):
"""
Factory method to create :class:`PyxitCoordinator`
Parameters
----------
nb_subwindows : int >= 0 (default : 10)
The number of subwindows to extract
sw_min_size_ratio : float > 0 (default : 0.5)
The minimum size of a subwindow expressed as the ratio of the size
of the original image
sw_max_size_ratio : float : sw_min_size_ratio
<= sw_max_size_ratio <= 1 (default : 1.)
The maximum size of a subwindow expressed as the ratio of the size
of the original image
sw_target_width : int > 0 (default : 16)
The width of the subwindows after reinterpolation
sw_target_height : int > 0 (default : 16)
The height of the subwindows after reinterpolation
fixed_size : boolean (default : False)
Whether to use fixed-size subwindows. If False, subwindows are drawn
randomly. If True, the target size is used as the subwindow size and
only the position is drawn randomly
sw_interpolation : int (default :
SubWindowExtractor.INTERPOLATION_BILINEAR)
The subwindow reinterpolation algorithm. For more information, see
:class:`SubWindowExtractor`
n_jobs : int >0 or -1 (default : -1)
The number of process to spawn for parallelizing the computation.
If -1, the maximum number is selected. See also :mod:`Joblib`.
verbosity : int >= 0 (default : 10)
The verbosity level
temp_folder : string (directory path) (default : None)
The temporary folder used for memmap. If none, some default folder
will be use (see the :class:`ParallelCoordinator`)
random : bool (default : True)
Whether to use randomness or use a predefined seed
Return
------
coordinator : :class:`Coordinator`
The PyxitCoordinator (possibly decorated) corresponding to the set
of parameters
Notes
-----
- Subwindow random generator
The subwindow random generator is a :class:`NumberGenerator` base
instance (generates real numbers uniformly).
- Feature extractor
Base instance of :class:`ImageLinearizationExtractor`
"""
swngSeed = 0
#Randomness
if random:
swngSeed = None
#SubWindowExtractor
swNumGenerator = NumberGenerator(seed=swngSeed)
if fixed_size:
sw_extractor = FixTargetSWExtractor(sw_target_width,
sw_target_height,
sw_interpolation,
swNumGenerator)
else:
sw_extractor = SubWindowExtractor(sw_min_size_ratio,
sw_max_size_ratio,
sw_target_width,
sw_target_height,
sw_interpolation,
swNumGenerator)
multi_sw_extractor = MultiSWExtractor(sw_extractor, nb_subwindows, True)
#FEATURE EXTRACTOR
feature_extractor = ImageLinearizationExtractor()
#LOGGER
autoFlush = verbosity >= 45
logger = ProgressLogger(StandardLogger(autoFlush=autoFlush,
verbosity=verbosity))
#COORDINATOR
coordinator = PyxitCoordinator(multi_sw_extractor, feature_extractor, logger,
verbosity)
if n_jobs != 1:
coordinator.parallelize(n_jobs, temp_folder)
return coordinator
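# Hedged usage sketch (added; the argument values are illustrative):
#   coordinator = pyxit_factory(nb_subwindows=20, sw_target_width=24,
#                               sw_target_height=24, n_jobs=2)
# builds a parallelized PyxitCoordinator that draws 20 random subwindows per
# image and bilinearly resizes each to 24x24 before linearization.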
def get_multi_poolers(poolings, finalHeight, finalWidth):
#Aggregator
poolers = []
for height, width, policy in poolings:
if policy is Const.POOLING_NONE:
poolers.append(IdentityPooler())
elif policy is Const.POOLING_AGGREG_AVG:
poolers.append(AverageAggregator(width, height,
finalWidth,
finalHeight))
elif policy is Const.POOLING_AGGREG_MAX:
poolers.append(MaximumAggregator(width, height,
finalWidth,
finalHeight))
elif policy is Const.POOLING_AGGREG_MIN:
poolers.append(MinimumAggregator(width, height,
finalWidth,
finalHeight))
elif policy is Const.POOLING_CONV_MIN:
poolers.append(FastMWMinPooler(height, width))
elif policy is Const.POOLING_CONV_AVG:
poolers.append(FastMWAvgPooler(height, width))
elif policy is Const.POOLING_CONV_MAX:
poolers.append(FastMWMaxPooler(height, width))
elif policy is Const.POOLING_MORPH_OPENING:
poolers.append(MorphOpeningPooler(height, width))
elif policy is Const.POOLING_MORPH_CLOSING:
poolers.append(MorphClosingPooler(height, width))
return MultiPooler(poolers)
def get_number_generator(genType, min_value, max_value, seed, **kwargs):
if genType is Const.RND_RU:
value_generator = NumberGenerator(min_value, max_value, seed)
elif genType is Const.RND_SET:
probLaw = kwargs["probLaw"]
value_generator = CustomDiscreteNumberGenerator(probLaw, seed)
elif genType is Const.RND_GAUSS:
if "outRange" in kwargs:
outRange = kwargs["outRange"]
value_generator = GaussianNumberGenerator(min_value, max_value, seed,
outRange)
else:
value_generator = GaussianNumberGenerator(min_value, max_value, seed)
return value_generator
def get_filter_generator(policy, parameters, nb_filters, random=False):
if policy == Const.FGEN_ORDERED:
#Parameters is a list of tuples (policy, parameters)
ls = []
subNbFilters = int(math.ceil(nb_filters / len(parameters)))
for subPolicy, subParameters in parameters:
ls.append(get_filter_generator(subPolicy, subParameters,
subNbFilters, random))
return OrderedMFF(ls, nb_filters)
if policy is Const.FGEN_CUSTOM:
print "Custom filters"
return custom_finite_3_same_filter()
#Parameters is a dictionary
valSeed = None
sizeSeed = None
shuffling_seed = None
perturbationSeed = None
cell_seed = None
sparseSeed = 5
if random:
valSeed = 1
sizeSeed = 2
shuffling_seed = 3
perturbationSeed = 4
cell_seed = 5
sparseSeed = 6
min_size = parameters["min_size"]
max_size = parameters["max_size"]
size_generator = OddUniformGenerator(min_size, max_size, seed=sizeSeed)
min_val = parameters["min_val"]
max_val = parameters["max_val"]
value_gen_type = parameters["value_generator"]
value_generator = get_number_generator(value_gen_type, min_val, max_val,
valSeed, **parameters)
normalization = None
if "normalization" in parameters:
normalization = parameters["normalization"]
if policy is Const.FGEN_ZEROPERT:
print "Zero perturbation filters"
baseFilterGenerator = FilterGenerator(value_generatorerator, size_generatorerator,
normalisation=normalization)
elif policy is Const.FGEN_IDPERT:
print "Id perturbation filters"
baseFilterGenerator = IdPerturbatedFG(value_generatorerator, size_generatorerator,
normalisation=normalization)
elif policy is Const.FGEN_IDDIST:
print "Id distance filters"
max_dist = parameters["max_dist"]
baseFilterGenerator = IdMaxL1DistPerturbFG(value_generatorerator, size_generatorerator,
max_dist,
normalisation=normalization,
shuffling_seed=shuffling_seed)
elif policy is Const.FGEN_STRAT:
print "Stratified filters"
nb_cells = parameters["strat_nb_cells"]
minPerturbation = 0
if "minPerturbation" in parameters:
minPerturbation = parameters["minPerturbation"]
maxPerturbation = 1
if "maxPerturbation" in parameters:
maxPerturbation = parameters["maxPerturbation"]
perturbationGenerator = get_number_generator(value_gen_type,
minPerturbation,
maxPerturbation,
perturbationSeed)
baseFilterGenerator = StratifiedFG(min_val, max_val, nb_cells,
perturbationGenerator,
size_generator,
normalisation=normalization,
cell_seed=cell_seed)
if "sparse_proba" in parameters:
print "Adding sparcity"
sparse_proba = parameters["sparse_proba"]
baseFilterGenerator = SparsityDecoratorFG(baseFilterGenerator,
sparse_proba,
sparseSeed)
print "Returning filters"
return Finite3SameFilter(baseFilterGenerator, nb_filters)
def get_feature_extractor(policy, **kwargs):
if policy is Const.FEATEXT_SPASUB:
nbCol = kwargs.get("nbCol", 2)
return DepthCompressorILE(nbCol)
else: # assume Const.FEATEXT_ALL
return ImageLinearizationExtractor()
#TODO : include in randconv : (Const.FEATEXT_ALL, {}), (Const.FEATEXT_SPASUB, {"nbCol":2})
def randconv_factory(
nb_filters=5,
filter_policy=(Const.FGEN_ZEROPERT,
{"min_size": 2, "max_size": 32, "min_val": -1, "max_val": 1,
"value_generator": Const.RND_RU,
"normalization": FilterGenerator.NORMALISATION_MEANVAR}),
poolings=[(3, 3, Const.POOLING_AGGREG_AVG)],
extractor=(Const.FEATEXT_ALL, {}),
nb_subwindows=10,
sw_min_size_ratio=0.5, sw_max_size_ratio=1.,
sw_target_width=16, sw_target_height=16,
sw_interpolation=SubWindowExtractor.INTERPOLATION_BILINEAR,
include_original_img=False,
n_jobs=-1, verbosity=10, temp_folder=None,
random=True):
"""
Factory method to create :class:`RandConvCoordinator` tuned for RGB images
Parameters
----------
nb_filters : int >= 0 (default : 5)
The number of filters
filter_policy : pair (policyType, parameters)
policyType : one of Const.FGEN_*
The type of filter generation policy to use
parameters : dict
The parameter dictionnary to forward to :func:`get_filter_generator`
poolings : iterable of triple (height, width, policy) (default :
[(3, 3, Const.POOLING_AGGREG_AVG)])
A list of parameters to instantiate the corresponding :class:`Pooler`
height : int > 0
the height of the neighborhood window
width : int > 0
the width of the neighborhood window
policy : int in {Const.POOLING_NONE, Const.POOLING_AGGREG_MIN,
Const.POOLING_AGGREG_AVG, Const.POOLING_AGGREG_MAX,
Const.POOLING_CONV_MIN, Const.POOLING_CONV_AVG, Const.POOLING_CONV_MAX}
nb_subwindows : int >= 0 (default : 10)
The number of subwindows to extract
sw_min_size_ratio : float > 0 (default : 0.5)
The minimum size of a subwindow expressed as the ratio of the size
of the original image
sw_max_size_ratio : float : sw_min_size_ratio
<= sw_max_size_ratio <= 1 (default : 1.)
The maximum size of a subwindow expressed as the ratio of the size
of the original image
sw_target_width : int > 0 (default : 16)
The width of the subwindows after reinterpolation
sw_target_height : int > 0 (default : 16)
The height of the subwindows after reinterpolation
sw_interpolation : int (default :
SubWindowExtractor.INTERPOLATION_BILINEAR)
The subwindow reinterpolation algorithm. For more information, see
:class:`SubWindowExtractor`
include_original_img : boolean (default : False)
Whether or not to include the original image in the subwindow
extraction process
n_jobs : int >0 or -1 (default : -1)
The number of process to spawn for parallelizing the computation.
If -1, the maximum number is selected. See also :mod:`Joblib`.
verbosity : int >= 0 (default : 10)
The verbosity level
temp_folder : string (directory path) (default : None)
The temporary folder used for memmap. If none, some default folder
will be use (see the :class:`ParallelCoordinator`)
random : bool (default : True)
Whether to use randomness or use a predefined seed
Return
------
coordinator : :class:`Coordinator`
The RandConvCoordinator corresponding to the
set of parameters
Notes
-----
- Filter generator
Base instance of :class:`Finite3SameFilter` with a base instance of
:class:`NumberGenerator` for the values and
:class:`OddUniformGenerator` for the sizes
- Filter size
The filter are square (same width as height)
- Convolver
Base instance of :class:`RGBConvolver`
- Subwindow random generator
The subwindow random generator is a :class:`NumberGenerator` base
instance (generates real numbers uniformly).
- Feature extractor
Base instance of :class:`ImageLinearizationExtractor`
"""
#RANDOMNESS
swngSeed = None
if random is False:
swngSeed = 0
#CONVOLUTIONAL EXTRACTOR
#Filter generator
#Type/policy parameters, #filters, random
filter_policyType, filter_policyParam = filter_policy
filter_generator = get_filter_generator(filter_policyType, filter_policyParam,
nb_filters, random)
#Convolver
convolver = RGBConvolver()
#Aggregator
multi_pooler = get_multi_poolers(poolings, sw_target_height,
sw_target_width)
#SubWindowExtractor
swNumGenerator = NumberGenerator(seed=swngSeed)
sw_extractor = SubWindowExtractor(sw_min_size_ratio,
sw_max_size_ratio,
sw_target_width,
sw_target_height,
sw_interpolation, swNumGenerator)
multi_sw_extractor = MultiSWExtractor(sw_extractor, nb_subwindows, False)
#ConvolutionalExtractor
convolutional_extractor = ConvolutionalExtractor(filter_generator,
convolver,
multi_sw_extractor,
multi_pooler,
include_original_img)
#FEATURE EXTRACTOR
feature_extractor = get_feature_extractor(extractor[0], **extractor[1])
#COORDINATOR
coordinator = RandConvCoordinator(convolutional_extractor, feature_extractor)
if n_jobs != 1:
coordinator.parallelize(n_jobs, temp_folder)
return coordinator
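# Hedged usage sketch (added; values are illustrative, not package defaults):
#   coordinator = randconv_factory(nb_filters=10, n_jobs=1)
# yields a RandConvCoordinator with 10 random zero-perturbation filters,
# 3x3 average-pooling aggregation, and 10 bilinear 16x16 subwindows per image.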
| 40.204598
| 95
| 0.623535
| 1,075
| 0.061467
| 0
| 0
| 0
| 0
| 0
| 0
| 7,140
| 0.408257
|
ac6074b9d7933f990f474a4b8d34085357c16a13
| 2,944
|
py
|
Python
|
projections.py
|
barrulik/3d-projections
|
291770c466383c917dd68eb0ad4121195598a29f
|
[
"Apache-2.0"
] | 1
|
2022-01-20T20:01:24.000Z
|
2022-01-20T20:01:24.000Z
|
projections.py
|
barrulik/3d-projections
|
291770c466383c917dd68eb0ad4121195598a29f
|
[
"Apache-2.0"
] | 1
|
2022-01-20T20:01:30.000Z
|
2022-01-21T14:23:11.000Z
|
projections.py
|
barrulik/3d-projections
|
291770c466383c917dd68eb0ad4121195598a29f
|
[
"Apache-2.0"
] | null | null | null |
import pygame
import numpy as np
from math import *
import json
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RAINBOW = (0, 0, 0)
rainbow = True
WIDTH, HEIGHT = 800, 600
#WIDTH, HEIGHT = 1600, 900
def drawLine(point1, point2, screen):
if rainbow:
pygame.draw.line(screen, RAINBOW, (point1[0], point1[1]), (point2[0], point2[1]))
else:
pygame.draw.line(screen, BLACK, (point1[0], point1[1]), (point2[0], point2[1]))
def rotateX(angle):
return np.matrix([
[1, 0, 0],
[0, cos(angle), -sin(angle)],
[0, sin(angle), cos(angle)]
])
def rotateY(angle):
return np.matrix([
[cos(angle), 0, sin(angle)],
[0, 1, 0],
[-sin(angle), 0, cos(angle)]
])
def rotateZ(angle):
return np.matrix([
[cos(angle), -sin(angle), 0],
[sin(angle), cos(angle), 0],
[0, 0, 1]
])
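
# Quick sanity check for the matrices above (illustrative): a 90-degree
# rotation about Z maps the x axis onto the y axis, up to float error:
#   np.dot(rotateZ(pi / 2), np.matrix([[1], [0], [0]]))  ->  ~[[0], [1], [0]]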
def projectPoint(point, angle, offset, scale):
rotated = point.reshape(3, 1)
rotated = np.dot(rotateX(pi / 2), rotated)
rotated = np.dot(rotateX(angle[0]), rotated)
rotated = np.dot(rotateY(angle[1]), rotated)
rotated = np.dot(rotateZ(angle[2]), rotated)
projected = np.dot(np.matrix([[1, 0, 0], [0, 1, 0]]), rotated)
x = int(projected[0][0] * scale) + WIDTH/2
y = int(projected[1][0] * scale) + HEIGHT/2
return [x, y]
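
# projectPoint above is an orthographic projection: the 2x3 matrix drops the z
# coordinate after rotation, then the result is scaled and centred on the
# window. E.g. with scale=100, a rotated point (0.5, 0.25, z) lands at pixel
# (WIDTH/2 + 50, HEIGHT/2 + 25).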
def renderObject(objectPath, offset, angle, scale, screen):
f = open(objectPath)
data = json.load(f)
points = data.get("points")
if points:
temp = ""
for pointName in points:
point = points.get(pointName)
point = np.matrix(point)+np.matrix([offset])
temp += '"'+pointName+'":'+str(projectPoint(point, angle, offset, scale))+','
projectedPoints = json.loads('{'+temp[:-1]+'}')
lines = data.get("lines")
if lines:
for line in lines:
for p1name in line:
p1 = p1name
p2 = projectedPoints.get(line.get(p1))
p1 = projectedPoints.get(p1)
drawLine(p1, p2, screen)
objects = data.get("objects")
if objects:
for obj in objects:
            child_offset = np.squeeze(np.array(
                np.matrix(obj.get("offset")) + np.matrix(offset) * scale / obj.get("scale")))
            child_angle = np.squeeze(np.array(np.matrix(obj["angle"]) + angle))
            renderObject(obj.get("objectPath"), child_offset, child_angle,
                         obj.get("scale"), screen)
screen = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
angle = 0
while True:
    # cap the loop at 60 FPS so the spin rate is consistent rather than machine-dependent
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit()
angle += 0.01
screen.fill(WHITE)
    # render the scene
renderObject("objects/2squares.json", [0, 0, 0], [angle, angle, angle], 100, screen)
renderObject("objects/square.json", [0, 0, 1], [angle, angle, angle], 100, screen)
pygame.display.update()
| 27.009174
| 224
| 0.571332
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 215
| 0.07303
|
ac615575fca01649282cfa5b82c564f0a14d7c09
| 708
|
py
|
Python
|
insert_data.py
|
Amantechcse/cricket-fantasy-game
|
dd256adef88de7fd4132dea55d52bfba493efa30
|
[
"MIT"
] | null | null | null |
insert_data.py
|
Amantechcse/cricket-fantasy-game
|
dd256adef88de7fd4132dea55d52bfba493efa30
|
[
"MIT"
] | null | null | null |
insert_data.py
|
Amantechcse/cricket-fantasy-game
|
dd256adef88de7fd4132dea55d52bfba493efa30
|
[
"MIT"
] | null | null | null |
import sqlite3
book=sqlite3.connect("bookstore.db")
curbook=book.cursor()
#curbook.execute('''create table books (book_id integer primary key autoincrement , book_name text(20), author text(20), price integer);''')
while True:
x=input("want to enter data yes/no: ")
if x=='yes':
book_id=int(input("Enter book id: "))
book_name=input("Enter book name: ")
author=input("Enter author name: ")
price=input("Enter price of book: ")
curbook.execute("insert into books (book_id,book_name,author, price) values(?,?,?,?);",(book_id,book_name,author, price))
book.commit()
print("data add successfully")
else:
break
#book.close()
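
# Illustrative read-back (a sketch, not part of the original script): list the
# rows that were just inserted, then close the connection.
for row in curbook.execute("select book_id, book_name, author, price from books"):
    print(row)
book.close()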
| 28.32
| 141
| 0.638418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 375
| 0.529661
|
ac61daa3c54495624b8682899688bd4fd36deaca
| 13,110
|
py
|
Python
|
api/config/h5Template/tanmuContent.py
|
jimbunny/wedding-invitation
|
a3648454e1105d9362f95d9f6e69055a7522e15b
|
[
"MIT"
] | null | null | null |
api/config/h5Template/tanmuContent.py
|
jimbunny/wedding-invitation
|
a3648454e1105d9362f95d9f6e69055a7522e15b
|
[
"MIT"
] | null | null | null |
api/config/h5Template/tanmuContent.py
|
jimbunny/wedding-invitation
|
a3648454e1105d9362f95d9f6e69055a7522e15b
|
[
"MIT"
] | null | null | null |
tanmuContent = '''
<style>
.barrage-input-tip {
z-index: 1999;
position: absolute;
left: 10px;
width: 179.883px;
height: 35.7422px;
line-height: 35.7422px;
border-radius: 35.7422px;
box-sizing: border-box;
color: rgb(255, 255, 255);
margin-left: 45.7031px;
background-color: {{ data.tanmuBtnColor }};
opacity: 0.65;
pointer-events: initial;
padding: 0px 16.9922px;
font-size: 14.0625px;
display: block;
}
.data-box{display:none}
.barrage_box_top{width:100%;height:160px;margin:0px auto;}
.barrage_box_top .barrage-row{margin-bottom:20px;}
.barrage_box_top .barrage-item{
background-color: {{ data.tanmuColor }};margin-bottom:10px; white-space:nowrap;color:{{ data.fontColor }}; font-size: 12px; transform: scale(1); opacity: 1; transition: all 0.65s ease-in 0s;padding: 6px 8px 0px 8px; height: 32px;display: inline-block;border-radius: 25px;
}
</style>
<div class="maka-barrage-dom" style="top: 0px; left: 0px; background-color: transparent; z-index: 1000;">
<div class="barrage-content" style="position: fixed; box-sizing: border-box; padding: 11.7188px; right: 0px; bottom: 0px; z-index: 1000; width: 100%; pointer-events: none; background: linear-gradient(rgba(0, 0, 0, 0) 0%, rgba(0, 0, 0, 0.2) 100%);">
<div class="barrage-words row" style="margin-top: 11.7188px; height: 212.695px;"><div class="barrage-word" style="min-height: 32.2266px; line-height: 32.2266px; font-size: 12.8906px; padding: 4.10156px; border-radius: 22.8516px; bottom: 94.3359px; max-width: 310.547px; background-color: rgba(47, 50, 52, 0.6); transform: scale(1); opacity: 0; transition: bottom 2s ease-out 0s, opacity 0.75s linear 0.75s;">
</div>
</div>
<div class="barrage-bottom row" id="barrageBtn" style="padding-bottom: env(safe-area-inset-bottom); margin-top: 14.0625px; position: fixed; left: 11.7188px; bottom: 47px; pointer-events: initial;">
<div class="barrage-input-tip" data-toggle="modal" data-target="#myModal" style="background:{{ data.tanmuColor }}; width: 179.883px; height: 35.7422px; line-height: 35.7422px; border-radius: 35.7422px; box-sizing: border-box; color: rgb(255, 255, 255); margin-left: 45.7031px; background-color: rgb(47, 50, 52); opacity: 0.65; pointer-events: initial; padding: 0px 16.9922px; font-size: 14.0625px;">ฝากคำอวยพร...</div>
</div>
<div class="backdrop" style="position: fixed; width: 100%; height: 100%; background-color: rgba(0, 0, 0, 0); z-index: 999; display: none; top: 0px; left: 0px; pointer-events: initial;"></div>
<div class="barrage-btn tanBtn" style="padding-bottom: env(safe-area-inset-bottom); margin-top: 14.0625px; position: fixed; left: 11.7188px; bottom: 11.7188px; pointer-events: initial;">
<div class="correct-icon" id="tanmuOpen" style="background: url("https://i.ibb.co/1QmGHWV/danmu-open1.png") 0% 0% / contain no-repeat; border-radius: 100%; width: 35.7422px; height: 35.7422px;"></div>
<div class="close-icon" id="tanmuClose" style="background: url("https://i.ibb.co/QNwcxLx/danmu-close1.png") 0% 0% / contain no-repeat; border-radius: 100%; width: 35.7422px; height: 35.7422px; display: none;">
<b style="position: absolute; color: rgb(255, 255, 255); top: 2.92969px; left: 19.9219px; font-weight: 600; font-size: 8.78906px; transform: scale(0.8);">{{ data.greetings | length }}</b>
</div>
</div>
<div id="j-barrage-top" class="barrage_box barrage_box_top" style="position: fixed; box-sizing: border-box; padding: 0px; right: 0px; bottom: 0px; z-index: 1000; width: 100%; pointer-events: none;"></div>
</div>
<div class="barrage-input-wrap" id="modalShow" style="display: none; position: fixed; left: 0px; bottom: 0px;height: 0px; width: 100%; background-color:transparent; padding: 9.375px 11.7188px; box-sizing: border-box; z-index: 2000; pointer-events: initial;">
<!-- Modal dialog -->
<div class="modal fade" id="myModal" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">
<div style="width:100%;" class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" style="cursor: pointer;" data-dismiss="modal" aria-hidden="true">×</button>
<h4 class="modal-title" id="myModalLabel">อวยพร</h4>
</div>
<div class="modal-body">
<form action="" id="form" class="form-horizontal">
<div class="form-group">
<div class="col-md-24" style="padding-left:10px;padding-right: 10px;">
<input type="text" class="form-control" style="width:100% !important;" name="name" placeholder="ชื่อ-นามสกุล" />
</div>
</div>
<div class="form-group">
<div class="col-md-24" style="padding-left:10px;padding-right: 10px;">
<input type="text" class="form-control" style="width:100% !important;" name="greetings" placeholder="คำอวยพร" />
</div>
</div>
<div class="form-group">
<div class="col-md-24 col-md-offset-2" style="padding-left:10px;padding-right: 10px;">
<button id="subBtn" type="submit" class="btn btn-primary" style="width:100%;">ส่ง</button>
</div>
</div>
</form>
</div>
</div><!-- /.modal-content -->
</div><!-- /.modal-dialog -->
</div>
<!-- /.modal -->
</div>
</div>
<div class="alert alert-danger hide">ส่งคำอวยพรล้มเหลว!</div>
<div class="alert alert-success hide">ส่งคำอวยพรสำเร็จ!</div>
<script src="/static/js/bootstrap.min.js"></script>
<script src="/static/js/bootstrapValidator.min.js"></script>
<script type="text/javascript" src="/static/js/index.js"></script>
<style type="text/css">
*{
padding:0;
margin:0;
}
a{
text-decoration: none;
}
.form-control{
display: inline-block;
width: auto;
padding: 6px 12px;
font-size: 14px;
line-height: 1.42857143;
color: #555;
background-color: #fff;
background-image: none;
border: 1px solid #ccc;
border-radius: 4px;
-webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075);
box-shadow: inset 0 1px 1px rgba(0,0,0,.075);
-webkit-transition: border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;
-o-transition: border-color ease-in-out .15s,box-shadow ease-in-out .15s;
transition: border-color ease-in-out .15s,box-shadow ease-in-out .15s;
}
.btn{
display: inline-block;
padding: 6px 12px;
margin-bottom: 0;
font-size: 14px;
font-weight: 400;
line-height: 1.42857143;
text-align: center;
white-space: nowrap;
vertical-align: middle;
-ms-touch-action: manipulation;
touch-action: manipulation;
cursor: pointer;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
background-image: none;
border: 1px solid transparent;
border-radius: 4px;
}
.btn-primary {
color: #fff;
background-color: #337ab7;
border-color: #2e6da4;
}
/* main component styles */
.overflow-text{
display: block;
white-space:nowrap;
overflow:hidden;
text-overflow:ellipsis;
opacity:0;
clear: both;
padding:0 10px;
border-radius: 10px;
box-sizing: border-box;
max-width: 100%;
color:#fff;
animation:colorchange 3s infinite alternate;
-webkit-animation:colorchange 3s infinite alternate; /*Safari and Chrome*/
}
@keyframes colorchange{
0%{
color:red;
}
50%{
color:green;
}
100%{
color:#6993f9;
}
}
/* main component styles */
.alert{
position: fixed;
width: 50%;
margin-left: 20%;
z-index: 2000;
}
</style>
<script type="text/javascript">
var Obj;
$.ajax({
    //a few request parameters worth noting
    type: "GET",//HTTP method
    dataType: "json",//expected response data type
    url: "/api/v1/h5/greetings/"+{{ data.id }},//url
    success: function (result) {
        console.log(result);//log the server response (for debugging)
        if (result.code == 0) {
            // initialize the barrage data
            Obj = $('#j-barrage-top').barrage({
                data : result.data, //data list
                row : 1, //number of rows shown
                time : 2500, //interval between items
                gap : 100, //gap between items
                position : 'fixed', //positioning mode
                direction : 'bottom left', //scroll direction
                ismoseoverclose : true, //pause on hover
                height : 30, //height of each item div
})
Obj.start();
} else {
alert("tanmu Error");
};
},
error : function() {
alert("tanmu Error");
}
});
</script>
<script>
$("#barrageBtn").click(function() {
var modalShowDiv = document.getElementById('modalShow');
modalShowDiv.style.display = 'block';
})
var kg = true; //toggle flag used by the if/else below
$(".tanBtn").click(function() { //click handler for the toggle button
    if (kg) { //branch on the current toggle state
var tanmuOpenDiv= document.getElementById('tanmuOpen');
tanmuOpenDiv.style.display = 'block';
var tanmuCloseDiv= document.getElementById('tanmuClose');
tanmuCloseDiv.style.display='none';
Obj.start();
var barrageBtnDiv= document.getElementById('barrageBtn');
barrageBtnDiv.style.display = 'block';
} else {
var tanmuOpenDiv= document.getElementById('tanmuOpen');
tanmuOpenDiv.style.display = 'none';
var tanmuCloseDiv= document.getElementById('tanmuClose');
tanmuCloseDiv.style.display='block';
Obj.close();
var barrageBtnDiv= document.getElementById('barrageBtn');
barrageBtnDiv.style.display = 'none';
}
    kg = !kg; //flip the flag; without this, clicking would never switch back to the first icon
})
$('#myModal').on('hidden.bs.modal', function (e) {
    // clear the form and its validation state
// Reset a form
document.getElementById("form").reset();
$('#form').bootstrapValidator("resetForm",true);
})
$('form').bootstrapValidator({
    //default validation message
message: 'This value is not valid',
    // feedback icons shown at the right of each input
feedbackIcons: {
valid: 'glyphicon glyphicon-ok',
invalid: 'glyphicon glyphicon-remove',
validating: 'glyphicon glyphicon-refresh'
},
excluded: [':disabled'],
submitHandler: function (validator, form, submitButton) {
        // called when the form is submitted successfully
        // validator: the form validation instance
        // form: jQuery object for the form
        // submitButton: jQuery object for the submit button
},
fields: {
name: {
            message: 'โปรดกรอกชื่อ, ความยาวไม่เกิน 20 ตัวอักษร',
validators: {
                notEmpty: { //must not be empty
message: 'โปรดกรอกชื่อ'
},
stringLength: {
max: 20,
message: 'ความยาวไม่เกิน 20 ตัวอักษร'
},
}
},
greetings: {
message: 'โปรดกรอกคำอวยพร, ความยาวไม่เกิน 40 ตัวอักษร',
validators: {
notEmpty: {
message: 'โปรดกรอกคำอวยพร'
},
stringLength: {
max: 40,
message: 'ความยาวไม่เกิน 40 ตัวอักษร'
},
}
},
}
});
var that = this
$("#subBtn").click(function () { //非submit按钮点击后进行验证,如果是submit则无需此句直接验证
$("form").bootstrapValidator('validate'); //提交验证
if ($("form").data('bootstrapValidator').isValid()) { //获取验证结果,如果成功,执行下面代码
$.ajax({
            //a few request parameters worth noting
            type: "POST",//HTTP method
            dataType: "json",//expected response data type
            url: "/api/v1/h5/greetings/"+{{ data.id }},//url
data: $('#form').serialize(),
success: function (result) {
                console.log(result);//log the server response (for debugging)
if (result.code == 0) {
$("#myModal").modal('hide');
                    //add the new greeting
                    //this format must match the data format in dataa.js
var addVal = {
text : result.data
}
                    //push into the data array
Obj.data.unshift(addVal);
$(".alert-success").addClass("show");
window.setTimeout(function(){
$(".alert-success").removeClass("show");
                    },1000);//display duration
} else {
$(".alert-danger").addClass("show");
window.setTimeout(function(){
$(".alert-danger").removeClass("show");
                    },1000);//display duration
};
},
error : function() {
{#alert("Error!");#}
$(".alert-danger").addClass("show");
window.setTimeout(function(){
$(".alert-danger").removeClass("show");
                },1000);//display duration
}
});
}
});
</script>
'''
| 39.017857
| 427
| 0.564607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14,224
| 0.998947
|
ac622bca39310127b42776aefdbd9c65467abc04
| 871
|
py
|
Python
|
example/example2.py
|
xrloong/xrSolver
|
4f36660b78456840f65215ffce0481cdc280f980
|
[
"Apache-2.0"
] | null | null | null |
example/example2.py
|
xrloong/xrSolver
|
4f36660b78456840f65215ffce0481cdc280f980
|
[
"Apache-2.0"
] | null | null | null |
example/example2.py
|
xrloong/xrSolver
|
4f36660b78456840f65215ffce0481cdc280f980
|
[
"Apache-2.0"
] | null | null | null |
from xrsolver import Problem
import solver
# This example is the second case from https://www.youtube.com/watch?v=WJEZh7GWHnw
s = solver.Solver()
p = Problem()
x1 = p.generateVariable("x1", lb=0, ub=3)
x2 = p.generateVariable("x2", lb=0, ub=3)
x3 = p.generateVariable("x3", lb=0, ub=3)
x4 = p.generateVariable("x4", lb=0, ub=3)
x5 = p.generateVariable("x5", lb=0, ub=3)
p.addVariable(x1)
p.addVariable(x2)
p.addVariable(x3)
p.addVariable(x4)
p.addVariable(x5)
p.appendConstraint(x1 + x2 <= 5)
p.appendConstraint(x2 <= 0.5 * (x1 + x2))
p.appendConstraint(x5 >= 0.4 * (x3 + x4))
p.appendConstraint(x1 + x2 + x3 + x4 +x5 == 10)
p.appendObjective(8.1 * x1 + 10.5 * x2 + 6.4 * x3 + 7.5 * x4 + 5.0 * x5)
s.solveProblem(p)
print("x1 =", x1.getValue())
print("x2 =", x2.getValue())
print("x3 =", x3.getValue())
print("x4 =", x4.getValue())
print("x5 =", x5.getValue())
| 23.540541
| 82
| 0.64868
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 132
| 0.15155
|
ac635fff94dc7903f590b0e63087a7ab13c8a9ab
| 11,669
|
py
|
Python
|
src/ipyradiant/visualization/explore/interactive_exploration.py
|
lnijhawan/ipyradiant
|
d804e9031ef39c1ea75fedd52d110302c065ad84
|
[
"BSD-3-Clause"
] | null | null | null |
src/ipyradiant/visualization/explore/interactive_exploration.py
|
lnijhawan/ipyradiant
|
d804e9031ef39c1ea75fedd52d110302c065ad84
|
[
"BSD-3-Clause"
] | null | null | null |
src/ipyradiant/visualization/explore/interactive_exploration.py
|
lnijhawan/ipyradiant
|
d804e9031ef39c1ea75fedd52d110302c065ad84
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2021 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
from typing import Union
import ipycytoscape as cyto
import ipywidgets as W
import rdflib
import traitlets as trt
from ipyradiant.query.api import SPARQLQueryFramer
from ipyradiant.rdf2nx.uri_converter import URItoID
DEFAULT_CYTO_STYLE = [
{
"selector": "node",
"css": {
"label": "data(_label)",
"text-wrap": "wrap",
"text-max-width": "150px",
"text-valign": "center",
"text-halign": "center",
"font-size": "10",
"font-family": '"Gill Sans", sans-serif',
"color": "black",
"background-color": "grey",
},
},
{
"selector": "edge[classes='temp-edge']",
"css": {
"label": "data(_label)",
"line-color": "#a8eae5",
},
},
{
"selector": "node.clicked",
"css": {
"background-color": "grey",
"line-color": "black",
"target-arrow-color": "black",
"source-arrow-color": "black",
},
},
{
"selector": "node.temp",
"css": {
"background-color": "#FFB6C1",
"line-color": "black",
"target-arrow-color": "black",
"source-arrow-color": "black",
},
},
{
"selector": "edge.directed",
"style": {
"curve-style": "bezier",
"target-arrow-shape": "triangle",
"line-color": "grey",
# "label": "data(iri)",
"font-size": "5",
},
},
{
"selector": "edge.temp",
"style": {
"curve-style": "bezier",
"line-color": "#a8eae5",
# "label": "data(iri)",
"font-size": "5",
},
},
{"selector": "edge.multiple_edges", "style": {"curve-style": "bezier"}},
]
def add_cyto_class(element: Union[cyto.Node, cyto.Edge], class_addition: str) -> str:
"""Update the classes string for a cytoscape element with an addition
TODO support multiple class additions
:param element: the cytoscape Node/Edge to update classes for
:param class_addition: the class string to add
:return: the class string
"""
try:
classes = set(element.classes.split(" "))
except AttributeError:
classes = set()
classes.add(class_addition)
return " ".join(classes)
def remove_cyto_class(element: Union[cyto.Node, cyto.Edge], class_removal: str) -> str:
"""Update the classes string for a cytoscape element with a removal
TODO support multiple class additions
:param element: the cytoscape Node/Edge to update classes for
:param class_removal: the class string to remove
:return: the class string
"""
try:
classes = set(element.classes.split(" "))
classes.discard(class_removal)
return " ".join(classes)
except AttributeError:
return ""
class GetOutgoingPredicateObjects(SPARQLQueryFramer):
"""
Return all triples for non-Literal objects (and the optional object labels).
"""
sparql = """
SELECT DISTINCT ?s ?p ?o ?label
WHERE {
?s ?p ?o .
FILTER (!isLiteral(?o))
OPTIONAL {?o rdfs:label ?label}
}
"""
# Throughout, the layout named by self.cytoscape_widget_layout is re-applied
# via set_layout() every time nodes are added or removed, so the graph
# recomputes its layout and stays readable.
class InteractiveViewer(W.VBox):
expand_button = trt.Instance(W.Button)
undo_button = trt.Instance(W.Button)
remove_temp_nodes_button = trt.Instance(W.Button)
cytoscape_widget = trt.Instance(cyto.CytoscapeWidget)
selected_node = trt.Instance(cyto.Node, allow_none=True)
rdf_graph = trt.Instance(rdflib.graph.Graph, allow_none=True)
cyto_style = trt.List(allow_none=True)
cytoscape_widget_layout = trt.Unicode(default_value="cola")
    # NOTE: these are class-level mutable attributes, shared across instances
existing_node_ids = []
new_nodes = {}
new_edges = {}
@trt.default("expand_button")
def _create_expand_button(self):
button = W.Button(
description="Expand Upon Selected Node",
layout=W.Layout(width="50%", height="40px"),
)
button.on_click(self.expand_button_clicked)
return button
@trt.default("undo_button")
def _create_undo_button(self):
button = W.Button(
description="Undo Last Expansion",
layout=W.Layout(width="25%", height="40px"),
disabled=True,
)
button.on_click(self.undo_expansion)
return button
@trt.default("remove_temp_nodes_button")
def _create_remove_temp_nodes_button(self):
button = W.Button(
description="Remove Temporary Nodes",
layout=W.Layout(width="25%", height="40px"),
disabled=False,
)
button.on_click(self.remove_temp_nodes)
return button
@trt.default("selected_node")
def _create_default_selected_node(self):
return None
@trt.default("cyto_style")
def _create_cyto_style(self):
return DEFAULT_CYTO_STYLE
@trt.default("rdf_graph")
def _create_rdf_graph(self):
return rdflib.Graph()
@trt.default("cytoscape_widget")
def _create_cytoscape_widget(self):
return cyto.CytoscapeWidget()
@trt.default("layout")
def _create_layout(self):
return W.Layout(width="80%")
@trt.observe("cytoscape_widget")
def update_cytoscape_widget(self, change):
"""Apply settings to cytoscape graph when updating"""
if change.old == change.new:
return
self.cytoscape_widget.set_layout(name=self.cytoscape_widget_layout)
self.cytoscape_widget.set_style(self.cyto_style)
# on is a callback for cytoscape_widget instance (must be set on each instance)
self.cytoscape_widget.on("node", "click", self.log_node_clicks)
# Must set children again so that the changes propagate to the front end
# Ideally, would be done automatically with traits
# https://github.com/jupyrdf/ipyradiant/issues/79
self.children = (
self.cytoscape_widget,
W.HBox(
children=[
self.expand_button,
self.undo_button,
self.remove_temp_nodes_button,
]
),
)
@trt.validate("children")
def validate_children(self, proposal):
"""
Validate method for default children.
This is necessary because @trt.default does not work on children.
"""
children = proposal.value
if not children:
children = (
self.cytoscape_widget,
W.HBox(
children=[
self.expand_button,
self.undo_button,
self.remove_temp_nodes_button,
]
),
)
return children
def get_node(self, node: dict) -> cyto.Node:
"""This function is used to find a node given the id of a node copy"""
for cyto_node in self.cytoscape_widget.graph.nodes:
if cyto_node.data["id"] == node["data"]["id"]:
return cyto_node
        # TODO: make this function return None and log a warning if the node is not found.
raise ValueError("Node not found in cytoscape.graph.nodes.")
def log_node_clicks(self, node: dict):
"""
        Callback registered for node clicks: marks the clicked node as
        selected and updates its color.
"""
cyto_node = self.get_node(node)
if self.selected_node == cyto_node:
cyto_node.classes = remove_cyto_class(cyto_node, "temp")
cyto_node.classes = add_cyto_class(cyto_node, "clicked")
# NOTE: changes won't propagate to frontend until graph is updated
self.update_cytoscape_frontend()
self.selected_node = cyto_node
def expand_button_clicked(self, button):
"""
This function expands a node by loading in its predicates and subjects when
a node is selected and the expand button is clicked.
"""
self.undo_button.disabled = False
if self.selected_node is None:
return None
new_data = GetOutgoingPredicateObjects.run_query(
graph=self.rdf_graph, s=self.selected_node.data["iri"]
)
objs = new_data["o"].tolist()
preds = new_data["p"].tolist()
labels = new_data["label"].tolist()
self.existing_node_ids = [
node.data["id"] for node in self.cytoscape_widget.graph.nodes
]
self.new_nodes = {
idx: cyto.Node(
data={
"id": str(iri),
"iri": iri,
"_label": labels[idx] or str(iri),
},
classes="temp",
)
for idx, iri in enumerate(objs)
if str(iri) not in self.existing_node_ids
}
self.new_edges = {
idx: cyto.Edge(
data={
"source": self.selected_node.data["id"],
"target": str(iri),
"iri": URItoID(preds[idx]),
},
classes="temp",
)
for idx, iri in enumerate(objs)
}
self.cytoscape_widget.graph.add_nodes(self.new_nodes.values())
self.cytoscape_widget.graph.add_edges(self.new_edges.values())
self.cytoscape_widget.set_layout(name=self.cytoscape_widget_layout)
def undo_expansion(self, button):
"""
Preliminary function for undoing expansions upon a node.
As of right now, a user can only undo the most recent expansion.
Afterwards, the button will be disabled until a new expansion is made.
"""
self.undo_button.disabled = True
for node in self.new_nodes:
self.cytoscape_widget.graph.remove_node_by_id(
self.new_nodes[node].data["id"]
)
for edge in self.new_edges:
try:
self.cytoscape_widget.graph.remove_edge(self.new_edges[edge])
except ValueError:
# edge already removed from graph because the node was removed earlier.
pass
self.cytoscape_widget.set_layout(name=self.cytoscape_widget_layout)
def remove_temp_nodes(self, button):
"""Remove all nodes that have the 'temp' style"""
nodes_to_remove = {
node for node in self.cytoscape_widget.graph.nodes if "temp" in node.classes
}
for node in nodes_to_remove:
self.cytoscape_widget.graph.remove_node(node)
# change edge color
for edge in self.cytoscape_widget.graph.edges:
edge.classes = remove_cyto_class(edge, "temp")
edge.classes = add_cyto_class(edge, "directed")
# NOTE: changes won't propagate to frontend until graph is updated
self.update_cytoscape_frontend()
self.cytoscape_widget.set_layout(name=self.cytoscape_widget_layout)
self.undo_button.disabled = True
def update_cytoscape_frontend(self):
"""A temporary workaround to trigger a frontend refresh"""
self.cytoscape_widget.graph.add_node(cyto.Node(data={"id": "random node"}))
self.cytoscape_widget.graph.remove_node_by_id("random node")
| 32.504178
| 88
| 0.589768
| 8,418
| 0.721399
| 0
| 0
| 2,969
| 0.254435
| 0
| 0
| 3,926
| 0.336447
|
ac64882142c0ccfa449492e6be034f7737d14e85
| 2,165
|
py
|
Python
|
src/main/serialization/codec/object/collectionCodec.py
|
typingtanuki/pyserialization
|
f4a0d9cff08b3a6ce8f83f3a258c4dce1367d151
|
[
"Apache-2.0"
] | null | null | null |
src/main/serialization/codec/object/collectionCodec.py
|
typingtanuki/pyserialization
|
f4a0d9cff08b3a6ce8f83f3a258c4dce1367d151
|
[
"Apache-2.0"
] | null | null | null |
src/main/serialization/codec/object/collectionCodec.py
|
typingtanuki/pyserialization
|
f4a0d9cff08b3a6ce8f83f3a258c4dce1367d151
|
[
"Apache-2.0"
] | null | null | null |
from typing import List, Optional, TypeVar
from src.main.serialization.codec.codec import Codec
from src.main.serialization.codec.codecCache import CodecCache
from src.main.serialization.codec.object.noneCodec import NoneCodec
from src.main.serialization.codec.utils.byteIo import ByteIo
from src.main.serialization.codec.utils.bytes import *
T = TypeVar('T')
class CollectionCodec(Codec[List[T]]):
"""
Codec for collections
"""
reserved_byte: bytes
codec_cache: CodecCache
    value_codec: Optional[Codec[T]]
def __init__(self,
reserved_byte: bytes,
codec_cache: CodecCache,
                 value_type: Optional[type] = None):
super().__init__()
self.reserved_byte = reserved_byte
self.codec_cache = codec_cache
if value_type is None:
self.value_codec = None
else:
self.value_codec = codec_cache.codec_for_type(value_type)
    def read(self, io: ByteIo) -> Optional[List[T]]:
read: int = from_byte(io.peek())
if read == from_byte(NoneCodec.NONE_VALUE):
return None
size: int = io.read_size(self.reserved_byte)
out: List[any] = []
for i in range(0, size):
codec: Codec = self.value_codec
if codec is None:
codec = self.codec_cache.get(io.peek())
out.append(codec.read(io))
return out
def write(self, io: ByteIo, collection: List[T]) -> None:
if collection is None:
io.write(NoneCodec.NONE_VALUE)
return
io.write_size(len(collection), self.reserved_byte)
for value in collection:
codec: Codec = self.value_codec
if codec is None:
codec = self.codec_cache.codec_for(value)
codec.write(io, value)
def reserved_bytes(self) -> [bytes]:
reserved_int: int = from_byte(self.reserved_byte)
return [to_byte(reserved_int),
to_byte(reserved_int + 1),
to_byte(reserved_int + 2),
to_byte(reserved_int + 3)]
def writes(self, typez: type) -> bool:
return False
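
    # Example (illustrative): with reserved_byte = to_byte(0x10), reserved_bytes()
    # returns the four consecutive values 0x10..0x13; read_size/write_size are
    # presumed to use this range to encode the collection length inline (an
    # assumption about the ByteIo contract, which lives elsewhere).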
| 29.657534
| 69
| 0.611085
| 1,810
| 0.836028
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.018476
|
ac64c1d7463ad68a50c3cf1fa6beb3067354a863
| 3,049
|
py
|
Python
|
pyatv/__init__.py
|
acheronfail/pyatv
|
9cb96ffcc49938c4b43c92b7b40ddcecae37e732
|
[
"MIT"
] | null | null | null |
pyatv/__init__.py
|
acheronfail/pyatv
|
9cb96ffcc49938c4b43c92b7b40ddcecae37e732
|
[
"MIT"
] | 128
|
2020-04-24T06:42:29.000Z
|
2021-02-19T11:34:20.000Z
|
pyatv/__init__.py
|
acheronfail/pyatv
|
9cb96ffcc49938c4b43c92b7b40ddcecae37e732
|
[
"MIT"
] | null | null | null |
"""Main routines for interacting with an Apple TV."""
import asyncio
import datetime # noqa
from ipaddress import IPv4Address
from typing import List
import aiohttp
from pyatv import conf, exceptions, interface
from pyatv.airplay import AirPlayStreamAPI
from pyatv.const import Protocol
from pyatv.dmap import DmapAppleTV
from pyatv.dmap.pairing import DmapPairingHandler
from pyatv.mrp import MrpAppleTV
from pyatv.mrp.pairing import MrpPairingHandler
from pyatv.airplay.pairing import AirPlayPairingHandler
from pyatv.support import net
from pyatv.support.scan import BaseScanner, UnicastMdnsScanner, MulticastMdnsScanner
async def scan(
loop: asyncio.AbstractEventLoop,
timeout: int = 5,
identifier: str = None,
protocol: Protocol = None,
hosts: List[str] = None,
) -> List[conf.AppleTV]:
"""Scan for Apple TVs on network and return their configurations."""
def _should_include(atv):
if not atv.ready:
return False
if identifier and identifier not in atv.all_identifiers:
return False
if protocol and atv.get_service(protocol) is None:
return False
return True
scanner: BaseScanner
if hosts:
scanner = UnicastMdnsScanner([IPv4Address(host) for host in hosts], loop)
else:
scanner = MulticastMdnsScanner(loop, identifier)
devices = (await scanner.discover(timeout)).values()
return [device for device in devices if _should_include(device)]
async def connect(
config: conf.AppleTV,
loop: asyncio.AbstractEventLoop,
protocol: Protocol = None,
session: aiohttp.ClientSession = None,
) -> interface.AppleTV:
"""Connect to a device based on a configuration."""
if config.identifier is None:
raise exceptions.DeviceIdMissingError("no device identifier")
service = config.main_service(protocol=protocol)
implementation = {Protocol.DMAP: DmapAppleTV, Protocol.MRP: MrpAppleTV}.get(
service.protocol
)
if not implementation:
raise exceptions.UnsupportedProtocolError(str(service.protocol))
# AirPlay stream API is the same for both DMAP and MRP
airplay = AirPlayStreamAPI(config, loop)
atv = implementation(loop, await net.create_session(session), config, airplay)
await atv.connect()
return atv
async def pair(
config: conf.AppleTV,
protocol: Protocol,
loop: asyncio.AbstractEventLoop,
session: aiohttp.ClientSession = None,
**kwargs
):
"""Pair a protocol for an Apple TV."""
service = config.get_service(protocol)
if not service:
raise exceptions.NoServiceError(
"no service available for protocol " + str(protocol)
)
handler = {
Protocol.DMAP: DmapPairingHandler,
Protocol.MRP: MrpPairingHandler,
Protocol.AirPlay: AirPlayPairingHandler,
}.get(protocol)
if handler is None:
raise exceptions.UnsupportedProtocolError(str(protocol))
return handler(config, await net.create_session(session), loop, **kwargs)
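
# Illustrative usage (a sketch, not part of the original module):
#
#   async def example(loop):
#       configs = await scan(loop, timeout=5)
#       if configs:
#           atv = await connect(configs[0], loop)
#           # ... interact with the device ...
#           atv.close()  # assumption about the interface.AppleTV API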
| 29.317308
| 84
| 0.711053
| 0
| 0
| 0
| 0
| 0
| 0
| 2,412
| 0.791079
| 328
| 0.107576
|
ac65e8637b6048418cfca104d86483ce0041387d
| 1,323
|
py
|
Python
|
demessifyme/file_read_write.py
|
lilianluong/demessifyme
|
7b90611316a4fc723fe38af8fe6e1ee4209e8fd2
|
[
"MIT"
] | null | null | null |
demessifyme/file_read_write.py
|
lilianluong/demessifyme
|
7b90611316a4fc723fe38af8fe6e1ee4209e8fd2
|
[
"MIT"
] | null | null | null |
demessifyme/file_read_write.py
|
lilianluong/demessifyme
|
7b90611316a4fc723fe38af8fe6e1ee4209e8fd2
|
[
"MIT"
] | 1
|
2020-10-10T11:13:37.000Z
|
2020-10-10T11:13:37.000Z
|
import glob
import os
from doc2vec import read_file, embed_document
def get_file_embeddings():
    # Glob every supported document (.txt, .pdf, .docx) under the cwd, recursively.
txt_files = glob.glob('**/*.txt', recursive=True)
pdf_files = glob.glob('**/*.pdf', recursive=True)
docx_files = glob.glob('**/*.docx', recursive=True)
file_names = txt_files + pdf_files + docx_files
print("Retrieved files:")
for filename in file_names:
print(filename)
vector_list = []
for file in file_names:
v = embed_document(read_file(file))
vector_list.append(v)
print("Processing files...")
return file_names, vector_list
def write_folders(named_folders):
for folder_name, folder in named_folders.items():
if not len(folder): continue
directory = folder_name
for file in folder:
new_path = os.path.join(os.path.dirname(os.path.abspath(file)),
directory)
if not os.path.exists(new_path):
os.mkdir(new_path)
# print(os.path.join(new_path, os.path.basename(os.path.abspath(file))))
os.rename(file, os.path.join(new_path,
os.path.basename(os.path.abspath(file))))
print(f"Moved {len(folder)} files to folder named {folder_name}.")
| 33.075
| 84
| 0.615268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 241
| 0.182162
|
ac665b07df9871fa56973cfc475e0b3e944d2fc8
| 8,902
|
py
|
Python
|
util/third_party/tensorflow_extra/tool/tflite/tflite.py
|
bojanpotocnik/gecko_sdk
|
9e70b13fc4701459c5f8a8f5e8918ec3f5ea8903
|
[
"Zlib"
] | 82
|
2016-06-29T17:24:43.000Z
|
2021-04-16T06:49:17.000Z
|
util/third_party/tensorflow_extra/tool/tflite/tflite.py
|
bojanpotocnik/gecko_sdk
|
9e70b13fc4701459c5f8a8f5e8918ec3f5ea8903
|
[
"Zlib"
] | 2
|
2017-02-13T10:07:17.000Z
|
2017-03-22T21:28:26.000Z
|
util/third_party/tensorflow_extra/tool/tflite/tflite.py
|
bojanpotocnik/gecko_sdk
|
9e70b13fc4701459c5f8a8f5e8918ec3f5ea8903
|
[
"Zlib"
] | 56
|
2016-08-02T10:50:50.000Z
|
2021-07-19T08:57:34.000Z
|
#!/usr/bin/env python3
import sys
import os
import argparse
from string import Template
import re
# Patch site-packages to find numpy
import jinja2
if sys.platform.startswith("win"):
site_packages_path = os.path.abspath(os.path.join(os.path.dirname(jinja2.__file__), "../../../ext-site-packages"))
else:
site_packages_path = os.path.abspath(os.path.join(os.path.dirname(jinja2.__file__), "../../../../ext-site-packages"))
if os.path.exists(site_packages_path):
if site_packages_path not in sys.path:
sys.path.insert(0, site_packages_path)
"""
Generation of parameter files requires the tflite_model, tflite_model_parameters
and tensorflow_lite_support packages. Because these packages are not installed
in the uc-generation environment where this python script will be run, these
packages are supplied as source. tflite_model and tflite_model_parameters were
fetched from internal repos, while the tensorflow_lite_support was fetched from
https://github.com/tensorflow/tflite-support.
"""
import tflite.Model
from tflite_model import TfliteModel
from tflite_model_parameters import TfliteModelParameters
template_model_h = """// Auto-generated serialization of TFLite flatbuffers in config directory
#ifndef SL_TFLITE_MICRO_MODEL_H
#define SL_TFLITE_MICRO_MODEL_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
${data}
extern const uint8_t *default_model_array;
extern const uint32_t default_model_len;
#ifdef __cplusplus
}
#endif
#endif // SL_TFLITE_MICRO_MODEL_H
"""
template_model_h_single ="""
extern const uint8_t ${model_name}_array[];
extern const uint32_t ${model_name}_len;
"""
template_model_c = """// Auto-generated serialization of TFLite flatbuffers in config directory
#include "em_device.h"
#include "sl_tflite_micro_model.h"
${data}
const uint8_t *default_model_array = ${model_name}_array;
const uint32_t default_model_len = ${data_len}UL;
"""
template_model_c_single = """
const uint8_t ${model_name}_array[] __ALIGNED(4) = {
${data}
};
const uint32_t ${model_name}_len = ${data_len}UL;
"""
template_opcode_resolver_h = """// Auto-generated macro to instantiate and initialize opcode resolver based on TFLite flatbuffers in config directory
#ifndef SL_TFLITE_MICRO_OPCODE_RESOLVER_H
#define SL_TFLITE_MICRO_OPCODE_RESOLVER_H
#define SL_TFLITE_MICRO_OPCODE_RESOLVER(opcode_resolver, error_reporter) \\
static tflite::MicroMutableOpResolver<${data_len}> opcode_resolver(error_reporter); \\
${data}
#endif // SL_TFLITE_MICRO_OPCODE_RESOLVER_H
"""
template_model_parameter_single = """#define SL_${model_name}_${config_key} ${config_val}
"""
template_model_default_parameter_single = """#define SL_DEFAULT_MODEL_${config_key} SL_${model_name}_${config_key}
"""
template_model_parameters_h = """// Auto-generated parameters from TFLite flatbuffers in config directory
#ifndef SL_TFLITE_MICRO_MODEL_PARAMETERS_H
#define SL_TFLITE_MICRO_MODEL_PARAMETERS_H
${data}
#endif // SL_TFLITE_MICRO_MODEL_PARAMETERS_H
"""
"""
The following dictionary has been created using the BuiltinOperator enum defining operatior values, see schema_generated.h, and
function names defined in the MicroMutableOpResolver object, see micro_mutable_op_resolver.h.
"""
opcode_dict = {
101: 'AddAbs',
0: 'AddAdd',
106: 'AddAddN',
56: 'AddArgMax',
79: 'AddArgMin',
1: 'AddAveragePool2D',
104: 'AddCeil',
2: 'AddConcatenation',
3: 'AddConv2D',
108: 'AddCos',
4: 'AddDepthwiseConv2D',
6: 'AddDequantize',
71: 'AddEqual',
8: 'AddFloor',
9: 'AddFullyConnected',
61: 'AddGreater',
62: 'AddGreaterEqual',
117: 'AddHardSwish',
11: 'AddL2Normalization',
58: 'AddLess',
63: 'AddLessEqual',
73: 'AddLog',
86: 'AddLogicalAnd',
87: 'AddLogicalNot',
84: 'AddLogicalOr',
14: 'AddLogistic',
55: 'AddMaximum',
17: 'AddMaxPool2D',
40: 'AddMean',
57: 'AddMinimum',
18: 'AddMul',
59: 'AddNeg',
    72: 'AddNotEqual',  # NOT_EQUAL is 72 in the TFLite schema; 73 would shadow AddLog above
83: 'AddPack',
34: 'AddPad',
60: 'AddPadV2',
54: 'AddPrelu',
114: 'AddQuantize',
82: 'AddReduceMax',
19: 'AddRelu',
21: 'AddRelu6',
22: 'AddReshape',
97: 'AddResizeNearestNeighbor',
116: 'AddRound',
76: 'AddRsqrt',
77: 'AddShape',
66: 'AddSin',
25: 'AddSoftmax',
47: 'AddSplit',
102: 'AddSplitV',
75: 'AddSqrt',
92: 'AddSquare',
45: 'AddStridedSlice',
41: 'AddSub',
27: 'AddSvdf',
28: 'AddTanh',
67: 'AddTransposeConv',
88: 'AddUnpack'
}
def sanitize_filename(name):
# Strip invalid characters
name = re.sub(r'[^a-zA-Z0-9_]', '', name)
# C variables can't start with a number
name = name.lstrip('0123456789')
return name
def find_tflite_files(input_dir):
for f in os.listdir(input_dir):
if os.path.splitext(f)[-1] == '.tflite':
with open(os.path.join(input_dir, f), 'rb') as fd:
data = fd.read()
filename = sanitize_filename(os.path.splitext(f)[0])
yield filename, data
def generate_c_array(buf):
arr = ''
for i, ch in enumerate(buf):
if (i % 12) == 0:
arr += '\n '
arr += '0x{:02x}, '.format(ch)
return arr.lstrip().rstrip(', ')
def opcode_parse_opcode(opcode):
    if opcode.CustomCode() is not None:
opcode_val = opcode.CustomCode()
else:
opcode_val = opcode.BuiltinCode()
if opcode_val in opcode_dict.keys():
opcode_func = opcode_dict[opcode_val]
opcode_entry = {opcode_val: opcode_func}
else:
print(f"tflite.py WARNING: An unknown operator with code value={opcode_val} has been discovered. It will not be automatic initialized.")
opcode_entry = {-1: "UndefinedOp"}
return opcode_entry
def opcode_parse_model(model):
opcodes = {}
for index in range(model.OperatorCodesLength()):
opcode = model.OperatorCodes(index)
opcodes.update(opcode_parse_opcode(opcode))
return opcodes
def generate_files(input_dir, output_dir):
tc = Template(template_model_c_single)
th = Template(template_model_h_single)
data_c = ''
data_h = ''
parameter_defines = ''
opcodes = {}
for model_name, buf in find_tflite_files(input_dir):
props = {
'model_name': model_name,
'data': generate_c_array(buf),
'data_len': len(buf),
}
data_c += tc.substitute(**props)
data_h += th.substitute(**props)
model = tflite.Model.Model.GetRootAsModel(buf)
opcodes.update(opcode_parse_model(model))
# Extract model parameters
try:
loaded_model_params = TfliteModelParameters.load_from_tflite_flatbuffer(buf)
except:
loaded_model_params = {}
if loaded_model_params:
param_define_t = Template(template_model_parameter_single)
default_define_t = Template(template_model_default_parameter_single)
parameter_defines += f'// Definitions generated from {model_name}.tflite\n'
            default_model_defines = '// Default model parameters\n'
for key, value in sorted(loaded_model_params.items()):
# Ensure valid C code:
if type(value) == str:
value = f'"{value}"'
elif type(value) == bool:
value = str(value).lower()
props = {
'model_name': model_name.upper(),
'config_key': key.replace('.', '_').upper(),
'config_val': value,
}
parameter_defines += param_define_t.substitute(**props)
default_model_defines += default_define_t.substitute(**props)
parameter_defines += '\n'
tc = Template(template_model_c)
with open(os.path.join(output_dir, 'sl_tflite_micro_model.c'), 'w') as fd:
fd.write(tc.substitute(data=data_c, model_name=model_name, data_len=len(buf)))
th = Template(template_model_h)
with open(os.path.join(output_dir, 'sl_tflite_micro_model.h'), 'w') as fd:
fd.write(th.substitute(data=data_h))
tm = Template(template_opcode_resolver_h)
opcode_len = len(opcodes)
opcode_str = ''
# Only emit this file if model parameters are available
if parameter_defines:
tp = Template(template_model_parameters_h)
with open(os.path.join(output_dir, 'sl_tflite_micro_model_parameters.h'), 'w') as fd:
fd.write(tp.substitute(data=(parameter_defines + default_model_defines)))
for opcode_key in opcodes.keys():
if opcode_key != 32: # CUSTOM opcode
opcode_str += f"opcode_resolver.{opcodes[opcode_key]}(); \\\n"
with open(os.path.join(output_dir, 'sl_tflite_micro_opcode_resolver.h'), 'w') as fd:
fd.write(tm.substitute({'data_len':str(opcode_len), 'data':opcode_str}))
def entry():
parser = argparse.ArgumentParser(description='TensorFlow Lite flatbuffer to C converter.')
parser.add_argument('-i', required=True, help='Input directory containing .tflite files')
parser.add_argument('-o', required=True, help='Output directory to populate with serialized content.')
args = parser.parse_args()
generate_files(args.i, args.o)
entry()
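
# Illustrative invocation (paths are placeholders):
#   python tflite.py -i ./config -o ./autogen
# This serializes every .tflite under ./config into sl_tflite_micro_model.c/.h,
# emits sl_tflite_micro_opcode_resolver.h, and, when the flatbuffer carries
# model parameters, sl_tflite_micro_model_parameters.h as well.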
| 30.909722
| 149
| 0.702089
| 0
| 0
| 281
| 0.031566
| 0
| 0
| 0
| 0
| 4,025
| 0.452146
|
ac66fd2ca8108c50a826f92ddb5befe5db26ac80
| 3,290
|
py
|
Python
|
braintree/transparent_redirect_gateway.py
|
eldarion/braintree_python
|
8be3f69fb9a4171c5e9be049c8440fcc4f79fb40
|
[
"MIT"
] | 3
|
2015-11-05T08:57:12.000Z
|
2016-07-17T18:10:55.000Z
|
braintree/transparent_redirect_gateway.py
|
eldarion/braintree_python
|
8be3f69fb9a4171c5e9be049c8440fcc4f79fb40
|
[
"MIT"
] | null | null | null |
braintree/transparent_redirect_gateway.py
|
eldarion/braintree_python
|
8be3f69fb9a4171c5e9be049c8440fcc4f79fb40
|
[
"MIT"
] | null | null | null |
import cgi
from datetime import datetime
import urllib
import braintree
from braintree.util.crypto import Crypto
from braintree.error_result import ErrorResult
from braintree.exceptions.forged_query_string_error import ForgedQueryStringError
from braintree.util.http import Http
from braintree.successful_result import SuccessfulResult
from braintree.transparent_redirect import TransparentRedirect
class TransparentRedirectGateway(object):
def __init__(self, gateway):
self.gateway = gateway
self.config = gateway.config
def confirm(self, query_string):
"""
Confirms a transparent redirect request. It expects the query string from the
redirect request. The query string should _not_ include the leading "?" character. ::
result = braintree.TransparentRedirect.confirm("foo=bar&id=12345")
"""
parsed_query_string = self._parse_and_validate_query_string(query_string)
confirmation_gateway = {
TransparentRedirect.Kind.CreateCustomer: "customer",
TransparentRedirect.Kind.UpdateCustomer: "customer",
TransparentRedirect.Kind.CreatePaymentMethod: "credit_card",
TransparentRedirect.Kind.UpdatePaymentMethod: "credit_card",
TransparentRedirect.Kind.CreateTransaction: "transaction"
}[parsed_query_string["kind"][0]]
return getattr(self.gateway, confirmation_gateway)._post("/transparent_redirect_requests/" + parsed_query_string["id"][0] + "/confirm")
def tr_data(self, data, redirect_url):
data = self.__flatten_dictionary(data)
date_string = datetime.utcnow().strftime("%Y%m%d%H%M%S")
data["time"] = date_string
data["redirect_url"] = redirect_url
data["public_key"] = self.config.public_key
data["api_version"] = self.config.api_version()
tr_content = urllib.urlencode(data)
tr_hash = Crypto.hmac_hash(self.config.private_key, tr_content)
return tr_hash + "|" + tr_content
def url(self):
"""
Returns the url for POSTing Transparent Redirect HTML forms
"""
return self.config.base_merchant_url() + "/transparent_redirect_requests"
def _parse_and_validate_query_string(self, query_string):
query_params = cgi.parse_qs(query_string)
http_status = int(query_params["http_status"][0])
message = query_params.get("bt_message")
        if message is not None:
message = message[0]
if Http.is_error_status(http_status):
Http.raise_exception_from_status(http_status, message)
if not self._is_valid_tr_query_string(query_string):
raise ForgedQueryStringError
return query_params
def _is_valid_tr_query_string(self, query_string):
content, hash = query_string.split("&hash=")
return hash == Crypto.hmac_hash(self.config.private_key, content)
def __flatten_dictionary(self, params, parent=None):
data = {}
for key, val in params.iteritems():
full_key = parent + "[" + key + "]" if parent else key
if isinstance(val, dict):
data.update(self.__flatten_dictionary(val, full_key))
else:
data[full_key] = val
return data
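
    # Example (illustrative): __flatten_dictionary({"customer": {"first_name": "Joe"}})
    # returns {"customer[first_name]": "Joe"} - the Rails-style bracket notation
    # expected by the transparent redirect endpoint.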
| 40.121951
| 143
| 0.687234
| 2,888
| 0.877812
| 0
| 0
| 0
| 0
| 0
| 0
| 603
| 0.183283
|
ac67224e0a480ab178264f670f037b9c677d4fdc
| 358
|
py
|
Python
|
paint/migrations/0007_auto_20200405_1748.py
|
atulk17/Paint-App
|
4b56455596d140cee4a9b19c71fe82364c3f3b7c
|
[
"BSD-2-Clause"
] | null | null | null |
paint/migrations/0007_auto_20200405_1748.py
|
atulk17/Paint-App
|
4b56455596d140cee4a9b19c71fe82364c3f3b7c
|
[
"BSD-2-Clause"
] | null | null | null |
paint/migrations/0007_auto_20200405_1748.py
|
atulk17/Paint-App
|
4b56455596d140cee4a9b19c71fe82364c3f3b7c
|
[
"BSD-2-Clause"
] | 1
|
2020-05-31T11:37:48.000Z
|
2020-05-31T11:37:48.000Z
|
# Generated by Django 3.0.4 on 2020-04-05 12:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('paint', '0006_auto_20200405_1746'),
]
operations = [
migrations.AlterModelTable(
name='office_expense',
table='Office_Expense',
),
]
| 19.888889
| 48
| 0.578212
| 267
| 0.74581
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.312849
|
ac681a8ffae94d52efd701e9160788c29c5b6e8c
| 72
|
py
|
Python
|
app.py
|
ManarAbdelkarim/-hr-managmentSystem
|
22d2ea340824c9533576c3e7c96296f443d7bf50
|
[
"MIT"
] | null | null | null |
app.py
|
ManarAbdelkarim/-hr-managmentSystem
|
22d2ea340824c9533576c3e7c96296f443d7bf50
|
[
"MIT"
] | null | null | null |
app.py
|
ManarAbdelkarim/-hr-managmentSystem
|
22d2ea340824c9533576c3e7c96296f443d7bf50
|
[
"MIT"
] | null | null | null |
from flask import Flask
app = Flask(__name__,static_folder='../static')
| 36
| 47
| 0.763889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.152778
|
ac68c7b7a0f74f060897b04d3c95265a4c3918fb
| 4,412
|
py
|
Python
|
esprit/models.py
|
Ken125pig/esprit
|
6fc6b24450627077f1d38145e14765f776feba8b
|
[
"Apache-2.0"
] | null | null | null |
esprit/models.py
|
Ken125pig/esprit
|
6fc6b24450627077f1d38145e14765f776feba8b
|
[
"Apache-2.0"
] | 6
|
2016-02-04T12:08:40.000Z
|
2017-12-08T15:48:35.000Z
|
esprit/models.py
|
Ken125pig/esprit
|
6fc6b24450627077f1d38145e14765f776feba8b
|
[
"Apache-2.0"
] | 4
|
2016-09-09T11:01:27.000Z
|
2021-11-21T05:49:14.000Z
|
from copy import deepcopy
import string
from esprit import versions
unicode_punctuation_map = dict((ord(char), None) for char in string.punctuation)
class Query(object):
def __init__(self, raw=None):
self.q = QueryBuilder.match_all() if raw is None else raw
if "query" not in self.q:
self.q["query"] = {"match_all": {}}
def query_string(self, s, op=None, must=False, should=False):
self.clear_match_all()
qs = {"query": s}
if op is not None:
qs["default_operator"] = op
        if must:
            self.add_must()
            # bool clauses live under q["query"]["bool"] (see add_must/add_should),
            # and each list entry must be a complete query object
            self.q["query"]["bool"]["must"].append({"query_string": qs})
        elif should:
            self.add_should()
            self.q["query"]["bool"]["should"].append({"query_string": qs})
else:
self.q["query"]["query_string"] = qs
def add_should(self):
if "bool" not in self.q["query"]:
self.q["query"]["bool"] = {}
if "should" not in self.q["query"]["bool"]:
self.q["query"]["bool"]["should"] = []
def add_must(self):
if "bool" not in self.q["query"]:
self.q["query"]["bool"] = {}
if "must" not in self.q["query"]["bool"]:
self.q["query"]["bool"]["must"] = []
def clear_match_all(self):
if "match_all" in self.q["query"]:
del self.q["query"]["match_all"]
def include_source(self, values, es_version="0.90.13"):
if "_source" not in self.q:
self.q["_source"] = {}
if versions.source_include(es_version):
self.q["_source"]["include"] = values
else:
self.q["_source"]["includes"] = values
def set_source(self, values):
self.q["_source"] = values
def as_dict(self):
return self.q
class QueryBuilder(object):
_match_all = {"query": {"match_all": {}}}
_query_string = {"query": {"query_string": {"query": "<query string>"}}}
_term = {"query": {"term": {}}} # term : {"<key>" : "<value>"}
_terms_filter = {"query": {"filtered": {"filter": {"terms": {}}}}} # terms : {"<key>" : ["<value>"]}
_term_filter = {"query": {"filtered": {"filter": {"term": {}}}}} # terms : {"<key>" : "<value>"}
_fields_constraint = {"fields": []}
_special_chars = ["+", "-", "&&", "||", "!", "(", ")", "{", "}", "[", "]", "^", '"', "~", "*", "?", ":", "/"]
_escape_char = "\\" # which is a special special character too!
@classmethod
def match_all(cls):
return deepcopy(cls._match_all)
@classmethod
def query_string(cls, query):
q = deepcopy(cls._query_string)
q["query"]["query_string"]["query"] = query
return q
@classmethod
def term(cls, key, value):
q = deepcopy(cls._term)
q["query"]["term"][key] = value
return q
@classmethod
def term_filter(cls, key, value):
q = deepcopy(cls._term_filter)
q["query"]["filtered"]["filter"]["term"][key] = value
return q
@classmethod
def terms_filter(cls, key, values):
if not isinstance(values, list):
values = [values]
q = deepcopy(cls._terms_filter)
q["query"]["filtered"]["filter"]["terms"][key] = values
return q
@classmethod
def fields(cls, query, fields=None):
fields = [] if fields is None else fields if isinstance(fields, list) else [fields]
fc = deepcopy(cls._fields_constraint)
fc["fields"] = fields
query.update(fc)
return query
@classmethod
def tokenise(cls, text_string):
        # FIXME: note that we don't do anything about stopwords right now.
        # NOTE: the original compared type(...) against the strings "str" and
        # "unicode", which is always False, so punctuation was never stripped;
        # compare against the type itself (str is unicode on Python 3).
        out = text_string
        if isinstance(text_string, str):
            out = text_string.translate(unicode_punctuation_map)
        return list(set([o.lower() for o in out.split(" ") if o != ""]))
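
        # Example (illustrative): tokenise("Hello, hello world") strips the
        # comma, lower-cases and de-duplicates, returning ["hello", "world"]
        # in unspecified order.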
@classmethod
def escape(cls, query_string):
qs = query_string.replace(cls._escape_char, cls._escape_char + cls._escape_char) # escape the escape char
for sc in cls._special_chars:
qs = qs.replace(sc, cls._escape_char + sc)
return qs
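
    # Example (illustrative): escape('a:(b)') prefixes each Lucene special
    # character with a backslash, producing the raw string a\:\(b\); the escape
    # character itself is doubled first so it survives the pass.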
| 34.46875
| 120
| 0.529465
| 4,256
| 0.964642
| 0
| 0
| 1,776
| 0.402539
| 0
| 0
| 904
| 0.204896
|
ac6bcb9638b71ebbb75437e8984e9150ca824759
| 2,074
|
py
|
Python
|
src/focus/api.py
|
RogerRueegg/lvw-young-talents
|
baf8490230230fffb232a13eb641b55ede29a710
|
[
"MIT"
] | 1
|
2018-02-13T08:09:02.000Z
|
2018-02-13T08:09:02.000Z
|
src/focus/api.py
|
RogerRueegg/lvw-young-talents
|
baf8490230230fffb232a13eb641b55ede29a710
|
[
"MIT"
] | null | null | null |
src/focus/api.py
|
RogerRueegg/lvw-young-talents
|
baf8490230230fffb232a13eb641b55ede29a710
|
[
"MIT"
] | null | null | null |
from . import models
from . import serializers
from rest_framework import viewsets, permissions
class CompetitionViewSet(viewsets.ModelViewSet):
"""ViewSet for the Competition class"""
queryset = models.Competition.objects.all()
serializer_class = serializers.CompetitionSerializer
permission_classes = [permissions.IsAuthenticated]
class TrainingViewSet(viewsets.ModelViewSet):
"""ViewSet for the Training class"""
queryset = models.Training.objects.all()
serializer_class = serializers.TrainingSerializer
permission_classes = [permissions.IsAuthenticated]
class CompetitorViewSet(viewsets.ModelViewSet):
"""ViewSet for the Competitor class"""
queryset = models.Competitor.objects.all()
serializer_class = serializers.CompetitorSerializer
permission_classes = [permissions.IsAuthenticated]
class TrainingpresenceViewSet(viewsets.ModelViewSet):
"""ViewSet for the Trainingpresence class"""
queryset = models.Trainingpresence.objects.all()
serializer_class = serializers.TrainingpresenceSerializer
permission_classes = [permissions.IsAuthenticated]
class DriverViewSet(viewsets.ModelViewSet):
"""ViewSet for the Driver class"""
queryset = models.Driver.objects.all()
serializer_class = serializers.DriverSerializer
permission_classes = [permissions.IsAuthenticated]
class EventViewSet(viewsets.ModelViewSet):
"""ViewSet for the Event class"""
queryset = models.Event.objects.all()
serializer_class = serializers.EventSerializer
permission_classes = [permissions.IsAuthenticated]
class ResultViewSet(viewsets.ModelViewSet):
"""ViewSet for the Result class"""
queryset = models.Result.objects.all()
serializer_class = serializers.ResultSerializer
permission_classes = [permissions.IsAuthenticated]
class LocationViewSet(viewsets.ModelViewSet):
"""ViewSet for the Location class"""
queryset = models.Location.objects.all()
serializer_class = serializers.LocationSerializer
permission_classes = [permissions.IsAuthenticated]
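
# Illustrative wiring (a sketch that belongs in a urls.py; the route prefixes
# are assumptions, not part of this file):
#
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r"competitions", CompetitionViewSet)
#   router.register(r"results", ResultViewSet)
#   urlpatterns = router.urls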
| 29.628571
| 61
| 0.771456
| 1,952
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 294
| 0.141755
|
ac6bff0ef28a6f43b86268b434e114b49de8f3f4
| 1,273
|
py
|
Python
|
algorithms/array/majority_element.py
|
kevinshenyang07/Data-Structure-and-Algo
|
36b02feea04b892f1256de090c4fcf7b6aa98873
|
[
"MIT"
] | null | null | null |
algorithms/array/majority_element.py
|
kevinshenyang07/Data-Structure-and-Algo
|
36b02feea04b892f1256de090c4fcf7b6aa98873
|
[
"MIT"
] | null | null | null |
algorithms/array/majority_element.py
|
kevinshenyang07/Data-Structure-and-Algo
|
36b02feea04b892f1256de090c4fcf7b6aa98873
|
[
"MIT"
] | null | null | null |
# Moore Voting
# Majority Element
# Given an array of size n, find the majority element.
# The majority element is the element that appears more than ⌊ n/2 ⌋ times.
# assume at least one element
class Solution(object):
def majorityElement(self, nums):
count, major_num = 0, nums[0]
for num in nums:
if num == major_num:
count += 1
elif count == 0:
count, major_num = 1, num
else:
count -= 1
return major_num
# Majority Element II
class Solution(object):
def majorityElement(self, nums):
if not nums: return []
count1 = count2 = 0
major1 = major2 = nums[0]
for num in nums:
if num == major1:
count1 += 1
elif num == major2:
count2 += 1
elif count1 == 0:
count1, major1 = 1, num
elif count2 == 0:
count2, major2 = 1, num
else:
count1 -= 1
count2 -= 1
res = []
if nums.count(major1) > len(nums) / 3:
res.append(major1)
if major2 != major1 and nums.count(major2) > len(nums) / 3:
res.append(major2)
return res
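
# Worked trace (illustrative) of Boyer-Moore voting for nums = [2, 2, 1, 1, 2]:
# (count, major) evolves 1,2 -> 2,2 -> 1,2 -> 0,2 -> 1,2, so 2 is returned.
# Pairing each majority element off against a distinct value can never exhaust
# a true majority, which is why the surviving candidate is correct.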
| 25.979592
| 75
| 0.495679
| 1,052
| 0.823806
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.16758
|
ac6d9ecff3da360afd64ff653a18f8313213ea89
| 13,844
|
py
|
Python
|
evmosproto/evmos/incentives/v1/incentives_pb2.py
|
hanchon-live/evmosproto
|
141f336cf027a88c5bf227ab49069dd1cf2e4853
|
[
"MIT"
] | null | null | null |
evmosproto/evmos/incentives/v1/incentives_pb2.py
|
hanchon-live/evmosproto
|
141f336cf027a88c5bf227ab49069dd1cf2e4853
|
[
"MIT"
] | null | null | null |
evmosproto/evmos/incentives/v1/incentives_pb2.py
|
hanchon-live/evmosproto
|
141f336cf027a88c5bf227ab49069dd1cf2e4853
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: evmos/incentives/v1/incentives.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from evmosproto.gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from evmosproto.cosmos.base.v1beta1 import coin_pb2 as cosmos_dot_base_dot_v1beta1_dot_coin__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='evmos/incentives/v1/incentives.proto',
package='evmos.incentives.v1',
syntax='proto3',
serialized_options=b'Z+github.com/tharsis/evmos/x/incentives/types',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n$evmos/incentives/v1/incentives.proto\x12\x13\x65vmos.incentives.v1\x1a\x14gogoproto/gogo.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1e\x63osmos/base/v1beta1/coin.proto\"\xe2\x01\n\tIncentive\x12\x10\n\x08\x63ontract\x18\x01 \x01(\t\x12\x66\n\x0b\x61llocations\x18\x02 \x03(\x0b\x32\x1c.cosmos.base.v1beta1.DecCoinB3\xc8\xde\x1f\x00\xaa\xdf\x1f+github.com/cosmos/cosmos-sdk/types.DecCoins\x12\x0e\n\x06\x65pochs\x18\x03 \x01(\r\x12\x38\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x08\x90\xdf\x1f\x01\xc8\xde\x1f\x00\x12\x11\n\ttotal_gas\x18\x05 \x01(\x04\"I\n\x08GasMeter\x12\x10\n\x08\x63ontract\x18\x01 \x01(\t\x12\x13\n\x0bparticipant\x18\x02 \x01(\t\x12\x16\n\x0e\x63umulative_gas\x18\x03 \x01(\x04\"\xcf\x01\n\x19RegisterIncentiveProposal\x12\r\n\x05title\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x10\n\x08\x63ontract\x18\x03 \x01(\t\x12\x66\n\x0b\x61llocations\x18\x04 \x03(\x0b\x32\x1c.cosmos.base.v1beta1.DecCoinB3\xc8\xde\x1f\x00\xaa\xdf\x1f+github.com/cosmos/cosmos-sdk/types.DecCoins\x12\x0e\n\x06\x65pochs\x18\x05 \x01(\r:\x04\xe8\xa0\x1f\x00\"U\n\x17\x43\x61ncelIncentiveProposal\x12\r\n\x05title\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x10\n\x08\x63ontract\x18\x03 \x01(\t:\x04\xe8\xa0\x1f\x00\x42-Z+github.com/tharsis/evmos/x/incentives/typesb\x06proto3'
,
dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,cosmos_dot_base_dot_v1beta1_dot_coin__pb2.DESCRIPTOR,])
_INCENTIVE = _descriptor.Descriptor(
name='Incentive',
full_name='evmos.incentives.v1.Incentive',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='contract', full_name='evmos.incentives.v1.Incentive.contract', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='allocations', full_name='evmos.incentives.v1.Incentive.allocations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000\252\337\037+github.com/cosmos/cosmos-sdk/types.DecCoins', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='epochs', full_name='evmos.incentives.v1.Incentive.epochs', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_time', full_name='evmos.incentives.v1.Incentive.start_time', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\220\337\037\001\310\336\037\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_gas', full_name='evmos.incentives.v1.Incentive.total_gas', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=149,
serialized_end=375,
)
_GASMETER = _descriptor.Descriptor(
name='GasMeter',
full_name='evmos.incentives.v1.GasMeter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='contract', full_name='evmos.incentives.v1.GasMeter.contract', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='participant', full_name='evmos.incentives.v1.GasMeter.participant', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cumulative_gas', full_name='evmos.incentives.v1.GasMeter.cumulative_gas', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=377,
serialized_end=450,
)
_REGISTERINCENTIVEPROPOSAL = _descriptor.Descriptor(
name='RegisterIncentiveProposal',
full_name='evmos.incentives.v1.RegisterIncentiveProposal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='evmos.incentives.v1.RegisterIncentiveProposal.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='evmos.incentives.v1.RegisterIncentiveProposal.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contract', full_name='evmos.incentives.v1.RegisterIncentiveProposal.contract', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='allocations', full_name='evmos.incentives.v1.RegisterIncentiveProposal.allocations', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000\252\337\037+github.com/cosmos/cosmos-sdk/types.DecCoins', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='epochs', full_name='evmos.incentives.v1.RegisterIncentiveProposal.epochs', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\350\240\037\000',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=453,
serialized_end=660,
)
_CANCELINCENTIVEPROPOSAL = _descriptor.Descriptor(
name='CancelIncentiveProposal',
full_name='evmos.incentives.v1.CancelIncentiveProposal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='evmos.incentives.v1.CancelIncentiveProposal.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='evmos.incentives.v1.CancelIncentiveProposal.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contract', full_name='evmos.incentives.v1.CancelIncentiveProposal.contract', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\350\240\037\000',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=662,
serialized_end=747,
)
_INCENTIVE.fields_by_name['allocations'].message_type = cosmos_dot_base_dot_v1beta1_dot_coin__pb2._DECCOIN
_INCENTIVE.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_REGISTERINCENTIVEPROPOSAL.fields_by_name['allocations'].message_type = cosmos_dot_base_dot_v1beta1_dot_coin__pb2._DECCOIN
DESCRIPTOR.message_types_by_name['Incentive'] = _INCENTIVE
DESCRIPTOR.message_types_by_name['GasMeter'] = _GASMETER
DESCRIPTOR.message_types_by_name['RegisterIncentiveProposal'] = _REGISTERINCENTIVEPROPOSAL
DESCRIPTOR.message_types_by_name['CancelIncentiveProposal'] = _CANCELINCENTIVEPROPOSAL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Incentive = _reflection.GeneratedProtocolMessageType('Incentive', (_message.Message,), {
'DESCRIPTOR' : _INCENTIVE,
'__module__' : 'evmos.incentives.v1.incentives_pb2'
# @@protoc_insertion_point(class_scope:evmos.incentives.v1.Incentive)
})
_sym_db.RegisterMessage(Incentive)
GasMeter = _reflection.GeneratedProtocolMessageType('GasMeter', (_message.Message,), {
'DESCRIPTOR' : _GASMETER,
'__module__' : 'evmos.incentives.v1.incentives_pb2'
# @@protoc_insertion_point(class_scope:evmos.incentives.v1.GasMeter)
})
_sym_db.RegisterMessage(GasMeter)
RegisterIncentiveProposal = _reflection.GeneratedProtocolMessageType('RegisterIncentiveProposal', (_message.Message,), {
'DESCRIPTOR' : _REGISTERINCENTIVEPROPOSAL,
'__module__' : 'evmos.incentives.v1.incentives_pb2'
# @@protoc_insertion_point(class_scope:evmos.incentives.v1.RegisterIncentiveProposal)
})
_sym_db.RegisterMessage(RegisterIncentiveProposal)
CancelIncentiveProposal = _reflection.GeneratedProtocolMessageType('CancelIncentiveProposal', (_message.Message,), {
'DESCRIPTOR' : _CANCELINCENTIVEPROPOSAL,
'__module__' : 'evmos.incentives.v1.incentives_pb2'
# @@protoc_insertion_point(class_scope:evmos.incentives.v1.CancelIncentiveProposal)
})
_sym_db.RegisterMessage(CancelIncentiveProposal)
DESCRIPTOR._options = None
_INCENTIVE.fields_by_name['allocations']._options = None
_INCENTIVE.fields_by_name['start_time']._options = None
_REGISTERINCENTIVEPROPOSAL.fields_by_name['allocations']._options = None
_REGISTERINCENTIVEPROPOSAL._options = None
_CANCELINCENTIVEPROPOSAL._options = None
# @@protoc_insertion_point(module_scope)
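# A minimal usage sketch (not part of the generated file, and guarded so it
# does not run on import): standard protobuf construction and round-trip
# serialization of the Incentive message defined above; field values are
# illustrative only.
if __name__ == '__main__':
    incentive = Incentive(contract='contract-address', epochs=10, total_gas=0)
    data = incentive.SerializeToString()
    parsed = Incentive()
    parsed.ParseFromString(data)
    assert parsed.epochs == 10 and parsed.contract == 'contract-address'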
| 48.069444
| 1,372
| 0.769142
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,995
| 0.288573
|
ac6da0ba7a668648271540c11b0b554223adac1a
| 15,332
|
py
|
Python
|
modules/dashboard/question_editor.py
|
danieldanciu/schoggi
|
0e18f0cca58cf2318525d57691c2e674b131206d
|
[
"Apache-2.0"
] | null | null | null |
modules/dashboard/question_editor.py
|
danieldanciu/schoggi
|
0e18f0cca58cf2318525d57691c2e674b131206d
|
[
"Apache-2.0"
] | null | null | null |
modules/dashboard/question_editor.py
|
danieldanciu/schoggi
|
0e18f0cca58cf2318525d57691c2e674b131206d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and editing of questions."""
__author__ = 'John Orr (jorr@google.com)'
import copy
import messages
from common import schema_fields
from models import transforms
from models.models import QuestionDAO
from models.models import QuestionDTO
from models.models import SaQuestionConstants
from modules.dashboard import dto_editor
from modules.dashboard import utils as dashboard_utils
class QuestionManagerAndEditor(dto_editor.BaseDatastoreAssetEditor):
"""An editor for editing and managing questions."""
def qmae_prepare_template(self, rest_handler, key=''):
"""Build the Jinja template for adding a question."""
template_values = {}
template_values['page_title'] = self.format_title('Edit Question')
template_values['main_content'] = self.get_form(
rest_handler, key,
dashboard_utils.build_assets_url('questions'))
return template_values
def get_add_mc_question(self):
self.render_page(self.qmae_prepare_template(McQuestionRESTHandler),
'assets', 'questions')
def get_add_sa_question(self):
self.render_page(self.qmae_prepare_template(SaQuestionRESTHandler),
'assets', 'questions')
def get_edit_question(self):
key = self.request.get('key')
question = QuestionDAO.load(key)
if not question:
raise Exception('No question found')
if question.type == QuestionDTO.MULTIPLE_CHOICE:
self.render_page(
self.qmae_prepare_template(McQuestionRESTHandler, key=key),
'assets', 'questions')
elif question.type == QuestionDTO.SHORT_ANSWER:
self.render_page(
self.qmae_prepare_template(SaQuestionRESTHandler, key=key),
'assets', 'questions')
else:
raise Exception('Unknown question type: %s' % question.type)
def get_clone_question(self):
original_question = QuestionDAO.load(self.request.get('key'))
cloned_question = QuestionDAO.clone(original_question)
cloned_question.description += ' (clone)'
QuestionDAO.save(cloned_question)
self.redirect(self.get_action_url('assets', {'tab': 'questions'}))
class BaseQuestionRESTHandler(dto_editor.BaseDatastoreRestHandler):
"""Common methods for handling REST end points with questions."""
def sanitize_input_dict(self, json_dict):
json_dict['description'] = json_dict['description'].strip()
def is_deletion_allowed(self, question):
used_by = QuestionDAO.used_by(question.id)
if used_by:
group_names = sorted(['"%s"' % x.description for x in used_by])
transforms.send_json_response(
self, 403,
('Question in use by question groups:\n%s.\nPlease delete it '
'from those groups and try again.') % ',\n'.join(group_names),
{'key': question.id})
return False
else:
return True
def validate_no_description_collision(self, description, key, errors):
descriptions = {q.description for q in QuestionDAO.get_all()
if not key or q.id != long(key)}
if description in descriptions:
errors.append(
'The description must be different from existing questions.')
class McQuestionRESTHandler(BaseQuestionRESTHandler):
"""REST handler for editing multiple choice questions."""
URI = '/rest/question/mc'
REQUIRED_MODULES = [
'array-extras', 'gcb-rte', 'inputex-radio', 'inputex-select',
'inputex-string', 'inputex-list', 'inputex-number', 'inputex-hidden']
EXTRA_JS_FILES = ['mc_question_editor_lib.js', 'mc_question_editor.js']
XSRF_TOKEN = 'mc-question-edit'
SCHEMA_VERSIONS = ['1.5']
DAO = QuestionDAO
@classmethod
def get_schema(cls):
"""Get the InputEx schema for the multiple choice question editor."""
mc_question = schema_fields.FieldRegistry(
'Multiple Choice Question',
description='multiple choice question',
extra_schema_dict_values={'className': 'mc-container'})
mc_question.add_property(schema_fields.SchemaField(
'version', '', 'string', optional=True, hidden=True))
mc_question.add_property(schema_fields.SchemaField(
'question', 'Question', 'html', optional=True,
extra_schema_dict_values={'className': 'mc-question'}))
mc_question.add_property(schema_fields.SchemaField(
'description', 'Description', 'string', optional=True,
extra_schema_dict_values={'className': 'mc-description'},
description=messages.QUESTION_DESCRIPTION))
mc_question.add_property(schema_fields.SchemaField(
'multiple_selections', 'Selection', 'boolean',
optional=True,
select_data=[
('false', 'Allow only one selection'),
('true', 'Allow multiple selections')],
extra_schema_dict_values={
'_type': 'radio',
'className': 'mc-selection'}))
choice_type = schema_fields.FieldRegistry(
'Choice',
extra_schema_dict_values={'className': 'mc-choice'})
choice_type.add_property(schema_fields.SchemaField(
'score', 'Score', 'string', optional=True, i18n=False,
extra_schema_dict_values={
'className': 'mc-choice-score', 'value': '0'}))
choice_type.add_property(schema_fields.SchemaField(
'text', 'Text', 'html', optional=True,
extra_schema_dict_values={'className': 'mc-choice-text'}))
choice_type.add_property(schema_fields.SchemaField(
'feedback', 'Feedback', 'html', optional=True,
extra_schema_dict_values={'className': 'mc-choice-feedback'}))
choices_array = schema_fields.FieldArray(
'choices', '', item_type=choice_type,
extra_schema_dict_values={
'className': 'mc-choice-container',
'listAddLabel': 'Add a choice',
'listRemoveLabel': 'Delete choice'})
mc_question.add_property(choices_array)
return mc_question
def pre_save_hook(self, question):
question.type = QuestionDTO.MULTIPLE_CHOICE
def transform_for_editor_hook(self, q_dict):
p_dict = copy.deepcopy(q_dict)
# InputEx does not correctly roundtrip booleans, so pass strings
p_dict['multiple_selections'] = (
'true' if q_dict.get('multiple_selections') else 'false')
return p_dict
def get_default_content(self):
return {
'version': self.SCHEMA_VERSIONS[0],
'question': '',
'description': '',
'multiple_selections': 'false',
'choices': [
{'score': '1', 'text': '', 'feedback': ''},
{'score': '0', 'text': '', 'feedback': ''},
{'score': '0', 'text': '', 'feedback': ''},
{'score': '0', 'text': '', 'feedback': ''}
]}
def validate(self, question_dict, key, version, errors):
# Currently only one version supported; version validity has already
# been checked.
self._validate15(question_dict, key, errors)
def _validate15(self, question_dict, key, errors):
if not question_dict['question'].strip():
errors.append('The question must have a non-empty body.')
if not question_dict['description']:
errors.append('The description must be non-empty.')
self.validate_no_description_collision(
question_dict['description'], key, errors)
if not question_dict['choices']:
errors.append('The question must have at least one choice.')
choices = question_dict['choices']
for index in range(0, len(choices)):
choice = choices[index]
if not choice['text'].strip():
errors.append('Choice %s has no response text.' % (index + 1))
try:
                # Coerce the score attrib into a python float
choice['score'] = float(choice['score'])
except ValueError:
errors.append(
'Choice %s must have a numeric score.' % (index + 1))
class SaQuestionRESTHandler(BaseQuestionRESTHandler):
"""REST handler for editing short answer questions."""
URI = '/rest/question/sa'
REQUIRED_MODULES = [
'gcb-rte', 'inputex-select', 'inputex-string', 'inputex-list',
'inputex-hidden', 'inputex-integer']
EXTRA_JS_FILES = []
XSRF_TOKEN = 'sa-question-edit'
GRADER_TYPES = [
('case_insensitive', 'Case insensitive string match'),
('regex', 'Regular expression'),
('numeric', 'Numeric')]
SCHEMA_VERSIONS = ['1.5']
DAO = QuestionDAO
@classmethod
def get_schema(cls):
"""Get the InputEx schema for the short answer question editor."""
sa_question = schema_fields.FieldRegistry(
'Short Answer Question',
description='short answer question',
extra_schema_dict_values={'className': 'sa-container'})
sa_question.add_property(schema_fields.SchemaField(
'version', '', 'string', optional=True, hidden=True))
sa_question.add_property(schema_fields.SchemaField(
'question', 'Question', 'html', optional=True,
extra_schema_dict_values={'className': 'sa-question'}))
sa_question.add_property(schema_fields.SchemaField(
'description', 'Description', 'string', optional=True,
extra_schema_dict_values={'className': 'sa-description'},
description=messages.QUESTION_DESCRIPTION))
sa_question.add_property(schema_fields.SchemaField(
'hint', 'Hint', 'html', optional=True,
extra_schema_dict_values={'className': 'sa-hint'}))
sa_question.add_property(schema_fields.SchemaField(
'defaultFeedback', 'Feedback', 'html', optional=True,
extra_schema_dict_values={'className': 'sa-feedback'},
description=messages.INCORRECT_ANSWER_FEEDBACK))
sa_question.add_property(schema_fields.SchemaField(
'rows', 'Rows', 'string', optional=True, i18n=False,
extra_schema_dict_values={
'className': 'sa-rows',
'value': SaQuestionConstants.DEFAULT_HEIGHT_ROWS
},
description=messages.INPUT_FIELD_HEIGHT_DESCRIPTION))
sa_question.add_property(schema_fields.SchemaField(
'columns', 'Columns', 'string', optional=True, i18n=False,
extra_schema_dict_values={
'className': 'sa-columns',
'value': SaQuestionConstants.DEFAULT_WIDTH_COLUMNS
},
description=messages.INPUT_FIELD_WIDTH_DESCRIPTION))
grader_type = schema_fields.FieldRegistry(
'Answer',
extra_schema_dict_values={'className': 'sa-grader'})
grader_type.add_property(schema_fields.SchemaField(
'score', 'Score', 'string', optional=True, i18n=False,
extra_schema_dict_values={'className': 'sa-grader-score'}))
grader_type.add_property(schema_fields.SchemaField(
'matcher', 'Grading', 'string', optional=True, i18n=False,
select_data=cls.GRADER_TYPES,
extra_schema_dict_values={'className': 'sa-grader-score'}))
grader_type.add_property(schema_fields.SchemaField(
'response', 'Response', 'string', optional=True,
extra_schema_dict_values={'className': 'sa-grader-text'}))
grader_type.add_property(schema_fields.SchemaField(
'feedback', 'Feedback', 'html', optional=True,
extra_schema_dict_values={'className': 'sa-grader-feedback'}))
graders_array = schema_fields.FieldArray(
'graders', '', item_type=grader_type,
extra_schema_dict_values={
'className': 'sa-grader-container',
'listAddLabel': 'Add an answer',
'listRemoveLabel': 'Delete this answer'})
sa_question.add_property(graders_array)
return sa_question
def pre_save_hook(self, question):
question.type = QuestionDTO.SHORT_ANSWER
def get_default_content(self):
return {
'version': self.SCHEMA_VERSIONS[0],
'question': '',
'description': '',
'graders': [{
'score': '1.0',
'matcher': 'case_insensitive',
'response': '',
'feedback': ''}]}
def validate(self, question_dict, key, version, errors):
# Currently only one version supported; version validity has already
# been checked.
self._validate15(question_dict, key, errors)
def _validate15(self, question_dict, key, errors):
if not question_dict['question'].strip():
errors.append('The question must have a non-empty body.')
if not question_dict['description']:
errors.append('The description must be non-empty.')
self.validate_no_description_collision(
question_dict['description'], key, errors)
try:
# Coerce the rows attrib into a python int
question_dict['rows'] = int(question_dict['rows'])
if question_dict['rows'] <= 0:
errors.append('Rows must be a positive whole number')
except ValueError:
errors.append('Rows must be a whole number')
try:
# Coerce the cols attrib into a python int
question_dict['columns'] = int(question_dict['columns'])
if question_dict['columns'] <= 0:
errors.append('Columns must be a positive whole number')
except ValueError:
errors.append('Columns must be a whole number')
if not question_dict['graders']:
errors.append('The question must have at least one answer.')
graders = question_dict['graders']
for index in range(0, len(graders)):
grader = graders[index]
assert grader['matcher'] in [
matcher for (matcher, unused_text) in self.GRADER_TYPES]
if not grader['response'].strip():
errors.append('Answer %s has no response text.' % (index + 1))
try:
float(grader['score'])
except ValueError:
errors.append(
'Answer %s must have a numeric score.' % (index + 1))
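# A minimal, self-contained sketch (not part of the original module) of the
# error-accumulation pattern the validators above rely on: callers pass in a
# shared `errors` list and inspect it afterwards instead of catching
# exceptions. The question dict and checks below are illustrative only.
def _demo_validate(question_dict, errors):
    if not question_dict.get('question', '').strip():
        errors.append('The question must have a non-empty body.')
    if not question_dict.get('description'):
        errors.append('The description must be non-empty.')

# demo_errors = []
# _demo_validate({'question': ' ', 'description': ''}, demo_errors)
# demo_errors now holds both messages; an empty list means validation passed.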
| 40.560847
| 79
| 0.621706
| 14,307
| 0.933146
| 0
| 0
| 5,780
| 0.376989
| 0
| 0
| 4,780
| 0.311766
|
ac6e43614244577418d650dcbc06148d7a2b0c7c
| 5,598
|
py
|
Python
|
clayful/__init__.py
|
Clayful/clayful-python
|
ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1
|
[
"MIT"
] | null | null | null |
clayful/__init__.py
|
Clayful/clayful-python
|
ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1
|
[
"MIT"
] | 3
|
2020-04-17T05:24:06.000Z
|
2022-02-10T09:00:22.000Z
|
clayful/__init__.py
|
Clayful/clayful-python
|
ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1
|
[
"MIT"
] | null | null | null |
import re
import urllib
import numbers
from clayful.models import register_models
from clayful.requester import request
from clayful.exception import ClayfulException
class Clayful:
base_url = 'https://api.clayful.io'
default_headers = {
'Accept-Encoding': 'gzip',
'User-Agent': 'clayful-python',
'Clayful-SDK': 'clayful-python'
}
plugins = {
'request': request
}
listeners = {
'request': [],
'response': []
}
@staticmethod
def options_to_headers(o = {}):
headers = {}
if 'language' in o:
headers['Accept-Language'] = o['language']
if 'currency' in o:
headers['Accept-Currency'] = o['currency']
if 'time_zone' in o:
headers['Accept-Time-Zone'] = o['time_zone']
if 'client' in o:
headers['Authorization'] = 'Bearer ' + o['client']
if 'customer' in o:
headers['Authorization-Customer'] = o['customer']
if 'reCAPTCHA' in o:
            headers['reCAPTCHA-Response'] = o['reCAPTCHA']
if 'debug_language' in o:
headers['Accept-Debug-Language'] = o['debug_language']
if 'headers' in o:
headers.update(o['headers'])
return headers
@staticmethod
def get_endpoint(path):
return Clayful.base_url + path
@staticmethod
def normalize_query_values(query = {}):
copied = query.copy()
for key in copied:
if isinstance(copied[key], bool):
copied[key] = 'true' if copied[key] == True else 'false'
if isinstance(copied[key], numbers.Number):
copied[key] = str(copied[key])
return copied
@staticmethod
def extract_request_arguments(options):
result = {
'http_method': options['http_method'],
'request_url': options['path'],
'payload': None,
'query': {},
'headers': {},
'meta': {}
}
rest = options['args'][len(options['params']):]
for i, key in enumerate(options['params']):
result['request_url'] = result['request_url'].replace('{' + key + '}', str(options['args'][i]))
if (options['http_method'] == 'POST' or options['http_method'] == 'PUT') and (options.get('without_payload', False) == False):
result['payload'] = (rest[0:1] or (None,))[0]
rest = rest[1:]
query_headers = (rest[0:1] or ({},))[0]
result['query'] = Clayful.normalize_query_values(query_headers.get('query', {}))
result['headers'] = Clayful.options_to_headers(query_headers)
result['meta'] = query_headers.get('meta', {})
return result
@staticmethod
def call_api(options):
extracted = Clayful.extract_request_arguments(options)
extracted.update({
'request_url': Clayful.get_endpoint(extracted['request_url']),
'model_name': options['model_name'],
'method_name': options['method_name'],
'uses_form_data': options.get('uses_form_data', False),
'error': None,
'response': None,
})
default_headers = Clayful.default_headers.copy()
# Extend default headers with header options
default_headers.update(extracted['headers'])
extracted['headers'] = default_headers
Clayful.trigger('request', extracted)
try:
response = Clayful.plugins['request'](extracted)
extracted['response'] = response
Clayful.trigger('response', extracted)
return response
except ClayfulException as e:
extracted['error'] = e
Clayful.trigger('response', extracted)
raise
@staticmethod
def config(options = {}):
headers = Clayful.options_to_headers(options)
Clayful.default_headers.update(headers)
@staticmethod
def install(scope, plugin):
if scope in Clayful.plugins:
Clayful.plugins[scope] = plugin
@staticmethod
def on(event_name, callback):
listeners = Clayful.listeners.get(event_name, None)
if listeners is None:
return
listeners.append(callback)
@staticmethod
def off(event_name, callback):
listeners = Clayful.listeners.get(event_name, None)
if (listeners is None) or (not callback in listeners):
return
listeners.remove(callback)
@staticmethod
def trigger(event_name, data):
listeners = Clayful.listeners.get(event_name, None)
if listeners is None:
return
for listener in listeners:
listener(data)
@staticmethod
def format_image_url(base_url, options = {}):
query = []
normalized = Clayful.normalize_query_values(options)
for key in normalized:
query.append(key + '=' + normalized.get(key, ''))
query = '&'.join(query)
if bool(query):
query = '?' + query
return base_url + query
@staticmethod
def format_number(number, currency = {}):
if not isinstance(number, numbers.Number):
return ''
precision = currency.get('precision', None)
delimiter = currency.get('delimiter', {})
thousands = delimiter.get('thousands', '')
decimal = delimiter.get('decimal', '.')
if isinstance(precision, numbers.Number):
n = 10 ** precision
number = round(number * n) / n
            # To deal with the 0.0 case
if precision == 0:
number = int(number)
parts = str(number).split('.')
a = thousands.join(re.findall('.{1,3}', parts[0][::-1]))[::-1]
b = parts[1] if len(parts) > 1 else ''
if isinstance(precision, numbers.Number):
diff = precision - len(b)
diff = 0 if diff < 0 else diff
b += '0' * diff
decimal = decimal if bool(b) else ''
return decimal.join([a, b])
@staticmethod
def format_price(number, currency = {}):
formatted_number = Clayful.format_number(number, currency)
if not bool(formatted_number):
return ''
symbol = currency.get('symbol', '')
format = currency.get('format', '{price}')
return format.replace('{symbol}', symbol).replace('{price}', formatted_number)
# Register models
register_models(Clayful)
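# A minimal usage sketch (not part of the original module): configuring the
# SDK and formatting a price with the static helpers above. The token is a
# placeholder, and the currency dict simply follows the keys that
# format_number/format_price read.
if __name__ == '__main__':
    Clayful.config({'client': 'your-client-token'})  # hypothetical token
    currency = {
        'precision': 2,
        'delimiter': {'thousands': ',', 'decimal': '.'},
        'symbol': '$',
        'format': '{symbol}{price}',
    }
    print(Clayful.format_price(1234.5, currency))  # -> $1,234.50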
| 19.992857
| 128
| 0.66363
| 5,385
| 0.961951
| 0
| 0
| 5,060
| 0.903894
| 0
| 0
| 1,102
| 0.196856
|
ac6eca8aa04c226da4e5ecb684240f7192f29f63
| 1,478
|
py
|
Python
|
racecar_gym/envs/scenarios.py
|
luigiberducci/racecar_gym
|
fd2ff7fb14e9319530786ef54a4a6864bf1f1c26
|
[
"MIT"
] | 16
|
2020-11-27T02:55:24.000Z
|
2022-03-24T01:27:29.000Z
|
racecar_gym/envs/scenarios.py
|
luigiberducci/racecar_gym
|
fd2ff7fb14e9319530786ef54a4a6864bf1f1c26
|
[
"MIT"
] | 5
|
2020-08-24T15:59:39.000Z
|
2020-10-20T19:45:46.000Z
|
racecar_gym/envs/scenarios.py
|
luigiberducci/racecar_gym
|
fd2ff7fb14e9319530786ef54a4a6864bf1f1c26
|
[
"MIT"
] | 4
|
2020-10-08T16:14:19.000Z
|
2021-12-26T18:19:53.000Z
|
from dataclasses import dataclass
from typing import Dict
from racecar_gym.bullet import load_world, load_vehicle
from racecar_gym.tasks import Task, get_task
from racecar_gym.core import World, Agent
from .specs import ScenarioSpec, TaskSpec
def task_from_spec(spec: TaskSpec) -> Task:
task = get_task(spec.task_name)
return task(**spec.params)
@dataclass
class MultiAgentScenario:
world: World
agents: Dict[str, Agent]
@staticmethod
def from_spec(path: str, rendering: bool = None) -> 'MultiAgentScenario':
spec = ScenarioSpec()
spec.load(path)
if rendering:
spec.world.rendering = rendering
agents = dict([
(s.id, Agent(id=s.id, vehicle=load_vehicle(s.vehicle), task=task_from_spec(s.task)))
for s in spec.agents
])
return MultiAgentScenario(world=load_world(spec.world, agents=list(agents.values())), agents=agents)
@dataclass
class SingleAgentScenario:
world: World
agent: Agent
@staticmethod
def from_spec(path: str, rendering: bool = None) -> 'SingleAgentScenario':
spec = ScenarioSpec()
spec.load(path)
if rendering:
spec.world.rendering = rendering
agent_spec = spec.agents[0]
agent = Agent(id=agent_spec.id, vehicle=load_vehicle(agent_spec.vehicle), task=task_from_spec(agent_spec.task))
return SingleAgentScenario(world=load_world(spec.world, agents=[agent]), agent=agent)
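# A minimal usage sketch (not part of the original module); the spec path is
# hypothetical and must point to a valid scenario YAML for this to run.
if __name__ == '__main__':
    scenario = SingleAgentScenario.from_spec('scenarios/austria.yml',
                                             rendering=True)
    print(scenario.agent.id)  # the agent built from the first spec entry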
| 30.791667
| 119
| 0.688092
| 1,094
| 0.740189
| 0
| 0
| 1,116
| 0.755074
| 0
| 0
| 41
| 0.02774
|
ac6eda3ee83ebc645d67258023269c71e7def1cb
| 26,972
|
py
|
Python
|
vkts/real.py
|
smurphik/vkts
|
21e16f37eebf80cd41fd02d7401e523e772e98f9
|
[
"MIT"
] | null | null | null |
vkts/real.py
|
smurphik/vkts
|
21e16f37eebf80cd41fd02d7401e523e772e98f9
|
[
"MIT"
] | null | null | null |
vkts/real.py
|
smurphik/vkts
|
21e16f37eebf80cd41fd02d7401e523e772e98f9
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
"""Implementation of the main functions of the application: editing user data,
thematic search, other console commands"""
import sys
import re
from collections import Counter
from .report import Report
from . import vklib as vk
from .vklib import apply_vk_method
from .usrdata import UsrData
from .utils import exception_handler
####################################################################
## Account commands ##
####################################################################
def add_account():
"""Interactive function of account adding"""
try:
# Read data about account
# Account type
in_str = input('What type of account do you want to create?\n'
+ 'Input please one letter - [e]mail, [v]k.ru\n'
+ 'or [t]elegram: ').lower()
ac_type = {'e': 'email', 'v': 'vk', 't': 'telegram'}[in_str]
# Account name
ac_name = input('\nInput name of new account: ').lower()
# Privacy
        print('\nIs your account fake (password will be stored\n'
              + 'unencrypted on your computer) or private (you\n'
              + 'will be forced to enter the password every run)?')
in_str = input('Input please [f]ake or [p]rivate: ').lower()
ac_privacy = {'f': 'fake', 'p': 'private'}[in_str]
# User name
if ac_type == 'email' or ac_type == 'vk':
ac_user_name = input('\nInput email address: ')
elif ac_type == 'telegram':
ac_user_name = input('\nInput nickname: ').lstrip('@')
else:
raise
# Password (it's fake account, so we don't use smth. like getpass)
if ac_privacy == 'fake':
ac_password = input('\nInput password: ')
else:
ac_password = None
# Save account data
acc_obj = {'uname': ac_user_name,
'password': ac_password,
'token': None,
'is_activated': False}
UsrData().set(acc_obj, 'acc', ac_type, ac_name, correct_is_act=True)
except Exception as e:
exception_handler(e, 'Failed to enter account data')
def delete_account(ac_type, ac_name):
"""Deletion account ac_name of type ac_type from registry"""
UsrData().del_('acc', ac_type, ac_name, correct_is_act=True)
def activate_account(ac_type, ac_name):
"""Choose active account"""
u = UsrData()
if ac_name in u.get('acc', ac_type):
u.drop_activations('acc', ac_type)
u.set(True, 'acc', ac_type, ac_name, 'is_activated')
def display_accounts():
"""Display all accounts and active marks"""
accs = UsrData().get('acc')
for ac_type in accs:
# check existance
if not accs[ac_type]:
continue
# print accounts of type ac_type
print(ac_type)
for ac_name in accs[ac_type]:
acc_obj = accs[ac_type][ac_name]
print(' {}: \t[{}|{}]{}'.format(ac_name,
acc_obj['uname'],
acc_obj['password'],
(' \t<- activated'
if acc_obj['is_activated']
else '')))
print('')
####################################################################
## University commands ##
####################################################################
# Search hot university ids by un_groups
def search_hot_university_ids(un_groups, un_ids):
# load members of groups
groups = [vk.Group(group_id['id']) for group_id in un_groups]
vk.load_groups(groups)
# unite groups members in a single users list
users = []
for group in groups:
users += group.members
users = list(set(users))
# load education info about users
step = 100
e = vk.Executor()
users_education = []
for i in range(0, len(users), step):
ids_str = ','.join(list(map(str, users[i:i+step])))
e.add_request('users.get', user_ids=ids_str,
fields='occupation,education,universities',
processor=users_education.extend)
e.emit_requests()
# Count university appearances
un_ids_cnt = {un_id: 0 for un_id in un_ids}
for item in users_education:
if 'university' in item:
un_id = str(item['university'])
if un_id in un_ids_cnt:
un_ids_cnt[un_id] += 1
continue
if ('occupation' in item
and item['occupation']['type'] == 'university'):
un_id = str(item['occupation']['id'])
if un_id in un_ids_cnt:
un_ids_cnt[un_id] += 1
continue
# Filter out unused ids
hot_ids = list(un_ids_cnt.items())
hot_ids = [x for x in hot_ids if x[1] >= 10]
hot_ids.sort(key=(lambda x: x[1]), reverse=True)
hot_ids = [{'id': x[0], 'temp': x[1]} for x in hot_ids]
return hot_ids
def add_university():
"""Interactive function of adding data abaot university"""
# Read data about university
try:
# Name of university
un_name = input(
'Input please simple name of university (better in short latin;\n'
+ 'for example: mipt, msu, mgimo): \n> ')
# Titles of university
print('Input please all titles of university (for example:\n'
+ 'МФТИ, Физтех, MIPT). After every title press [ENTER].\n'
+ 'Finish input by [ENTER].')
un_titles = []
while True:
title = input('> ')
if not title:
break
un_titles.append(title)
assert un_titles
# Looking for a university in the vk database
e = vk.Executor()
for title in un_titles:
e.add_request('database.getUniversities', q=title)
e.emit_requests()
un_items = []
for r in e.responses:
un_items.extend(r['items'])
# Clarify the results by checking exact words match
tmp = un_items
un_items = []
unique_ids = set()
for item in tmp:
# ensuring uniqueness of search results
if item['id'] in unique_ids:
continue
unique_ids.add(item['id'])
# check title existence
for title in un_titles:
for word in title.split():
if not re.search(r'\b' + word.lower() + r'\b',
item['title'].lower()):
break
else:
un_items.append(item)
break
del tmp, unique_ids
assert un_items
# Ask the user to clarify the results
print(
'Database search results may contain extra items.\n'
            + 'Check them please. You will see chunks of 10 items in turn.\n'
            + 'For every chunk enter the numbers corresponding to the wrong\n'
            + 'elements (for example, 239 if items 2, 3 and 9 are wrong).\n'
+ 'Then press [ENTER].')
tmp = un_items
un_items = []
for i in range(0, len(tmp), 10):
chunk = tmp[i:i+10]
for j, item in enumerate(chunk):
print('{}> {}'.format(j, item['title']))
in_str = input('Wrong numbers: ')
extra_indexes_list = [int(c) for c in in_str if c.isdigit()]
extra_indexes_list.sort(reverse=True)
for j in extra_indexes_list:
del chunk[j]
un_items.extend(chunk)
del tmp
assert un_items
# Read very big groups which consists many students
print('Input please id or domains of very very big groups\n'
+ 'which consists many students of added university.\n'
+ 'After every id press [ENTER]. Finish input by [ENTER].')
un_big_groups = []
while True:
gr_id = input('> ')
if not gr_id:
break
un_big_groups.append(gr_id)
un_big_groups = vk.resolve_group_ids(un_big_groups)
un_big_groups = [{'id': x[0], 'domain': x[1]} for x in un_big_groups]
assert un_big_groups
# Read groups which consists almost exclusively of students
print('Input please id or domains of groups which consists\n'
+ 'almost exclusively of students of added university.\n'
+ 'After every id press [ENTER]. Finish input by [ENTER].')
un_groups = []
while True:
gr_id = input('> ')
if not gr_id:
break
un_groups.append(gr_id)
un_groups = vk.resolve_group_ids(un_groups)
un_groups = [{'id': x[0], 'domain': x[1]} for x in un_groups]
# Read users whose almost all friends are students
print('Input please id or domains of users whose almost all\n'
+ 'friends are students of added university.\n'
+ 'After every id press [ENTER]. Finish input by [ENTER].')
un_users = []
while True:
us_id = input('> ')
if not us_id:
break
us_id, us_scr = vk.resolve_user_ids(us_id)
un_users.append({'id': us_id, 'domain': us_scr})
# Search hot university ids by un_big_groups (unfortunately,
# vk search method is not stable, so we use this approach)
print('Start analysis of the prevalence of university identifiers.\n'
+ '(It could take several minutes)')
un_ids = list(map(str, [x['id'] for x in un_items]))
hot_ids = search_hot_university_ids(un_big_groups, un_ids)
# Save university data
univ_obj = {'titles': un_titles,
'big_groups': un_big_groups,
'crystal_groups': un_groups,
'crystal_users': un_users,
'all_ids': [x['id'] for x in un_items],
'hot_ids': hot_ids,
'is_activated': False}
UsrData().set(univ_obj, 'univ', un_name, correct_is_act=True)
except Exception as e:
exception_handler(e, 'Failed to enter or add university data')
def delete_university(un_name):
"""Deletion un_name from registry"""
UsrData().del_('univ', un_name, correct_is_act=True)
def activate_university(un_name):
"""Choose active university for analysis"""
u = UsrData()
if un_name in u.get('univ'):
u.drop_activations('univ')
u.set(True, 'univ', un_name, 'is_activated')
def display_universities():
"""Display data for all universities"""
# Read universities registry
univs = UsrData().get('univ')
# Print
for un_name in univs:
univ_obj = univs[un_name]
print(un_name + '{}'.format('\t\t\t\t<- ACTIVATED'
if univ_obj['is_activated']
else ''))
print('Title: {}'.format(', '.join(univ_obj['titles'])))
s = ', '.join([x['domain'] for x in univ_obj['big_groups']])
print('Big vk groups: {}'.format(s))
s = ', '.join([x['domain'] for x in univ_obj['crystal_groups']])
print('Crystal vk groups: {}'.format(s))
s = ', '.join([x['domain'] for x in univ_obj['crystal_users']])
print('Crystal vk users: {}'.format(s))
# Identifier list with carry and indents
print('VK ids: ', end='')
ids = [str(x) for x in univ_obj['all_ids']]
ids2 = []
for i in range(0, len(ids), 5):
ids2.append(', '.join(ids[i:i+5]))
s = ',\n '.join(x for x in ids2)
print(s)
# Hot identifiers with temperature in parentheses
sum_cnt = sum([x['temp'] for x in univ_obj['hot_ids']])
s = ', '.join(['{} ({:.1f} %)'.format(x['id'], 100*x['temp']/sum_cnt)
for x in univ_obj['hot_ids']])
print('Hot VK ids: {}'.format(s))
print('')
####################################################################
## VK API commands ##
####################################################################
# Any user-defined vk API method
def vk_method(method_name, args):
args_list = [x.split('=', 1) for x in args]
args_dict = {x[0]: x[1] for x in args_list}
return apply_vk_method(method_name, handle_api_errors=False, **args_dict)
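# For illustration (not part of the original module): vk_method turns
# CLI-style 'key=value' strings into keyword arguments, e.g.
#   vk_method('users.get', ['user_ids=1,2', 'fields=education'])
# becomes apply_vk_method('users.get', user_ids='1,2', fields='education').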
####################################################################
## Other commands ##
####################################################################
# Search new crystal students among friends of given set of crystal students
def load_crystal_students_from_friends(given_crystal,
exceptions_list, # users not to be
# searched for
univer_ids):
new_crystal = []
for user_id in given_crystal:
u = vk.User(user_id, univer_ids)
u.load_and_save_info_if_absent()
u.read_info()
new_crystal += u.univer_friends
print('new_crystal: ' + str(len(new_crystal)))
new_crystal = list(set(new_crystal))
print('unique new_crystal: ' + str(len(new_crystal)))
new_crystal = list(set(new_crystal) - set(exceptions_list))
print('with except new_crystal: ' + str(len(new_crystal)))
return new_crystal
# Load friends of given users list with counters of repetitions
# Return: generate dict {id0:num_of_repeat(id0), id1:..., ...}
def load_friends_of_users_with_cnt(users_list):
friends_list = []
for user_id in users_list:
u = vk.User(user_id)
u.read_info()
friends_list += u.friends
# Count repeating elements in list
# (generate dict {id0:num_of_repeat(id0), id1:..., ...})
friends_cnt = dict(Counter(friends_list))
# Exclude original users
users_set = set(users_list)
friends_cnt_filtered = \
{x: friends_cnt[x] for x in friends_cnt if x not in users_set}
return friends_cnt_filtered
# Load ids of friends of users and write them to data/users_packs/pack_name
def load_users_friends_to_users_pack(user_ids, pack_name, univer_ids):
friends_list = []
for user_id in user_ids:
u = vk.User(user_id, univer_ids)
u.load_and_save_info_if_absent()
u.read_info()
friends_list += u.friends
# Delete repeats and sort
friends_list = list(set(friends_list))
friends_list.sort()
# Write
vk.write_users_pack(friends_list, pack_name)
# Wrapper for method users.search
def users_search_wrapper(university_id, **p):
response = apply_vk_method('users.search', university=university_id,
offset=0, count=1000, **p)
if bool(response):
return [x['id'] for x in response['response']['items']]
else:
return []
# Load list of phystechs using method users.search
# WARNING: Don't use it too often. Otherwise account will be banned.
# TODO: Generalize to other universities (МГИМО, СПбГУ, Бауманка, МГЮА,
#       a universal one); store the list of universities with ids and
#       keywords in a dedicated file under data
def load_phystechs_by_search(pack_name):
# Getting list of phystechs
    phystechs_list = users_search_wrapper(55111, **{'sort': 0})
    # TODO: after debugging the new version, uncomment the calls below
    # and delete the next (debug-only) line
    phystechs_list = phystechs_list[:10]
# phystechs_list += users_search_wrapper(55111, **{'sort': 1})
# phystechs_list += users_search_wrapper(55111, **{'age_to': 25})
# phystechs_list += users_search_wrapper(55111, **{'age_from': 26})
    # phystechs_list += users_search_wrapper(55111, **{'city': 1}) # Moscow
# #print('55111: ' + str(len(list(set(phystechs_list)))))
# phystechs_list += users_search_wrapper(297, **{'sort': 0, 'sex': 1})
# phystechs_list += users_search_wrapper(297, **{'sort': 1, 'sex': 1})
# #print('sex 1: ' + str(len(list(set(phystechs_list)))))
# phystechs_list += users_search_wrapper(297, **{'sort': 0, 'sex': 2})
# phystechs_list += users_search_wrapper(297, **{'sort': 1, 'sex': 2})
# #print('sex 2: ' + str(len(list(set(phystechs_list)))))
# phystechs_list += users_search_wrapper(297, **{'age_to': 18})
# for i in range(19, 36):
# phystechs_list += users_search_wrapper(297, **{'age_from': i, 'age_to': i})
# phystechs_list += users_search_wrapper(297, **{'age_from': 36, 'age_to': 37})
# phystechs_list += users_search_wrapper(297, **{'age_from': 38, 'age_to': 40})
# phystechs_list += users_search_wrapper(297, **{'age_from': 41, 'age_to': 45})
# phystechs_list += users_search_wrapper(297, **{'age_from': 46, 'age_to': 50})
# phystechs_list += users_search_wrapper(297, **{'age_from': 51, 'age_to': 55})
# phystechs_list += users_search_wrapper(297, **{'age_from': 56, 'age_to': 60})
# phystechs_list += users_search_wrapper(297, **{'age_from': 61, 'age_to': 70})
# phystechs_list += users_search_wrapper(297, **{'age_from': 71})
# #print('ages: ' + str(len(list(set(phystechs_list)))))
    # phystechs_list += users_search_wrapper(297, **{'city': 1}) # Moscow
# #print('Moscow: ' + str(len(list(set(phystechs_list)))))
# phystechs_list += users_search_wrapper(297, **{'city': 857}) # Долгопрудный
# #print('Dolgoprudny: ' + str(len(list(set(phystechs_list)))))
# for n in ['Александр', 'Сергей', 'Дмитрий', 'Андрей', 'Алексей',
# 'Владимир', 'Михаил', 'Игорь', 'Евгений', 'Юрий', 'Никита',
# 'Олег', 'Николай', 'Иван', 'Павел']:
# phystechs_list += users_search_wrapper(297,
# **{'q': urllib.parse.quote(n)})
# #print('Names: ' + str(len(list(set(phystechs_list)))))
# Sort result & delete repeats
phystechs_list = list(set(phystechs_list))
phystechs_list.sort()
# Write & return
vk.write_users_pack(phystechs_list, pack_name)
return
# Read crystal university members (field 'univer_members') from the groups of a pack
def read_crystal_students_of_groups_pack(pack_name):
groups = vk.read_groups_pack(pack_name)
crystal_students = []
for group_id in groups:
g = vk.Group(group_id)
g.open()
crystal_students += g.univer_members
return crystal_students
# Attach university-group and thematic-group membership info to each user id
def make_users_list_with_add_info(users_list,
univer_groups_packs_list,
thematic_groups_packs_list):
ext_users_list = [{'id': x,
'univer_groups': [],
'thematic_groups': []} for x in users_list]
for groups_pack in univer_groups_packs_list:
groups_list = vk.read_groups_pack(groups_pack)
for group_id in groups_list:
g = vk.Group(group_id)
g.open()
members_list = g.members
for ext_user in ext_users_list:
if ext_user['id'] in members_list:
ext_user['univer_groups'].append([group_id, g.name])
for groups_pack in thematic_groups_packs_list:
groups_list = vk.read_groups_pack(groups_pack)
for group_id in groups_list:
g = vk.Group(group_id)
g.open()
members_list = g.members
for ext_user in ext_users_list:
if ext_user['id'] in members_list:
ext_user['thematic_groups'].append([group_id, g.name])
return ext_users_list
# Create report about thematic student by list with
# information (id, their groups packs) about them
def make_thematic_report(ext_users_list, univer_ids):
# Begin report
r = Report('thematic_students_list')
i = 0
r.add_str('<table>\n')
for ext_user in ext_users_list:
# Read user info
u = vk.User(ext_user['id'], univer_ids)
u.load_and_save_info_if_absent()
u.read_info()
# Write user description to report
i += 1
r.add_str('<tr>\n')
r.add_str(' <td valign="top">' + str(i) + '.</td>\n')
r.add_str(' <td><a href="https://vk.com/id' + str(ext_user['id'])
+ '" target="_blank"><img height=50 src="' + u.photo_max
+ '"></a></td>\n')
r.add_str(' <td valign="top">\n')
r.add_line(' ' + u.first_name + ' ' + u.last_name)
r.add_str(' <td valign="top">\n')
r.add_str(' MIPT: ')
if u.is_student:
r.add_str('PROFILE, ')
for group in ext_user['univer_groups']:
r.add_str('<a = href="https://vk.com/club' + group[0]
+ '" target="_blank">' + group[1] + '</a>, ')
r.add_str('<br>')
r.add_str(' Thematic: ')
for group in ext_user['thematic_groups']:
r.add_str('<a = href="https://vk.com/club' + group[0]
+ '" target="_blank">' + group[1] + '</a>, ')
r.add_str(' </td>\n')
r.add_str('</tr>\n')
# Conclude report
r.add_str('</table>\n')
if not r.is_empty():
r.conclude()
r.dump()
def get_thematic_students_by_their_groups():
univer_ids = [297, 55111]
# Load members of mipt groups
#vk.load_users_pack_by_groups_pack('mipt_crystal_gr', univer_ids)
#vk.load_users_pack_by_groups_pack('mipt_very', univer_ids)
#vk.load_users_pack_by_groups_pack('mipt_roughly', univer_ids)
# Load friends of user 23681294 (Концертный Физтеха)
# to data/users_packs/mipt_crystal_us
#load_users_friends_to_users_pack(['23681294'], 'mipt_crystal_us',
# univer_ids)
    ## TODO: do the filtering here or at the end
    ## TODO: move all the crystal logic into a single function
    ## TODO: uncomment the search step
    ## TODO: clear users and groups that are absent from the packs
# Search phystechs by and dump to data/users_packs/mipt_crystal_search
# WARNING: too frequent calls to this function result in
# the method users.search being blocked
#load_phystechs_by_search('mipt_crystal_search')
    ## TODO: set is_student for everyone found via the search
# Read phystechs from crystal sources
#mipt_crystal_1_gusm = vk.read_users_pack('mipt_crystal_gr')
#print('+ mipt_crystal_gr: ' + str(len(list(set(mipt_crystal_1_gusm)))))
#mipt_crystal_1_gusm += vk.read_users_pack('mipt_crystal_us')
#print('+ mipt_crystal_us: ' + str(len(list(set(mipt_crystal_1_gusm)))))
#mipt_crystal_1_gusm += vk.read_users_pack('mipt_crystal_search')
#print('+ mipt_crystal_search: ' + str(len(list(set(mipt_crystal_1_gusm)))))
#mipt_crystal_1_gusm = list(set(mipt_crystal_1_gusm))
# Search crystal phystechs in mipt-groups
#mipt_crystal_1_gusm += \
# read_crystal_students_of_groups_pack('mipt_very')
#print('+ mipt_very: ' + str(len(list(set(mipt_crystal_1_gusm)))))
#mipt_crystal_1_gusm += \
# read_crystal_students_of_groups_pack('mipt_roughly')
#print('+ mipt_roughly: ' + str(len(list(set(mipt_crystal_1_gusm)))))
#mipt_crystal_1_gusm = list(set(mipt_crystal_1_gusm))
# Search crystal phystechs among friends of crystal
# WARNING: execution of this code block lasts > 2 hour
#new_crystal = load_crystal_students_from_friends(mipt_crystal_1_gusm,
# mipt_crystal_1_gusm,
# univer_ids)
#mipt_crystal_1_gusm += new_crystal
#print('+ mipt_friends: ' + str(len(list(set(mipt_crystal_1_gusm)))))
#while len(new_crystal) > 0:
# new_crystal = \
# load_crystal_students_from_friends(new_crystal,
# mipt_crystal_1_gusm,
# univer_ids)
# mipt_crystal_1_gusm += new_crystal
# print('+ mipt_friends: ' + str(len(list(set(mipt_crystal_1_gusm)))))
# Write students to data/users_packs/
#vk.write_users_pack(mipt_crystal_1_gusm, 'mipt_crystal_1_gusm')
# Search students among friends of crystal on the basis of
# the number of crystals in friends
#mipt_crystal_1_gusm = vk.read_users_pack('mipt_crystal_1_gusm')
#cryst_friends_cnt = load_friends_of_users_with_cnt(mipt_crystal_1_gusm)
#print('cryst_friends_cnt: ' + str(len(cryst_friends_cnt)))
#cryst_friends_2 = \
# [x for x in cryst_friends_cnt if cryst_friends_cnt[x] >= 2]
#vk.write_users_pack(cryst_friends_2, 'cryst_friends_2')
#print('cryst_friends_2: ' + str(len(cryst_friends_2)))
#cryst_friends_3 = \
# [x for x in cryst_friends_cnt if cryst_friends_cnt[x] >= 3]
#vk.write_users_pack(cryst_friends_3, 'cryst_friends_3')
#print('cryst_friends_3: ' + str(len(cryst_friends_3)))
#cryst_friends_5 = \
# [x for x in cryst_friends_cnt if cryst_friends_cnt[x] >= 5]
#vk.write_users_pack(cryst_friends_5, 'cryst_friends_5')
#print('cryst_friends_5: ' + str(len(cryst_friends_5)))
#cryst_friends_10 = \
# [x for x in cryst_friends_cnt if cryst_friends_cnt[x] >= 10]
#vk.write_users_pack(cryst_friends_10, 'cryst_friends_10')
#print('cryst_friends_10: ' + str(len(cryst_friends_10)))
#vk.load_users_pack_by_groups_pack('thematic_very')
#vk.load_users_pack_by_groups_pack('thematic_roughly')
    #vk.load_users_pack_by_groups_pack('thematic_side') # TODO finish downloading
# Read phystechs and thematic from dump
mipt_crystal = set(vk.read_users_pack('mipt_crystal_1_gusm'))
cryst_friends_10 = set(vk.read_users_pack('cryst_friends_10'))
cryst_friends_5 = set(vk.read_users_pack('cryst_friends_5'))
thematic_very = set(vk.read_users_pack('thematic_very'))
exceptions = set(vk.read_users_pack('exceptions'))
intersection = list((mipt_crystal & thematic_very) - exceptions)
intersection.sort()
extended_intersection = \
make_users_list_with_add_info(intersection,
['mipt_crystal_gr', 'mipt_very'],
['thematic_very', 'thematic_roughly'])
make_thematic_report(extended_intersection, univer_ids)
#intersection = list((cryst_friends_10 & thematic_very) - exceptions)
#intersection.sort()
#extended_intersection = \
# make_users_list_with_add_info(intersection,
# ['mipt_crystal_gr', 'mipt_very'],
# ['thematic_very', 'thematic_roughly'])
#make_thematic_report(extended_intersection, univer_ids)
#sys.exit()
#intersection = list(set(cryst_friends_5) & set(thematic_very))
#intersection.sort()
#print(len(intersection))
#print(intersection)
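# A minimal, self-contained sketch (not part of the original module) of the
# Counter-based counting used by load_friends_of_users_with_cnt above: merge
# the friend lists, count repetitions, then drop the seed users themselves.
# All ids below are made up for illustration.
def _demo_friend_counts():
    from collections import Counter
    seed_users = [1, 2]
    friend_lists = {1: [3, 4, 5], 2: [4, 5, 6, 1]}
    merged = [f for u in seed_users for f in friend_lists[u]]
    counts = dict(Counter(merged))  # {3: 1, 4: 2, 5: 2, 6: 1, 1: 1}
    seeds = set(seed_users)
    # Drop ids that are themselves seed users (id 1 here)
    return {u: c for u, c in counts.items() if u not in seeds}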
| 38.752874
| 84
| 0.58075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 13,977
| 0.510519
|
ac6f2dbc609bab1cd3af2ace2bafd614f0610168
| 10,226
|
py
|
Python
|
sample_facemesh.py
|
swipswaps/mediapipe-python
|
00700129ced41dcdab174cd46454f5e7e3d9e25b
|
[
"Apache-2.0"
] | 92
|
2021-03-09T08:27:17.000Z
|
2022-03-09T08:20:48.000Z
|
sample_facemesh.py
|
swipswaps/mediapipe-python
|
00700129ced41dcdab174cd46454f5e7e3d9e25b
|
[
"Apache-2.0"
] | 1
|
2021-12-23T05:15:26.000Z
|
2022-02-21T20:35:21.000Z
|
sample_facemesh.py
|
swipswaps/mediapipe-python
|
00700129ced41dcdab174cd46454f5e7e3d9e25b
|
[
"Apache-2.0"
] | 46
|
2021-03-08T10:24:54.000Z
|
2021-12-20T07:12:48.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import argparse
import cv2 as cv
import numpy as np
import mediapipe as mp
from utils import CvFpsCalc
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=int, default=0)
parser.add_argument("--width", help='cap width', type=int, default=960)
parser.add_argument("--height", help='cap height', type=int, default=540)
parser.add_argument("--max_num_faces", type=int, default=1)
parser.add_argument("--min_detection_confidence",
help='min_detection_confidence',
type=float,
default=0.7)
parser.add_argument("--min_tracking_confidence",
help='min_tracking_confidence',
type=int,
default=0.5)
parser.add_argument('--use_brect', action='store_true')
args = parser.parse_args()
return args
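# Example invocation (illustrative; the flags are exactly the ones defined
# above in get_args):
#   python sample_facemesh.py --device 0 --max_num_faces 2 --use_brect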
def main():
    # Parse arguments #########################################################
args = get_args()
cap_device = args.device
cap_width = args.width
cap_height = args.height
max_num_faces = args.max_num_faces
min_detection_confidence = args.min_detection_confidence
min_tracking_confidence = args.min_tracking_confidence
use_brect = args.use_brect
    # Prepare camera ##########################################################
cap = cv.VideoCapture(cap_device)
cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
    # Load model ##############################################################
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(
max_num_faces=max_num_faces,
min_detection_confidence=min_detection_confidence,
min_tracking_confidence=min_tracking_confidence,
)
    # FPS measurement module ##################################################
cvFpsCalc = CvFpsCalc(buffer_len=10)
while True:
display_fps = cvFpsCalc.get()
        # Capture camera frame ################################################
ret, image = cap.read()
if not ret:
break
        image = cv.flip(image, 1)  # mirror the image for display
debug_image = copy.deepcopy(image)
        # Run detection #######################################################
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
results = face_mesh.process(image)
        # Draw results ########################################################
if results.multi_face_landmarks is not None:
for face_landmarks in results.multi_face_landmarks:
                # Compute the bounding rectangle
brect = calc_bounding_rect(debug_image, face_landmarks)
                # Draw
debug_image = draw_landmarks(debug_image, face_landmarks)
debug_image = draw_bounding_rect(use_brect, debug_image, brect)
cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30),
cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA)
        # Key handling (ESC: quit) ############################################
key = cv.waitKey(1)
if key == 27: # ESC
break
        # Show the frame ######################################################
cv.imshow('MediaPipe Face Mesh Demo', debug_image)
cap.release()
cv.destroyAllWindows()
def calc_bounding_rect(image, landmarks):
image_width, image_height = image.shape[1], image.shape[0]
landmark_array = np.empty((0, 2), int)
for _, landmark in enumerate(landmarks.landmark):
landmark_x = min(int(landmark.x * image_width), image_width - 1)
landmark_y = min(int(landmark.y * image_height), image_height - 1)
landmark_point = [np.array((landmark_x, landmark_y))]
landmark_array = np.append(landmark_array, landmark_point, axis=0)
x, y, w, h = cv.boundingRect(landmark_array)
return [x, y, x + w, y + h]
def draw_landmarks(image, landmarks):
image_width, image_height = image.shape[1], image.shape[0]
landmark_point = []
for index, landmark in enumerate(landmarks.landmark):
if landmark.visibility < 0 or landmark.presence < 0:
continue
landmark_x = min(int(landmark.x * image_width), image_width - 1)
landmark_y = min(int(landmark.y * image_height), image_height - 1)
landmark_point.append((landmark_x, landmark_y))
cv.circle(image, (landmark_x, landmark_y), 1, (0, 255, 0), 1)
if len(landmark_point) > 0:
        # Reference: https://github.com/tensorflow/tfjs-models/blob/master/facemesh/mesh_map.jpg
        # Left eyebrow (55: inner, 46: outer)
cv.line(image, landmark_point[55], landmark_point[65], (0, 255, 0), 2)
cv.line(image, landmark_point[65], landmark_point[52], (0, 255, 0), 2)
cv.line(image, landmark_point[52], landmark_point[53], (0, 255, 0), 2)
cv.line(image, landmark_point[53], landmark_point[46], (0, 255, 0), 2)
        # Right eyebrow (285: inner, 276: outer)
cv.line(image, landmark_point[285], landmark_point[295], (0, 255, 0),
2)
cv.line(image, landmark_point[295], landmark_point[282], (0, 255, 0),
2)
cv.line(image, landmark_point[282], landmark_point[283], (0, 255, 0),
2)
cv.line(image, landmark_point[283], landmark_point[276], (0, 255, 0),
2)
        # Left eye (133: inner corner, 246: outer corner)
cv.line(image, landmark_point[133], landmark_point[173], (0, 255, 0),
2)
cv.line(image, landmark_point[173], landmark_point[157], (0, 255, 0),
2)
cv.line(image, landmark_point[157], landmark_point[158], (0, 255, 0),
2)
cv.line(image, landmark_point[158], landmark_point[159], (0, 255, 0),
2)
cv.line(image, landmark_point[159], landmark_point[160], (0, 255, 0),
2)
cv.line(image, landmark_point[160], landmark_point[161], (0, 255, 0),
2)
cv.line(image, landmark_point[161], landmark_point[246], (0, 255, 0),
2)
cv.line(image, landmark_point[246], landmark_point[163], (0, 255, 0),
2)
cv.line(image, landmark_point[163], landmark_point[144], (0, 255, 0),
2)
cv.line(image, landmark_point[144], landmark_point[145], (0, 255, 0),
2)
cv.line(image, landmark_point[145], landmark_point[153], (0, 255, 0),
2)
cv.line(image, landmark_point[153], landmark_point[154], (0, 255, 0),
2)
cv.line(image, landmark_point[154], landmark_point[155], (0, 255, 0),
2)
cv.line(image, landmark_point[155], landmark_point[133], (0, 255, 0),
2)
        # Right eye (362: inner corner, 466: outer corner)
cv.line(image, landmark_point[362], landmark_point[398], (0, 255, 0),
2)
cv.line(image, landmark_point[398], landmark_point[384], (0, 255, 0),
2)
cv.line(image, landmark_point[384], landmark_point[385], (0, 255, 0),
2)
cv.line(image, landmark_point[385], landmark_point[386], (0, 255, 0),
2)
cv.line(image, landmark_point[386], landmark_point[387], (0, 255, 0),
2)
cv.line(image, landmark_point[387], landmark_point[388], (0, 255, 0),
2)
cv.line(image, landmark_point[388], landmark_point[466], (0, 255, 0),
2)
cv.line(image, landmark_point[466], landmark_point[390], (0, 255, 0),
2)
cv.line(image, landmark_point[390], landmark_point[373], (0, 255, 0),
2)
cv.line(image, landmark_point[373], landmark_point[374], (0, 255, 0),
2)
cv.line(image, landmark_point[374], landmark_point[380], (0, 255, 0),
2)
cv.line(image, landmark_point[380], landmark_point[381], (0, 255, 0),
2)
cv.line(image, landmark_point[381], landmark_point[382], (0, 255, 0),
2)
cv.line(image, landmark_point[382], landmark_point[362], (0, 255, 0),
2)
        # Mouth (308: right corner, 78: left corner)
cv.line(image, landmark_point[308], landmark_point[415], (0, 255, 0),
2)
cv.line(image, landmark_point[415], landmark_point[310], (0, 255, 0),
2)
cv.line(image, landmark_point[310], landmark_point[311], (0, 255, 0),
2)
cv.line(image, landmark_point[311], landmark_point[312], (0, 255, 0),
2)
cv.line(image, landmark_point[312], landmark_point[13], (0, 255, 0), 2)
cv.line(image, landmark_point[13], landmark_point[82], (0, 255, 0), 2)
cv.line(image, landmark_point[82], landmark_point[81], (0, 255, 0), 2)
cv.line(image, landmark_point[81], landmark_point[80], (0, 255, 0), 2)
cv.line(image, landmark_point[80], landmark_point[191], (0, 255, 0), 2)
cv.line(image, landmark_point[191], landmark_point[78], (0, 255, 0), 2)
cv.line(image, landmark_point[78], landmark_point[95], (0, 255, 0), 2)
cv.line(image, landmark_point[95], landmark_point[88], (0, 255, 0), 2)
cv.line(image, landmark_point[88], landmark_point[178], (0, 255, 0), 2)
cv.line(image, landmark_point[178], landmark_point[87], (0, 255, 0), 2)
cv.line(image, landmark_point[87], landmark_point[14], (0, 255, 0), 2)
cv.line(image, landmark_point[14], landmark_point[317], (0, 255, 0), 2)
cv.line(image, landmark_point[317], landmark_point[402], (0, 255, 0),
2)
cv.line(image, landmark_point[402], landmark_point[318], (0, 255, 0),
2)
cv.line(image, landmark_point[318], landmark_point[324], (0, 255, 0),
2)
cv.line(image, landmark_point[324], landmark_point[308], (0, 255, 0),
2)
return image
def draw_bounding_rect(use_brect, image, brect):
if use_brect:
        # Bounding rectangle
cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),
(0, 255, 0), 2)
return image
if __name__ == '__main__':
main()
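# Invocation sketch (the values shown are just the argparse defaults above):
#   python sample_facemesh.py --device 0 --width 960 --height 540 --use_brect
# Press ESC in the preview window to quit.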
| 38.588679
| 88
| 0.552415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,336
| 0.127798
|
ac6f3083292de976db6a89e3601228fd50986b48
| 1,413
|
py
|
Python
|
andromeda/modules/loans/views/inventory_loans.py
|
sango09/andromeda_api_rest
|
b4a3267146f4f9a985fb3f512e652d4ff354bba2
|
[
"MIT"
] | 1
|
2021-09-08T18:58:16.000Z
|
2021-09-08T18:58:16.000Z
|
andromeda/modules/loans/views/inventory_loans.py
|
sango09/andromeda_api_rest
|
b4a3267146f4f9a985fb3f512e652d4ff354bba2
|
[
"MIT"
] | null | null | null |
andromeda/modules/loans/views/inventory_loans.py
|
sango09/andromeda_api_rest
|
b4a3267146f4f9a985fb3f512e652d4ff354bba2
|
[
"MIT"
] | null | null | null |
"""Vista del inventario del modulo de prestamos tecnologicos."""
# Django REST Framework
from rest_framework import viewsets, mixins
# Permissions
from rest_framework.permissions import IsAuthenticated
from andromeda.modules.inventory.permissions import IsAdmin, IsStaff
# Models
from andromeda.modules.loans.models import InventoryLoans
# Serializers
from andromeda.modules.loans.serializers import InventoryLoansSerializer, CreateInventoryLoansSerializer
class InventoryLoansViewSet(mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""View set del inventario para el modulo de prestamos tecnologicos."""
queryset = InventoryLoans.objects.all()
def get_permissions(self):
"""Asigna los permisos basados en la acción."""
permissions = [IsAuthenticated]
if self.action in ['destroy']:
permissions.append(IsAdmin)
elif self.action in ['update', 'partial_update']:
permissions.append(IsStaff)
        return [p() for p in permissions]
def get_serializer_class(self):
"""Asigna el serializer basado en la acción."""
if self.action == 'create':
return CreateInventoryLoansSerializer
return InventoryLoansSerializer
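# Wiring sketch (not part of this module): the viewset would typically be
# registered with a DRF router in the project's urls.py. The route prefix and
# basename below are illustrative assumptions:
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'inventory-loans', InventoryLoansViewSet,
#                   basename='inventory-loans')
#   urlpatterns = router.urls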
| 36.230769
| 104
| 0.685775
| 954
| 0.674205
| 0
| 0
| 0
| 0
| 0
| 0
| 327
| 0.231095
|
ac6fc1b210632046a04f35464d2d89383a795143
| 876
|
py
|
Python
|
N-Gram/PlotUtils.py
|
FindTheTruth/Natural-Language-Processing
|
a52c777e505dd5ccd9f892fbf98ba50d4c29b31b
|
[
"Apache-2.0"
] | 1
|
2022-03-23T09:26:59.000Z
|
2022-03-23T09:26:59.000Z
|
N-Gram/PlotUtils.py
|
FindTheTruth/Natural-Language-Processing
|
a52c777e505dd5ccd9f892fbf98ba50d4c29b31b
|
[
"Apache-2.0"
] | null | null | null |
N-Gram/PlotUtils.py
|
FindTheTruth/Natural-Language-Processing
|
a52c777e505dd5ccd9f892fbf98ba50d4c29b31b
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pyplot as plt
x = ["N=1", "N=2", "N=3", "N=4", "N=5","N=6"]
y = [0.9365, 0.9865, 0.9895, 0.9950,0.9880,0.9615]
rects = plt.barh(x, y, color=["red", "blue", "purple", "violet", "green", "black"])
for rect in rects:  # rects is the collection of bar objects
width = rect.get_width()
print(width)
plt.text(width, rect.get_y() + rect.get_height() / 2, str(width), size=10)
plt.xlim(0.0,1.3)
# plt.legend()
plt.show()
x = ["k=1e-5","k=1e-4", "k=1e-3", "k=1e-2", "k=1e-1", "k=1.0"]
y = [0.9895, 0.9900, 0.9950, 0.9885,0.9740,0.831]
# y = [0.9365, 0.9865, 0.9895, 0.9950,0.9880,0.9615]
rects = plt.barh(x, y, color=["red", "blue", "purple", "violet", "green", "black"])
for rect in rects:  # rects is the collection of bar objects
width = rect.get_width()
print(width)
plt.text(width, rect.get_y() + rect.get_height() / 2, str(width), size=10)
plt.xlim(0.0,1.3)
# plt.legend()
plt.show()
| 36.5
| 83
| 0.589041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 303
| 0.3337
|
ac70a851d5c96469acf3749c8c769ab79086f7dc
| 226
|
py
|
Python
|
tests/test_logger.py
|
sp-95/python-template
|
3d4fab175314fe2b200e77c7c71c464e897749b9
|
[
"MIT"
] | null | null | null |
tests/test_logger.py
|
sp-95/python-template
|
3d4fab175314fe2b200e77c7c71c464e897749b9
|
[
"MIT"
] | null | null | null |
tests/test_logger.py
|
sp-95/python-template
|
3d4fab175314fe2b200e77c7c71c464e897749b9
|
[
"MIT"
] | null | null | null |
from _pytest.logging import LogCaptureFixture
from loguru import logger
def test_log_configuration(caplog: LogCaptureFixture) -> None:
message = "Test message"
logger.info(message)
assert message in caplog.text
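# Note: pytest's caplog only captures records routed through the stdlib
# logging module, which loguru bypasses by default. If this project's
# conftest.py does not already bridge the two, a fixture along these lines
# (adapted from the loguru docs) would be needed -- a sketch, not confirmed
# to be part of this repo:
#
#   import logging
#   import pytest
#
#   @pytest.fixture
#   def caplog(caplog: LogCaptureFixture):
#       class PropagateHandler(logging.Handler):
#           def emit(self, record):
#               logging.getLogger(record.name).handle(record)
#       handler_id = logger.add(PropagateHandler(), format="{message}")
#       yield caplog
#       logger.remove(handler_id)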
| 22.6
| 62
| 0.774336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.061947
|
ac714ac75d2b71ae4b1604d302d1232df43b166e
| 142
|
py
|
Python
|
Semana 09/fase.py
|
heltonricardo/grupo-estudos-maratonas-programacao
|
0c07d84a900858616647d07574ec56b0533cddfb
|
[
"MIT"
] | null | null | null |
Semana 09/fase.py
|
heltonricardo/grupo-estudos-maratonas-programacao
|
0c07d84a900858616647d07574ec56b0533cddfb
|
[
"MIT"
] | null | null | null |
Semana 09/fase.py
|
heltonricardo/grupo-estudos-maratonas-programacao
|
0c07d84a900858616647d07574ec56b0533cddfb
|
[
"MIT"
] | null | null | null |
# Read n scores; the top k advance, plus anyone tied with the k-th score.
n, k, v = int(input()), int(input()), []
for i in range(n):
    v.append(int(input()))
v = sorted(v, reverse=True)
print(k + v[k:].count(v[k - 1]))
| 28.4
| 41
| 0.570423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ac715495b2bb97e43a63daf0a85ca3c192ec09c8
| 1,160
|
py
|
Python
|
02_crowsnest/ragz_crowsnest.py
|
zrucker/tiny_python_projects
|
7760c9db2e89640c6485e01891a0022927a46c3b
|
[
"MIT"
] | null | null | null |
02_crowsnest/ragz_crowsnest.py
|
zrucker/tiny_python_projects
|
7760c9db2e89640c6485e01891a0022927a46c3b
|
[
"MIT"
] | null | null | null |
02_crowsnest/ragz_crowsnest.py
|
zrucker/tiny_python_projects
|
7760c9db2e89640c6485e01891a0022927a46c3b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Date : 2021-09-06
Purpose: learning to work with strings
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Crow\'s Nest -- choose the correct article',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('word',
metavar='word',
help='A word')
return parser.parse_args()
# --------------------------------------------------
def get_article(user_input):
"""Determine which article to use"""
# vowels = ['a', 'e', 'i', 'o', 'u']
if user_input[0] in 'aeiouAEIOU':
solution = "an"
else:
solution = "a"
return solution
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
word = args.word
article = get_article(word)
print("Ahoy, Captain, {} {} off the larboard bow!".format(article, word))
# --------------------------------------------------
if __name__ == '__main__':
main()
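# Example run (a word starting with a vowel picks 'an'):
#   $ python ragz_crowsnest.py octopus
#   Ahoy, Captain, an octopus off the larboard bow!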
| 21.481481
| 77
| 0.483621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 568
| 0.489655
|
ac71f74611bb270f9befb24fa9ac317b196927ef
| 9,397
|
py
|
Python
|
lvsfunc/render.py
|
End-of-Eternity/lvsfunc
|
1c9ff7f1d9731378536fb428f077075285e25843
|
[
"MIT"
] | null | null | null |
lvsfunc/render.py
|
End-of-Eternity/lvsfunc
|
1c9ff7f1d9731378536fb428f077075285e25843
|
[
"MIT"
] | null | null | null |
lvsfunc/render.py
|
End-of-Eternity/lvsfunc
|
1c9ff7f1d9731378536fb428f077075285e25843
|
[
"MIT"
] | null | null | null |
"""
Clip rendering helpers.
"""
import vapoursynth as vs
from enum import Enum
from threading import Condition
from typing import BinaryIO, Callable, Dict, List, Optional, TextIO, Union
from concurrent.futures import Future
from functools import partial
from .progress import Progress, BarColumn, FPSColumn, TextColumn, TimeRemainingColumn
from .util import get_prop
core = vs.core
RenderCallback = Callable[[int, vs.VideoFrame], None]
class RenderContext:
"""
Contains info on the current render operation.
"""
clip: vs.VideoNode
queued: int
frames: Dict[int, vs.VideoFrame]
frames_rendered: int
timecodes: List[float]
condition: Condition
def __init__(self, clip: vs.VideoNode, queued: int) -> None:
self.clip = clip
self.queued = queued
self.frames = {}
self.frames_rendered = 0
self.timecodes = [0.0]
self.condition = Condition()
def finish_frame(outfile: Optional[BinaryIO], timecodes: Optional[TextIO], ctx: RenderContext) -> None:
"""
Output a frame.
:param outfile: Output IO handle for Y4MPEG
:param timecodes: Output IO handle for timecodesv2
:param ctx: Rendering context
"""
if timecodes:
timecodes.write(f"{round(ctx.timecodes[ctx.frames_rendered]*1000):d}\n")
if outfile is None:
return
f: vs.VideoFrame = ctx.frames[ctx.frames_rendered]
outfile.write("FRAME\n".encode("utf-8"))
for i, p in enumerate(f.planes()):
if f.get_stride(i) != p.width * f.format.bytes_per_sample:
outfile.write(bytes(p)) # type: ignore
else:
outfile.write(p) # type: ignore
def clip_async_render(clip: vs.VideoNode,
outfile: Optional[BinaryIO] = None,
timecodes: Optional[TextIO] = None,
progress: Optional[str] = "Rendering clip...",
callback: Union[RenderCallback, List[RenderCallback], None] = None) -> List[float]:
"""
Render a clip by requesting frames asynchronously using clip.get_frame_async,
providing for callback with frame number and frame object.
This is mostly a re-implementation of VideoNode.output, but a little bit slower since it's pure python.
You only really need this when you want to render a clip while operating on each frame in order
or you want timecodes without using vspipe.
:param clip: Clip to render.
:param outfile: Y4MPEG render output BinaryIO handle. If None, no Y4M output is performed.
Use ``sys.stdout.buffer`` for stdout. (Default: None)
:param timecodes: Timecode v2 file TextIO handle. If None, timecodes will not be written.
:param progress: String to use for render progress display.
If empty or ``None``, no progress display.
:param callback: Single or list of callbacks to be preformed. The callbacks are called
when each sequential frame is output, not when each frame is done.
Must have signature ``Callable[[int, vs.VideoNode], None]``
See :py:func:`lvsfunc.comparison.diff` for a use case (Default: None).
:return: List of timecodes from rendered clip.
"""
cbl = [] if callback is None else callback if isinstance(callback, list) else [callback]
if progress:
p = get_render_progress()
task = p.add_task(progress, total=clip.num_frames)
def _progress_cb(n: int, f: vs.VideoFrame) -> None:
p.update(task, advance=1)
cbl.append(_progress_cb)
ctx = RenderContext(clip, core.num_threads)
bad_timecodes: bool = False
def cb(f: Future[vs.VideoFrame], n: int) -> None:
ctx.frames[n] = f.result()
nn = ctx.queued
while ctx.frames_rendered in ctx.frames:
nonlocal timecodes
nonlocal bad_timecodes
frame = ctx.frames[ctx.frames_rendered]
# if a frame is missing timing info, clear timecodes because they're worthless
if ("_DurationNum" not in frame.props or "_DurationDen" not in frame.props) and not bad_timecodes:
bad_timecodes = True
if timecodes:
timecodes.seek(0)
timecodes.truncate()
timecodes = None
ctx.timecodes = []
print("clip_async_render: frame missing duration information, discarding timecodes")
elif not bad_timecodes:
ctx.timecodes.append(ctx.timecodes[-1]
+ get_prop(frame, "_DurationNum", int)
/ get_prop(frame, "_DurationDen", int))
finish_frame(outfile, timecodes, ctx)
[cb(ctx.frames_rendered, ctx.frames[ctx.frames_rendered]) for cb in cbl]
del ctx.frames[ctx.frames_rendered] # tfw no infinite memory
ctx.frames_rendered += 1
# enqueue a new frame
if nn < clip.num_frames:
ctx.queued += 1
cbp = partial(cb, n=nn)
clip.get_frame_async(nn).add_done_callback(cbp) # type: ignore
ctx.condition.acquire()
ctx.condition.notify()
ctx.condition.release()
if outfile:
if clip.format is None:
raise ValueError("clip_async_render: 'Cannot render a variable format clip to y4m!'")
if clip.format.color_family not in (vs.YUV, vs.GRAY):
raise ValueError("clip_async_render: 'Can only render YUV and GRAY clips to y4m!'")
if clip.format.color_family == vs.GRAY:
y4mformat = "mono"
else:
ss = (clip.format.subsampling_w, clip.format.subsampling_h)
if ss == (1, 1):
y4mformat = "420"
elif ss == (1, 0):
y4mformat = "422"
elif ss == (0, 0):
y4mformat = "444"
elif ss == (2, 2):
y4mformat = "410"
elif ss == (2, 0):
y4mformat = "411"
elif ss == (0, 1):
y4mformat = "440"
else:
raise ValueError("clip_async_render: 'What have you done'")
y4mformat = f"{y4mformat}p{clip.format.bits_per_sample}" if clip.format.bits_per_sample > 8 else y4mformat
header = f"YUV4MPEG2 C{y4mformat} W{clip.width} H{clip.height} F{clip.fps_num}:{clip.fps_den} Ip A0:0\n"
outfile.write(header.encode("utf-8"))
if timecodes:
timecodes.write("# timestamp format v2\n")
ctx.condition.acquire()
# seed threads
if progress:
p.start()
try:
for n in range(min(clip.num_frames, core.num_threads)):
cbp = partial(cb, n=n) # lambda won't bind the int immediately
clip.get_frame_async(n).add_done_callback(cbp) # type: ignore
while ctx.frames_rendered != clip.num_frames:
ctx.condition.wait()
finally:
if progress:
p.stop()
return ctx.timecodes # might as well
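# Usage sketch: render an existing `clip` (a vs.VideoNode, assumed to be
# defined elsewhere) to stdout as Y4M while collecting its timecodes, e.g.
# for piping into ffmpeg:
#
#   import sys
#   timecodes = clip_async_render(clip, outfile=sys.stdout.buffer)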
def get_render_progress() -> Progress:
return Progress(
TextColumn("{task.description}"),
BarColumn(),
TextColumn("{task.completed}/{task.total}"),
TextColumn("{task.percentage:>3.02f}%"),
FPSColumn(),
TimeRemainingColumn(),
)
class SceneChangeMode(Enum):
WWXD = 0
SCXVID = 1
WWXD_SCXVID_UNION = 2
WWXD_SCXVID_INTERSECTION = 3
def find_scene_changes(clip: vs.VideoNode, mode: SceneChangeMode = SceneChangeMode.WWXD) -> List[int]:
"""
Generate a list of scene changes (keyframes).
Dependencies:
* vapoursynth-wwxd
* vapoursynth-scxvid (Optional: scxvid mode)
:param clip: Clip to search for scene changes. Will be rendered in its entirety.
:param mode: Scene change detection mode:
* WWXD: Use wwxd
* SCXVID: Use scxvid
* WWXD_SCXVID_UNION: Union of wwxd and sxcvid (must be detected by at least one)
* WWXD_SCXVID_INTERSECTION: Intersection of wwxd and scxvid (must be detected by both)
:return: List of scene changes.
"""
frames = []
clip = clip.resize.Bilinear(640, 360, format=vs.YUV420P8)
if mode in (SceneChangeMode.WWXD, SceneChangeMode.WWXD_SCXVID_UNION, SceneChangeMode.WWXD_SCXVID_INTERSECTION):
clip = clip.wwxd.WWXD()
if mode in (SceneChangeMode.SCXVID, SceneChangeMode.WWXD_SCXVID_UNION, SceneChangeMode.WWXD_SCXVID_INTERSECTION):
clip = clip.scxvid.Scxvid()
def _cb(n: int, f: vs.VideoFrame) -> None:
if mode == SceneChangeMode.WWXD:
if get_prop(f, "Scenechange", int) == 1:
frames.append(n)
elif mode == SceneChangeMode.SCXVID:
if get_prop(f, "_SceneChangePrev", int) == 1:
frames.append(n)
elif mode == SceneChangeMode.WWXD_SCXVID_UNION:
if get_prop(f, "Scenechange", int) == 1 or get_prop(f, "_SceneChangePrev", int) == 1:
frames.append(n)
elif mode == SceneChangeMode.WWXD_SCXVID_INTERSECTION:
if get_prop(f, "Scenechange", int) == 1 and get_prop(f, "_SceneChangePrev", int) == 1:
frames.append(n)
clip_async_render(clip, progress="Detecting scene changes...", callback=_cb)
return sorted(frames)
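# Usage sketch (assumes the wwxd/scxvid plugins are installed and `clip` is
# an existing vs.VideoNode):
#
#   keyframes = find_scene_changes(clip, SceneChangeMode.WWXD_SCXVID_UNION)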
| 36.85098
| 117
| 0.612536
| 601
| 0.063957
| 0
| 0
| 0
| 0
| 0
| 0
| 3,200
| 0.340534
|
ac72f9700e343b6945d908397ce48596d4d77b7e
| 1,689
|
py
|
Python
|
geekjobs/forms.py
|
paconte/geekjobs
|
4f5f72c9a08dd4e7bf58dc68364dedce9c248c3e
|
[
"MIT"
] | null | null | null |
geekjobs/forms.py
|
paconte/geekjobs
|
4f5f72c9a08dd4e7bf58dc68364dedce9c248c3e
|
[
"MIT"
] | null | null | null |
geekjobs/forms.py
|
paconte/geekjobs
|
4f5f72c9a08dd4e7bf58dc68364dedce9c248c3e
|
[
"MIT"
] | 1
|
2019-09-14T21:59:18.000Z
|
2019-09-14T21:59:18.000Z
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from geekjobs.models import Job
"""
class JobForm(forms.Form):
title = forms.CharField(label='Job title', max_length=380)
city = forms.CharField(label='City', max_length=100, required=False)
state = forms.ChoiceField(label='State', choices=DE_STATE_CHOICES)
remote = forms.BooleanField(label='Remote', required=False)
salary = forms.CharField(label='Salary', max_length=100, required=False)
description = forms.CharField(label='Job Description', max_length=10000)
description.widget = forms.HiddenInput()
instructions = forms.CharField(label='How do people apply for this job?', max_length=380)
instructions.widget = forms.Textarea(attrs={'rows': 3})
name = forms.CharField(label='Company Name', max_length=100)
url = forms.CharField(label='Job URL', max_length=150)
email = forms.EmailField(label='Email')
"""
class JobForm(forms.ModelForm):
class Meta:
model = Job
fields = ('title', 'city', 'state', 'remote', 'salary', 'description', 'instructions', 'name', 'url', 'email')
widgets = {
'description': forms.HiddenInput,
'instructions': forms.Textarea(attrs={'rows': 3})
}
labels = {
'title': _('Job Title'),
'city': _('City'),
'state': _('State'),
'remote': _('Remote'),
'salary': _('Salary'),
'description': _('Job Description'),
'instructions': _('How do people apply for this job?'),
'name': _('Company Name'),
'url': _('Job URL'),
'email': _('Email')
}
| 40.214286
| 118
| 0.616341
| 745
| 0.441089
| 0
| 0
| 0
| 0
| 0
| 0
| 1,143
| 0.676732
|
ac7444482c933dd6646be8739b9a9cde05c17711
| 1,133
|
py
|
Python
|
solutions/day3/solution.py
|
JavierLuna/advent-of-code-2020
|
57429a7973446472fffb07dc770f260160407f0c
|
[
"MIT"
] | 1
|
2020-12-03T08:57:20.000Z
|
2020-12-03T08:57:20.000Z
|
solutions/day3/solution.py
|
JavierLuna/advent-of-code-2020
|
57429a7973446472fffb07dc770f260160407f0c
|
[
"MIT"
] | null | null | null |
solutions/day3/solution.py
|
JavierLuna/advent-of-code-2020
|
57429a7973446472fffb07dc770f260160407f0c
|
[
"MIT"
] | null | null | null |
import math
from solutions.runner.base_solution import BaseSolution
from solutions.runner.readers.base_reader import BaseReader
TREE = "#"
class Day3Reader(BaseReader):
@staticmethod
def transform_raw_line(line: str):
line = line.strip()
return line
class Day3Solution(BaseSolution):
__reader__ = Day3Reader
def solve_first(self):
return self._count_trees(self.input_data, 1, 3)
def _count_trees(self, input_data: str, down: int, right: int):
tree_count = 0
x_position = 0
for i, line in enumerate(input_data):
if i % down != 0:
continue
if line[x_position] == TREE:
tree_count += 1
x_position += right
x_position = x_position % len(line)
return tree_count
def solve_second(self):
combos = [
(1, 1),
(1, 3),
(1, 5),
(1, 7),
(2, 1)
]
input_data = self.input_data
return math.prod([self._count_trees(input_data, *combo) for combo in combos])
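# Sanity-check sketch for the slope walk (hypothetical 3-wide map): starting
# at column 0 and moving (down=1, right=3), the walk wraps back to column 0
# on the second row and hits its tree, so
#   self._count_trees(["..#", "#.."], 1, 3) == 1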
| 24.106383
| 85
| 0.579876
| 955
| 0.842895
| 0
| 0
| 100
| 0.088261
| 0
| 0
| 3
| 0.002648
|
ac74f61ecc9c4ba33c87adf303e2474d5cc2e06f
| 10,003
|
py
|
Python
|
lib/core_tools/tools.py
|
rocketcapital-ai/competition_submission
|
bb9663dfe17733cc7de841f48e9e2770d911c599
|
[
"MIT"
] | 1
|
2022-02-12T08:38:42.000Z
|
2022-02-12T08:38:42.000Z
|
lib/core_tools/tools.py
|
rocketcapital-ai/competition_submission
|
bb9663dfe17733cc7de841f48e9e2770d911c599
|
[
"MIT"
] | null | null | null |
lib/core_tools/tools.py
|
rocketcapital-ai/competition_submission
|
bb9663dfe17733cc7de841f48e9e2770d911c599
|
[
"MIT"
] | null | null | null |
import base58
import datetime
import json
import os
import pandas as pd
import requests
import shutil
import time
import web3
import yaml
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes
from decimal import Decimal
from typing import Any, Callable
from web3 import types
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
CFG_DIR = os.path.abspath('{}//..//..//cfg_files'.format(CURRENT_DIR))
with open("{}//cfg.yml".format(CFG_DIR), "r") as config_file:
CFG = yaml.safe_load(config_file)
TOKEN_ADDRESS = CFG['LIVE']['TOKEN']
COMPETITION_ADDRESS = CFG['LIVE']['COMPETITION']
SUBMISSION_DIRECTORY = os.path.abspath('{}//..//..//{}'.format(CURRENT_DIR, CFG['SUBMISSION_FOLDER_NAME']))
ENCRYPTED_SUBMISSIONS_DIRECTORY = os.path.abspath('{}//..//..//{}'.format(CURRENT_DIR, CFG['ENCRYPTED_SUBMISSIONS']))
class GasPriceMode:
standard = 'standardgaspricegwei'
fast = 'fastgaspricegwei'
rapid = 'rapidgaspricegwei'
def cid_to_hash(cid: str) -> str:
res = base58.b58decode(cid).hex()
return res[4:]
def decimal_to_uint(decimal_value: Decimal or float or int, decimal_places=18) -> int:
    return int(Decimal('{}e{}'.format(decimal_value, decimal_places)))
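# e.g. decimal_to_uint(1.5) == 1_500_000_000_000_000_000 (18 decimal places
# by default); the inverse of uint_to_decimal() defined below.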
def decrypt_file(file_name: str, decrypt_key_file: str, decrypted_file_name=None, verbose=False) -> str:
with open(decrypt_key_file, 'rb') as key_f:
decrypted_key = key_f.read()
with open(file_name, 'rb') as enc_f:
key = enc_f.read()
nonce = key[:16]
ciphertext = key[16:-16]
tag = key[-16:]
cipher = AES.new(decrypted_key, AES.MODE_GCM, nonce)
decrypted_data = cipher.decrypt_and_verify(ciphertext, tag)
    if decrypted_file_name is None:
        decrypted_file_name = file_name.split('.')[0] + '_decrypted.csv'
with open(decrypted_file_name, 'wb') as dec_f:
dec_f.write(decrypted_data)
if verbose:
print('Decrypted predictions file saved to {}.'.format(decrypted_file_name))
return decrypted_file_name
def encrypt_csv(file_name: str, submitter_address: str, public_key: RSA.RsaKey) -> (str, bytes):
symmetric_key = get_random_bytes(16)
new_submission_dir = '{}//{}'.format(ENCRYPTED_SUBMISSIONS_DIRECTORY,
datetime.datetime.now().strftime('%Y-%m-%d_%Hh%Mm%Ss'))
os.makedirs(new_submission_dir, exist_ok=False)
if file_name.split('.')[-1] != 'csv':
assert False, 'Please input a .csv file.'
# Encrypt and save predictions file.
cipher = AES.new(symmetric_key, AES.MODE_GCM)
with open('{}//{}'.format(SUBMISSION_DIRECTORY, file_name), 'rb') as f:
ciphertext, tag = cipher.encrypt_and_digest(f.read())
encrypted_predictions_path = '{}//{}.bin'.format(new_submission_dir, 'encrypted_predictions')
with open(encrypted_predictions_path, 'wb') as encrypted_predictions_file:
for x in (cipher.nonce, ciphertext, tag):
encrypted_predictions_file.write(x)
# Encrypt and save originator file.
cipher = AES.new(symmetric_key, AES.MODE_GCM)
submitter_address = web3.Web3.toChecksumAddress(submitter_address)
ciphertext, tag = cipher.encrypt_and_digest(bytes(submitter_address, 'utf-8'))
encrypted_originator_path = '{}//{}.bin'.format(new_submission_dir, 'originator')
with open(encrypted_originator_path, 'wb') as encrypted_originator_file:
for x in (cipher.nonce, ciphertext, tag):
encrypted_originator_file.write(x)
# Encrypt and save symmetric key using Competition public key for this challenge.
cipher = PKCS1_OAEP.new(public_key)
encrypted_symmetric_key = cipher.encrypt(symmetric_key)
encrypted_symmetric_key_path = '{}//{}.pem'.format(new_submission_dir, 'encrypted_symmetric_key')
with open(encrypted_symmetric_key_path, 'wb') as encrypted_symmetric_key_file:
encrypted_symmetric_key_file.write(encrypted_symmetric_key)
return new_submission_dir, symmetric_key
def get_avg_gas_price_in_gwei(mode=GasPriceMode.fast, retry_seconds=3, num_retries=10) -> int:
for tries in range(num_retries):
try:
result = requests.get(CFG['GAS_PRICE_URL'], timeout=CFG['REQUESTS_TIMEOUT']).json()['result']
avg_gas_price_in_gwei = result[mode]
base_gas_price_in_gwei = get_base_gas_price_in_gwei()
if avg_gas_price_in_gwei < (base_gas_price_in_gwei * 1.13):
continue
return avg_gas_price_in_gwei
except Exception as e:
if tries == num_retries - 1:
try:
assert False, 'Response\n{}\n\nSystem Error\n{}'.format(result, e)
except Exception as e:
assert False, 'Unspecified error.\n{}'.format(e)
time.sleep(retry_seconds)
def get_base_gas_price_in_gwei() -> int:
base_gas_wei_hex = network_read(['pending', False], 'eth_getBlockByNumber')['baseFeePerGas']
base_gas_wei = int(base_gas_wei_hex, 16)
base_gas_gwei = decimal_to_uint(base_gas_wei, -9)
return base_gas_gwei
def hash_to_cid(hash_obj: bytes or bytearray or str) -> str:
if isinstance(hash_obj, (bytes, bytearray)): hash_obj = hash_obj.hex()
hash_obj = '1220' + str(hash_obj)
hash_obj = int(hash_obj, 16)
return base58.b58encode_int(hash_obj).decode('utf-8')
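# Round-trip sketch with a well-known IPFS CIDv0 (for illustration only):
#   h = cid_to_hash('QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG')
#   hash_to_cid(h) == 'QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG'  # True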
def network_read(params: [Any], method="eth_call", retry_seconds=3, num_retries=10) -> str:
payload = {"jsonrpc": "2.0", "method": method, "params": params, "id": 1}
headers = {"Content-Type": "application/json"}
for retries in range(num_retries):
r = requests.post(CFG['POLYGON_GATEWAY'], headers=headers, json=payload, timeout=CFG['REQUESTS_TIMEOUT'])
if r.ok:
keys = r.json().keys()
if "result" in keys:
return r.json()["result"]
elif "error" in keys:
assert False, r.json()["error"]["message"]
else:
assert False, "Unspecified network error."
else:
time.sleep(retry_seconds)
assert False, "network read exceeded max retries. Please try again later."
def pin_file_to_ipfs(filename: str, jwt: str, cid_version=0, verbose=False, retry_seconds=3, num_retries=10) -> str:
url = '{}/{}'.format(CFG['IPFS_API_URL'], 'pinning/pinFileToIPFS')
headers = {"Authorization": "Bearer " + jwt}
for tries in range(num_retries):
try:
with open(filename, 'rb') as f:
files = {"file": f}
params = {"cidVersion": cid_version}
                response = requests.post(url, headers=headers, files=files, params=params)
                result = response.json()
                if verbose:
                    print('Pinned payload with size {} bytes to {} at {}.'.format(
                        result['PinSize'], result['IpfsHash'], result['Timestamp']))
                return result['IpfsHash']
except Exception as e:
if tries == num_retries - 1:
assert False, 'File could not be uploaded and pinned to IPFS. Please try again later or contact {} for support.'.format(
CFG['SUPPORT_EMAIL'])
time.sleep(retry_seconds)
def retrieve_file(cid, destination=None, retry_seconds=3, num_retries=10):
content = retrieve_content(cid, retry_seconds, num_retries)
with open(destination, 'wb') as f:
f.write(content)
return destination
def retrieve_content(cid, retry_seconds=3, num_retries=10):
for tries in range(num_retries):
try:
requests.get('{}/{}'.format(CFG['IPFS_GATEWAY'], CFG['IPFS_DEFAULT']), timeout=CFG['REQUESTS_TIMEOUT'])
r = requests.get('{}/{}'.format(CFG['IPFS_GATEWAY'], cid), timeout=CFG['REQUESTS_TIMEOUT'])
return r.content
except Exception as e:
print(e)
if tries == num_retries - 1:
assert False, 'File could not be retrieved. Please try again later or contact {} for support.'.format(
CFG['SUPPORT_EMAIL'])
time.sleep(retry_seconds)
def send_transaction(w3: web3.Web3, controlling_account, method: Callable, args: list, gas_price_in_wei: int, verbose=True) -> web3.types.TxReceipt:
assert controlling_account is not None, 'Private key required to send blockchain transactions.'
tx_data = method(*args).buildTransaction({
'from': controlling_account.address,
'maxFeePerGas': hex(gas_price_in_wei),
'nonce': w3.eth.getTransactionCount(controlling_account.address)
})
signed_tx = w3.eth.account.sign_transaction(tx_data, controlling_account.privateKey)
tx_id = w3.eth.send_raw_transaction(signed_tx.rawTransaction)
if verbose:
print('Sending transaction {}'.format(tx_id.hex()))
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_id, CFG['W3_TIMEOUT'], CFG['W3_INTERVAL'])
if verbose:
print('Transaction sent. Tx ID: {}'.format(tx_id.hex()))
return tx_receipt
def set_gas_price_in_gwei(gas_price_in_gwei=None, verbose=True) -> int:
if gas_price_in_gwei is None:
gas_price_in_gwei = get_avg_gas_price_in_gwei()
elif type(gas_price_in_gwei) is str:
gas_price_in_gwei = get_avg_gas_price_in_gwei(gas_price_in_gwei)
if verbose:
print('Setting gas price to {} gwei.'.format(gas_price_in_gwei))
gas_price_in_wei = decimal_to_uint(gas_price_in_gwei, 9)
return gas_price_in_wei
def uint_to_decimal(uint_value: int, decimal_places=18) -> Decimal:
if uint_value == 0:
return Decimal(0)
return Decimal('{}e-{}'.format(uint_value, decimal_places))
def unzip_dir(zippedFile: str, extractDest: str, verbose=False) -> str:
shutil.unpack_archive(zippedFile, extractDest)
if verbose:
print('Data unzipped to {}.'.format(extractDest))
return extractDest
def zip_file(file_path: str, dest=None) -> str:
if dest is None:
dest = file_path
return shutil.make_archive(dest, 'zip', file_path)
| 41.853556
| 148
| 0.674298
| 119
| 0.011896
| 0
| 0
| 0
| 0
| 0
| 0
| 1,734
| 0.173348
|
ac75e12c17c4b689ad5e95e21f9b92a7a82c808e
| 2,108
|
py
|
Python
|
my_loc/__init__.py
|
PIYUSH-GEEK/my_loc
|
777eeefec3bc29f03c1be956037c10bf8457dfc9
|
[
"MIT"
] | 1
|
2019-08-18T07:06:36.000Z
|
2019-08-18T07:06:36.000Z
|
my_loc/__init__.py
|
PIYUSH-GEEK/my_loc
|
777eeefec3bc29f03c1be956037c10bf8457dfc9
|
[
"MIT"
] | null | null | null |
my_loc/__init__.py
|
PIYUSH-GEEK/my_loc
|
777eeefec3bc29f03c1be956037c10bf8457dfc9
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
def locate(i):
my_loc = requests.get('https://ipinfo.io').json()
if str(i).isdigit():
return 'Invalid input. Try a string.'
else:
if i == 'ip':
return my_loc['ip']
elif i == 'city' or i == 'capital':
return my_loc['city']
elif i == 'region' or i == 'state':
return my_loc['region']
elif i == 'country':
return my_loc['country']
elif i == 'loc':
return my_loc['loc']
elif i == 'org':
return my_loc['org']
elif i == 'postal' or i == 'pin':
return my_loc['postal']
elif i == 'readme':
return my_loc['readme']
def lat(input):
if input == 'Gomia' or input == 'gomia':
return 'Try \'gumia\' or \'Gumia\''
try:
page_get = requests.get('https://www.latlong.net/search.php?keyword=%s'%input)
page_content = BeautifulSoup(page_get.content, 'html.parser')
tr1 = page_content.find_all('tr')[1]
        td1 = tr1.find_all('td')[0].text
        td2 = tr1.find_all('td')[1].text
        print('Place: ', td1)
        return td2
    except IndexError:
return 'Search for some other place.'
def lng(input):
if input == 'Gomia' or input == 'gomia':
return 'Try \'gumia\' or \'Gumia\''
try:
page_get = requests.get('https://www.latlong.net/search.php?keyword=%s'%input)
page_content = BeautifulSoup(page_get.content, 'html.parser')
tr1 = page_content.find_all('tr')[1]
        td1 = tr1.find_all('td')[0].text
        td3 = tr1.find_all('td')[2].text
        print('Place: ', td1)
        return td3
    except IndexError:
return 'Search for some other place.'
def ltlng(input):
if input == 'Gomia' or input == 'gomia':
return 'Try \'gumia\' or \'Gumia\''
try:
page_get = requests.get('https://www.latlong.net/search.php?keyword=%s'%input)
page_content = BeautifulSoup(page_get.content, 'html.parser')
tr1 = page_content.find_all('tr')[1]
        td1 = tr1.find_all('td')[0].text
        td2 = tr1.find_all('td')[1].text
        td3 = tr1.find_all('td')[2].text
        print('Place: ', td1)
        return td2, td3
    except IndexError:
return 'Search for some other place.'
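# Usage sketch (all helpers need network access; results depend on the
# caller's IP and on latlong.net's current data):
#   locate('city')   # your city as reported by ipinfo.io
#   ltlng('Tokyo')   # prints the matched place, returns (lat, lng) strings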
| 22.913043
| 80
| 0.624763
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 639
| 0.303131
|
ac7655238bec5c95a9a31d91cc90421f9c35aee8
| 1,045
|
py
|
Python
|
Python3/03_Longest_Substring_Without_Repeating_Characters.py
|
yangjiahao106/LeetCode
|
c30ba0ef06f444951f7ab8eee495ac43613d7f4f
|
[
"RSA-MD"
] | 1
|
2018-04-28T09:07:11.000Z
|
2018-04-28T09:07:11.000Z
|
Python3/03_Longest_Substring_Without_Repeating_Characters.py
|
yangjiahao106/LeetCode
|
c30ba0ef06f444951f7ab8eee495ac43613d7f4f
|
[
"RSA-MD"
] | 1
|
2018-02-24T16:26:30.000Z
|
2018-02-24T16:26:44.000Z
|
Python3/03_Longest_Substring_Without_Repeating_Characters.py
|
yangjiahao106/LeetCode
|
c30ba0ef06f444951f7ab8eee495ac43613d7f4f
|
[
"RSA-MD"
] | null | null | null |
#! python3
# __author__ = "YangJiaHao"
# date: 2018/1/26
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
d = dict()
l, r = 0, 0
res = 0
while r < len(s):
if s[r] not in d:
d[s[r]] = None
r += 1
res = max(res, r - l)
else:
del d[s[l]]
l += 1
return res
class Solution2:
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
lookup = {}
offset = -1
longest = 0
for idx, char in enumerate(s):
if char in lookup:
if offset < lookup[char]:
offset = lookup[char]
lookup[char] = idx
length = idx - offset
if length > longest:
longest = length
return longest
if __name__ == '__main__':
solution = Solution()
theMax = solution.lengthOfLongestSubstring("aaaabc")
print(theMax)
| 23.222222
| 56
| 0.453589
| 853
| 0.816268
| 0
| 0
| 0
| 0
| 0
| 0
| 128
| 0.122488
|
ac78bb16539f516e7b674abcc8a2e5d7d2d059dd
| 2,636
|
py
|
Python
|
youwol/backends/cdn/resources_initialization.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | null | null | null |
youwol/backends/cdn/resources_initialization.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | 1
|
2022-03-14T09:40:15.000Z
|
2022-03-14T09:40:15.000Z
|
youwol/backends/cdn/resources_initialization.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | null | null | null |
import asyncio
import os
from youwol_utils import WhereClause, QueryBody, Query, Path, flatten
from .configurations import Configuration
from .utils import format_download_form, post_storage_by_chunk, md5_from_folder
from .utils_indexing import format_doc_db_record, post_indexes, get_version_number_str
async def init_resources(config: Configuration):
print("### Ensure database resources ###")
headers = await config.admin_headers if config.admin_headers else {}
doc_db = config.doc_db
storage = config.storage
table_ok, bucket_ok = await asyncio.gather(
doc_db.ensure_table(headers=headers),
storage.ensure_bucket(headers=headers)
)
if bucket_ok and not table_ok:
print("Need to re-index stuffs of bucket")
raise Exception("The table index is not up-to-date w/ bucket content, manual index-synchronisation needed")
clauses = [[WhereClause(column="library_name", relation="eq", term=lib.split("#")[0]),
WhereClause(column="version_number", relation="eq", term=get_version_number_str(lib.split("#")[1]))
]
for lib in Configuration.required_libs]
bodies = [QueryBody(query=Query(where_clause=c)) for c in clauses]
responses = await asyncio.gather(*[doc_db.query(query_body=b, owner=Configuration.owner, headers=headers)
for b in bodies])
if all([len(r['documents']) == 1 for r in responses]):
print("Found required resources")
return
print("post initial resources")
await synchronize(Path(__file__).parent / "initial_resources", "", config, headers=headers)
print("### resources initialization done ###")
async def synchronize(dir_path: Path, zip_dir_name: str, configuration: any, headers: any):
paths = flatten([[Path(root) / f for f in files] for root, _, files in os.walk(str(dir_path))])
paths = list(paths)
forms = await asyncio.gather(*[format_download_form(path, Path(), dir_path / zip_dir_name, False)
for path in paths])
await post_storage_by_chunk(configuration.storage, list(forms), 1, headers)
paths_index = flatten([[Path(root) / f for f in files if f == "package.json"]
for root, _, files in os.walk(str(dir_path))])
check_dum = md5_from_folder(dir_path)
indexes = [format_doc_db_record(package_path=path, fingerprint=check_dum) for path in paths_index]
namespaces = {d["namespace"] for d in indexes}
await post_indexes(configuration.doc_db, indexes, 25, headers)
return len(forms), len(indexes), namespaces
| 42.516129
| 115
| 0.685129
| 0
| 0
| 0
| 0
| 0
| 0
| 2,324
| 0.881639
| 350
| 0.132777
|
ac79385f0f5c532c4496afb0999dcc78d78a4e70
| 174
|
py
|
Python
|
Python32/hackeandoface3.py
|
andersonsilvade/python_C
|
ffc00184883089f1c2d9b8a6c32503b2c8b8d035
|
[
"MIT"
] | null | null | null |
Python32/hackeandoface3.py
|
andersonsilvade/python_C
|
ffc00184883089f1c2d9b8a6c32503b2c8b8d035
|
[
"MIT"
] | null | null | null |
Python32/hackeandoface3.py
|
andersonsilvade/python_C
|
ffc00184883089f1c2d9b8a6c32503b2c8b8d035
|
[
"MIT"
] | 1
|
2020-11-04T08:36:28.000Z
|
2020-11-04T08:36:28.000Z
|
import urllib.request
import json
url = 'http://graph.facebook.com/fmasanori'
resp = urllib.request.urlopen(url).read()
data = json.loads(resp.decode('utf-8'))
print(data)
| 19.333333
| 43
| 0.735632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.252874
|
ac79cb182a550452c6328b4a381ff03422af574d
| 1,769
|
py
|
Python
|
oasislmf/cli/admin.py
|
fl-ndaq/OasisLMF
|
921718bfad2eb12844960df7f7330284d4e0bedc
|
[
"BSD-3-Clause"
] | 88
|
2018-03-24T11:57:10.000Z
|
2022-03-21T13:04:41.000Z
|
oasislmf/cli/admin.py
|
fl-ndaq/OasisLMF
|
921718bfad2eb12844960df7f7330284d4e0bedc
|
[
"BSD-3-Clause"
] | 558
|
2018-03-14T14:16:30.000Z
|
2022-03-29T12:48:14.000Z
|
oasislmf/cli/admin.py
|
fl-ndaq/OasisLMF
|
921718bfad2eb12844960df7f7330284d4e0bedc
|
[
"BSD-3-Clause"
] | 41
|
2018-04-09T11:13:12.000Z
|
2021-10-05T14:43:11.000Z
|
__all__ = [
'AdminCmd',
'CreateComplexModelCmd',
'CreateSimpleModelCmd',
'EnableBashCompleteCmd',
]
from argparse import RawDescriptionHelpFormatter
from .command import OasisBaseCommand, OasisComputationCommand
class EnableBashCompleteCmd(OasisComputationCommand):
"""
    Adds the required command to `.bashrc` on Linux or `.bash_profile` on
    macOS so that command autocompletion works for the oasislmf CLI
"""
formatter_class = RawDescriptionHelpFormatter
computation_name = 'HelperTabComplete'
class CreateSimpleModelCmd(OasisComputationCommand):
"""
Creates a local Git repository for a "simple model" (using the
``cookiecutter`` package) on a "simple model" repository template
on GitHub)
"""
formatter_class = RawDescriptionHelpFormatter
computation_name = 'CreateModelRepo'
class CreateComplexModelCmd(OasisComputationCommand):
"""
Creates a local Git repository for a "complex model" (using the
``cookiecutter`` package) on a "simple model" repository template
on GitHub)
"""
formatter_class = RawDescriptionHelpFormatter
computation_name = 'CreateComplexModelRepo'
class AdminCmd(OasisBaseCommand):
"""
Admin subcommands::
    * creates a local Git repository for a "simple model" (using the
      ``cookiecutter`` package) based on a "simple model" repository
      template on GitHub
    * creates a local Git repository for a "complex model" (using the
      ``cookiecutter`` package) based on a "complex model" repository
      template on GitHub
"""
sub_commands = {
'create-simple-model': CreateSimpleModelCmd,
'create-complex-model': CreateComplexModelCmd,
'enable-bash-complete': EnableBashCompleteCmd
}
| 30.5
| 76
| 0.70944
| 1,524
| 0.861504
| 0
| 0
| 0
| 0
| 0
| 0
| 1,045
| 0.590729
|
ac7ad18388f99ac970094896775d9050ae74ed49
| 1,802
|
py
|
Python
|
examples/spend_non_std_tx.py
|
kanzure/python-bitcoin-utils
|
a75b470676edb70bd71cb6a57e7e86f78ccc63ce
|
[
"MIT"
] | null | null | null |
examples/spend_non_std_tx.py
|
kanzure/python-bitcoin-utils
|
a75b470676edb70bd71cb6a57e7e86f78ccc63ce
|
[
"MIT"
] | null | null | null |
examples/spend_non_std_tx.py
|
kanzure/python-bitcoin-utils
|
a75b470676edb70bd71cb6a57e7e86f78ccc63ce
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2018 The python-bitcoin-utils developers
#
# This file is part of python-bitcoin-utils
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoin-utils, including this file, may be copied,
# modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
from bitcoinutils.setup import setup
from bitcoinutils.transactions import Transaction, TxInput, TxOutput
from bitcoinutils.keys import P2pkhAddress
from bitcoinutils.script import Script
#
# Note that a non-standard transaction can only be included in a block if a
# miner agrees with it. For this to work one needs to use a node setup up
# for regtest so that you can mine your own blocks; unless you mine your own
# testnet/mainnet blocks.
# Node's config file requires:
# regtest=1
# acceptnonstdtxn=1
#
def main():
# always remember to setup the network
setup('regtest')
# create transaction input from tx id of UTXO (contained 0.4 tBTC)
txin = TxInput('4d9a6baf45d4b57c875fe83d5e0834568eae4b5ef6e61d13720ef6685168e663', 0)
# provide unlocking script
# note that no signing is required to unlock: OP_ADD OP_5 OP_EQUAL
txin.script_sig = Script(['OP_2', 'OP_3'])
# create transaction output using P2PKH scriptPubKey (locking script)
addr = P2pkhAddress('mrCDrCybB6J1vRfbwM5hemdJz73FwDBC8r')
# locking script expects 2 numbers that when added equal 5 (silly example)
    txout = TxOutput(0.8, addr.to_script_pub_key())
# create transaction from inputs/outputs -- default locktime is used
tx = Transaction([txin], [txout])
# print raw transaction
print("\nRaw transaction:\n" + tx.serialize())
if __name__ == "__main__":
main()
| 34.653846
| 89
| 0.743618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,287
| 0.714206
|
ac7e3f648dd1532e3a366df0f18eb0ba06867a86
| 801
|
py
|
Python
|
src/nutman_field_names.py
|
hudsonburgess/nutcracker
|
2533d7659873d1ec75beb251f941e8a90bdebb89
|
[
"MIT"
] | null | null | null |
src/nutman_field_names.py
|
hudsonburgess/nutcracker
|
2533d7659873d1ec75beb251f941e8a90bdebb89
|
[
"MIT"
] | null | null | null |
src/nutman_field_names.py
|
hudsonburgess/nutcracker
|
2533d7659873d1ec75beb251f941e8a90bdebb89
|
[
"MIT"
] | null | null | null |
def get_field_name_from_line(line):
return line.split(':')[1].strip()
def remove_description(line):
if '-' in line:
return line.split('-')[0].strip()
else:
return line
def split_multiple_field_names(line):
if ',' in line:
field_names = line.split(',')
        return [name.strip().upper() for name in field_names]
else:
return [line.upper()]
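# Sketch on a made-up nutman line:
#   get_field_name_from_line('FieldName: foo, bar - legacy pair')
#       -> 'foo, bar - legacy pair'
#   remove_description('foo, bar - legacy pair')  -> 'foo, bar'
#   split_multiple_field_names('foo, bar')        -> ['FOO', 'BAR']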
f = open('../data/8-20-17.nm2')
out = open('../data/8-20-17-field-names.txt', 'w')
for line in f:
if line.startswith('FieldName'):
field_name = get_field_name_from_line(line)
field_name = remove_description(field_name)
field_name_list = split_multiple_field_names(field_name)
for name in field_name_list:
out.writelines([name, '\n'])
f.close()
out.close()
| 27.62069
| 64
| 0.627965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 87
| 0.108614
|
ac7e4bb5b2639e3ce5fc5958996d880251838bdd
| 317
|
py
|
Python
|
setup.py
|
algerbrex/plex
|
0d7096634d13ee4d695b580892894910eba6a4eb
|
[
"MIT"
] | 2
|
2018-02-15T16:26:54.000Z
|
2021-11-08T12:26:12.000Z
|
setup.py
|
algerbrex/plex
|
0d7096634d13ee4d695b580892894910eba6a4eb
|
[
"MIT"
] | null | null | null |
setup.py
|
algerbrex/plex
|
0d7096634d13ee4d695b580892894910eba6a4eb
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name='plex',
version='0.1.0',
author='Christian Dean',
author_email='c1dea2n@gmail.com',
packages=['plex'],
license='MIT',
platforms='any',
    description='Generic, lightweight regex-based lexer.',
long_description=open('README.md').read(),
)
| 21.133333
| 57
| 0.649842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.362776
|
ac8080a2e2bb7b553d1d31e52508d8e6de00b522
| 595
|
py
|
Python
|
kwikposts/admin.py
|
Vicynet/kwiktalk
|
198efdd5965cc0cd3ee8dcf5e469d9022330ec25
|
[
"bzip2-1.0.6"
] | null | null | null |
kwikposts/admin.py
|
Vicynet/kwiktalk
|
198efdd5965cc0cd3ee8dcf5e469d9022330ec25
|
[
"bzip2-1.0.6"
] | null | null | null |
kwikposts/admin.py
|
Vicynet/kwiktalk
|
198efdd5965cc0cd3ee8dcf5e469d9022330ec25
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.contrib import admin
from .models import KwikPost, Comment, Like
# Register your models here.
@admin.register(KwikPost)
class KwikPostAdmin(admin.ModelAdmin):
list_display = ['user', 'featured_image', 'slug', 'post_body', 'created_at']
list_filter = ['created_at']
    prepopulated_fields = {'slug': ('post_body',)}
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ['user', 'post', 'user_comment', 'created_at']
@admin.register(Like)
class LikeAdmin(admin.ModelAdmin):
list_display = ['user', 'post', 'values', 'created_at']
| 25.869565
| 80
| 0.710924
| 405
| 0.680672
| 0
| 0
| 478
| 0.803361
| 0
| 0
| 178
| 0.29916
|
ac82ebe59874721741e7d60b6d0389e4f4666104
| 3,113
|
py
|
Python
|
solvebio/resource/solveobject.py
|
PolinaBevad/solvebio-python
|
f6c736baa01b5a868a385cb0baf8f9dc2007cec3
|
[
"MIT"
] | 14
|
2015-01-07T15:31:00.000Z
|
2021-11-02T10:03:28.000Z
|
solvebio/resource/solveobject.py
|
PolinaBevad/solvebio-python
|
f6c736baa01b5a868a385cb0baf8f9dc2007cec3
|
[
"MIT"
] | 200
|
2015-01-26T17:12:21.000Z
|
2022-01-14T08:59:30.000Z
|
solvebio/resource/solveobject.py
|
PolinaBevad/solvebio-python
|
f6c736baa01b5a868a385cb0baf8f9dc2007cec3
|
[
"MIT"
] | 9
|
2015-02-18T22:49:28.000Z
|
2020-09-01T17:48:35.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
import sys
from ..client import client
from .util import json
def convert_to_solve_object(resp, **kwargs):
from . import types
_client = kwargs.pop('client', None)
if isinstance(resp, list):
return [convert_to_solve_object(i, client=_client) for i in resp]
elif isinstance(resp, dict) and not isinstance(resp, SolveObject):
resp = resp.copy()
klass_name = resp.get('class_name')
if isinstance(klass_name, six.string_types):
klass = types.get(klass_name, SolveObject)
else:
klass = SolveObject
return klass.construct_from(resp, client=_client)
else:
return resp
class SolveObject(dict):
"""Base class for all SolveBio API resource objects"""
ID_ATTR = 'id'
# Allows pre-setting a SolveClient
_client = None
def __init__(self, id=None, **params):
super(SolveObject, self).__init__()
self._client = params.pop('client', self._client or client)
# store manually updated values for partial updates
self._unsaved_values = set()
if id:
self[self.ID_ATTR] = id
def __setattr__(self, k, v):
if k[0] == '_' or k in self.__dict__:
return super(SolveObject, self).__setattr__(k, v)
else:
self[k] = v
def __getattr__(self, k):
if k[0] == '_':
raise AttributeError(k)
try:
return self[k]
except KeyError as err:
raise AttributeError(*err.args)
def __setitem__(self, k, v):
super(SolveObject, self).__setitem__(k, v)
self._unsaved_values.add(k)
@classmethod
def construct_from(cls, values, **kwargs):
"""Used to create a new object from an HTTP response"""
instance = cls(values.get(cls.ID_ATTR), **kwargs)
instance.refresh_from(values)
return instance
def refresh_from(self, values):
self.clear()
self._unsaved_values = set()
for k, v in six.iteritems(values):
super(SolveObject, self).__setitem__(
k, convert_to_solve_object(v, client=self._client))
def request(self, method, url, **kwargs):
response = self._client.request(method, url, **kwargs)
return convert_to_solve_object(response, client=self._client)
def __repr__(self):
if isinstance(self.get('class_name'), six.string_types):
ident_parts = [self.get('class_name')]
else:
ident_parts = [type(self).__name__]
if isinstance(self.get(self.ID_ATTR), int):
ident_parts.append(
'%s=%d' % (self.ID_ATTR, self.get(self.ID_ATTR),))
_repr = '<%s at %s> JSON: %s' % (
' '.join(ident_parts), hex(id(self)), str(self))
if sys.version_info[0] < 3:
return _repr.encode('utf-8')
return _repr
def __str__(self):
return json.dumps(self, sort_keys=True, indent=2)
@property
def solvebio_id(self):
return self.id
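# Conversion sketch: an unknown class_name falls back to the SolveObject base
# class (the payload below is made up for illustration):
#   obj = convert_to_solve_object({'id': 1, 'class_name': 'Unregistered'})
#   isinstance(obj, SolveObject)  # True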
| 28.3
| 73
| 0.603919
| 2,369
| 0.761002
| 0
| 0
| 302
| 0.097013
| 0
| 0
| 317
| 0.101831
|