Dataset schema (column name, dtype, observed range; ⌀ marks nullable columns):

| Column | Dtype | Range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 368k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
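The rows that follow use this per-file layout, which resembles code corpora hosted on the Hugging Face Hub. As a minimal sketch of how a table with these columns can be consumed, the snippet below streams such a dataset and keeps small, permissively licensed files. The dataset identifier is a placeholder, and treating `size` as the file size in bytes is an inference from the schema, not something stated here.

```python
# Minimal sketch, assuming a Hub dataset with the columns listed above.
# "example-org/python-docstring-filtered" is a placeholder identifier.
from datasets import load_dataset

ds = load_dataset("example-org/python-docstring-filtered",
                  split="train", streaming=True)

def keep(row):
    # 'size' is assumed to be the original file size in bytes;
    # 'max_stars_repo_licenses' is a list of SPDX identifiers.
    return row["size"] < 50_000 and "MIT" in row["max_stars_repo_licenses"]

for row in filter(keep, ds):
    # 'content' holds the filtered source, 'original_content' the unfiltered file.
    print(row["max_stars_repo_path"], row["avg_line_length"], row["alphanum_fraction"])
    break
```

Streaming avoids downloading the full corpus; on a fully downloaded split the same predicate can be applied with `ds.filter(keep)`.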
hexsha: 6848d05c72fa374d993685eb4210477d11796461 | size: 1,006 | ext: py | lang: Python
max_stars_repo: path=profiler/undecorate_for_profiling.py | name=co2meal/-bnpy-dev | head_hexsha=74f69afde6c9dac8de4c074842df53ae87a15ac1 | licenses=["BSD-3-Clause"] | count=null | event_min=null | event_max=null
max_issues_repo: path=profiler/undecorate_for_profiling.py | name=co2meal/-bnpy-dev | head_hexsha=74f69afde6c9dac8de4c074842df53ae87a15ac1 | licenses=["BSD-3-Clause"] | count=null | event_min=null | event_max=null
max_forks_repo: path=profiler/undecorate_for_profiling.py | name=co2meal/-bnpy-dev | head_hexsha=74f69afde6c9dac8de4c074842df53ae87a15ac1 | licenses=["BSD-3-Clause"] | count=null | event_min=null | event_max=null
content:
'''
undecorate_for_profiling.py
Explore all the python functions in the user-specified directory,
and remove decoration @profile from appropriate functions
'''
import os
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('rootdir')
args = parser.parse_args()
main(args.rootdir)
avg_line_length: 29.588235 | max_line_length: 69 | alphanum_fraction: 0.624254
original_content:
'''
undecorate_for_profiling.py
Explore all the python functions in the user-specified directory,
and remove decoration @profile from appropriate functions
'''
import os
def main(bnpyrootdir):
list_of_files = {}
for (dirpath, contentdirs, contentfiles) in os.walk(bnpyrootdir):
for fname in contentfiles:
if fname[-3:] == '.py':
fullpathkey = os.sep.join([dirpath, fname])
list_of_files[fullpathkey] = fname
for origPath in list_of_files.keys():
profPath = origPath + 'CLEAN'
profFileObj = open(profPath, 'w')
with open(origPath, 'r') as f:
for line in f.readlines():
if line.count('@profile') == 0:
profFileObj.write(line)
profFileObj.close()
os.rename(profPath, origPath)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('rootdir')
args = parser.parse_args()
main(args.rootdir)
filtered:remove_function_no_docstring: 635 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 23
hexsha: e0006a8f256b6783c06d6c65cdca6a156de2c8d9 | size: 1,734 | ext: py | lang: Python
max_stars_repo: path=Proxy/MachineLearn/classification.py | name=Crispae/BBPRED | head_hexsha=226c6347d986da4b0f573f1b7a978b9418d0eeb4 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues_repo: path=Proxy/MachineLearn/classification.py | name=Crispae/BBPRED | head_hexsha=226c6347d986da4b0f573f1b7a978b9418d0eeb4 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks_repo: path=Proxy/MachineLearn/classification.py | name=Crispae/BBPRED | head_hexsha=226c6347d986da4b0f573f1b7a978b9418d0eeb4 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
## classification.py
__all__ = ["Lazy",]
from .lazy import LazyClassifier
from .utils import *
from sklearn.model_selection import train_test_split
import pandas
avg_line_length: 23.12 | max_line_length: 140 | alphanum_fraction: 0.672434
original_content:
## classification.py
__all__ = ["Lazy",]
from .lazy import LazyClassifier
from .utils import *
from sklearn.model_selection import train_test_split
import pandas
class Lazy:
def _lazy_split(self,descriptors_data,test_size,random_state):
if Data_frame_validator(descriptors_data):
data = descriptors_data.drop("Target",axis=1)
Target = descriptors_data["Target"]
X_train, X_test, y_train, y_test = train_test_split(data, Target,test_size=test_size,random_state =random_state,stratify=Target)
return [X_train,X_test,y_train,y_test]
def _lazy_classifier(self,tests,verbose,ignore_warnings):
clf = LazyClassifier(verbose=verbose,ignore_warnings=ignore_warnings, custom_metric=None)
models,predictions = clf.fit(*tests)
return (models,predictions)
def lazy_classify(self,descriptors,test_size=0.3,random_state=42,verbose=False,ignore_warnings=True):
return self._lazy_classifier(self._lazy_split(descriptors,test_size,random_state),verbose=verbose,ignore_warnings=ignore_warnings)
class custom:
pass
class NaiveByes(custom):
# def Naive_fit(self):
# def pick_best(X_train, X_test, y_train, y_test,):
# best = (None, 0)
# for var_smoothing in range(-7, 1):
# clf = GaussianNB(var_smoothing=pow(10, var_smoothing))
# clf.fit(X_train, y_train)
# y_pred = clf.predict(X_test)
# accuracy = (y_pred == y_test).sum()
# if accuracy > best[1]:
# best = (clf, accuracy)
# print('best accuracy', best[1] / len(y_test))
# return best[0]
# model = pick_best(*cl_data1,)
pass
class Svm(custom):
pass
filtered:remove_function_no_docstring: 824 | filtered:remove_class_no_docstring: 523 | filtered:remove_delete_markers: 181
hexsha: 9d438aadf58244488ff98e5078d8104573590578 | size: 3,099 | ext: py | lang: Python
max_stars_repo: path=pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py | name=jbronikowski/genielibs | head_hexsha=200a34e5fe4838a27b5a80d5973651b2e34ccafb | licenses=["Apache-2.0"] | count=94 | event_min=2018-04-30T20:29:15.000Z | event_max=2022-03-29T13:40:31.000Z
max_issues_repo: path=pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py | name=jbronikowski/genielibs | head_hexsha=200a34e5fe4838a27b5a80d5973651b2e34ccafb | licenses=["Apache-2.0"] | count=67 | event_min=2018-12-06T21:08:09.000Z | event_max=2022-03-29T18:00:46.000Z
max_forks_repo: path=pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py | name=jbronikowski/genielibs | head_hexsha=200a34e5fe4838a27b5a80d5973651b2e34ccafb | licenses=["Apache-2.0"] | count=49 | event_min=2018-06-29T18:59:03.000Z | event_max=2022-03-10T02:07:59.000Z
content:
# Python
import logging
from os import path
# Abstract
from genie.abstract import Lookup
# Parser
from genie.libs import parser
from genie.metaparser.util.exceptions import SchemaEmptyParserError
# unicon
from unicon.eal.dialogs import Statement, Dialog
log = logging.getLogger(__name__)
def save_device_information(device, **kwargs):
"""Install the commit packages. This is for IOSXR devices.
Args:
Mandatory:
device (`obj`) : Device object.
Returns:
True: Result is PASSED
False: Result is PASSX
Raises:
None
Example:
>>> save_device_information(device=Device())
"""
# Checking the config-register has 0x2
# if not configure 0x2
# RP/0/RSP1/CPU0:PE1#admin config-register 0x2
if device.is_ha:
conn = device.active
else:
conn = device
# Install commit ( when thre are package to bring up features)
# from admin prompt
conn.admin_execute('install commit')
def get_default_dir(device):
""" Get the default directory of this device
Args:
Mandatory:
device (`obj`) : Device object.
Returns:
default_dir (`str`): Default directory of the system
Raises:
Exception
Example:
>>> get_default_dir(device=device)
"""
try:
lookup = Lookup.from_device(device)
parsed_dict = lookup.parser.show_platform.Dir(device=device).parse()
if ":" in parsed_dict['dir']['dir_name']:
default_dir = parsed_dict['dir']['dir_name']
else:
default_dir = ''
except SchemaEmptyParserError as e:
raise Exception("No output when executing 'dir' command") from e
except Exception as e:
raise Exception("Unable to execute 'dir' command") from e
# Return default_dir to caller
log.info("Default directory on '{d}' is '{dir}'".format(d=device.name,
dir=default_dir))
return default_dir
def configure_replace(device, file_location, timeout=60, file_name=None):
"""Configure replace on device
Args:
device (`obj`): Device object
file_location (`str`): File location
timeout (`int`): Timeout value in seconds
file_name (`str`): File name
Returns:
None
Raises:
pyATS Results
"""
if file_name:
file_location = '{}{}'.format(
file_location,
file_name)
try:
# check if file exist
device.execute.error_pattern.append('.*Path does not exist.*')
device.execute("dir {}".format(file_location))
except Exception:
raise Exception("File {} does not exist".format(file_location))
dialog = Dialog([
Statement(pattern=r'\[no\]',
action='sendline(y)',
loop_continue=True,
continue_timer=False)])
device.configure("load {}\ncommit replace".format(file_location),
timeout=timeout, reply=dialog)
avg_line_length: 26.042017 | max_line_length: 77 | alphanum_fraction: 0.601162
original_content:
# Python
import logging
from os import path
# Abstract
from genie.abstract import Lookup
# Parser
from genie.libs import parser
from genie.metaparser.util.exceptions import SchemaEmptyParserError
# unicon
from unicon.eal.dialogs import Statement, Dialog
log = logging.getLogger(__name__)
def save_device_information(device, **kwargs):
"""Install the commit packages. This is for IOSXR devices.
Args:
Mandatory:
device (`obj`) : Device object.
Returns:
True: Result is PASSED
False: Result is PASSX
Raises:
None
Example:
>>> save_device_information(device=Device())
"""
# Checking the config-register has 0x2
# if not configure 0x2
# RP/0/RSP1/CPU0:PE1#admin config-register 0x2
if device.is_ha:
conn = device.active
else:
conn = device
# Install commit ( when thre are package to bring up features)
# from admin prompt
conn.admin_execute('install commit')
def get_default_dir(device):
""" Get the default directory of this device
Args:
Mandatory:
device (`obj`) : Device object.
Returns:
default_dir (`str`): Default directory of the system
Raises:
Exception
Example:
>>> get_default_dir(device=device)
"""
try:
lookup = Lookup.from_device(device)
parsed_dict = lookup.parser.show_platform.Dir(device=device).parse()
if ":" in parsed_dict['dir']['dir_name']:
default_dir = parsed_dict['dir']['dir_name']
else:
default_dir = ''
except SchemaEmptyParserError as e:
raise Exception("No output when executing 'dir' command") from e
except Exception as e:
raise Exception("Unable to execute 'dir' command") from e
# Return default_dir to caller
log.info("Default directory on '{d}' is '{dir}'".format(d=device.name,
dir=default_dir))
return default_dir
def configure_replace(device, file_location, timeout=60, file_name=None):
"""Configure replace on device
Args:
device (`obj`): Device object
file_location (`str`): File location
timeout (`int`): Timeout value in seconds
file_name (`str`): File name
Returns:
None
Raises:
pyATS Results
"""
if file_name:
file_location = '{}{}'.format(
file_location,
file_name)
try:
# check if file exist
device.execute.error_pattern.append('.*Path does not exist.*')
device.execute("dir {}".format(file_location))
except Exception:
raise Exception("File {} does not exist".format(file_location))
dialog = Dialog([
Statement(pattern=r'\[no\]',
action='sendline(y)',
loop_continue=True,
continue_timer=False)])
device.configure("load {}\ncommit replace".format(file_location),
timeout=timeout, reply=dialog)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: fb43dcb45f5d7511c1b7ad5465521087e7f16242 | size: 3,879 | ext: py | lang: Python
max_stars_repo: path=open_fmri/apps/dataset/migrations/0001_initial.py | name=rwblair/open_fmri | head_hexsha=5e3052878b6d514553a074a6d9d44fe740daa034 | licenses=["BSD-3-Clause"] | count=5 | event_min=2016-01-18T20:54:18.000Z | event_max=2021-02-10T10:43:59.000Z
max_issues_repo: path=open_fmri/apps/dataset/migrations/0001_initial.py | name=rwblair/open_fmri | head_hexsha=5e3052878b6d514553a074a6d9d44fe740daa034 | licenses=["BSD-3-Clause"] | count=25 | event_min=2015-12-02T17:37:45.000Z | event_max=2018-02-05T22:07:51.000Z
max_forks_repo: path=open_fmri/apps/dataset/migrations/0001_initial.py | name=rwblair/open_fmri | head_hexsha=5e3052878b6d514553a074a6d9d44fe740daa034 | licenses=["BSD-3-Clause"] | count=6 | event_min=2015-11-19T23:26:47.000Z | event_max=2021-02-10T10:44:01.000Z
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
avg_line_length: 41.265957 | max_line_length: 224 | alphanum_fraction: 0.565094
original_content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Dataset',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('workflow_stage', models.CharField(default='SUBMITTED', choices=[('SUBMITTED', 'Submitted'), ('IN_PROCESS', 'In Process'), ('STAGED', 'Staged'), ('SHARED', 'Shared'), ('REVIEW', 'Review')], max_length=200)),
('project_name', models.CharField(max_length=255)),
('summary', models.TextField()),
('sample_size', models.IntegerField()),
('scanner_type', models.TextField()),
('accession_number', models.CharField(max_length=200)),
('acknowledgements', models.TextField()),
('license_title', models.CharField(max_length=255)),
('license_url', models.TextField(validators=[django.core.validators.URLValidator()])),
('aws_link_title', models.CharField(max_length=255)),
('aws_link_url', models.TextField(validators=[django.core.validators.URLValidator()])),
],
),
migrations.CreateModel(
name='Investigator',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('investigator', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='PublicationDocument',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('document', models.FileField(upload_to='')),
],
),
migrations.CreateModel(
name='PublicationFullText',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('full_text', models.TextField()),
],
),
migrations.CreateModel(
name='PublicationPubMedLink',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('title', models.CharField(max_length=255)),
('url', models.TextField(validators=[django.core.validators.URLValidator()])),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('cogat_id', models.TextField()),
('name', models.TextField()),
],
),
migrations.AddField(
model_name='dataset',
name='investigator',
field=models.ManyToManyField(to='dataset.Investigator'),
),
migrations.AddField(
model_name='dataset',
name='publication_document',
field=models.ManyToManyField(to='dataset.PublicationDocument'),
),
migrations.AddField(
model_name='dataset',
name='publication_full_text',
field=models.ManyToManyField(to='dataset.PublicationFullText'),
),
migrations.AddField(
model_name='dataset',
name='publication_pubmed_link',
field=models.ManyToManyField(to='dataset.PublicationPubMedLink'),
),
migrations.AddField(
model_name='dataset',
name='task',
field=models.ManyToManyField(to='dataset.Task'),
),
]
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 3,719 | filtered:remove_delete_markers: 23
hexsha: 9fc19f32a9cf06da8897f17f045d1bb765cb8c81 | size: 2,849 | ext: py | lang: Python
max_stars_repo: path=packages/core/minos-microservice-saga/tests/test_saga/test_executions/test_repositories/test_abc.py | name=minos-framework/minos-python | head_hexsha=9a6ad6783361f3d8a497a088808b55ea7a938c6c | licenses=["MIT"] | count=247 | event_min=2022-01-24T14:55:30.000Z | event_max=2022-03-25T12:06:17.000Z
max_issues_repo: path=packages/core/minos-microservice-saga/tests/test_saga/test_executions/test_repositories/test_abc.py | name=minos-framework/minos-python | head_hexsha=9a6ad6783361f3d8a497a088808b55ea7a938c6c | licenses=["MIT"] | count=168 | event_min=2022-01-24T14:54:31.000Z | event_max=2022-03-31T09:31:09.000Z
max_forks_repo: path=packages/core/minos-microservice-saga/tests/test_saga/test_executions/test_repositories/test_abc.py | name=minos-framework/minos-python | head_hexsha=9a6ad6783361f3d8a497a088808b55ea7a938c6c | licenses=["MIT"] | count=21 | event_min=2022-02-06T17:25:58.000Z | event_max=2022-03-27T04:50:29.000Z
content:
import unittest
from unittest.mock import (
AsyncMock,
call,
)
from uuid import (
UUID,
)
from minos.saga import (
SagaExecution,
SagaExecutionRepository,
)
from tests.utils import (
ADD_ORDER,
SagaTestCase,
)
if __name__ == "__main__":
unittest.main()
avg_line_length: 29.371134 | max_line_length: 69 | alphanum_fraction: 0.692524
original_content:
import unittest
from unittest.mock import (
AsyncMock,
call,
)
from uuid import (
UUID,
)
from minos.saga import (
SagaExecution,
SagaExecutionRepository,
)
from tests.utils import (
ADD_ORDER,
SagaTestCase,
)
class _SagaExecutionRepository(SagaExecutionRepository):
async def _store(self, execution: SagaExecution) -> None:
"""For testing purposes."""
async def _load(self, uuid: UUID) -> SagaExecution:
"""For testing purposes."""
async def _delete(self, key: UUID) -> None:
"""For testing purposes."""
class TestSagaExecutionRepository(SagaTestCase):
async def test_store(self):
mock = AsyncMock()
repository = _SagaExecutionRepository()
repository._store = mock
execution = SagaExecution.from_definition(ADD_ORDER)
await repository.store(execution)
self.assertEqual([call(execution)], mock.call_args_list)
async def test_load(self):
execution = SagaExecution.from_definition(ADD_ORDER)
repository = _SagaExecutionRepository()
mock = AsyncMock(return_value=execution)
repository._load = mock
observed = await repository.load(execution.uuid)
self.assertEqual(execution, observed)
self.assertEqual([call(execution.uuid)], mock.call_args_list)
async def test_load_from_str(self):
execution = SagaExecution.from_definition(ADD_ORDER)
repository = _SagaExecutionRepository()
mock = AsyncMock(return_value=execution)
repository._load = mock
observed = await repository.load(str(execution.uuid))
self.assertEqual(execution, observed)
self.assertEqual([call(execution.uuid)], mock.call_args_list)
async def test_delete(self):
execution = SagaExecution.from_definition(ADD_ORDER)
repository = _SagaExecutionRepository()
mock = AsyncMock(return_value=execution)
repository._delete = mock
await repository.delete(execution)
self.assertEqual([call(execution.uuid)], mock.call_args_list)
async def test_delete_from_uuid(self):
execution = SagaExecution.from_definition(ADD_ORDER)
repository = _SagaExecutionRepository()
mock = AsyncMock(return_value=execution)
repository._delete = mock
await repository.delete(execution.uuid)
self.assertEqual([call(execution.uuid)], mock.call_args_list)
async def test_delete_from_str(self):
execution = SagaExecution.from_definition(ADD_ORDER)
repository = _SagaExecutionRepository()
mock = AsyncMock(return_value=execution)
repository._delete = mock
await repository.delete(str(execution.uuid))
self.assertEqual([call(execution.uuid)], mock.call_args_list)
if __name__ == "__main__":
unittest.main()
filtered:remove_function_no_docstring: 2,014 | filtered:remove_class_no_docstring: 338 | filtered:remove_delete_markers: 207
hexsha: fc3a1fcfc078a5900b3d3012da99b9ea49b5adb1 | size: 768 | ext: py | lang: Python
max_stars_repo: path=data_assimilation/forwardModel.py | name=MagneticEarth/book.magneticearth.org | head_hexsha=c8c1e3403b682a508a61053ce330b0e891992ef3 | licenses=["CC-BY-4.0"] | count=null | event_min=null | event_max=null
max_issues_repo: path=data_assimilation/forwardModel.py | name=MagneticEarth/book.magneticearth.org | head_hexsha=c8c1e3403b682a508a61053ce330b0e891992ef3 | licenses=["CC-BY-4.0"] | count=null | event_min=null | event_max=null
max_forks_repo: path=data_assimilation/forwardModel.py | name=MagneticEarth/book.magneticearth.org | head_hexsha=c8c1e3403b682a508a61053ce330b0e891992ef3 | licenses=["CC-BY-4.0"] | count=null | event_min=null | event_max=null
content:
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
avg_line_length: 26.482759 | max_line_length: 83 | alphanum_fraction: 0.605469
original_content:
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
def Lorenz63(t, y, sig, rho, beta):
# Lorenz '63 model
out = np.zeros_like(y)
out[0] = sig * ( y[1] - y[0] )
out[1] = y[0] * ( rho - y[2] ) - y[1]
out[2] = y[0] * y[1] - beta * y[2]
return out
def forwardModel_r( xt0, time, rayleigh, prandtl, b):
# perform integration of Lorentz63 model
# default integrator for solve_ivp is RK4
rho = rayleigh
beta = b
sig = prandtl
myParams = np.array( [sig, rho, beta], dtype=float )
tstart = time[0]
tend = time[-1]
y0 = np.array( xt0, dtype=float )
sol = solve_ivp( Lorenz63, [tstart,tend], y0, args=myParams, dense_output=True)
xt = sol.sol(time)
return xt
filtered:remove_function_no_docstring: 632 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 47
hexsha: 25fcb95f8a4a06a86c76be9be03f3139ee47ed77 | size: 8,133 | ext: py | lang: Python
max_stars_repo: path=samples/example_by.py | name=jokva/windrose | head_hexsha=99a2f636a6558a29e7ded63d0d233f25dc7986b6 | licenses=["CECILL-B", "BSD-3-Clause"] | count=null | event_min=null | event_max=null
max_issues_repo: path=samples/example_by.py | name=jokva/windrose | head_hexsha=99a2f636a6558a29e7ded63d0d233f25dc7986b6 | licenses=["CECILL-B", "BSD-3-Clause"] | count=null | event_min=null | event_max=null
max_forks_repo: path=samples/example_by.py | name=jokva/windrose | head_hexsha=99a2f636a6558a29e7ded63d0d233f25dc7986b6 | licenses=["CECILL-B", "BSD-3-Clause"] | count=1 | event_min=2020-10-04T18:48:35.000Z | event_max=2020-10-04T18:48:35.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
sample using "by" keyword
"""
import click
# import matplotlib
# matplotlib.use("Agg")
# import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import pandas as pd
from windrose import (WindroseAxes, FIGSIZE_DEFAULT, DPI_DEFAULT)
class Layout(object):
"""
Inspired from PdfPages
https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/backends/backend_pdf.py - PdfPages
http://matplotlib.org/api/backend_pdf_api.html
http://matplotlib.org/examples/pylab_examples/multipage_pdf.html
Inspired also from FFMpegWriter
http://matplotlib.org/examples/animation/moviewriter.html
https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/animation.py
MovieWriter
"""
@property
S_FIGSIZE_DEFAULT = ",".join(map(str, FIGSIZE_DEFAULT))
@click.command()
@click.option("--filename", default="samples/sample_wind_poitiers.csv", help="Input filename")
@click.option("--filename_out", default="windrose_animation.mp4", help="Output filename")
@click.option("--dpi", default=DPI_DEFAULT, help="Dot per inch for plot generation")
@click.option("--figsize", default=S_FIGSIZE_DEFAULT, help="Figure size x,y - default=%s" % S_FIGSIZE_DEFAULT)
@click.option("--fps", default=7, help="Number of frame per seconds for video generation")
@click.option("--bins_min", default=0.01, help="Bins minimum value")
@click.option("--bins_max", default=20, help="Bins maximum value")
@click.option("--bins_step", default=2, help="Bins step value")
if __name__ == "__main__":
main()
avg_line_length: 31.523256 | max_line_length: 110 | alphanum_fraction: 0.624985
original_content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
sample using "by" keyword
"""
import click
# import matplotlib
# matplotlib.use("Agg")
# import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import pandas as pd
from windrose import (WindroseAxes, FIGSIZE_DEFAULT, DPI_DEFAULT)
class AxCollection(object):
def __init__(self, fig=None, *args, **kwargs):
if fig is None:
self.fig = plt.figure(figsize=FIGSIZE_DEFAULT, dpi=DPI_DEFAULT, facecolor='w', edgecolor='w')
else:
self.fig = fig
def animate(self):
pass
def show(self):
pass
class Layout(object):
"""
Inspired from PdfPages
https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/backends/backend_pdf.py - PdfPages
http://matplotlib.org/api/backend_pdf_api.html
http://matplotlib.org/examples/pylab_examples/multipage_pdf.html
Inspired also from FFMpegWriter
http://matplotlib.org/examples/animation/moviewriter.html
https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/animation.py
MovieWriter
"""
def __init__(self, ncols=4, nrows=6, nsheets=1):
self.ncols = ncols
self.nrows = nrows
self.nsheets = nsheets
self._resize()
self._i = 0
@property
def fig(self):
return self._array_fig
def _resize(self):
# self._array_ax = np.empty((self.nsheets, self.nrows, self.ncols), dtype=object)
self._array_ax = np.empty(self.nsheets, dtype=object)
# self._array_ax.fill(None)
self._array_fig = np.empty(self.nsheets, dtype=object)
# self._array_fig.fill(None)
for i in range(self.nsheets):
fig, axs = plt.subplots(nrows=self.nrows, ncols=self.ncols)
# print(fig, axs)
self._array_fig[i] = fig
self._array_ax[i] = axs
def __repr__(self):
s = """<Layout
cols: %s
rows: %s
sheets: %s
>""" % (self.ncols, self.nrows, self.nsheets)
return s
def __enter__(self, *args, **kwargs):
print("enter %s %s" % (args, kwargs))
return self
def __exit__(self, type, value, traceback):
# print("exit %s %s" % (args, kwargs))
print("exit %s %s %s" % (type, value, traceback))
# print("exit")
self.close()
def close(self):
print("close")
def saveax(self):
print("saveax")
self._i += 1
class NormalLayout(Layout):
def __init__(self):
super(NormalLayout, self).__init__()
S_FIGSIZE_DEFAULT = ",".join(map(str, FIGSIZE_DEFAULT))
def by_func_yearly(dt):
return dt.year
def by_func_monthly(dt):
return dt.year, dt.month
def by_func_daily(dt):
return dt.year, dt.month, dt.day
@click.command()
@click.option("--filename", default="samples/sample_wind_poitiers.csv", help="Input filename")
@click.option("--filename_out", default="windrose_animation.mp4", help="Output filename")
@click.option("--dpi", default=DPI_DEFAULT, help="Dot per inch for plot generation")
@click.option("--figsize", default=S_FIGSIZE_DEFAULT, help="Figure size x,y - default=%s" % S_FIGSIZE_DEFAULT)
@click.option("--fps", default=7, help="Number of frame per seconds for video generation")
@click.option("--bins_min", default=0.01, help="Bins minimum value")
@click.option("--bins_max", default=20, help="Bins maximum value")
@click.option("--bins_step", default=2, help="Bins step value")
def main(filename, dpi, figsize, fps, bins_min, bins_max, bins_step, filename_out):
# convert figsize (string like "8,9" to a list of float [8.0, 9.0]
figsize = figsize.split(",")
figsize = map(float, figsize)
# Read CSV file to a Pandas DataFrame
df_all = pd.read_csv(filename)
df_all['Timestamp'] = pd.to_datetime(df_all['Timestamp'])
df_all = df_all.set_index('Timestamp')
df_all.index = df_all.index.tz_localize('UTC').tz_convert('UTC')
# df_all = df_all.iloc[-10000:,:]
df_all = df_all.ix['2011-07-01':'2011-12-31']
# Get Numpy arrays from DataFrame
direction_all = df_all['direction'].values
var_all = df_all['speed'].values
index_all = df_all.index.to_datetime() # Fixed: .values -> to_datetime()
by_all = df_all.index.map(by_func_monthly)
by_unique = np.unique(by_all)
print(by_unique)
(ncols, nrows, nsheets) = (4, 3, 2) # noqa
# layout = Layout(4, 3, 2) # ncols, nrows, nsheets
# layout = Layout(ncols, nrows, nsheets)
# layout = Layout(4, 6, 1)
# layout.save(ax)
# layout.to_pdf("filename.pdf")
# layout.to_video("filename.mp4")
# fig, ax = plt.subplots(nrows=2, ncols=3)
# with Layout(4, 6, 1) as layout:
# print(layout)
# #layout.save(ax)
def tuple_position(i, ncols, nrows):
i_sheet, sheet_pos = divmod(i, ncols * nrows)
i_row, i_col = divmod(sheet_pos, ncols)
return i_sheet, i_row, i_col
def position_from_tuple(t, ncols, nrows):
i_sheet, i_row, i_col = t
return i_sheet * ncols * nrows + i_row * ncols + i_col
assert tuple_position(0, ncols, nrows) == (0, 0, 0)
assert tuple_position(1, ncols, nrows) == (0, 0, 1)
assert tuple_position(2, ncols, nrows) == (0, 0, 2)
assert tuple_position(3, ncols, nrows) == (0, 0, 3)
assert tuple_position(4, ncols, nrows) == (0, 1, 0)
assert tuple_position(5, ncols, nrows) == (0, 1, 1)
assert tuple_position(6, ncols, nrows) == (0, 1, 2)
assert tuple_position(7, ncols, nrows) == (0, 1, 3)
assert tuple_position(8, ncols, nrows) == (0, 2, 0)
assert tuple_position(9, ncols, nrows) == (0, 2, 1)
assert tuple_position(10, ncols, nrows) == (0, 2, 2)
assert tuple_position(11, ncols, nrows) == (0, 2, 3)
assert tuple_position(12, ncols, nrows) == (1, 0, 0)
assert tuple_position(13, ncols, nrows) == (1, 0, 1)
assert tuple_position(14, ncols, nrows) == (1, 0, 2)
assert tuple_position(15, ncols, nrows) == (1, 0, 3)
assert tuple_position(16, ncols, nrows) == (1, 1, 0)
assert tuple_position(17, ncols, nrows) == (1, 1, 1)
assert position_from_tuple((0, 0, 0), ncols, nrows) == 0
assert position_from_tuple((1, 0, 0), ncols, nrows) == ncols * nrows
assert position_from_tuple((2, 0, 0), ncols, nrows) == 2 * ncols * nrows
assert position_from_tuple((1, 0, 1), ncols, nrows) == ncols * nrows + 1
assert position_from_tuple((1, 1, 1), ncols, nrows) == ncols * nrows + ncols + 1
assert position_from_tuple((1, 2, 3), ncols, nrows) == ncols * nrows + 2 * ncols + 3
for i in range(20):
t = tuple_position(i, ncols, nrows)
assert position_from_tuple(t, ncols, nrows) == i
# layout = NormalLayout()
# with layout.append() as ax:
# pass
# layout.show()
# Define bins
bins = np.arange(bins_min, bins_max, bins_step)
for by_value in by_unique:
# by_value = (2011, 5)
# mask = (by == by_value).all(axis=1)
# ToFix: see http://stackoverflow.com/questions/32005403/boolean-indexing-with-numpy-array-and-tuples
mask = (pd.Series(by_all) == by_value).values
# print(mask)
index = index_all[mask]
var = var_all[mask]
direction = direction_all[mask]
# Create figure
# fig = plt.figure(figsize=figsize, dpi=dpi, facecolor='w', edgecolor='w')
# Same as above, but with contours over each filled region...
ax = WindroseAxes.from_ax()
ax.contourf(direction, var, bins=bins, cmap=cm.hot)
ax.contour(direction, var, bins=bins, colors='black')
fontname = "Courier"
# title = by_value
dt1 = index[0]
dt2 = index[-1]
# dt1 = df.index[mask][0]
# dt2 = df.index[mask][-1]
# td = dt2 - dt1
title = "From %s\n to %s" % (dt1, dt2)
ax.set_title(title, fontname=fontname)
ax.set_legend()
plt.show()
# time.sleep(10)
# print("Save file to '%s'" % filename_out)
if __name__ == "__main__":
main()
filtered:remove_function_no_docstring: 5,964 | filtered:remove_class_no_docstring: 12 | filtered:remove_delete_markers: 458
hexsha: 8aa086dfd06626e4b4e36485c4d38dd75160c536 | size: 8,316 | ext: py | lang: Python
max_stars_repo: path=gt/reid.py | name=solapark/frcnn_keras_original | head_hexsha=3561d1de18f41868efc9cec927761613d75a5dc3 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_issues_repo: path=gt/reid.py | name=solapark/frcnn_keras_original | head_hexsha=3561d1de18f41868efc9cec927761613d75a5dc3 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_forks_repo: path=gt/reid.py | name=solapark/frcnn_keras_original | head_hexsha=3561d1de18f41868efc9cec927761613d75a5dc3 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
content:
import numpy as np
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import utility
from epipolar import EPIPOLAR
import cv2
if __name__ == '__main__':
from option import args
import pickle
with open('/home/sap/frcnn_keras/mv_train_two_reid.pickle', 'rb') as f:
reid_pickle = pickle.load(f)
pred_box, pred_box_emb, pred_box_prob, reid_box_gt = reid_pickle
reid = REID(args)
reid_box_pred, is_valid = reid.get_reid_box(pred_box, pred_box_emb, pred_box_prob)
print('reid_box_pred.shape', reid_box_pred.shape, 'is_valid', is_valid.shape)
pred_box_batch, pred_box_emb_batch, pred_box_prob_batch = list(map(lambda a : np.expand_dims(a, 0), [pred_box, pred_box_emb, pred_box_prob]))
reid_box_pred_batch, is_valid_batch = reid.get_batch(pred_box_batch, pred_box_emb_batch, pred_box_prob_batch)
print('reid_box_pred_batch.shape', reid_box_pred_batch.shape, 'is_valid_batch', is_valid_batch.shape)
print(np.array_equal(reid_box_pred_batch[0], reid_box_pred), np.array_equal(is_valid_batch[0], is_valid))
'''
is_valid = np.ones((self.num_nms, self.num_valid_cam))
with open('/home/sap/frcnn_keras/pred_box_is_valid.pickle', 'wb') as f:
pickle.dump(is_valid, f)
for i in range(10) :
print('gt', reid_box_gt[i])
print('pred', reid_box_pred[i])
print('valid', is_valid[i])
if(np.array_equal(reid_box_gt, reid_box_pred)) :
print('good')
else :
print('bad')
'''
avg_line_length: 46.983051 | max_line_length: 162 | alphanum_fraction: 0.641775
original_content:
import numpy as np
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import utility
from epipolar import EPIPOLAR
import cv2
class REID:
def __init__(self, args):
self.num_valid_cam = args.num_valid_cam
self.num_nms = args.num_nms
self.batch_size = args.batch_size
self.rpn_stride = args.rpn_stride
self.reid_min_emb_dist = args.reid_min_emb_dist
self.cam_idx = np.repeat(np.arange(self.num_valid_cam), self.num_nms).reshape(self.num_valid_cam, self.num_nms, 1)
self.num_nms_arange = np.arange(self.num_nms)
self.num_nms_arange_repeat = np.repeat(self.num_nms_arange, self.num_valid_cam).reshape(self.num_nms, self.num_valid_cam, 1).transpose(1, 0, 2)
self.box_idx_stack = np.concatenate([self.cam_idx, self.num_nms_arange_repeat], 2).reshape(self.num_valid_cam*self.num_nms, 2) #(num_valid_cam*num_nms, 2)
self.epipolar = EPIPOLAR(args)
self.args = args
def get_min_emb_dist_idx(self, emb, embs, thresh = np.zeros(0), is_want_dist = 0, epi_dist = np.zeros(0)):
'''
Args :
emb (shape : m, n)
embs (shape : m, k, n)
thresh_dist : lower thersh. throw away too small dist (shape : m, )
Return :
min_dist_idx (shape : m, 1)
'''
emb_ref = emb[:, np.newaxis, :]
dist = utility.calc_emb_dist(emb_ref, embs) #(m, k)
if epi_dist.any() :
dist[np.where(epi_dist > self.args.epi_dist_thresh)] = np.inf
if(thresh.size) :
thresh = thresh[:, np.newaxis] #(m, 1)
dist[dist<=thresh] = np.inf
min_dist_idx = np.argmin(dist, 1) #(m, )
if(is_want_dist):
min_dist = dist[np.arange(len(dist)), min_dist_idx]
return min_dist_idx, min_dist
return min_dist_idx
def get_ref(self, pred_box_prob, pred_box, pred_box_emb) :
pred_box_prob_stack = np.reshape(pred_box_prob, (self.num_valid_cam*self.num_nms, ))
top_N_box_idx = self.box_idx_stack[pred_box_prob_stack.argsort()[-self.num_nms:]]
top_N_box_idx = tuple(top_N_box_idx.T)
ref_cam_idx = top_N_box_idx[0]
ref_box = pred_box[top_N_box_idx]
ref_emb = pred_box_emb[top_N_box_idx]
return ref_cam_idx, ref_box, ref_emb
def get_ref_cam_idx_batch(self, pred_box_prob_batch, pred_box_batch, pred_box_emb_batch) :
ref_cam_idx_batch = []
for pred_box_prob, pred_box, pred_box_emb in zip(pred_box_prob_batch, pred_box_batch, pred_box_emb_batch) :
ref_cam_idx, _, _ = self.get_ref(pred_box_prob, pred_box, pred_box_emb)
ref_cam_idx_batch.append(ref_cam_idx)
return np.array(ref_cam_idx_batch)
def get_batch(self, *args):
reid_box_pred_batch, is_valid_batch, dist_batch = [], [], []
for args_one_batch in zip(*args) :
reid_box_pred, is_valid, dist = self.get_reid_box(*args_one_batch)
reid_box_pred_batch.append(reid_box_pred)
is_valid_batch.append(is_valid)
dist_batch.append(dist)
return np.array(reid_box_pred_batch), np.array(is_valid_batch), np.array(dist_batch)
def get_reid_box(self, pred_box, pred_box_emb, pred_box_prob, extrins, debug_img_np):
"""get ref idx, postive idx, negative idx for reid training
Args :
pred_box : x1, y1, x2, y2 #(num_valid_cam, 300, 4)
pred_box_emb #(num_valid_cam, 300, featsize)
pred_box_prob #(num_valid_cam, 300)
extrins #(num_cam, 3, 3)
Return :
reid_box #(300, num_valid_cam, 4)
is_valid #(300, num_valid_cam)
dist #(300, num_valid_cam)
distance from ref box
"""
reid_box = np.zeros((self.num_nms, self.num_valid_cam, 4))
is_valid = np.ones((self.num_nms, self.num_valid_cam))
dist = np.zeros((self.num_nms, self.num_valid_cam))
ref_cam_idx, ref_box, ref_emb = self.get_ref(pred_box_prob, pred_box, pred_box_emb)
reid_box[self.num_nms_arange, ref_cam_idx] = ref_box
self.epipolar.reset(extrins, debug_img_np)
for offset in range(1, self.num_valid_cam):
target_cam_idx = (ref_cam_idx + offset) % self.num_valid_cam
cand_emb = pred_box_emb[target_cam_idx]
cand_box = pred_box[target_cam_idx]
epi_dist = np.ones((self.num_nms, self.num_nms))
for i in range(self.num_nms):
epi_dist[i] = self.epipolar.get_epipolar_dist(ref_cam_idx[i], target_cam_idx[i], ref_box[i].reshape(-1, 4), cand_box[i])
min_dist_idx, min_dist = self.get_min_emb_dist_idx(ref_emb, cand_emb, is_want_dist=True, epi_dist = epi_dist)
reid_box[self.num_nms_arange, target_cam_idx] = pred_box[target_cam_idx, min_dist_idx]
dist[self.num_nms_arange, target_cam_idx] = min_dist
invalid_idx = np.where(min_dist > self.reid_min_emb_dist)
invalid_nms_idx = self.num_nms_arange[invalid_idx]
invalid_target_cam_idx = target_cam_idx[invalid_idx]
is_valid[invalid_nms_idx, invalid_target_cam_idx] = 0
return reid_box, is_valid, dist
def draw_reid_batch(self, box_batch, is_valid_batch, ref_cam_idx_batch, imgs_batch, dist_batch, waitKey=0):
box_batch = box_batch.astype(int)*self.rpn_stride
for batch_idx in range(self.batch_size):
imgs_in_one_batch = imgs_batch[batch_idx]
boxes_in_one_batch = box_batch[batch_idx]
is_valids_in_one_batch = is_valid_batch[batch_idx]
ref_cam_idx_in_one_batch = ref_cam_idx_batch[batch_idx]
dist_in_one_batch = dist_batch[batch_idx]
img_list = list(imgs_in_one_batch)
for box_idx in range(self.num_nms) :
boxes = boxes_in_one_batch[box_idx]
is_valids = is_valids_in_one_batch[box_idx]
ref_cam_idx = ref_cam_idx_in_one_batch[box_idx]
dists = dist_in_one_batch[box_idx]
result_img_list = []
for cam_idx in range(self.num_valid_cam):
box = boxes[cam_idx]
is_valid = is_valids[cam_idx]
dist = dists[cam_idx]
if cam_idx == ref_cam_idx :
color = (0, 0, 255)
elif is_valid :
color = (0, 0, 100)
else :
color = (0, 0, 0)
reuslt_img = utility.draw_box(img_list[cam_idx], box, name = None, color = color, is_show = False, text = 'dist : %.2f'%dist)
result_img_list.append(reuslt_img)
concat_img = utility.get_concat_img(result_img_list)
cv2.imshow('reid', concat_img)
cv2.waitKey(waitKey)
if __name__ == '__main__':
from option import args
import pickle
with open('/home/sap/frcnn_keras/mv_train_two_reid.pickle', 'rb') as f:
reid_pickle = pickle.load(f)
pred_box, pred_box_emb, pred_box_prob, reid_box_gt = reid_pickle
reid = REID(args)
reid_box_pred, is_valid = reid.get_reid_box(pred_box, pred_box_emb, pred_box_prob)
print('reid_box_pred.shape', reid_box_pred.shape, 'is_valid', is_valid.shape)
pred_box_batch, pred_box_emb_batch, pred_box_prob_batch = list(map(lambda a : np.expand_dims(a, 0), [pred_box, pred_box_emb, pred_box_prob]))
reid_box_pred_batch, is_valid_batch = reid.get_batch(pred_box_batch, pred_box_emb_batch, pred_box_prob_batch)
print('reid_box_pred_batch.shape', reid_box_pred_batch.shape, 'is_valid_batch', is_valid_batch.shape)
print(np.array_equal(reid_box_pred_batch[0], reid_box_pred), np.array_equal(is_valid_batch[0], is_valid))
'''
is_valid = np.ones((self.num_nms, self.num_valid_cam))
with open('/home/sap/frcnn_keras/pred_box_is_valid.pickle', 'wb') as f:
pickle.dump(is_valid, f)
for i in range(10) :
print('gt', reid_box_gt[i])
print('pred', reid_box_pred[i])
print('valid', is_valid[i])
if(np.array_equal(reid_box_gt, reid_box_pred)) :
print('good')
else :
print('bad')
'''
filtered:remove_function_no_docstring: 3,683 | filtered:remove_class_no_docstring: 3,084 | filtered:remove_delete_markers: 23
hexsha: 869daa2374c26f25d0517f0a880b5bc63581fcf1 | size: 3,413 | ext: py | lang: Python
max_stars_repo: path=dagflow/sdk.py | name=GodQ/autoflow | head_hexsha=74954dafb9cdb16c29b9f3a7d081a3f3a12e808a | licenses=["Apache-2.0"] | count=1 | event_min=2019-06-20T15:31:13.000Z | event_max=2019-06-20T15:31:13.000Z
max_issues_repo: path=dagflow/sdk.py | name=GodQ/dagflow | head_hexsha=74954dafb9cdb16c29b9f3a7d081a3f3a12e808a | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_forks_repo: path=dagflow/sdk.py | name=GodQ/dagflow | head_hexsha=74954dafb9cdb16c29b9f3a7d081a3f3a12e808a | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
content:
__author__ = 'godq'
import os
import sys
from dagflow.flow_operation import send_start_flow_msg as sdk_send_start_flow_msg, \
send_finish_step_msg as sdk_send_finish_step_msg
from dagflow.loader import get_DagRepo_Object
dag_repo = get_DagRepo_Object()
avg_line_length: 31.601852 | max_line_length: 98 | alphanum_fraction: 0.745971
original_content:
__author__ = 'godq'
import os
import sys
from dagflow.flow_operation import send_start_flow_msg as sdk_send_start_flow_msg, \
send_finish_step_msg as sdk_send_finish_step_msg
from dagflow.loader import get_DagRepo_Object
dag_repo = get_DagRepo_Object()
def send_finish_step_msg(dag_name, dag_run_id, step_name, status=None, message=None, result=None):
return sdk_send_finish_step_msg(dag_name, dag_run_id, step_name, status, message, result)
def send_start_flow_msg(dag_name, dag_run_id):
return sdk_send_start_flow_msg(dag_name, dag_run_id)
def register_dag(dag_def):
assert isinstance(dag_def, dict)
dag_name = dag_def.get("name")
dag_repo.add_dag(dag_name=dag_name, content=dag_def)
# print("Dag {} created successfully".format(dag_name))
def update_dag(dag_def):
assert isinstance(dag_def, dict)
dag_name = dag_def.get("name")
dag_repo.update_dag(dag_name=dag_name, content=dag_def)
# print("Dag {} updated successfully".format(dag_name))
def start_event_center():
# if in user folder, load user's plugins
cwd = os.path.abspath(os.getcwd())
if os.path.isdir("plugins"):
os.environ["USER_PLUGINS_PATH"] = os.path.join(cwd, "plugins")
sys.path.append(cwd)
from dagflow.event_center.event_center import start_event_center
start_event_center()
def start_worker(worker_count=None):
from dagflow.utils.command import run_cmd
from dagflow.config import Config
# if in user folder, load user's plugins
cwd = os.path.abspath(os.getcwd())
if os.path.isdir("plugins"):
os.environ["USER_PLUGINS_PATH"] = os.path.join(cwd, "plugins")
if not worker_count:
worker_count = Config.celery_configs.get("worker_count", 1)
cmd = "celery worker -A dagflow.executors.celery_executor -c {}".format(worker_count)
print(cmd)
run_cmd(cmd, daemon=True)
# print("Dagflow worker has started successfully")
def get_dag(dag_name):
assert dag_name
return dag_repo.find_dag(dag_name)
def run_dag(dag_name, dag_run_id=None):
import time
from dagflow.flow_operation import send_start_flow_msg
if not dag_run_id:
dag_run_id = str(time.time())
send_start_flow_msg(dag_name, dag_run_id)
# print("Dag {} started successfully with dag_run_id {}".format(dag_name, dag_run_id))
return dag_run_id
def stop_dag_run(dag_name, dag_run_id):
assert dag_name and dag_run_id
dag_repo.stop_dag_run(dag_name, dag_run_id)
# print("Dag {} stopped successfully with dag_run_id {}".format(dag_name, dag_run_id))
def get_dag_run(dag_name, dag_run_id):
assert dag_name and dag_run_id
return dag_repo.find_dag_run(dag_name, dag_run_id)
def list_dags(detail=False):
from dagflow.loader import get_DagRepo_Object
repo = get_DagRepo_Object()
detail = str(detail).strip().lower()
detail = True if detail == "true" else False
dag_list = repo.list_dags(detail=detail)
return dag_list
def list_dag_runs(dag_name):
from dagflow.loader import get_DagRepo_Object
repo = get_DagRepo_Object()
dag_run_list = repo.list_dag_runs(dag_name=dag_name)
return dag_run_list
def list_dag_run_events(dag_name, dag_run_id):
from dagflow.loader import get_DagRepo_Object
repo = get_DagRepo_Object()
dag_run_events_list = repo.find_dag_run_events(dag_name=dag_name, dag_run_id=dag_run_id)
return dag_run_events_list
filtered:remove_function_no_docstring: 2,844 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 299
hexsha: 39e69d64294449e607d3625ba49e9a9d1fcc26c9 | size: 3,965 | ext: py | lang: Python
max_stars_repo: path=writer.py | name=MrBoogie27/PlagiarismPrograms | head_hexsha=05564131c6849747a9a7b8c56961d488cb5a2755 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues_repo: path=writer.py | name=MrBoogie27/PlagiarismPrograms | head_hexsha=05564131c6849747a9a7b8c56961d488cb5a2755 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks_repo: path=writer.py | name=MrBoogie27/PlagiarismPrograms | head_hexsha=05564131c6849747a9a7b8c56961d488cb5a2755 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
from common import compare_array
from prepare_hash import run_binary_hasher
import psycopg2
import tempfile
from psycopg2 import sql
TABLE_NAME='runs'
TABLE_TEXT_MATCHES= 'text_matches'
COLUMNS=('id', 'content', 'ASTHash')
COLUMNS_MATCH=('first_runs_id', 'second_runs_id', 'match_AST_v1')
PROBLEM_ID=3
COUNT_LIMIT = 100
avg_line_length: 38.495146 | max_line_length: 87 | alphanum_fraction: 0.538462
original_content:
from common import compare_array
from prepare_hash import run_binary_hasher
import psycopg2
import tempfile
from psycopg2 import sql
TABLE_NAME='runs'
TABLE_TEXT_MATCHES= 'text_matches'
COLUMNS=('id', 'content', 'ASTHash')
COLUMNS_MATCH=('first_runs_id', 'second_runs_id', 'match_AST_v1')
PROBLEM_ID=3
COUNT_LIMIT = 100
def get_connect(args):
return psycopg2.connect(dbname=args.database, user=args.db_user,
password=args.password, host=args.host)
def insert_hash(row, update_data):
with tempfile.NamedTemporaryFile(mode="w+t", suffix='.cpp') as fp:
fp.writelines(row[1])
fp.seek(0)
try:
hashes = run_binary_hasher("./hasher_AST_tool", fp.name)
update_data[row[0]] = hashes
except Exception as e:
print('error for {}'.format(row[0]))
def all_update(cursor, update_data):
sql_command = """UPDATE {}
SET {} = %s
WHERE {} = %s"""
stmt = sql.SQL(sql_command).format(
sql.Identifier(TABLE_NAME),
sql.Identifier(COLUMNS[2]),
sql.Identifier(COLUMNS[0])
)
# print(update_data)
for key, hashes in update_data.items():
cursor.execute(stmt, (hashes, key))
print("updated {}".format(key))
def writer_hasher(args):
update_data = {}
with get_connect(args) as conn:
with conn.cursor() as cursor:
stmt = sql.SQL('SELECT {} FROM {} where problems_id = %s LIMIT %s').format(
sql.SQL(',').join(map(sql.Identifier, COLUMNS)),
sql.Identifier(TABLE_NAME)
)
cursor.execute(stmt, (PROBLEM_ID, COUNT_LIMIT))
for row in cursor:
if row[2] is None:
insert_hash(row, update_data)
all_update(cursor, update_data)
def update_compared(cursor, all_compares):
sql_command = """UPDATE {}
SET {} = %s
WHERE {} = %s and {} = %s"""
stmt = sql.SQL(sql_command).format(
sql.Identifier(TABLE_TEXT_MATCHES),
sql.Identifier(COLUMNS_MATCH[2]),
sql.Identifier(COLUMNS_MATCH[0]),
sql.Identifier(COLUMNS_MATCH[1])
)
for fst_key, snd_key, comparison in all_compares:
cursor.execute(stmt, (comparison, fst_key, snd_key))
print("updated comparison for {} and {}".format(fst_key, snd_key))
def writer_similarity(args):
RUN_TABLE = 'runs'
with get_connect(args) as conn:
with conn.cursor() as cursor:
stmt = sql.SQL("""SELECT first_runs_id,
second_runs_id,
fst_run."ASTHash" as fst_hash,
snd_run."ASTHash" as snd_hash
FROM
{}
JOIN {} as fst_run
ON {}.first_runs_id = fst_run.id
JOIN {} as snd_run
ON {}.second_runs_id = snd_run.id
WHERE {}.problems_id = %s
and "match_AST_v1" IS NULL
and fst_run."ASTHash" IS NOT NULL
and snd_run."ASTHash" IS NOT NULL
LIMIT %s""").format(
sql.Identifier(TABLE_TEXT_MATCHES),
sql.Identifier(RUN_TABLE),
sql.Identifier(TABLE_TEXT_MATCHES),
sql.Identifier(RUN_TABLE),
sql.Identifier(TABLE_TEXT_MATCHES),
sql.Identifier(TABLE_TEXT_MATCHES)
)
cursor.execute(stmt, (PROBLEM_ID, COUNT_LIMIT))
all_compares = []
for row in cursor:
compared = compare_array(row[2], row[3])
all_compares.append((row[0], row[1], compared))
update_compared(cursor, all_compares)
filtered:remove_function_no_docstring: 3,505 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 138
hexsha: 5c92b9919c5b5d586563d3d52b13ac345250227f | size: 262 | ext: py | lang: Python
max_stars_repo: path=api/static.py | name=hartliddell/api | head_hexsha=73d44d2271c01fe7540fedeee9174c4032cbbbc0 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues_repo: path=api/static.py | name=hartliddell/api | head_hexsha=73d44d2271c01fe7540fedeee9174c4032cbbbc0 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks_repo: path=api/static.py | name=hartliddell/api | head_hexsha=73d44d2271c01fe7540fedeee9174c4032cbbbc0 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
"""Define a custom static storage class."""
from django.contrib.staticfiles.storage import ManifestStaticFilesStorage
class RyrManifestStaticFilesStorage(ManifestStaticFilesStorage):
"""Define a custom static storage class."""
manifest_strict = False
avg_line_length: 29.111111 | max_line_length: 73 | alphanum_fraction: 0.79771
original_content:
"""Define a custom static storage class."""
from django.contrib.staticfiles.storage import ManifestStaticFilesStorage
class RyrManifestStaticFilesStorage(ManifestStaticFilesStorage):
"""Define a custom static storage class."""
manifest_strict = False
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: f3e92c79550d52d83e93cc3237d16de166bd98cb | size: 4,394 | ext: py | lang: Python
max_stars_repo: path=todos/tests.py | name=shiniao/todoz | head_hexsha=4cb2cf492f6cfac5e037da6e7b3b674ef548e62a | licenses=["MIT"] | count=1 | event_min=2020-01-13T03:32:11.000Z | event_max=2020-01-13T03:32:11.000Z
max_issues_repo: path=todos/tests.py | name=shiniao/todoz | head_hexsha=4cb2cf492f6cfac5e037da6e7b3b674ef548e62a | licenses=["MIT"] | count=6 | event_min=2021-05-10T19:58:23.000Z | event_max=2022-02-26T20:29:39.000Z
max_forks_repo: path=todos/tests.py | name=shiniao/todoz | head_hexsha=4cb2cf492f6cfac5e037da6e7b3b674ef548e62a | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
# Create your tests here.
import json
from django.contrib.auth.models import User
from django.test import TestCase, Client
from .models import Todo
from django.utils import timezone
import datetime
class TestTodosModel(TestCase):
"""测试数据库model"""
class TestTodosViews(TestCase):
"""测试视图函数"""
# TODO test
def test_todo_put(self):
"""测试更新土豆"""
data = {
'title': '抽烟',
}
uuid = self.user1.todos.all()[0].uuid
rsp = self.client.put('/api/v1/todos/{}/'.format(uuid),
data=data,
content_type='application/json')
self.assertEqual(rsp.status_code, 200)
todo = self.user1.todos.all()[0]
self.assertEqual(todo.title, data['title'])
def test_todo_delete(self):
"""测试删除土豆"""
uuid = self.user1.todos.all()[0].uuid
rsp = self.client.delete('/api/v1/todos/{}/'.format(uuid))
print(rsp.content)
self.assertEqual(rsp.status_code, 200)
with self.assertRaises(Todo.DoesNotExist) as e:
self.user1.todos.get(uuid=uuid)
def test_todo_field_error(self):
"""测试字段不正确情况下报错"""
data = {
'titl': '抽烟',
}
uuid = self.user1.todos.all()[0].uuid
rsp = self.client.put('/api/v1/todos/{}/'.format(uuid),
data=data,
content_type='application/json')
self.assertEqual(rsp.status_code, 400)
class TestAuth(TestCase):
"""测试jwt认证"""
avg_line_length: 32.072993 | max_line_length: 85 | alphanum_fraction: 0.574192
original_content:
# Create your tests here.
import json
from django.contrib.auth.models import User
from django.test import TestCase, Client
from .models import Todo
from django.utils import timezone
import datetime
class TestTodosModel(TestCase):
"""测试数据库model"""
def test_is_past(self):
# 未来土豆
future_date = timezone.now() + datetime.timedelta(days=30)
future_todo = Todo(created=future_date)
self.assertIs(future_todo.is_past(), False)
# 过去土豆
past_date = timezone.now() + datetime.timedelta(days=-1)
past_todo = Todo(expired=past_date)
self.assertIs(past_todo.is_past(), True)
# 现在土豆
now_todo = Todo(created=timezone.now())
self.assertIs(now_todo.is_past(), False)
class TestTodosViews(TestCase):
"""测试视图函数"""
def setUp(self) -> None:
self.user1 = User.objects.create_user(username='user1', password='123123')
self.user2 = User.objects.create_user(username='user2', password='123123')
for i in range(5):
Todo.objects.create(
title='test{}'.format(i),
owner=self.user1)
data = {
"username": "user1",
"password": "123123"
}
# 获取token
rsp = self.client.post('/auth/login/', data, content_type='application/json')
token = rsp.json()['message']
self.client = Client(
HTTP_AUTHORIZATION='Bearer {}'.format(token),
HTTP_CONTENT_TYPE='application/json')
# TODO test
def test_todo_remove_past(self):
pass
def test_todos_get(self):
rsp = self.client.get('/api/v1/todos/', {'per_page': 3, 'page': 1})
self.assertEqual(rsp.status_code, 200, rsp.json())
self.assertEqual(rsp.json()['message']['count'], 3)
self.assertEqual(len(rsp.json()['message']['todos']), 3)
def test_todos_post(self):
todo = {
'title': '烫头'
}
rsp = self.client.post('/api/v1/todos/',
data=json.dumps(todo),
content_type='application/json')
print(rsp.json())
self.assertEqual(rsp.status_code, 200)
dtodo = self.user1.todos.get(title=todo['title'])
self.assertEqual(dtodo.title, todo['title'])
def test_todo_get(self):
uuid = self.user1.todos.all()[0].uuid
rsp = self.client.get('/api/v1/todos/{}/'.format(uuid))
self.assertEqual(rsp.status_code, 200, rsp.json())
self.assertEqual(rsp.json()['message']['title'], 'test0')
def test_todo_put(self):
"""测试更新土豆"""
data = {
'title': '抽烟',
}
uuid = self.user1.todos.all()[0].uuid
rsp = self.client.put('/api/v1/todos/{}/'.format(uuid),
data=data,
content_type='application/json')
self.assertEqual(rsp.status_code, 200)
todo = self.user1.todos.all()[0]
self.assertEqual(todo.title, data['title'])
def test_todo_delete(self):
"""测试删除土豆"""
uuid = self.user1.todos.all()[0].uuid
rsp = self.client.delete('/api/v1/todos/{}/'.format(uuid))
print(rsp.content)
self.assertEqual(rsp.status_code, 200)
with self.assertRaises(Todo.DoesNotExist) as e:
self.user1.todos.get(uuid=uuid)
def test_todo_field_error(self):
"""测试字段不正确情况下报错"""
data = {
'titl': '抽烟',
}
uuid = self.user1.todos.all()[0].uuid
rsp = self.client.put('/api/v1/todos/{}/'.format(uuid),
data=data,
content_type='application/json')
self.assertEqual(rsp.status_code, 400)
class TestAuth(TestCase):
"""测试jwt认证"""
def setUp(self) -> None:
self.user = User.objects.create_user(username='auth_user', password='123456')
def test_token(self):
data = {
"username": "auth_user",
"password": "123456"
}
rsp = self.client.post('/auth/login/', data, content_type='application/json')
self.assertEqual(rsp.status_code, 200, rsp.json())
def test_auth_requirement(self):
pass
class TestUtils(TestCase):
def test_http_methods_required(self):
rsp = self.client.get('/auth/login/')
self.assertEqual(rsp.status_code, 405)
filtered:remove_function_no_docstring: 2,581 | filtered:remove_class_no_docstring: 5 | filtered:remove_delete_markers: 292
hexsha: 955eed560456dca1edea91950d78789cf913e924 | size: 4,962 | ext: py | lang: Python
max_stars_repo: path=quadtree.py | name=eug/quadtree | head_hexsha=7a3154f06b8f52fd8338a7b73b5a8329c399281f | licenses=["MIT"] | count=1 | event_min=2021-09-02T07:57:29.000Z | event_max=2021-09-02T07:57:29.000Z
max_issues_repo: path=quadtree.py | name=eug/quadtree | head_hexsha=7a3154f06b8f52fd8338a7b73b5a8329c399281f | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks_repo: path=quadtree.py | name=eug/quadtree | head_hexsha=7a3154f06b8f52fd8338a7b73b5a8329c399281f | licenses=["MIT"] | count=1 | event_min=2020-11-05T05:57:37.000Z | event_max=2020-11-05T05:57:37.000Z
content:
import bisect
from scipy.spatial.distance import euclidean
from common import (NO_QUADRANT, NORTH_EAST, NORTH_WEST, SOUTH_EAST,
SOUTH_WEST, Boundary, Point, belongs, compute_knn,
intersects, quadrants)
from node import TreeNode
# Constants for tuple access optimization
BOUNDARY = 0
POINTS = 1
avg_line_length: 32.012903 | max_line_length: 78 | alphanum_fraction: 0.601169
original_content:
import bisect
from scipy.spatial.distance import euclidean
from common import (NO_QUADRANT, NORTH_EAST, NORTH_WEST, SOUTH_EAST,
SOUTH_WEST, Boundary, Point, belongs, compute_knn,
intersects, quadrants)
from node import TreeNode
# Constants for tuple access optimization
BOUNDARY = 0
POINTS = 1
class StaticQuadTree:
def __init__(self, dimension=1, max_depth=4):
self.max_depth = max_depth
self._quadrants = [0] * int(((4 ** (max_depth + 1))-1)/3)
self._quadrants[0] = (Boundary(Point(0, 0), dimension), set())
self._decompose(self._quadrants[0][BOUNDARY], 0, 0)
def _decompose(self, boundary, depth, parent):
if depth == self.max_depth:
return
x, y = boundary.center
dm = boundary.dimension / 2
index0 = 4 * parent + NORTH_WEST
index1 = 4 * parent + NORTH_EAST
index2 = 4 * parent + SOUTH_EAST
index3 = 4 * parent + SOUTH_WEST
self._quadrants[index0] = (Boundary(Point(x - dm, y + dm), dm), set())
self._quadrants[index1] = (Boundary(Point(x + dm, y + dm), dm), set())
self._quadrants[index2] = (Boundary(Point(x + dm, y - dm), dm), set())
self._quadrants[index3] = (Boundary(Point(x - dm, y - dm), dm), set())
self._decompose(self._quadrants[index0][BOUNDARY], depth + 1, index0)
self._decompose(self._quadrants[index1][BOUNDARY], depth + 1, index1)
self._decompose(self._quadrants[index2][BOUNDARY], depth + 1, index2)
self._decompose(self._quadrants[index3][BOUNDARY], depth + 1, index3)
def index(self, point):
idx = 0
q = quadrants(self._quadrants[idx][BOUNDARY], point)
if q == NO_QUADRANT: return
for _ in range(0, self.max_depth):
idx = 4 * idx + q
q = quadrants(self._quadrants[idx][BOUNDARY], point)
return idx
def __len__(self):
return sum(len(q[1]) for q in self._quadrants)
def __iter__(self):
return (point for quad in self._quadrants for point in quad[POINTS])
def __contains__(self, point):
return point in self._quadrants[self.index(point)][POINTS]
def insert(self, point):
self._quadrants[self.index(point)][POINTS].add(point)
def remove(self, point):
if not isinstance(point, Point):
return False
try:
self._quadrants[self.index(point)][POINTS].remove(point)
return True
except:
return False
def update(self, new_point, old_point):
if not isinstance(new_point, Point) or \
not isinstance(old_point, Point):
return False
try:
self._quadrants[self.index(old_point)][POINTS].remove(old_point)
self._quadrants[self.index(new_point)][POINTS].add(new_point)
return True
except:
return False
def query_range(self, boundary):
if not isinstance(boundary, Boundary):
return ([])
for quadrant in self._quadrants:
if intersects(quadrant[BOUNDARY], boundary):
for point in quadrant[POINTS]:
if belongs(boundary, point):
yield point
def knn(self, point, k, factor=.1):
if not isinstance(point, Point) or k <= 0 or factor <= 0:
return []
if len(self) < k:
points = self.query_range(self._quadrants[BOUNDARY])
return compute_knn(points, point, k)
points_count = 0
dimension = factor
while points_count <= k:
dimension += factor
points_count = self._count_points(Boundary(point, dimension))
points = self.query_range(Boundary(point, dimension))
return compute_knn(points, point, k)
def _count_points(self, boundary):
count = 0
for quadrant in self._quadrants:
if intersects(quadrant[BOUNDARY], boundary):
for point in quadrant[POINTS]:
if belongs(boundary, point):
count += 1
return count
class DynamicQuadTree:
def __init__(self, dimension=1, max_points=1, max_depth=4):
self.max_points = max_points
self.max_depth = max_depth
self.root = TreeNode(Point(0, 0), dimension, max_points, max_depth, 0)
def __len__(self):
return len(self.root)
def __iter__(self):
return iter(self.root)
def __contains__(self, point):
return self.root.exist(point)
def insert(self, point):
return self.root.insert(point)
def remove(self, point):
return self.root.remove(point)
def update(self, new_point, old_point):
return self.root.update(new_point, old_point)
def query_range(self, boundary):
return self.root.query_range(boundary)
def knn(self, point, k):
return self.root.knn(point, k)
filtered:remove_function_no_docstring: 4,010 | filtered:remove_class_no_docstring: 1 | filtered:remove_delete_markers: 613
hexsha: 7289ee6470e2a8f5fb3a4ac360a43fd613597c4a | size: 523 | ext: py | lang: Python
max_stars_repo: path=competition/alibaba.py | name=Max-PJB/python-learning2 | head_hexsha=e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues_repo: path=competition/alibaba.py | name=Max-PJB/python-learning2 | head_hexsha=e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks_repo: path=competition/alibaba.py | name=Max-PJB/python-learning2 | head_hexsha=e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
# N, a, b, c, d = list(map(int, input().split()))
#
#
# def jc(x):
# r = 1
# for i in range(1, x + 1):
# r *= i
# return r
#
# res = int(jc(N * N) / (jc(a) * jc(b) * jc(c) * jc(d))) %
# print(res)
from collections import defaultdict
n = int(input())
edges = defaultdict(list)
for _ in range(n - 1):
u, v = list(map(int, input().split()))
edges[u].append(v)
print(subtree(1))
avg_line_length: 17.433333 | max_line_length: 68 | alphanum_fraction: 0.529637
original_content:
# N, a, b, c, d = list(map(int, input().split()))
#
#
# def jc(x):
# r = 1
# for i in range(1, x + 1):
# r *= i
# return r
#
# res = int(jc(N * N) / (jc(a) * jc(b) * jc(c) * jc(d))) %
# print(res)
from collections import defaultdict
n = int(input())
edges = defaultdict(list)
for _ in range(n - 1):
u, v = list(map(int, input().split()))
edges[u].append(v)
def subtree(k):
if edges[k]:
return max(list(map(subtree, edges[k]))) + len(edges[k]) - 1
else:
return 0
print(subtree(1))
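# Editorial worked example (not part of the original submission): for n = 4 with edges
# 1-2, 1-3 and 2-4, the recursion gives subtree(4) = subtree(3) = 0,
# subtree(2) = max(subtree(4)) + 1 - 1 = 0 and subtree(1) = max(0, 0) + 2 - 1 = 1.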
| 107
| 0
| 23
|
5a8a1562596c85fca9c274efc7e80bd787910272
| 4,948
|
py
|
Python
|
neo4j/__main__.py
|
ank-forked/neo4j-python-driver
|
f7857791051e0a7499aea9da5f92256cef7eb014
|
[
"Apache-2.0"
] | null | null | null |
neo4j/__main__.py
|
ank-forked/neo4j-python-driver
|
f7857791051e0a7499aea9da5f92256cef7eb014
|
[
"Apache-2.0"
] | null | null | null |
neo4j/__main__.py
|
ank-forked/neo4j-python-driver
|
f7857791051e0a7499aea9da5f92256cef7eb014
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2015 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from argparse import ArgumentParser
from json import loads as json_loads
import logging
from sys import stdout, stderr
from neo4j.session import GraphDatabase, CypherError
class ColourFormatter(logging.Formatter):
""" Colour formatter for pretty log output.
"""
class Watcher(object):
""" Log watcher for debug output.
"""
handlers = {}
if __name__ == "__main__":
main()
| 37.203008
| 92
| 0.599232
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2015 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from argparse import ArgumentParser
from json import loads as json_loads
import logging
from sys import stdout, stderr
from neo4j.session import GraphDatabase, CypherError
class ColourFormatter(logging.Formatter):
""" Colour formatter for pretty log output.
"""
def format(self, record):
s = super(ColourFormatter, self).format(record)
if record.levelno == logging.CRITICAL:
return "\x1b[31;1m%s\x1b[0m" % s # bright red
elif record.levelno == logging.ERROR:
return "\x1b[33;1m%s\x1b[0m" % s # bright yellow
elif record.levelno == logging.WARNING:
return "\x1b[33m%s\x1b[0m" % s # yellow
elif record.levelno == logging.INFO:
return "\x1b[36m%s\x1b[0m" % s # cyan
elif record.levelno == logging.DEBUG:
return "\x1b[34m%s\x1b[0m" % s # blue
else:
return s
class Watcher(object):
""" Log watcher for debug output.
"""
handlers = {}
def __init__(self, logger_name):
super(Watcher, self).__init__()
self.logger_name = logger_name
self.logger = logging.getLogger(self.logger_name)
self.formatter = ColourFormatter("%(asctime)s %(message)s")
def watch(self, level=logging.INFO, out=stdout):
try:
self.logger.removeHandler(self.handlers[self.logger_name])
except KeyError:
pass
handler = logging.StreamHandler(out)
handler.setFormatter(self.formatter)
self.handlers[self.logger_name] = handler
self.logger.addHandler(handler)
self.logger.setLevel(level)
def main():
parser = ArgumentParser(description="Execute one or more Cypher statements using Bolt.")
parser.add_argument("statement", nargs="+")
parser.add_argument("-u", "--url", default="bolt://localhost", metavar="CONNECTION_URL")
parser.add_argument("-p", "--parameter", action="append", metavar="NAME=VALUE")
parser.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("-s", "--secure", action="store_true")
parser.add_argument("-v", "--verbose", action="count")
parser.add_argument("-x", "--times", type=int, default=1)
parser.add_argument("-z", "--summarize", action="store_true")
args = parser.parse_args()
if args.verbose:
level = logging.INFO if args.verbose == 1 else logging.DEBUG
Watcher("neo4j").watch(level, stderr)
parameters = {}
for parameter in args.parameter or []:
name, _, value = parameter.partition("=")
if value == "" and name in parameters:
del parameters[name]
else:
try:
parameters[name] = json_loads(value)
except ValueError:
parameters[name] = value
driver = GraphDatabase.driver(args.url, secure=args.secure)
session = driver.session()
for _ in range(args.times):
for statement in args.statement:
try:
result = session.run(statement, parameters)
except CypherError as error:
stderr.write("%s: %s\r\n" % (error.code, error.message))
else:
if not args.quiet:
has_results = False
for i, record in enumerate(result):
has_results = True
if i == 0:
stdout.write("%s\r\n" % "\t".join(record.__keys__))
stdout.write("%s\r\n" % "\t".join(map(repr, record)))
if has_results:
stdout.write("\r\n")
if args.summarize:
summary = result.summarize()
stdout.write("Statement : %r\r\n" % summary.statement)
stdout.write("Parameters : %r\r\n" % summary.parameters)
stdout.write("Statement Type : %r\r\n" % summary.statement_type)
stdout.write("Statistics : %r\r\n" % summary.statistics)
stdout.write("\r\n")
session.close()
if __name__ == "__main__":
main()
| 3,659
| 0
| 104
|
b1351f952c91be994aa5c5780c6c740a6d7dd951
| 1,506
|
py
|
Python
|
naga/shared/trainer.py
|
bright1993ff66/emoji2vec
|
08281486c3b3c6da9c5ecc13e140baefc6e48326
|
[
"MIT"
] | 173
|
2016-10-03T18:28:13.000Z
|
2019-09-26T10:36:54.000Z
|
naga/shared/trainer.py
|
bright1993ff66/emoji2vec
|
08281486c3b3c6da9c5ecc13e140baefc6e48326
|
[
"MIT"
] | 2
|
2016-10-25T18:28:48.000Z
|
2019-08-04T21:50:12.000Z
|
naga/shared/trainer.py
|
bright1993ff66/emoji2vec
|
08281486c3b3c6da9c5ecc13e140baefc6e48326
|
[
"MIT"
] | 39
|
2016-10-04T13:35:29.000Z
|
2019-09-11T18:06:54.000Z
|
import tensorflow as tf
class Trainer(object):
"""
Object representing a TensorFlow trainer.
"""
| 32.73913
| 108
| 0.579681
|
import tensorflow as tf
class Trainer(object):
"""
Object representing a TensorFlow trainer.
"""
def __init__(self, optimizer, max_epochs, hooks=[]):
self.loss = None
self.optimizer = optimizer
self.max_epochs = max_epochs
self.hooks = hooks
def __call__(self, batcher, placeholders, loss, model=None, session=None):
self.loss = loss
minimization_op = self.optimizer.minimize(loss)
close_session_after_training = False
if session is None:
session = tf.Session()
close_session_after_training = True # no session existed before, we provide a temporary session
init = tf.initialize_all_variables()
session.run(init)
epoch = 1
iteration = 1
while epoch < self.max_epochs:
for values in batcher:
feed_dict = {}
for i in range(0, len(placeholders)):
feed_dict[placeholders[i]] = values[i]
_, current_loss = session.run([minimization_op, loss], feed_dict=feed_dict)
current_loss = sum(current_loss)
for hook in self.hooks:
hook(session, epoch, iteration, model, current_loss)
iteration += 1
# calling post-epoch hooks
for hook in self.hooks:
hook(session, epoch, 0, model, 0)
epoch += 1
if close_session_after_training:
session.close()
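# Editorial usage sketch (not part of the original module), written against the same TF1-era
# API the class above targets (tf.placeholder / tf.initialize_all_variables). Note that the
# trainer sums the fetched loss values, so the loss tensor is left per-example here.
if __name__ == "__main__":
    import numpy as np
    x_ph = tf.placeholder(tf.float32, [None, 2])
    y_ph = tf.placeholder(tf.float32, [None, 1])
    w = tf.Variable(tf.zeros([2, 1]))
    loss = tf.square(tf.matmul(x_ph, w) - y_ph)   # per-example squared error
    batches = [(np.random.rand(8, 2), np.random.rand(8, 1))]
    log_hook = lambda sess, epoch, it, model, l: print("epoch %d iter %d loss %s" % (epoch, it, l))
    trainer = Trainer(tf.train.GradientDescentOptimizer(0.1), max_epochs=5, hooks=[log_hook])
    trainer(batches, [x_ph, y_ph], loss)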
| 1,341
| 0
| 54
|
916dd0a0fef78307533ccaae6243fb5d10a059e7
| 4,107
|
py
|
Python
|
test_dp_setting_initial_conditions_bem.py
|
whitews/dpconverge
|
0e00a7f1c0be8bd291cb68598ca2b2eaa1a47448
|
[
"BSD-3-Clause"
] | null | null | null |
test_dp_setting_initial_conditions_bem.py
|
whitews/dpconverge
|
0e00a7f1c0be8bd291cb68598ca2b2eaa1a47448
|
[
"BSD-3-Clause"
] | null | null | null |
test_dp_setting_initial_conditions_bem.py
|
whitews/dpconverge
|
0e00a7f1c0be8bd291cb68598ca2b2eaa1a47448
|
[
"BSD-3-Clause"
] | null | null | null |
from dpconverge.data_set import DataSet
import numpy as np
import pandas as pd
from matplotlib import pyplot
from sklearn.datasets.samples_generator import make_blobs
n_features = 2
points_per_feature = 100
centers = [[2, 1.35], [2, 2], [2, 3], [2.5, 1.5], [2.5, 2], [2.5, 2.5]]
blob1, y1 = make_blobs(
n_samples=1000,
n_features=1,
centers=centers[0],
cluster_std=[0.1, 0.15],
random_state=1
)
blob2, y2 = make_blobs(
n_samples=6000,
n_features=1,
centers=centers[1],
cluster_std=[0.2, 0.3],
random_state=2
)
blob3, y3 = make_blobs(
n_samples=3000,
n_features=1,
centers=centers[2],
cluster_std=[0.2, 0.1],
random_state=2
)
blob4, y4 = make_blobs(
n_samples=250,
n_features=1,
centers=centers[3],
cluster_std=[0.1, 0.1],
random_state=2
)
blob5, y5 = make_blobs(
n_samples=250,
n_features=1,
centers=centers[4],
cluster_std=[0.1, 0.1],
random_state=3
)
ds = DataSet(parameter_count=2)
ds.add_blob(1, blob1)
ds.add_blob(2, blob2)
ds.add_blob(3, blob3)
ds.add_blob(4, blob4)
ds.add_blob(5, blob5)
# ds.plot_blobs(ds.classifications, x_lim=[0, 4], y_lim=[0, 4])
component_count = 128
iteration_count = 5000
# use multiple runs of BEM to estimate the number of components
# and get initial conditions
max_log_like = None # the highest value for all runs
converged = False
results = [] # will be a list of dicts to convert to a DataFrame
while not converged:
print component_count
new_comp_counts = []
for seed in range(1, 17):
ds.results = None # reset results
ds.cluster(
component_count=component_count,
burn_in=0,
iteration_count=iteration_count,
random_seed=seed,
model='bem'
)
log_like = ds.get_log_likelihood_trace()[0]
print log_like
if log_like > max_log_like:
max_log_like = log_like
# if the new log_like is close to the max (within 1%),
# see if there are any empty components (pi < 0.0001)
if abs(max_log_like - log_like) < abs(max_log_like * 0.01):
tmp_comp_count = np.sum(ds.raw_results.pis > 0.0001)
new_comp_counts.append(tmp_comp_count)
# save good run to our results
results.append(
{
'comp': component_count,
'true_comp': tmp_comp_count,
'seed': seed,
'log_like': log_like,
'pis': ds.raw_results.pis,
'mus': ds.raw_results.mus,
'sigmas': ds.raw_results.sigmas
}
)
# ds.plot_classifications(0)
if len(new_comp_counts) > 0:
if int(np.mean(new_comp_counts)) < component_count:
component_count = int(np.mean(new_comp_counts))
else:
converged = True
else:
converged = True
results_df = pd.DataFrame(
results,
columns=['comp', 'true_comp', 'seed', 'log_like']
)
min_comp_count = results_df.comp.min()
best_index = results_df[results_df.comp == min_comp_count].log_like.argmax()
best_run = results[best_index]
ds.results = None
ds.cluster(
component_count=best_run['comp'],
burn_in=0,
iteration_count=iteration_count,
random_seed=best_run['seed'],
model='bem'
)
log_like = ds.get_log_likelihood_trace()[0]
print log_like
ds.plot_classifications(0)
# Re-run a chain using the initial conditions from the last iteration
last_iter = ds.raw_results.get_iteration(0)
initial_conditions = {
'pis': last_iter.pis.flatten(),
'mus': last_iter.mus,
'sigmas': last_iter.sigmas
}
# reset DataSet results
ds.results = None
ds.cluster(
component_count=best_run['comp'],
burn_in=0,
iteration_count=iteration_count,
random_seed=1,
initial_conditions=initial_conditions
)
ds.plot_log_likelihood_trace()
pyplot.show()
valid_components = ds.get_valid_components()
for i in range(best_run['comp']):
ds.plot_iteration_traces(i)
ds.plot_animated_trace()
pass
| 22.944134
| 76
| 0.637448
|
from dpconverge.data_set import DataSet
import numpy as np
import pandas as pd
from matplotlib import pyplot
from sklearn.datasets.samples_generator import make_blobs
n_features = 2
points_per_feature = 100
centers = [[2, 1.35], [2, 2], [2, 3], [2.5, 1.5], [2.5, 2], [2.5, 2.5]]
blob1, y1 = make_blobs(
n_samples=1000,
n_features=1,
centers=centers[0],
cluster_std=[0.1, 0.15],
random_state=1
)
blob2, y2 = make_blobs(
n_samples=6000,
n_features=1,
centers=centers[1],
cluster_std=[0.2, 0.3],
random_state=2
)
blob3, y3 = make_blobs(
n_samples=3000,
n_features=1,
centers=centers[2],
cluster_std=[0.2, 0.1],
random_state=2
)
blob4, y4 = make_blobs(
n_samples=250,
n_features=1,
centers=centers[3],
cluster_std=[0.1, 0.1],
random_state=2
)
blob5, y5 = make_blobs(
n_samples=250,
n_features=1,
centers=centers[4],
cluster_std=[0.1, 0.1],
random_state=3
)
ds = DataSet(parameter_count=2)
ds.add_blob(1, blob1)
ds.add_blob(2, blob2)
ds.add_blob(3, blob3)
ds.add_blob(4, blob4)
ds.add_blob(5, blob5)
# ds.plot_blobs(ds.classifications, x_lim=[0, 4], y_lim=[0, 4])
component_count = 128
iteration_count = 5000
# use multiple runs of BEM to estimate the number of components
# and get initial conditions
max_log_like = None # the highest value for all runs
converged = False
results = [] # will be a list of dicts to convert to a DataFrame
while not converged:
print component_count
new_comp_counts = []
for seed in range(1, 17):
ds.results = None # reset results
ds.cluster(
component_count=component_count,
burn_in=0,
iteration_count=iteration_count,
random_seed=seed,
model='bem'
)
log_like = ds.get_log_likelihood_trace()[0]
print log_like
if log_like > max_log_like:
max_log_like = log_like
# if the new log_like is close to the max (within 1%),
# see if there are any empty components (pi < 0.0001)
if abs(max_log_like - log_like) < abs(max_log_like * 0.01):
tmp_comp_count = np.sum(ds.raw_results.pis > 0.0001)
new_comp_counts.append(tmp_comp_count)
# save good run to our results
results.append(
{
'comp': component_count,
'true_comp': tmp_comp_count,
'seed': seed,
'log_like': log_like,
'pis': ds.raw_results.pis,
'mus': ds.raw_results.mus,
'sigmas': ds.raw_results.sigmas
}
)
# ds.plot_classifications(0)
if len(new_comp_counts) > 0:
if int(np.mean(new_comp_counts)) < component_count:
component_count = int(np.mean(new_comp_counts))
else:
converged = True
else:
converged = True
results_df = pd.DataFrame(
results,
columns=['comp', 'true_comp', 'seed', 'log_like']
)
min_comp_count = results_df.comp.min()
best_index = results_df[results_df.comp == min_comp_count].log_like.argmax()
best_run = results[best_index]
ds.results = None
ds.cluster(
component_count=best_run['comp'],
burn_in=0,
iteration_count=iteration_count,
random_seed=best_run['seed'],
model='bem'
)
log_like = ds.get_log_likelihood_trace()[0]
print log_like
ds.plot_classifications(0)
# Re-run a chain using the initial conditions from the last iteration
last_iter = ds.raw_results.get_iteration(0)
initial_conditions = {
'pis': last_iter.pis.flatten(),
'mus': last_iter.mus,
'sigmas': last_iter.sigmas
}
# reset DataSet results
ds.results = None
ds.cluster(
component_count=best_run['comp'],
burn_in=0,
iteration_count=iteration_count,
random_seed=1,
initial_conditions=initial_conditions
)
ds.plot_log_likelihood_trace()
pyplot.show()
valid_components = ds.get_valid_components()
for i in range(best_run['comp']):
ds.plot_iteration_traces(i)
ds.plot_animated_trace()
pass
| 0
| 0
| 0
|
495def4124e4516ef147f20dbb2acf3dd3301455
| 102
|
py
|
Python
|
vit_pytorch/__init__.py
|
WilliamAshbee/vit-pytorch
|
a033eae5f6ad1c609c06b762371cc43ca3930662
|
[
"MIT"
] | null | null | null |
vit_pytorch/__init__.py
|
WilliamAshbee/vit-pytorch
|
a033eae5f6ad1c609c06b762371cc43ca3930662
|
[
"MIT"
] | null | null | null |
vit_pytorch/__init__.py
|
WilliamAshbee/vit-pytorch
|
a033eae5f6ad1c609c06b762371cc43ca3930662
|
[
"MIT"
] | null | null | null |
from vit_pytorch.vit import ViT
from vit_pytorch.vit3d import ViT3d
from vit_pytorch.dino import Dino
| 25.5
| 35
| 0.852941
|
from vit_pytorch.vit import ViT
from vit_pytorch.vit3d import ViT3d
from vit_pytorch.dino import Dino
| 0
| 0
| 0
|
754d3a88e63c470befd4e37201725706f9d4214f
| 2,469
|
py
|
Python
|
pincer/objects/message/component.py
|
Kylianalex/Pincer
|
7ca530798a696c70e7d5c939902653575e3d8054
|
[
"MIT"
] | 1
|
2021-11-04T13:20:23.000Z
|
2021-11-04T13:20:23.000Z
|
pincer/objects/message/component.py
|
Kylianalex/Pincer
|
7ca530798a696c70e7d5c939902653575e3d8054
|
[
"MIT"
] | 1
|
2021-10-31T11:41:42.000Z
|
2021-10-31T11:41:42.000Z
|
pincer/objects/message/component.py
|
Kylianalex/Pincer
|
7ca530798a696c70e7d5c939902653575e3d8054
|
[
"MIT"
] | 1
|
2021-11-17T13:55:07.000Z
|
2021-11-17T13:55:07.000Z
|
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from ...utils.api_object import APIObject
from ...utils.types import MISSING
if TYPE_CHECKING:
from typing import List
from ..app.select_menu import SelectOption
from ..message.button import ButtonStyle
from ..message.emoji import Emoji
from ...utils.types import APINullable
@dataclass(repr=False)
class MessageComponent(APIObject):
"""Represents a Discord Message Component object
Attributes
----------
type: :class:`int`
Component type
options: List[:class:`~pincer.objects.app.select_menu.SelectOption`]
The choices in the select, max 25
custom_id: APINullable[:class:`str`]
A developer-defined identifier for the component,
max 100 characters
disabled: APINullable[:class:`bool`]
Whether the component is disabled,
defaults to `False`
style: APINullable[:class:`~pincer.objects.message.button.ButtonStyle`]
One of button styles
label: APINullable[:class:`str`]
Text that appears on the button, max 80 characters
emoji: APINullable[:class:`~pincer.objects.message.emoji.Emoji`]
``name``, ``id``, and ``animated``
url: APINullable[:class:`str`]
A url for link-style buttons
placeholder: APINullable[:class:`str`]
Custom placeholder text if nothing is selected,
max 100 characters
min_values: APINullable[:class:`int`]
The minimum number of items that must be chosen;
|default| ``1``, min ``0``, max ``25``
max_values: APINullable[:class:`int`]
The maximum number of items that can be chosen;
|default| ``1``, max ``25``
components: APINullable[List[:class:`~pincer.objects.message.component.MessageComponent`]]
A list of child components
"""
# noqa: E501
type: int
options: List[SelectOption] = MISSING
custom_id: APINullable[str] = MISSING
disabled: APINullable[bool] = False
style: APINullable[ButtonStyle] = MISSING
label: APINullable[str] = MISSING
emoji: APINullable[Emoji] = MISSING
url: APINullable[str] = MISSING
placeholder: APINullable[str] = MISSING
min_values: APINullable[int] = 1
max_values: APINullable[int] = 1
components: APINullable[List[MessageComponent]] = MISSING
| 34.774648
| 94
| 0.687323
|
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from ...utils.api_object import APIObject
from ...utils.types import MISSING
if TYPE_CHECKING:
from typing import List
from ..app.select_menu import SelectOption
from ..message.button import ButtonStyle
from ..message.emoji import Emoji
from ...utils.types import APINullable
@dataclass(repr=False)
class MessageComponent(APIObject):
"""Represents a Discord Message Component object
Attributes
----------
type: :class:`int`
Component type
options: List[:class:`~pincer.objects.app.select_menu.SelectOption`]
The choices in the select, max 25
custom_id: APINullable[:class:`str`]
A developer-defined identifier for the component,
max 100 characters
disabled: APINullable[:class:`bool`]
Whether the component is disabled,
defaults to `False`
style: APINullable[:class:`~pincer.objects.message.button.ButtonStyle`]
One of button styles
label: APINullable[:class:`str`]
Text that appears on the button, max 80 characters
emoji: APINullable[:class:`~pincer.objects.message.emoji.Emoji`]
``name``, ``id``, and ``animated``
url: APINullable[:class:`str`]
A url for link-style buttons
placeholder: APINullable[:class:`str`]
Custom placeholder text if nothing is selected,
max 100 characters
min_values: APINullable[:class:`int`]
The minimum number of items that must be chosen;
|default| ``1``, min ``0``, max ``25``
max_values: APINullable[:class:`int`]
The maximum number of items that can be chosen;
|default| ``1``, max ``25``
components: APINullable[List[:class:`~pincer.objects.message.component.MessageComponent`]]
A list of child components
"""
# noqa: E501
type: int
options: List[SelectOption] = MISSING
custom_id: APINullable[str] = MISSING
disabled: APINullable[bool] = False
style: APINullable[ButtonStyle] = MISSING
label: APINullable[str] = MISSING
emoji: APINullable[Emoji] = MISSING
url: APINullable[str] = MISSING
placeholder: APINullable[str] = MISSING
min_values: APINullable[int] = 1
max_values: APINullable[int] = 1
components: APINullable[List[MessageComponent]] = MISSING
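# Editorial example (not part of the original module), kept as a comment because ButtonStyle
# is only imported for type checking here. The numeric component type (2 = button) follows
# the Discord API; the exact ButtonStyle member name is an assumption about pincer's enum.
# docs_button = MessageComponent(
#     type=2,
#     style=ButtonStyle.LINK,
#     label="Documentation",
#     url="https://example.com/docs",
# )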
| 0
| 0
| 0
|
d1746864f4611777bffb76d6b2649465292c511c
| 1,771
|
py
|
Python
|
resources/unite_tax_to_newick.py
|
colinbrislawn/hundo
|
38f5da8e63fdbd314e99f4eff3668c8adb5a0a5f
|
[
"MIT"
] | null | null | null |
resources/unite_tax_to_newick.py
|
colinbrislawn/hundo
|
38f5da8e63fdbd314e99f4eff3668c8adb5a0a5f
|
[
"MIT"
] | null | null | null |
resources/unite_tax_to_newick.py
|
colinbrislawn/hundo
|
38f5da8e63fdbd314e99f4eff3668c8adb5a0a5f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
"""
"""
import click
import os
from collections import defaultdict
from hundo.fasta import read_fasta, format_fasta_record
@click.group()
@click.pass_context
def cli(obj):
"""
"""
@cli.command("tax-to-newick")
@click.argument("tax", type=click.File("r"))
@click.argument("fasta", type=click.File("r"))
@click.argument("outfasta", type=click.File("w"))
@click.argument("outmap", type=click.File("w"))
@click.argument("outtre", type=click.File("w"))
def tax_to_newick(tax, fasta, outfasta, outmap, outtre):
"""
Tax and FASTA input files represent clusters at 99%% identity via:
https://unite.ut.ee/sh_files/sh_mothur_release_10.10.2017.zip
"""
t = tree()
saved = set()
for line in tax:
toks = line.strip().split("\t")
taxonomies = toks[1].strip(";").split(";")
if not taxonomies[0] == "k__Fungi": continue
assert(len(taxonomies) == 7)
tree_add(t, taxonomies)
print(toks[0], taxonomies[6], sep="\t", file=outmap)
saved.add(toks[0])
tree_str = tree_to_newick(t)
print(tree_str, file=outtre)
for name, seq in read_fasta(fasta):
if name in saved:
print(format_fasta_record(name, seq), file=outfasta)
if __name__ == '__main__':
cli()
| 25.666667
| 70
| 0.578769
|
#!/usr/bin/env python
# coding=utf-8
"""
"""
import click
import os
from collections import defaultdict
from hundo.fasta import read_fasta, format_fasta_record
@click.group()
@click.pass_context
def cli(obj):
"""
"""
@cli.command("tax-to-newick")
@click.argument("tax", type=click.File("r"))
@click.argument("fasta", type=click.File("r"))
@click.argument("outfasta", type=click.File("w"))
@click.argument("outmap", type=click.File("w"))
@click.argument("outtre", type=click.File("w"))
def tax_to_newick(tax, fasta, outfasta, outmap, outtre):
"""
Tax and FASTA input files represent clusters at 99%% identity via:
https://unite.ut.ee/sh_files/sh_mothur_release_10.10.2017.zip
"""
def tree():
return defaultdict(tree)
def tree_add(t, path):
for node in path:
t = t[node]
def tree_to_newick(root):
items = []
for k in root.keys():
s = ''
if len(root[k].keys()) > 0:
sub_tree = tree_to_newick(root[k])
if sub_tree != '':
s += '(' + sub_tree + ')'
s += k
items.append(s)
return ','.join(items)
t = tree()
saved = set()
for line in tax:
toks = line.strip().split("\t")
taxonomies = toks[1].strip(";").split(";")
if not taxonomies[0] == "k__Fungi": continue
assert(len(taxonomies) == 7)
tree_add(t, taxonomies)
print(toks[0], taxonomies[6], sep="\t", file=outmap)
saved.add(toks[0])
tree_str = tree_to_newick(t)
print(tree_str, file=outtre)
for name, seq in read_fasta(fasta):
if name in saved:
print(format_fasta_record(name, seq), file=outfasta)
if __name__ == '__main__':
cli()
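# Editorial worked example (not part of the original script) of what the nested helpers in
# tax_to_newick produce: rendering {"k__Fungi": {"p__Ascomycota": {}, "p__Basidiomycota": {}}}
# with tree_to_newick yields "(p__Ascomycota,p__Basidiomycota)k__Fungi" -- each parent label
# is suffixed to the parenthesised, comma-separated list of its children.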
| 390
| 0
| 80
|
40c987a08a1ef8707bf650d224f8475fede83800
| 1,026
|
py
|
Python
|
venv/lib/python3.6/site-packages/marshmallow/base.py
|
aitoehigie/britecore_flask
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/marshmallow/base.py
|
aitoehigie/britecore_flask
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
[
"MIT"
] | 1
|
2021-06-01T23:32:38.000Z
|
2021-06-01T23:32:38.000Z
|
venv/lib/python3.6/site-packages/marshmallow/base.py
|
aitoehigie/britecore_flask
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Abstract base classes.
These are necessary to avoid circular imports between core.py and fields.py.
"""
import copy
class FieldABC(object):
"""Abstract base class from which all Field classes inherit.
"""
parent = None
name = None
class SchemaABC(object):
"""Abstract base class from which all Schemas inherit."""
| 21.829787
| 76
| 0.658869
|
# -*- coding: utf-8 -*-
"""Abstract base classes.
These are necessary to avoid circular imports between core.py and fields.py.
"""
import copy
class FieldABC(object):
"""Abstract base class from which all Field classes inherit.
"""
parent = None
name = None
def serialize(self, attr, obj, accessor=None):
raise NotImplementedError
def deserialize(self, value):
raise NotImplementedError
def _serialize(self, value, attr, obj):
raise NotImplementedError
def _deserialize(self, value, attr, ob):
raise NotImplementedError
def __deepcopy__(self, memo):
ret = copy.copy(self)
return ret
class SchemaABC(object):
"""Abstract base class from which all Schemas inherit."""
def dump(self, obj):
raise NotImplementedError
def dumps(self, obj, *args, **kwargs):
raise NotImplementedError
def load(self, data):
raise NotImplementedError
def loads(self, data):
raise NotImplementedError
| 416
| 0
| 243
|
e8d6ac802de2774ffc5d0f5f58cb2e9373e7792f
| 2,501
|
py
|
Python
|
CRNNHandle.py
|
zuoyuwei/crnn_torch_trt
|
ba1f7e8d113a25325389ba2435cef9a548e55210
|
[
"MIT"
] | null | null | null |
CRNNHandle.py
|
zuoyuwei/crnn_torch_trt
|
ba1f7e8d113a25325389ba2435cef9a548e55210
|
[
"MIT"
] | null | null | null |
CRNNHandle.py
|
zuoyuwei/crnn_torch_trt
|
ba1f7e8d113a25325389ba2435cef9a548e55210
|
[
"MIT"
] | 1
|
2020-07-29T05:20:04.000Z
|
2020-07-29T05:20:04.000Z
|
import torch
from torchvision import transforms
import os
import cv2
import time
import numpy as np
# alphabetfrom .keys import alphabet
import params
from torch.autograd import Variable
from PIL import Image
from utils import strLabelConverter,resizeNormalize
converter = strLabelConverter(params.alphabet)
# converter = strLabelConverter(''.join(alphabet))
| 27.788889
| 88
| 0.572171
|
import torch
from torchvision import transforms
import os
import cv2
import time
import numpy as np
# alphabetfrom .keys import alphabet
import params
from torch.autograd import Variable
from PIL import Image
from utils import strLabelConverter,resizeNormalize
converter = strLabelConverter(params.alphabet)
# converter = strLabelConverter(''.join(alphabet))
class CRNNHandle():
def __init__(self,model_path , net , gpu_id=None ):
'''
        Initialise the PyTorch model.
        :param model_path: path to the model file (either a saved state_dict, or the parameters saved together with the computation graph)
        :param net: the network (computation graph); required when model_path points to a parameters-only checkpoint
        :param gpu_id: index of the GPU to run on (CPU is used when None or CUDA is unavailable)
'''
if gpu_id is not None and isinstance(gpu_id, int) and torch.cuda.is_available():
self.device = torch.device("cuda:{}".format(gpu_id))
else:
self.device = torch.device("cpu")
self.net = torch.load(model_path, map_location=self.device)
print('device:', self.device)
if net is not None:
            # if the network definition and its parameters were saved separately, load the parameters here
net = net.to(self.device)
try:
sk = {}
for k in self.net:
sk[k.replace("module.","")] = self.net[k]
# sk[k[7:]] = self.net[k]
net.load_state_dict(sk)
except Exception as e:
print(e)
net.load_state_dict(self.net)
self.net = net
print('load model')
self.net.eval()
def predict(self, im):
"""
        Run text recognition on a single PIL image.
"""
image = im.convert('L')
scale = image.size[1] * 1.0 / 32
w = image.size[0] / scale
w = int(w)
img = image.resize([1000, 32], Image.BILINEAR)
tft = transforms.ToTensor()
image = tft(img)
image.sub_(0.5).div_(0.5)
# transformer = resizeNormalize((1024, 32))
# image = transformer(image)
# image = image.to(self.device)
image = image.view(1, *image.size())
image = Variable(image)
preds = self.net(image)
rr = preds.cpu().detach().numpy()
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
preds_size = Variable(torch.LongTensor([preds.size(0)]))
# preds_size = Variable(torch.IntTensor([preds.size(0)]))
sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
return sim_pred
def train(self, images, labels):
return
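# Editorial usage sketch (not part of the original file), kept as comments because the model
# and image paths below are placeholders. `net` may be None when the checkpoint stores the
# whole serialised model rather than only a state_dict.
# from PIL import Image
# handle = CRNNHandle("weights/crnn.pth", net=None, gpu_id=None)  # CPU inference
# print(handle.predict(Image.open("samples/line.png")))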
| 27
| 2,284
| 23
|
077dc1cf93a34eabf2a6423dfc342a97bc442c61
| 6,331
|
py
|
Python
|
tests/integration/worker/fixture.py
|
geometry-labs/icon-governance
|
2f084de358525808c6f05ab99a686463d2273c8b
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/worker/fixture.py
|
geometry-labs/icon-governance
|
2f084de358525808c6f05ab99a686463d2273c8b
|
[
"Apache-2.0"
] | 23
|
2021-10-04T19:13:57.000Z
|
2022-02-16T23:50:15.000Z
|
tests/integration/worker/fixture.py
|
geometry-labs/icon-governance
|
2f084de358525808c6f05ab99a686463d2273c8b
|
[
"Apache-2.0"
] | null | null | null |
peers = {
("54.95.16.98", "hxdabfd26d6c01038acae8081580fcce86c802fd01"),
("54.184.241.211", "hx016401bba6a5474e08c925b6390e1ef1d8e0adc9"),
("35.170.9.187", "hx9fa9d224306b0722099d30471b3c2306421aead7"),
("65.108.47.101", "hx54d6f19c3d16b2ef23c09c885ca1ba776aaa80e2"),
("13.91.36.115", "hxc5e0b88cb9092bbd8b004a517996139334752f62"),
("3.139.211.90", "hx9780bfcd8d33c50f56e37f5b27313433c28eb8d8"),
("18.142.22.234", "hx4e39d214d1e682f2acba7a08c4f7591fb5baaade"),
("44.231.211.22", "hx5f5c750fef3fb5cbce15951bf2266f1e412a7797"),
("174.138.3.225", "hx6f89b2c25c15f6294c79810221753131067ed3f8"),
("3.37.81.62", "hxaf33a0c15dbf52b76590422cbb7e2d835034cdf6"),
("44.198.151.153", "hxc72bb8230e602072dc8e1b3abf9a08fd1db7464b"),
("44.235.153.121", "hxfdcd12d6cab5d684859076ded73c726f29b1ea7b"),
("144.91.102.161", "hxd2de5d155251ff62bf2f6c2aa616da6472886165"),
("35.76.191.119", "hxd0f86e4f465dadbb722b8946fcc3ce192f9298d2"),
("35.226.20.97", "hxf08bd5835fdb53dc7c764a5f4dd4e2e6359324e8"),
("3.224.23.76", "hx711c1fcab52461209a2961771fa46476c551fd84"),
("35.74.215.99", "hxfa4996155a6b3805ca2eb1a1abd8b9b488b2413d"),
("50.116.33.7", "hxff6437443e7ed76d2d7f97f0d28d7ae1071bd0bb"),
("3.35.62.17", "hx4e73ac54f410e485f203d6694178cad65df53afb"),
("52.42.174.154", "hxaea3a212ace02dfd68e5c2723d54147978940658"),
("18.162.72.204", "hxffcef8242394d121c3ab4cfb79798f54d295ed6c"),
("52.26.81.40", "hx95248f3757afcb50efa99f529183ba401a82273c"),
("13.37.8.70", "hx262afdeda4eba10fe41fa5ef21796ac2bdcc6629"),
("34.195.17.217", "hxda945d2d1dd8c8882181c1aea066e11137aa87c7"),
("51.38.62.198", "hxfac9169f5ee9d3c85c190cfa1208ca45540dcf38"),
("52.86.145.94", "hx5dc25b8845476fc50efc05f94937376e335d1840"),
("52.78.28.201", "hx76b08e824065d613f46e75445d8d0cfa5f325b33"),
("52.14.174.162", "hxa69031aef1bbb11ea0ee3647b278c158ecdab767"),
("84.201.175.168", "hx3aa778e1f00c77d3490e9e625f1f83ed26f90133"),
("52.196.159.184", "hx9c63f73d3c564a54d0eed84f90718b1ebed16f09"),
("54.185.246.89", "hxdc35f82a3a943e040ae2b9ab2baa2118781b2bc9"),
("44.228.193.64", "hxa0a2eed7b58a8659d9403152c23bd06d210becd8"),
("5.57.248.130", "hx69d49e8365659d7771f7e721d7f9b04367212557"),
("3.37.49.139", "hx280d1e165371820efc9ad431c8153f10e65febd5"),
("3.66.254.197", "hxea200f0408a2283bcd5115abd453ac55a5d15896"),
("95.217.62.85", "hxa30a0e0f59956381a02f8010eab27491026c5c33"),
("52.11.52.137", "hxbf6b8faf021972ae542b8552aa082b794d391fc0"),
("52.207.115.126", "hxf0bc67e700af57efc0a9140948c5678c50669f36"),
("35.157.211.62", "hx8f93620ffddca61bd2c6174dc8d1f55c08b1f7b3"),
("44.239.209.49", "hx9799064f00bd7f95299e3f3a291f2ebcd0d16de8"),
("44.231.14.116", "hx18268e3ab81b3b1e8bf935616aa1f2adc26887e1"),
("175.45.149.84", "hx2f3fb9a9ff98df2145936d2bfcaa3837a289496b"),
("18.162.80.224", "hxfe9762e5a426c7a5ab6775722aa137e5dbcfe8a1"),
("65.21.79.94", "hx3e9deb93255877805d8d81681b41bd2c14d65d0b"),
("3.211.83.18", "hx5b51d3e174559142875a5a2dc74696d8108190a2"),
("52.197.93.207", "hx406c9d6fa4b8a51dba98797610fe5935f613fb07"),
("138.68.63.35", "hxc97bc2b6863b5f0094de7f0e5bcf82a404c4199b"),
("3.213.22.31", "hxbc9c73670c79e8f6f8060551a792c2cf29a8c491"),
("13.213.114.126", "hxf1f6b652925692e3ff2bc394dc593aeaf19b1828"),
("44.193.171.25", "hx0315df35eef97e1e73f1a72b1e78ff7f28ba3176"),
("122.62.140.208", "hxd622e807af0e4c728be88913763ae7907e46cf34"),
("13.115.205.52", "hxfae3b72029950802a4d5721d998d6231f6594ec7"),
("44.230.104.155", "hxede3590816c42131570cf4caa5ee3bce250511fe"),
("54.179.251.173", "hx2b63a793c5b592e66ebb999fbe1fbe9f1715253d"),
("95.216.246.114", "hx1cb5883939f2fd478e92da1260438aa1f03440ca"),
("54.184.11.33", "hxdeee3ad7f5d20c58b22eb340835dbcee092dc943"),
("88.99.95.232", "hx21a916b7a01b900861ca3f3b7ac76ba316169f8c"),
("18.130.92.113", "hx2bbb1b0300f5dc0caa0e1a3ba1701a22e7590763"),
("44.237.21.74", "hxed7175f73f63ce8dfeede1db8c4b66179eb7a857"),
("13.124.96.54", "hx004783f10b0bd56764535cf931650dfe6d42cd9e"),
("23.88.64.232", "hx5b97bbec2e555289351806103833a465b7fbbd47"),
("18.168.74.158", "hx5dff0f5953e8cb0d50aaabff041a524e27718bd2"),
("54.151.184.244", "hx3d5c3ce7554f4d762f6396e53b2c5de07074ec39"),
("162.55.0.172", "hx5cb1e6d96eb1db986cee37a0a23a796de802a4bc"),
("3.86.106.108", "hx28c08b299995a88756af64374e13db2240bc3142"),
("3.37.14.12", "hx05583ffcc9a4be7ee1f72b28309d9849909f1d83"),
("54.199.171.19", "hx099b1e6b271bc6cd67d930a1c62ee5f3ed0e2c7d"),
("52.43.50.168", "hx46b5ae2cff977dba31bfd9e2de5b7b6138ee603f"),
("18.177.69.75", "hxc4abe579eb41603338b282ecaccfdd9d0b8d4501"),
("44.226.200.169", "hxa0a99a5e7036b7ed439fbbdb35f8d66f2177a2ae"),
("54.69.244.180", "hxb729903aeb2712cb0bbb3adcb10594fdf999eb11"),
("3.222.186.210", "hx80c0b58520091237df582f7adbca00425d6acf28"),
("35.75.153.60", "hx24de62ec990fc542ea36e3d4251e25ba97081b57"),
("54.68.57.186", "hx02abcab8308e2bee00b25131830fcb9f7924a71c"),
("18.118.232.28", "hx81719dcfe8f58ca07044b7bede49cecd61f9bd3f"),
("88.99.242.253", "hx35a0a985a9abd8cbbc71bec1aba74ca204369c8a"),
("34.133.160.215", "hxda4e0b39fb6df66af8a4e39942c49662a890f320"),
("135.181.27.43", "hx55f2cc3244350085734f4e405f761ecf3d2095b3"),
("54.248.178.76", "hx716c8792459d0070e6893ec57ed9f1a1bfb7ba26"),
("44.195.4.233", "hxe92a411b1b24975c4b04ea761d9a046bdc721810"),
("18.176.163.230", "hx398797e9867f859c017224627f0baf3a57268bc8"),
("44.237.243.143", "hx25b967b38d1bd23e43977eeac7363c9d6c24b873"),
("54.221.253.207", "hxd9b992f15ac1e757f4bbfc4eb9b072644eb2c269"),
("203.249.225.11", "hx157dabb9e156e5ea53af3ca33a5931257b9c9699"),
("210.104.190.246", "hxf9a10ec8d5810c49de6c1dff5f940fc5bfcd2006"),
("185.214.135.246", "hx9571df0bd727ce18adc177e8fd6bc45f885b3382"),
("44.234.218.52", "hxb03cca443a0ce9e21d343973626b2b3377a5cc36"),
("144.76.203.165", "hx554ea6c0c56e6f30682dcaa79a326da3a364899f"),
("185.172.110.75", "hxe4c008b838e8a9a7c065f20f393974de4c86f917"),
("3.94.191.92", "hx9960c2b06fbe238ffcc239fe16b3ef052d5712ce"),
("44.238.168.211", "hx49ce06eab947cb5ba2475781044d305af9d8d9d5"),
("3.143.205.135", "hxd5059fec8665bc7b769f4893aef65cf00049377a"),
}
| 66.642105
| 70
| 0.751856
|
peers = {
("54.95.16.98", "hxdabfd26d6c01038acae8081580fcce86c802fd01"),
("54.184.241.211", "hx016401bba6a5474e08c925b6390e1ef1d8e0adc9"),
("35.170.9.187", "hx9fa9d224306b0722099d30471b3c2306421aead7"),
("65.108.47.101", "hx54d6f19c3d16b2ef23c09c885ca1ba776aaa80e2"),
("13.91.36.115", "hxc5e0b88cb9092bbd8b004a517996139334752f62"),
("3.139.211.90", "hx9780bfcd8d33c50f56e37f5b27313433c28eb8d8"),
("18.142.22.234", "hx4e39d214d1e682f2acba7a08c4f7591fb5baaade"),
("44.231.211.22", "hx5f5c750fef3fb5cbce15951bf2266f1e412a7797"),
("174.138.3.225", "hx6f89b2c25c15f6294c79810221753131067ed3f8"),
("3.37.81.62", "hxaf33a0c15dbf52b76590422cbb7e2d835034cdf6"),
("44.198.151.153", "hxc72bb8230e602072dc8e1b3abf9a08fd1db7464b"),
("44.235.153.121", "hxfdcd12d6cab5d684859076ded73c726f29b1ea7b"),
("144.91.102.161", "hxd2de5d155251ff62bf2f6c2aa616da6472886165"),
("35.76.191.119", "hxd0f86e4f465dadbb722b8946fcc3ce192f9298d2"),
("35.226.20.97", "hxf08bd5835fdb53dc7c764a5f4dd4e2e6359324e8"),
("3.224.23.76", "hx711c1fcab52461209a2961771fa46476c551fd84"),
("35.74.215.99", "hxfa4996155a6b3805ca2eb1a1abd8b9b488b2413d"),
("50.116.33.7", "hxff6437443e7ed76d2d7f97f0d28d7ae1071bd0bb"),
("3.35.62.17", "hx4e73ac54f410e485f203d6694178cad65df53afb"),
("52.42.174.154", "hxaea3a212ace02dfd68e5c2723d54147978940658"),
("18.162.72.204", "hxffcef8242394d121c3ab4cfb79798f54d295ed6c"),
("52.26.81.40", "hx95248f3757afcb50efa99f529183ba401a82273c"),
("13.37.8.70", "hx262afdeda4eba10fe41fa5ef21796ac2bdcc6629"),
("34.195.17.217", "hxda945d2d1dd8c8882181c1aea066e11137aa87c7"),
("51.38.62.198", "hxfac9169f5ee9d3c85c190cfa1208ca45540dcf38"),
("52.86.145.94", "hx5dc25b8845476fc50efc05f94937376e335d1840"),
("52.78.28.201", "hx76b08e824065d613f46e75445d8d0cfa5f325b33"),
("52.14.174.162", "hxa69031aef1bbb11ea0ee3647b278c158ecdab767"),
("84.201.175.168", "hx3aa778e1f00c77d3490e9e625f1f83ed26f90133"),
("52.196.159.184", "hx9c63f73d3c564a54d0eed84f90718b1ebed16f09"),
("54.185.246.89", "hxdc35f82a3a943e040ae2b9ab2baa2118781b2bc9"),
("44.228.193.64", "hxa0a2eed7b58a8659d9403152c23bd06d210becd8"),
("5.57.248.130", "hx69d49e8365659d7771f7e721d7f9b04367212557"),
("3.37.49.139", "hx280d1e165371820efc9ad431c8153f10e65febd5"),
("3.66.254.197", "hxea200f0408a2283bcd5115abd453ac55a5d15896"),
("95.217.62.85", "hxa30a0e0f59956381a02f8010eab27491026c5c33"),
("52.11.52.137", "hxbf6b8faf021972ae542b8552aa082b794d391fc0"),
("52.207.115.126", "hxf0bc67e700af57efc0a9140948c5678c50669f36"),
("35.157.211.62", "hx8f93620ffddca61bd2c6174dc8d1f55c08b1f7b3"),
("44.239.209.49", "hx9799064f00bd7f95299e3f3a291f2ebcd0d16de8"),
("44.231.14.116", "hx18268e3ab81b3b1e8bf935616aa1f2adc26887e1"),
("175.45.149.84", "hx2f3fb9a9ff98df2145936d2bfcaa3837a289496b"),
("18.162.80.224", "hxfe9762e5a426c7a5ab6775722aa137e5dbcfe8a1"),
("65.21.79.94", "hx3e9deb93255877805d8d81681b41bd2c14d65d0b"),
("3.211.83.18", "hx5b51d3e174559142875a5a2dc74696d8108190a2"),
("52.197.93.207", "hx406c9d6fa4b8a51dba98797610fe5935f613fb07"),
("138.68.63.35", "hxc97bc2b6863b5f0094de7f0e5bcf82a404c4199b"),
("3.213.22.31", "hxbc9c73670c79e8f6f8060551a792c2cf29a8c491"),
("13.213.114.126", "hxf1f6b652925692e3ff2bc394dc593aeaf19b1828"),
("44.193.171.25", "hx0315df35eef97e1e73f1a72b1e78ff7f28ba3176"),
("122.62.140.208", "hxd622e807af0e4c728be88913763ae7907e46cf34"),
("13.115.205.52", "hxfae3b72029950802a4d5721d998d6231f6594ec7"),
("44.230.104.155", "hxede3590816c42131570cf4caa5ee3bce250511fe"),
("54.179.251.173", "hx2b63a793c5b592e66ebb999fbe1fbe9f1715253d"),
("95.216.246.114", "hx1cb5883939f2fd478e92da1260438aa1f03440ca"),
("54.184.11.33", "hxdeee3ad7f5d20c58b22eb340835dbcee092dc943"),
("88.99.95.232", "hx21a916b7a01b900861ca3f3b7ac76ba316169f8c"),
("18.130.92.113", "hx2bbb1b0300f5dc0caa0e1a3ba1701a22e7590763"),
("44.237.21.74", "hxed7175f73f63ce8dfeede1db8c4b66179eb7a857"),
("13.124.96.54", "hx004783f10b0bd56764535cf931650dfe6d42cd9e"),
("23.88.64.232", "hx5b97bbec2e555289351806103833a465b7fbbd47"),
("18.168.74.158", "hx5dff0f5953e8cb0d50aaabff041a524e27718bd2"),
("54.151.184.244", "hx3d5c3ce7554f4d762f6396e53b2c5de07074ec39"),
("162.55.0.172", "hx5cb1e6d96eb1db986cee37a0a23a796de802a4bc"),
("3.86.106.108", "hx28c08b299995a88756af64374e13db2240bc3142"),
("3.37.14.12", "hx05583ffcc9a4be7ee1f72b28309d9849909f1d83"),
("54.199.171.19", "hx099b1e6b271bc6cd67d930a1c62ee5f3ed0e2c7d"),
("52.43.50.168", "hx46b5ae2cff977dba31bfd9e2de5b7b6138ee603f"),
("18.177.69.75", "hxc4abe579eb41603338b282ecaccfdd9d0b8d4501"),
("44.226.200.169", "hxa0a99a5e7036b7ed439fbbdb35f8d66f2177a2ae"),
("54.69.244.180", "hxb729903aeb2712cb0bbb3adcb10594fdf999eb11"),
("3.222.186.210", "hx80c0b58520091237df582f7adbca00425d6acf28"),
("35.75.153.60", "hx24de62ec990fc542ea36e3d4251e25ba97081b57"),
("54.68.57.186", "hx02abcab8308e2bee00b25131830fcb9f7924a71c"),
("18.118.232.28", "hx81719dcfe8f58ca07044b7bede49cecd61f9bd3f"),
("88.99.242.253", "hx35a0a985a9abd8cbbc71bec1aba74ca204369c8a"),
("34.133.160.215", "hxda4e0b39fb6df66af8a4e39942c49662a890f320"),
("135.181.27.43", "hx55f2cc3244350085734f4e405f761ecf3d2095b3"),
("54.248.178.76", "hx716c8792459d0070e6893ec57ed9f1a1bfb7ba26"),
("44.195.4.233", "hxe92a411b1b24975c4b04ea761d9a046bdc721810"),
("18.176.163.230", "hx398797e9867f859c017224627f0baf3a57268bc8"),
("44.237.243.143", "hx25b967b38d1bd23e43977eeac7363c9d6c24b873"),
("54.221.253.207", "hxd9b992f15ac1e757f4bbfc4eb9b072644eb2c269"),
("203.249.225.11", "hx157dabb9e156e5ea53af3ca33a5931257b9c9699"),
("210.104.190.246", "hxf9a10ec8d5810c49de6c1dff5f940fc5bfcd2006"),
("185.214.135.246", "hx9571df0bd727ce18adc177e8fd6bc45f885b3382"),
("44.234.218.52", "hxb03cca443a0ce9e21d343973626b2b3377a5cc36"),
("144.76.203.165", "hx554ea6c0c56e6f30682dcaa79a326da3a364899f"),
("185.172.110.75", "hxe4c008b838e8a9a7c065f20f393974de4c86f917"),
("3.94.191.92", "hx9960c2b06fbe238ffcc239fe16b3ef052d5712ce"),
("44.238.168.211", "hx49ce06eab947cb5ba2475781044d305af9d8d9d5"),
("3.143.205.135", "hxd5059fec8665bc7b769f4893aef65cf00049377a"),
}
| 0
| 0
| 0
|
d4db0e97f8dd644aa8041e83eae4beaa62181324
| 1,952
|
py
|
Python
|
Scripts_Python/Neuronal_20200417/CGAN/Discriminator.py
|
IshanBaliyan/DEEP-TFM_with_cGAN
|
8d711c025367031197e5b8c7c768fc9fbea406ce
|
[
"MIT"
] | 1
|
2021-08-17T14:47:37.000Z
|
2021-08-17T14:47:37.000Z
|
Scripts_Python/Neuronal_20200417/CGAN/Discriminator.py
|
IshanBaliyan/DEEP-TFM_with_cGAN
|
8d711c025367031197e5b8c7c768fc9fbea406ce
|
[
"MIT"
] | null | null | null |
Scripts_Python/Neuronal_20200417/CGAN/Discriminator.py
|
IshanBaliyan/DEEP-TFM_with_cGAN
|
8d711c025367031197e5b8c7c768fc9fbea406ce
|
[
"MIT"
] | 1
|
2020-12-11T23:53:43.000Z
|
2020-12-11T23:53:43.000Z
|
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
| 33.655172
| 59
| 0.552254
|
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, hidden_dim=1024):
super(Discriminator, self).__init__()
self.hidden_dim = hidden_dim
self.conv1 = nn.Conv2d(33, 16, 4, 2, 1)
self.conv2 = nn.Conv2d(16, 32, 4, 2, 1)
self.conv3 = nn.Conv2d(32, 64, 4, 2, 1)
self.conv4 = nn.Conv2d(64, 128, 4, 2, 1)
self.conv5 = nn.Conv2d(128, 256, 4, 2, 1)
self.conv6 = nn.Conv2d(256, 512, 4, 2, 1)
self.re = nn.LeakyReLU(0.2, True)
self.bn1 = nn.BatchNorm2d(16)
self.bn2 = nn.BatchNorm2d(32)
self.bn3 = nn.BatchNorm2d(64)
self.bn4 = nn.BatchNorm2d(128)
self.bn5 = nn.BatchNorm2d(256)
self.bn7 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm2d(1)
self.dconv = nn.ConvTranspose2d(512, 256, 4, 2, 1)
self.dconv1 = nn.ConvTranspose2d(256, 128, 4, 2, 1)
self.dconv2 = nn.ConvTranspose2d(128, 64, 4, 2, 1)
self.dconv3 = nn.ConvTranspose2d(64, 32, 4, 2, 1)
self.dconv4 = nn.ConvTranspose2d(32, 16, 4, 2, 1)
self.dconv5 = nn.ConvTranspose2d(16, 1, 4, 2, 1)
def forward(self, x):
e1 = self.re(self.bn1(self.conv1(x)))
e2 = self.re(self.bn2(self.conv2(e1)))
e3 = self.re(self.bn3(self.conv3(e2)))
e4 = self.re(self.bn4(self.conv4(e3)))
e5 = self.re(self.bn5(self.conv5(e4)))
e6 = self.re(self.bn7(self.conv6(e5)))
d6 = self.dconv(e6)
d6 = self.re(self.bn5(d6))
d5 = self.dconv1(d6)
d5 = self.re(self.bn4(d5))
d4 = self.dconv2(d5)
d4 = self.re(self.bn3(d4))
d3 = self.dconv3(d4)
d3 = self.re(self.bn2(d3))
d2 = self.dconv4(d3)
d2 = self.re(self.bn1(d2))
d1 = self.dconv5(d2)
out = self.re(self.bn6(d1))
return out
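# Editorial smoke test (not part of the original file): the encoder halves the spatial size six
# times and the decoder mirrors it back, so inputs need 33 channels and height/width divisible
# by 64; a 128x128 input comes back as a single-channel 128x128 map.
if __name__ == "__main__":
    d = Discriminator()
    d.eval()                       # use running batch-norm statistics for the demo
    with torch.no_grad():
        out = d(torch.randn(2, 33, 128, 128))
    print(out.shape)               # torch.Size([2, 1, 128, 128])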
| 1,769
| 10
| 84
|
68ae7d329b80fdf5298ee6cf2403df2d82abe871
| 12,483
|
py
|
Python
|
maci/distributions/real_nvp_bijector.py
|
bbrito/mapr2
|
5aa1a4c85c28918d9f16e5544793bf5574d7c49e
|
[
"Apache-2.0"
] | 35
|
2019-01-13T17:55:03.000Z
|
2022-02-23T17:06:53.000Z
|
maci/distributions/real_nvp_bijector.py
|
arita37/mapr2
|
57f76875a4a6aed1850d3fb8604683bfe8a0e09b
|
[
"Apache-2.0"
] | 18
|
2019-03-10T23:12:00.000Z
|
2022-03-21T22:17:09.000Z
|
maci/distributions/real_nvp_bijector.py
|
arita37/mapr2
|
57f76875a4a6aed1850d3fb8604683bfe8a0e09b
|
[
"Apache-2.0"
] | 19
|
2019-01-13T20:47:00.000Z
|
2021-11-09T05:59:13.000Z
|
"""RealNVP bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
ConditionalBijector = tf.contrib.distributions.bijectors.ConditionalBijector
__all__ = [
"RealNVPBijector",
]
def checkerboard(shape, parity='even', dtype=tf.bool):
"""TODO: Implement for dimensions >1"""
if len(shape) > 1:
raise NotImplementedError(
"checkerboard not yet implemented for dimensions >1")
unit = (tf.constant((True, False))
if parity == 'even' else tf.constant((False, True)))
num_elements = np.prod(shape)
tiled = tf.tile(unit, ((num_elements // 2) + 1, ))[:num_elements]
return tf.cast(tf.reshape(tiled, shape), dtype)
class CouplingBijector(ConditionalBijector):
"""TODO"""
def __init__(self,
parity,
translation_fn,
scale_fn,
event_ndims=0,
validate_args=False,
name="coupling_bijector"):
"""Instantiates the `CouplingBijector` bijector.
Args:
TODO
event_ndims: Python scalar indicating the number of dimensions
associated with a particular draw from the distribution.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if TODO happens
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
self.parity = parity
self.translation_fn = translation_fn
self.scale_fn = scale_fn
super().__init__(event_ndims=event_ndims,
validate_args=validate_args,
name=name)
# TODO: Properties
def _maybe_assert_valid_x(self, x):
"""TODO"""
if not self.validate_args:
return x
raise NotImplementedError("_maybe_assert_valid_x")
def _maybe_assert_valid_y(self, y):
"""TODO"""
if not self.validate_args:
return y
raise NotImplementedError("_maybe_assert_valid_y")
class RealNVPBijector(ConditionalBijector):
"""TODO"""
def __init__(self,
num_coupling_layers=2,
translation_hidden_sizes=(25,),
scale_hidden_sizes=(25,),
event_ndims=0,
validate_args=False,
name="real_nvp"):
"""Instantiates the `RealNVPBijector` bijector.
Args:
TODO
event_ndims: Python scalar indicating the number of dimensions
associated with a particular draw from the distribution.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if TODO happens
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
self._num_coupling_layers = num_coupling_layers
self._translation_hidden_sizes = tuple(translation_hidden_sizes)
self._scale_hidden_sizes = tuple(scale_hidden_sizes)
self.build()
super().__init__(event_ndims=event_ndims,
validate_args=validate_args,
name=name)
# TODO: Properties
def _maybe_assert_valid_x(self, x):
"""TODO"""
if not self.validate_args:
return x
raise NotImplementedError("_maybe_assert_valid_x")
def _maybe_assert_valid_y(self, y):
"""TODO"""
if not self.validate_args:
return y
raise NotImplementedError("_maybe_assert_valid_y")
| 32.936675
| 79
| 0.564448
|
"""RealNVP bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
ConditionalBijector = tf.contrib.distributions.bijectors.ConditionalBijector
__all__ = [
"RealNVPBijector",
]
def checkerboard(shape, parity='even', dtype=tf.bool):
"""TODO: Implement for dimensions >1"""
if len(shape) > 1:
raise NotImplementedError(
"checkerboard not yet implemented for dimensions >1")
unit = (tf.constant((True, False))
if parity == 'even' else tf.constant((False, True)))
num_elements = np.prod(shape)
tiled = tf.tile(unit, ((num_elements // 2) + 1, ))[:num_elements]
return tf.cast(tf.reshape(tiled, shape), dtype)
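# Editorial example (not in the original source): checkerboard((4,), parity='even') evaluates
# to the boolean mask [True, False, True, False]; parity='odd' starts with False instead.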
def feedforward_net(inputs,
layer_sizes,
activation_fn=tf.nn.tanh,
output_nonlinearity=None,
regularizer=None):
prev_size = inputs.get_shape().as_list()[-1]
out = inputs
for i, layer_size in enumerate(layer_sizes):
weight_initializer = tf.contrib.layers.xavier_initializer()
weight = tf.get_variable(
name="weight_{i}".format(i=i),
shape=(prev_size, layer_size),
initializer=weight_initializer,
regularizer=regularizer)
bias_initializer = tf.initializers.random_normal()
bias = tf.get_variable(
name="bias_{i}".format(i=i),
shape=(layer_size, ),
initializer=bias_initializer)
prev_size = layer_size
z = tf.matmul(out, weight) + bias
if i < len(layer_sizes) - 1 and activation_fn is not None:
out = activation_fn(z)
elif i == len(layer_sizes) - 1 and output_nonlinearity is not None:
out = output_nonlinearity(z)
else:
out = z
return out
class CouplingBijector(ConditionalBijector):
"""TODO"""
def __init__(self,
parity,
translation_fn,
scale_fn,
event_ndims=0,
validate_args=False,
name="coupling_bijector"):
"""Instantiates the `CouplingBijector` bijector.
Args:
TODO
event_ndims: Python scalar indicating the number of dimensions
associated with a particular draw from the distribution.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if TODO happens
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
self.parity = parity
self.translation_fn = translation_fn
self.scale_fn = scale_fn
super().__init__(event_ndims=event_ndims,
validate_args=validate_args,
name=name)
# TODO: Properties
def _forward(self, x, **condition_kwargs):
self._maybe_assert_valid_x(x)
D = x.shape[1]
if self.parity == 'even':
masked_x = x[:, :D//2]
non_masked_x = x[:, D//2:]
else:
non_masked_x = x[:, :D//2]
masked_x = x[:, D//2:]
with tf.variable_scope("{name}/scale".format(name=self.name),
reuse=tf.AUTO_REUSE):
# s(x_{1:d}) in paper
scale = self.scale_fn(masked_x,
condition_kwargs['condition'],
non_masked_x.shape[-1])
with tf.variable_scope("{name}/translation".format(name=self.name),
reuse=tf.AUTO_REUSE):
# t(x_{1:d}) in paper
translation = self.translation_fn(masked_x,
condition_kwargs['condition'],
non_masked_x.shape[-1])
# exp(s(b*x)) in paper
exp_scale = tf.check_numerics(
tf.exp(scale), "tf.exp(scale) contains NaNs or infs")
# y_{d+1:D} = x_{d+1:D} * exp(s(x_{1:d})) + t(x_{1:d})
part_1 = masked_x
part_2 = non_masked_x * exp_scale + translation
to_concat = (
(part_1, part_2)
if self.parity == 'even'
else (part_2, part_1)
)
outputs = tf.concat(to_concat, axis=1)
return outputs
def _forward_log_det_jacobian(self, x, **condition_kwargs):
self._maybe_assert_valid_x(x)
D = x.shape[1]
masked_slice = (
slice(None, D//2)
if self.parity == 'even'
else slice(D//2, None))
masked_x = x[:, masked_slice]
nonlinearity_output_size = D - masked_x.shape[1]
# TODO: scale and translation could be merged into a single network
with tf.variable_scope("{name}/scale".format(name=self.name),
reuse=tf.AUTO_REUSE):
scale = self.scale_fn(
masked_x,
**condition_kwargs,
output_size=nonlinearity_output_size)
log_det_jacobian = tf.reduce_sum(
scale, axis=tuple(range(1, len(x.shape))))
return log_det_jacobian
def _inverse(self, y, **condition_kwargs):
self._maybe_assert_valid_y(y)
condition = condition_kwargs["condition"]
D = y.shape[1]
if self.parity == 'even':
masked_y = y[:, :D//2]
non_masked_y = y[:, D//2:]
else:
non_masked_y = y[:, :D//2]
masked_y = y[:, D//2:]
with tf.variable_scope("{name}/scale".format(name=self.name),
reuse=tf.AUTO_REUSE):
# s(y_{1:d}) in paper
scale = self.scale_fn(masked_y,
condition,
non_masked_y.shape[-1])
with tf.variable_scope("{name}/translation".format(name=self.name),
reuse=tf.AUTO_REUSE):
# t(y_{1:d}) in paper
translation = self.translation_fn(masked_y,
condition,
non_masked_y.shape[-1])
exp_scale = tf.exp(-scale)
# y_{d+1:D} = (y_{d+1:D} - t(y_{1:d})) * exp(-s(y_{1:d}))
part_1 = masked_y
part_2 = (non_masked_y - translation) * exp_scale
to_concat = (
(part_1, part_2)
if self.parity == 'even'
else (part_2, part_1)
)
outputs = tf.concat(to_concat, axis=1)
return outputs
def _inverse_log_det_jacobian(self, y, **condition_kwargs):
self._maybe_assert_valid_y(y)
condition = condition_kwargs["condition"]
D = y.shape[1]
masked_slice = (
slice(None, D//2)
if self.parity == 'even'
else slice(D//2, None))
masked_y = y[:, masked_slice]
nonlinearity_output_size = D - masked_y.shape[1]
# TODO: scale and translation could be merged into a single network
with tf.variable_scope("{name}/scale".format(name=self.name),
reuse=tf.AUTO_REUSE):
scale = self.scale_fn(masked_y,
condition,
nonlinearity_output_size)
log_det_jacobian = -tf.reduce_sum(
scale, axis=tuple(range(1, len(y.shape))))
return log_det_jacobian
def _maybe_assert_valid_x(self, x):
"""TODO"""
if not self.validate_args:
return x
raise NotImplementedError("_maybe_assert_valid_x")
def _maybe_assert_valid_y(self, y):
"""TODO"""
if not self.validate_args:
return y
raise NotImplementedError("_maybe_assert_valid_y")
class RealNVPBijector(ConditionalBijector):
"""TODO"""
def __init__(self,
num_coupling_layers=2,
translation_hidden_sizes=(25,),
scale_hidden_sizes=(25,),
event_ndims=0,
validate_args=False,
name="real_nvp"):
"""Instantiates the `RealNVPBijector` bijector.
Args:
TODO
event_ndims: Python scalar indicating the number of dimensions
associated with a particular draw from the distribution.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if TODO happens
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
self._num_coupling_layers = num_coupling_layers
self._translation_hidden_sizes = tuple(translation_hidden_sizes)
self._scale_hidden_sizes = tuple(scale_hidden_sizes)
self.build()
super().__init__(event_ndims=event_ndims,
validate_args=validate_args,
name=name)
# TODO: Properties
def build(self):
num_coupling_layers = self._num_coupling_layers
translation_hidden_sizes = self._translation_hidden_sizes
scale_hidden_sizes = self._scale_hidden_sizes
def translation_wrapper(inputs, condition, output_size):
return feedforward_net(
tf.concat((inputs, condition), axis=1),
# TODO: should allow multi_dimensional inputs/outputs
layer_sizes=(*translation_hidden_sizes, output_size))
def scale_wrapper(inputs, condition, output_size):
return feedforward_net(
tf.concat((inputs, condition), axis=1),
# TODO: should allow multi_dimensional inputs/outputs
layer_sizes=(*scale_hidden_sizes, output_size))
self.layers = [
CouplingBijector(
parity=('even', 'odd')[i % 2],
name="coupling_{i}".format(i=i),
translation_fn=translation_wrapper,
scale_fn=scale_wrapper)
for i in range(1, num_coupling_layers + 1)
]
def _forward(self, x, **condition_kwargs):
self._maybe_assert_valid_x(x)
out = x
for layer in self.layers:
out = layer.forward(out, **condition_kwargs)
return out
def _forward_log_det_jacobian(self, x, **condition_kwargs):
self._maybe_assert_valid_x(x)
sum_log_det_jacobians = tf.reduce_sum(
tf.zeros_like(x), axis=tuple(range(1, len(x.shape))))
out = x
for layer in self.layers:
log_det_jacobian = layer.forward_log_det_jacobian(
out, **condition_kwargs)
out = layer.forward(out, **condition_kwargs)
assert (sum_log_det_jacobians.shape.as_list()
== log_det_jacobian.shape.as_list())
sum_log_det_jacobians += log_det_jacobian
return sum_log_det_jacobians
def _inverse(self, y, **condition_kwargs):
self._maybe_assert_valid_y(y)
out = y
for layer in reversed(self.layers):
out = layer.inverse(out, **condition_kwargs)
return out
def _inverse_log_det_jacobian(self, y, **condition_kwargs):
self._maybe_assert_valid_y(y)
sum_log_det_jacobians = tf.reduce_sum(
tf.zeros_like(y), axis=tuple(range(1, len(y.shape))))
out = y
for layer in reversed(self.layers):
log_det_jacobian = layer.inverse_log_det_jacobian(
out, **condition_kwargs)
out = layer.inverse(out, **condition_kwargs)
assert (sum_log_det_jacobians.shape.as_list()
== log_det_jacobian.shape.as_list())
sum_log_det_jacobians += log_det_jacobian
return sum_log_det_jacobians
def _maybe_assert_valid_x(self, x):
"""TODO"""
if not self.validate_args:
return x
raise NotImplementedError("_maybe_assert_valid_x")
def _maybe_assert_valid_y(self, y):
"""TODO"""
if not self.validate_args:
return y
raise NotImplementedError("_maybe_assert_valid_y")
| 8,279
| 0
| 266
|
e42334d7ea3bc94404b733cfe0daa07bc8199160
| 3,904
|
py
|
Python
|
politician/views.py
|
adborden/WeVoteBase
|
7fd612aee1d3638c8a74cc81873ce0687f62cf33
|
[
"MIT"
] | null | null | null |
politician/views.py
|
adborden/WeVoteBase
|
7fd612aee1d3638c8a74cc81873ce0687f62cf33
|
[
"MIT"
] | null | null | null |
politician/views.py
|
adborden/WeVoteBase
|
7fd612aee1d3638c8a74cc81873ce0687f62cf33
|
[
"MIT"
] | 1
|
2020-03-04T00:22:39.000Z
|
2020-03-04T00:22:39.000Z
|
# politician/views.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.contrib import messages
from django.contrib.messages import get_messages
from django.core.urlresolvers import reverse
from django.views import generic
from django.views.generic import TemplateView
from django.utils import timezone
from politician.forms import TagNewForm
from politician.models import Politician, PoliticianTagLink
from tag.models import Tag
# TODO Next step is to get Twitter vacuum working so we can pull in Tweets automatically based on tags/handles
def politician_tag_new_view(request, politician_id):
"""
Form to add a new link tying a politician to twitter tags
:param request:
:return:
"""
messages_on_stage = get_messages(request)
# for message in messages_on_stage:
# if message.level is ERROR:
politician_on_stage = get_object_or_404(Politician, id=politician_id)
try:
tag_link_list = politician_on_stage.tag_link.all()
except PoliticianTagLink.DoesNotExist:
tag_link_list = None
template_values = {
'politician_on_stage': politician_on_stage,
'tag_link_list': tag_link_list,
'messages_on_stage': messages_on_stage,
}
return render(request, 'politician/politician_tag_new.html', template_values)
def politician_tag_new_test_view(request, politician_id):
"""
Form to add a new link tying a politician to twitter tags
:param request:
:return:
"""
tag_new_form = TagNewForm()
politician_on_stage = get_object_or_404(Politician, id=politician_id)
# TODO Find the tags attached to this politician
try:
tag_list = PoliticianTagLink.objects.get(politician=politician_on_stage)
except PoliticianTagLink.DoesNotExist:
tag_list = None
template_values = {
'tag_new_form': tag_new_form,
'politician_on_stage': politician_on_stage,
'tag_list': tag_list,
}
return render(request, 'politician/politician_tag_new_test.html', template_values)
def politician_tag_new_process_view(request, politician_id):
"""
Process the form to add a new link tying a politician to twitter tags
"""
politician_on_stage = get_object_or_404(Politician, id=politician_id)
new_tag = request.POST['new_tag']
# If an invalid tag didn't come in, redirect back to tag_new
if not is_tag_valid(new_tag):
messages.add_message(request, messages.INFO, 'That is not a valid tag. Please enter a different tag.')
return HttpResponseRedirect(reverse('politician:politician_tag_new', args=(politician_id,)))
new_tag_temp, created = Tag.objects.get_or_create(hashtag_text=new_tag)
new_tag_link = PoliticianTagLink(tag=new_tag_temp, politician=politician_on_stage)
new_tag_link.save()
return HttpResponseRedirect(reverse('politician:politician_detail', args=(politician_id,)))
| 36.485981
| 147
| 0.742059
|
# politician/views.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.contrib import messages
from django.contrib.messages import get_messages
from django.core.urlresolvers import reverse
from django.views import generic
from django.views.generic import TemplateView
from django.utils import timezone
from politician.forms import TagNewForm
from politician.models import Politician, PoliticianTagLink
from tag.models import Tag
class PoliticianIndexView(generic.ListView):
template_name = 'politician/politician_list.html'
context_object_name = 'politician_list'
def get_queryset(self):
""""""
return Politician.objects.order_by('last_name')
# TODO Next step is to get Twitter vacuum working so we can pull in Tweets automatically based on tags/handles
def politician_detail_view(request, politician_id):
politician_on_stage = get_object_or_404(Politician, id=politician_id)
# post_list = Post.objects.filter
template_values = {
'politician_on_stage': politician_on_stage,
# 'post_list': tag_list, # This is for prototyping only -- we want to move very quickly to posts being pulled onto the page via javascript
}
return render(request, 'politician/politician_detail.html', template_values)
def politician_tag_new_view(request, politician_id):
"""
Form to add a new link tying a politician to twitter tags
:param request:
:return:
"""
messages_on_stage = get_messages(request)
# for message in messages_on_stage:
# if message.level is ERROR:
politician_on_stage = get_object_or_404(Politician, id=politician_id)
try:
tag_link_list = politician_on_stage.tag_link.all()
except PoliticianTagLink.DoesNotExist:
tag_link_list = None
template_values = {
'politician_on_stage': politician_on_stage,
'tag_link_list': tag_link_list,
'messages_on_stage': messages_on_stage,
}
return render(request, 'politician/politician_tag_new.html', template_values)
def politician_tag_new_test_view(request, politician_id):
"""
Form to add a new link tying a politician to twitter tags
:param request:
:return:
"""
tag_new_form = TagNewForm()
politician_on_stage = get_object_or_404(Politician, id=politician_id)
# TODO Find the tags attached to this politician
try:
tag_list = PoliticianTagLink.objects.get(politician=politician_on_stage)
except PoliticianTagLink.DoesNotExist:
tag_list = None
template_values = {
'tag_new_form': tag_new_form,
'politician_on_stage': politician_on_stage,
'tag_list': tag_list,
}
return render(request, 'politician/politician_tag_new_test.html', template_values)
def politician_tag_new_process_view(request, politician_id):
"""
Process the form to add a new link tying a politician to twitter tags
"""
politician_on_stage = get_object_or_404(Politician, id=politician_id)
new_tag = request.POST['new_tag']
# If an invalid tag didn't come in, redirect back to tag_new
if not is_tag_valid(new_tag):
messages.add_message(request, messages.INFO, 'That is not a valid tag. Please enter a different tag.')
return HttpResponseRedirect(reverse('politician:politician_tag_new', args=(politician_id,)))
new_tag_temp, created = Tag.objects.get_or_create(hashtag_text=new_tag)
new_tag_link = PoliticianTagLink(tag=new_tag_temp, politician=politician_on_stage)
new_tag_link.save()
return HttpResponseRedirect(reverse('politician:politician_detail', args=(politician_id,)))
def is_tag_valid(new_tag):
    if not new_tag.strip():  # an empty or whitespace-only tag is not valid
return False
return True
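# Hypothetical illustration (not part of the original module) of the helper above:
# is_tag_valid('') -> False
# is_tag_valid('   ') -> False
# is_tag_valid('#WeVote') -> True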
| 601
| 221
| 68
|
4ed568d66cbb02189b36d1ec6924ebb87e827d1e
| 6,209
|
py
|
Python
|
tests/compare_updates.py
|
cassianobecker/msbm
|
6ce22f93f63071dc3ca722d499db376ea678eb23
|
[
"MIT"
] | null | null | null |
tests/compare_updates.py
|
cassianobecker/msbm
|
6ce22f93f63071dc3ca722d499db376ea678eb23
|
[
"MIT"
] | null | null | null |
tests/compare_updates.py
|
cassianobecker/msbm
|
6ce22f93f63071dc3ca722d499db376ea678eb23
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(0, '..')
import updates_msbm_vi_iter
import updates_msbm_vi
import updates_msbm2_vi_iter
import updates_msbm2_vi
import os
import util
import init_msbm_vi as im
import numpy as np
import numpy.random as npr
import pdb
# ###########################################################
# ###########################################################
# ###########################################################
if __name__ == '__main__':
file_url = os.path.join('..', 'experiments', 'two_prototype', 'data', 'twoprototype_105_250.pickle')
remove_self_loops = False
updater_einsum = updates_msbm_vi
updater_iter = updates_msbm_vi_iter
runner = TestUpdates(updater_einsum, updater_iter, file_url, remove_self_loops)
runner.test_all()
updater_einsum = updates_msbm2_vi
updater_iter = updates_msbm2_vi_iter
runner = TestUpdates(updater_einsum, updater_iter, file_url, remove_self_loops)
runner.test_all()
remove_self_loops = True
updater_einsum = updates_msbm_vi
updater_iter = updates_msbm_vi_iter
runner = TestUpdates(updater_einsum, updater_iter, file_url, remove_self_loops)
runner.test_all()
updater_einsum = updates_msbm2_vi
updater_iter = updates_msbm2_vi_iter
runner = TestUpdates(updater_einsum, updater_iter, file_url, remove_self_loops)
runner.test_all()
| 35.079096
| 113
| 0.565792
|
import sys
sys.path.insert(0, '..')
import updates_msbm_vi_iter
import updates_msbm_vi
import updates_msbm2_vi_iter
import updates_msbm2_vi
import os
import util
import init_msbm_vi as im
import numpy as np
import numpy.random as npr
import pdb
class TestUpdates:
def __init__(self, updater_einsum, updater_iter, file_url, remove_self_loops):
print("\n\n=====================================================================================")
print("Comparing '{:}' with '{:}' with remove_self_loops={:}"
.format(updater_einsum.__name__, updater_iter.__name__, remove_self_loops))
print("=====================================================================================")
data = util.load_data(file_url)
self.data = data
print('')
K = 8
data['X'] = data['X'][:K, :, :]
data['NON_X'] = data['NON_X'][:K, :, :]
data['K'] = K
data['Y'] = data['Y'][:K, :]
prior = dict()
prior['ALPHA_0'] = 0.5
prior['BETA_0'] = 0.5
prior['NU_0'] = 0.5
prior['ZETA_0'] = 0.5
self.prior = prior
# assigning hyper-parameters from ground truth (cheating)
hyper = dict()
hyper['M'] = data['M']
hyper['Q'] = data['Q']
self.hyper = hyper
# initialize moments
mom = dict()
npr.seed(1)
mode = 'random'
mom['ALPHA'] = im.init_ALPHA(data, hyper, mode)
mom['BETA'] = im.init_BETA(data, hyper, mode)
mom['NU'] = im.init_NU(data, hyper, mode)
mom['ZETA'] = im.init_ZETA(data, hyper, mode)
mom['MU'] = im.init_MU(data, hyper, mode)
mom['LOG_MU'] = np.log(mom['MU'])
mom['TAU'] = im.init_TAU(data, hyper, mode)
mom['LOG_TAU'] = np.log(mom['TAU'])
self.mom = mom
par = dict()
par['MAX_ITER'] = 1000
par['TOL_ELBO'] = 1.e-16
par['ALG'] = 'cavi'
par['kappa'] = 1.0
self.par = par
self.remove_self_loops = remove_self_loops
self.msbm_einsum = updater_einsum
self.msbm_iter = updater_iter
def test_update_Pi(self):
print('--- Pi ---')
NEW_ALPHA1, NEW_BETA1 = self.msbm_einsum.update_Pi(self.data, self.prior, self.hyper, self.mom, self.par,
remove_self_loops=self.remove_self_loops)
NEW_ALPHA2, NEW_BETA2 = self.msbm_iter.update_Pi(self.data, self.prior, self.hyper, self.mom, self.par,
remove_self_loops=self.remove_self_loops)
print("--ALPHA:")
self.eval_diff(NEW_ALPHA1, NEW_ALPHA2)
print("--BETA:")
self.eval_diff(NEW_BETA1, NEW_BETA2)
def test_update_Z(self):
print('--- Z ---')
NEW_LOG_TAU1 = self.msbm_einsum.update_Z(self.data, self.prior, self.hyper, self.mom, self.par,
remove_self_loops=self.remove_self_loops)
NEW_LOG_TAU2 = self.msbm_iter.update_Z(self.data, self.prior, self.hyper, self.mom, self.par,
remove_self_loops=self.remove_self_loops)
self.eval_diff(NEW_LOG_TAU1, NEW_LOG_TAU2)
def test_update_Y(self):
print('--- Y ---')
NEW_LOG_MU1 = self.msbm_einsum.update_Y(self.data, self.prior, self.hyper, self.mom, self.par,
remove_self_loops=self.remove_self_loops)
NEW_LOG_MU2 = self.msbm_iter.update_Y(self.data, self.prior, self.hyper, self.mom, self.par,
remove_self_loops=self.remove_self_loops)
self.eval_diff(NEW_LOG_MU1, NEW_LOG_MU2)
def test_update_gamma(self):
print('--- Gamma ---')
NEW_NU1 = self.msbm_einsum.update_gamma(self.data, self.prior, self.hyper, self.mom, self.par,
remove_self_loops=self.remove_self_loops)
NEW_NU2 = self.msbm_iter.update_gamma(self.data, self.prior, self.hyper, self.mom, self.par)
self.eval_diff(NEW_NU1, NEW_NU2)
def test_update_rho(self):
print('--- Rho ---')
NEW_ZETA1 = self.msbm_einsum.update_rho(self.data, self.prior, self.hyper, self.mom, self.par,
remove_self_loops=self.remove_self_loops)
NEW_ZETA2 = self.msbm_iter.update_rho(self.data, self.prior, self.hyper, self.mom, self.par)
self.eval_diff(NEW_ZETA1, NEW_ZETA2)
def test_all(self):
self.test_update_Pi()
self.test_update_Z()
self.test_update_Y()
self.test_update_gamma()
self.test_update_rho()
def eval_diff(self, X1, X2):
ind = np.unravel_index(np.argmax(X1 - X2), X1.shape)
diff = (X1 - X2).ravel()
print('Mean abs entry-wise error: {:1.3e}'.format(np.mean(np.abs(diff))))
print('Max abs entry-wise error: {:1.3e}'.format(np.max(np.abs(diff))))
print('On entry with index:')
print(ind)
# ###########################################################
# ###########################################################
# ###########################################################
if __name__ == '__main__':
file_url = os.path.join('..', 'experiments', 'two_prototype', 'data', 'twoprototype_105_250.pickle')
remove_self_loops = False
updater_einsum = updates_msbm_vi
updater_iter = updates_msbm_vi_iter
runner = TestUpdates(updater_einsum, updater_iter, file_url, remove_self_loops)
runner.test_all()
updater_einsum = updates_msbm2_vi
updater_iter = updates_msbm2_vi_iter
runner = TestUpdates(updater_einsum, updater_iter, file_url, remove_self_loops)
runner.test_all()
remove_self_loops = True
updater_einsum = updates_msbm_vi
updater_iter = updates_msbm_vi_iter
runner = TestUpdates(updater_einsum, updater_iter, file_url, remove_self_loops)
runner.test_all()
updater_einsum = updates_msbm2_vi
updater_iter = updates_msbm2_vi_iter
runner = TestUpdates(updater_einsum, updater_iter, file_url, remove_self_loops)
runner.test_all()
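# Hypothetical illustration (not part of the original script): eval_diff above
# reduces to an entry-wise comparison of two arrays, e.g.
# a = np.array([[1.0, 2.0], [3.0, 4.0]]); b = a + 1e-9
# np.mean(np.abs((a - b).ravel())) and np.max(np.abs((a - b).ravel())) are both ~1e-09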
| 4,600
| -3
| 239
|
90aec8fbaf7418768c7f684cf8a72125b93b8c1d
| 595
|
py
|
Python
|
module/tools/extra/graph_clean.py
|
ObliviousJamie/opic-prototype
|
a925ce9faa38b9a6c8976d4c63b47349a53fd07e
|
[
"BSD-3-Clause"
] | null | null | null |
module/tools/extra/graph_clean.py
|
ObliviousJamie/opic-prototype
|
a925ce9faa38b9a6c8976d4c63b47349a53fd07e
|
[
"BSD-3-Clause"
] | null | null | null |
module/tools/extra/graph_clean.py
|
ObliviousJamie/opic-prototype
|
a925ce9faa38b9a6c8976d4c63b47349a53fd07e
|
[
"BSD-3-Clause"
] | null | null | null |
import networkx as nx
| 27.045455
| 73
| 0.633613
|
import networkx as nx
class GraphClean:
@staticmethod
def prune_unconnected_components(graph):
current = graph
# Remove self loops
for vertex in graph.nodes_with_selfloops():
graph.remove_edge(vertex, vertex)
if not nx.is_connected(graph):
connected_subgraphs = nx.connected_component_subgraphs(graph)
current = next(connected_subgraphs)
for sub_graph in connected_subgraphs:
if len(sub_graph.nodes) > len(current.nodes):
current = sub_graph
return current
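# Hypothetical usage sketch (not part of the original module); it assumes a
# networkx release older than 2.4, where Graph.nodes_with_selfloops() and
# nx.connected_component_subgraphs() are still available.
# g = nx.Graph([(1, 2), (2, 3), (3, 3), (4, 5)])  # self loop on 3, extra component {4, 5}
# largest = GraphClean.prune_unconnected_components(g)
# sorted(largest.nodes)  # -> [1, 2, 3]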
| 508
| 41
| 23
|
4f7f939a65964f9c1af988a3620892d451aa3135
| 46,951
|
py
|
Python
|
utensor_cgen/backend/operators.py
|
dboyliao/utensor_cgen
|
aacd3adf4ee2a521a8eb2e75807fe3c1c0d1e1e5
|
[
"Apache-2.0"
] | 1
|
2017-12-29T17:40:49.000Z
|
2017-12-29T17:40:49.000Z
|
utensor_cgen/backend/operators.py
|
dboyliao/utensor_cgen
|
aacd3adf4ee2a521a8eb2e75807fe3c1c0d1e1e5
|
[
"Apache-2.0"
] | 1
|
2017-12-28T02:25:45.000Z
|
2017-12-28T02:25:45.000Z
|
utensor_cgen/backend/operators.py
|
dboyliao/utensor_cgen
|
aacd3adf4ee2a521a8eb2e75807fe3c1c0d1e1e5
|
[
"Apache-2.0"
] | 3
|
2017-12-27T17:15:38.000Z
|
2017-12-29T06:43:00.000Z
|
# -*- coding:utf8 -*-
r'''
TODO: remove all tensorflow graph construction in `build_op_info`
'''
import os
import numpy as np
import idx2numpy as idx2np
import tensorflow as tf
from utensor_cgen.ir import OperationInfo, TensorInfo
from utensor_cgen.ir.converter import (AttrValueConverter, DataTypeConverter,
GenericTensorConverterMixin)
from utensor_cgen.logger import logger
from utensor_cgen.matcher import OpEqualityDelegate, _morphism
from utensor_cgen.transformer.optimizer import RefCntOptimizer
from utensor_cgen.utils import NamescopedKWArgsParser
from .snippets import * # pylint: disable=W0401,W0614
__all__ = ['OperatorFactory', 'OpNotSupportedError']
@OperatorFactory.register
@OpEqualityDelegate.is_compatible_with("Inline", _morphism.Const2InlineMorphism)
@OperatorFactory.register
@OpEqualityDelegate.is_associative(
permutations=((0, 1), (1, 0))
)
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
#hard coding to uint8_t uint8_t int32_t for now
@OperatorFactory.register
@OperatorFactory.register
@OpEqualityDelegate.is_compatible_with("Const", _morphism.Inline2ConstMorphism)
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
@OperatorFactory.register
| 37.32194
| 114
| 0.629188
|
# -*- coding:utf8 -*-
r'''
TODO: remove all tensorflow graph construction in `build_op_info`
'''
import os
import numpy as np
import idx2numpy as idx2np
import tensorflow as tf
from utensor_cgen.ir import OperationInfo, TensorInfo
from utensor_cgen.ir.converter import (AttrValueConverter, DataTypeConverter,
GenericTensorConverterMixin)
from utensor_cgen.logger import logger
from utensor_cgen.matcher import OpEqualityDelegate, _morphism
from utensor_cgen.transformer.optimizer import RefCntOptimizer
from utensor_cgen.utils import NamescopedKWArgsParser
from .snippets import * # pylint: disable=W0401,W0614
__all__ = ['OperatorFactory', 'OpNotSupportedError']
class OpNotSupportedError(Exception): pass
class OperatorFactory():
# Can easily do something smarter
_operators = {}
def createOperatorSnippet(self, op_info, **kwargs):
op_type = op_info.op_type
if op_type not in self._operators:
err_msg = "unsupported op type in uTensor: {op.name}, {op.op_type}".format(op=op_info)
raise ValueError(err_msg)
op = self._operators[op_type](op_info, **kwargs) # Create desired object
return op.snippet # Ops know how to create their snippets
@classmethod
def get_opertor(cls, op_type):
op_cls = cls._operators.get(op_type)
if op_cls is None:
raise OpNotSupportedError(
'{} not supported in utensor_cgen'.format(op_type)
)
return op_cls
@classmethod
def build_op_info(cls, *args, ugraph, op_type, name, **kwargs):
op_cls = cls._operators.get(op_type, None)
if op_cls is None:
err_msg = "unsupported op type in uTensor: {}".format(op_type)
raise OpNotSupportedError(err_msg)
return op_cls.build_op_info(ugraph, name, *args, **kwargs)
@classmethod
def register(cls, op_cls):
cls._operators[op_cls.op_type] = op_cls
return op_cls
@classmethod
def support_op_types(cls):
"""Return the set of all supported ops
"""
return set(cls._operators.keys())
@classmethod
def is_supported(cls, op_type):
if op_type != 'Placeholder' and op_type not in cls._operators:
return False
return True
class _Operator(object):
def __init__(self):
self.name = ""
self._snippet = None
@property
def snippet(self):
return self._snippet
@classmethod
def build_op_info(cls, ugraph, name, *args, **kwargs):
raise NotImplementedError('%s does not have build_op_info method' % cls)
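# Hypothetical sketch (not part of the original module) of how the registry is
# extended; the class name, op_type and snippet below are invented placeholders.
# @OperatorFactory.register
# class _MyCustomOperator(_Operator):
#   op_type = "MyCustomOp"
#   def __init__(self, op_info, **kwargs):
#     _Operator.__init__(self)
#     inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
#     output = op_info.output_tensors[0].name
#     self._snippet = SomeOpSnippet(inputs, output)  # a Snippet rendering the uTensor call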
@OperatorFactory.register
@OpEqualityDelegate.is_compatible_with("Inline", _morphism.Const2InlineMorphism)
class _ConstOperator(_Operator):
op_type = "Const"
def __init__(self, op_info, **kwargs):
out_tensor_info = op_info.output_tensors[0]
out_tname, out_dtype = (out_tensor_info.name,
out_tensor_info.dtype)
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
pre_tname = self._tf_prepare_tensor_name(out_tname)
idx_fname = "{}.idx".format(pre_tname)
idx_dir = kwargs['idx_dir']
embed_data_dir = kwargs.get('embed_data_dir',
os.path.join("/fs", idx_dir))
self._snippet = CreateTensorIdxSnippet(embed_data_dir, out_tname,
idx_fname=idx_fname,
np_dtype=out_dtype,
ref_count=ref_count)
idx_path = os.path.join(idx_dir, idx_fname)
value = op_info.op_attr['value'].value
self._tf_save_data(idx_path, value)
@classmethod
def build_op_info(cls, ugraph, name, value, **kwargs):
generic_value = GenericTensorConverterMixin.__utensor_generic_type__(
np_array=value
)
return OperationInfo(
name=name,
input_tensors=[],
output_tensors=[
TensorInfo(
name='{}:0'.format(name),
op_name=name,
dtype=value.dtype,
shape=list(value.shape),
ugraph=ugraph
)
],
op_type=cls.op_type,
op_attr={
'value': AttrValueConverter.__utensor_generic_type__(
value_name='tensor', value=generic_value
),
'dtype': AttrValueConverter.__utensor_generic_type__(
value_name='type', value=DataTypeConverter.get_tf_value(value.dtype)
)
},
ugraph=ugraph,
backend=kwargs.get('backend', 'tensorflow')
)
def _tf_prepare_tensor_name(self, tensor_name):
"""Replace all ':' and '/' with '_' in a given tensor name
"""
prepared = tensor_name.replace(":", "_").replace("/", "_")
return prepared
def _tf_save_data(self, path, value):
np_array = value.np_array
if np_array.shape == ():
np_array = np.array([np_array])
with open(path, "wb") as fid:
idx2np.convert_to_file(fid, np_array)
logger.info("saving %s", path)
@OperatorFactory.register
@OpEqualityDelegate.is_associative(
permutations=((0, 1), (1, 0))
)
class _AddOperator(_Operator):
op_type = "Add" # tf op type
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
tf_dtype = op_info.input_tensors[0].dtype
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
self._snippet = AddOpSnippet(inputs, output, tf_dtype, ref_count, to_eval)
@classmethod
def build_op_info(cls, ugraph, name, tensor_x, tensor_y, **kwargs):
# broadcast the shape and promote types
dummy_x = np.empty(tensor_x.shape)
dummy_y = np.empty(tensor_y.shape)
output_shape = np.broadcast(dummy_x, dummy_y).shape
output_dtype = np.promote_types(tensor_x.dtype, tensor_y.dtype)
return OperationInfo(
name=name,
input_tensors=[tensor_x, tensor_y],
output_tensors=[
TensorInfo(
name='{}:0'.format(name),
op_name=name,
dtype=output_dtype,
shape=list(output_shape),
ugraph=ugraph
)
],
op_type=cls.op_type,
op_attr={
'T': AttrValueConverter.__utensor_generic_type__(
value_name='type',
value=DataTypeConverter.get_tf_value(output_dtype)
)
},
ugraph=ugraph,
backend=kwargs.get('backend', 'tensorflow')
)
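# Hypothetical illustration (not part of the original module): the broadcast
# above follows NumPy rules, e.g. shapes (3, 1) and (1, 4) give output (3, 4):
# np.broadcast(np.empty((3, 1)), np.empty((1, 4))).shape  -> (3, 4)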
@OperatorFactory.register
class _ArgMaxOperator(_Operator):
op_type = "ArgMax"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
out_tensor_info = op_info.output_tensors[0]
output, out_dtype = out_tensor_info.name, out_tensor_info.dtype
in_dtype = op_info.input_tensors[0].dtype
data_manager = kwargs['data_manager']
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
self._snippet = ArgMaxOpSnippet(inputs, output, in_dtype, out_dtype, ref_count, to_eval, address)
@classmethod
def build_op_info(cls, ugraph, name, input_tensor, dtype=np.dtype('int64'), axis=0, **kwargs):
if isinstance(axis, int):
axis, = ugraph.add_op(
np.array(axis, dtype=np.dtype('int32')),
op_type='Const',
name='{}/axis'.format(name)
)
dummy_in = np.empty(input_tensor.shape, dtype=input_tensor.dtype)
graph = tf.Graph()
with graph.as_default():
dummy_out = tf.math.argmax(
dummy_in,
axis=axis.op.op_attr['value'].value.np_array,
name='dummy',
output_type=tf.as_dtype(dtype)
)
node_def = [node for node in graph.as_graph_def().node if node.name=='dummy'][0]
output_shape = dummy_out.shape.as_list()
op_attr = {
k: AttrValueConverter.get_generic_value(v)
for k, v in node_def.attr.items()
}
return OperationInfo(
name=name,
op_type=cls.op_type,
input_tensors=[input_tensor, axis],
output_tensors=[
TensorInfo(
name='{}:0'.format(name),
op_name=name,
dtype=dtype,
shape=output_shape,
ugraph=ugraph
)
],
op_attr=op_attr,
ugraph=ugraph,
backend=kwargs.get('backend', 'tensorflow')
)
@OperatorFactory.register
class _DequantizeOperator(_Operator):
op_type = "Dequantize"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
out_tensor_info = op_info.output_tensors[0]
data_manager = kwargs['data_manager']
output, out_dtype = out_tensor_info.name, out_tensor_info.dtype
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
self._snippet = DequantizeOpSnippet(inputs, output, out_dtype, ref_count, to_eval, address)
@OperatorFactory.register
class _MaxOperator(_Operator):
op_type = "Max"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
out_tensor_info = op_info.output_tensors[0]
data_manager = kwargs['data_manager']
output, out_dtype, out_shape = (out_tensor_info.name,
out_tensor_info.dtype,
out_tensor_info.shape)
# FIXME: automatic alloc for uTensor fail
if not out_shape:
out_shape = [1]
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
self._snippet = MaxOpSnippet(inputs, output, out_dtype, out_shape, ref_count, to_eval, address)
@classmethod
def build_op_info(cls, ugraph, name, tensor, axis=-1, keepdims=False, **kwargs):
if isinstance(axis, int):
axis, = ugraph.add_op(
np.array(axis, dtype=np.dtype('int32')),
op_type='Const',
name='{}/axis'.format(name)
)
dummy_in = np.empty(tensor.shape, dtype=tensor.dtype)
graph = tf.Graph()
with graph.as_default():
dummy_out = tf.reduce_max(
dummy_in,
axis=axis.op.op_attr['value'].value.np_array,
keepdims=keepdims,
name='dummy'
)
node_def = [node for node in graph.as_graph_def().node if node.name == 'dummy'][0]
return OperationInfo(
name=name,
input_tensors=[tensor, axis],
output_tensors=[
TensorInfo(
name='{}:0'.format(name),
op_name=name,
dtype=tensor.dtype,
shape=dummy_out.shape.as_list(),
ugraph=ugraph
)
],
op_type=cls.op_type,
op_attr={
k: AttrValueConverter.get_generic_value(v)
for k, v in node_def.attr.items()
},
backend=kwargs.get('backend', 'tensorflow'),
ugraph=ugraph
)
@OperatorFactory.register
class _MinOperator(_Operator):
op_type = "Min"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
out_info = op_info.output_tensors[0]
output, out_dtype, out_shape = (out_info.name,
out_info.dtype,
out_info.shape)
# FIXME: automatic alloc for uTensor fail
if not out_shape:
out_shape = [1]
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
self._snippet = MinOpSnippet(inputs, output, out_dtype, out_shape, ref_count, to_eval)
@classmethod
def build_op_info(cls, ugraph, name, tensor, axis=-1, keepdims=False, **kwargs):
if isinstance(axis, int):
axis, = ugraph.add_op(
np.array(axis, dtype=np.dtype('int32')),
op_type='Const',
name='{}/axis'.format(name)
)
dummy_in = np.empty(tensor.shape, dtype=tensor.dtype)
graph = tf.Graph()
with graph.as_default():
dummy_out = tf.reduce_min(
dummy_in,
axis=axis.op.op_attr['value'].value.np_array,
keepdims=keepdims,
name='dummy'
)
node_def = [node for node in graph.as_graph_def().node if node.name == 'dummy'][0]
output_shape = dummy_out.shape.as_list()
return OperationInfo(
name=name,
input_tensors=[tensor, axis],
output_tensors=[
TensorInfo(
name='{}:0'.format(name),
op_name=name,
dtype=tensor.dtype,
shape=output_shape,
ugraph=ugraph,
)
],
op_type=cls.op_type,
backend=kwargs.get('backend', 'tensorflow'),
ugraph=ugraph,
op_attr={
k: AttrValueConverter.get_generic_value(v)
for k, v in node_def.attr.items()
}
)
@OperatorFactory.register
class _MaxPool(_Operator):
op_type = "MaxPool"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
dtype = op_info.output_tensors[0].dtype
ksize = op_info.op_attr['ksize'].value.ints_value
strides = op_info.op_attr['strides'].value.ints_value
padding = op_info.op_attr['padding'].value.decode('utf8')
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
self._snippet = MaxPoolSnippet(inputs, output, dtype,
ksize, strides, padding,
ref_count, to_eval)
@classmethod
def build_op_info(
cls,
ugraph,
name,
tensor,
ksize_height,
ksize_width,
stride_height,
stride_width,
padding='SAME',
**kwargs
):
dummy_arr = np.empty(tensor.shape, dtype=tensor.dtype)
graph = tf.Graph()
with graph.as_default():
tf_tensor = tf.nn.max_pool(
dummy_arr,
ksize=[1, ksize_height, ksize_width, 1],
strides=[1, stride_height, stride_width, 1],
padding=padding,
name='dummy'
)
output_shape = tf_tensor.shape.as_list()
graph_def = graph.as_graph_def()
node_def = [node for node in graph_def.node if node.name == 'dummy'][0]
return OperationInfo(
name=name,
input_tensors=[tensor],
output_tensors=[
TensorInfo(
name='{}:0'.format(name),
op_name=name,
dtype=tensor.dtype,
shape=output_shape,
ugraph=ugraph
)
],
op_type=cls.op_type,
backend=kwargs.get('backend', 'tensorflow'),
ugraph=ugraph,
op_attr={
k: AttrValueConverter.get_generic_value(v)
for k, v in node_def.attr.items()
}
)
@OperatorFactory.register
class _QuantizedMaxPool(_Operator):
op_type = "QuantizedMaxPool"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
dtype = op_info.output_tensors[0].dtype
ksize = op_info.op_attr['ksize'].value.ints_value
strides = op_info.op_attr['strides'].value.ints_value
padding = op_info.op_attr['padding'].value.decode('utf8')
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
self._snippet = QuantizedMaxPoolSnippet(inputs, outputs, dtype,
ksize, strides, padding,
ref_counts, to_eval)
@OperatorFactory.register
class _MinOperator(_Operator):
op_type = "Min"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
out_info = op_info.output_tensors[0]
data_manager = kwargs['data_manager']
output, out_dtype, out_shape = (out_info.name,
out_info.dtype,
out_info.shape)
# FIXME: automatic alloc for uTensor fail
if not out_shape:
out_shape = [1]
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
self._snippet = MinOpSnippet(inputs, output, out_dtype, out_shape, ref_count, to_eval, address)
@classmethod
def build_op_info(cls, ugraph, name, tensor, axis=-1, keepdims=False, **kwargs):
if isinstance(axis, int):
axis, = ugraph.add_op(
np.array(axis, dtype=np.dtype('int32')),
op_type='Const',
name='{}/axis'.format(name)
)
dummy_in = np.empty(tensor.shape, dtype=tensor.dtype)
graph = tf.Graph()
with graph.as_default():
dummy_out = tf.reduce_min(
dummy_in,
axis=axis.op.op_attr['value'].value.np_array,
keepdims=keepdims,
name='dummy'
)
node_def = [node for node in graph.as_graph_def().node if node.name == 'dummy'][0]
output_shape = dummy_out.shape.as_list()
return OperationInfo(
name=name,
input_tensors=[tensor, axis],
output_tensors=[
TensorInfo(
name='{}:0'.format(name),
op_name=name,
dtype=tensor.dtype,
shape=output_shape,
ugraph=ugraph,
)
],
op_type=cls.op_type,
backend=kwargs.get('backend', 'tensorflow'),
ugraph=ugraph,
op_attr={
k: AttrValueConverter.get_generic_value(v)
for k, v in node_def.attr.items()
}
)
@OperatorFactory.register
class _QuantizeV2Operator(_Operator):
op_type = "QuantizeV2"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
out_dtype = op_info.output_tensors[0].dtype
data_manager = kwargs['data_manager']
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
self._snippet = QuantizeV2OpSnippet(inputs, outputs, out_dtype, ref_counts, to_eval, address)
@OperatorFactory.register
class _MatMulOperator(_Operator):
op_type = "MatMul"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
in_tensor_info = op_info.input_tensors[0]
x_dtype, w_dtype, out_dtype = (op_info.input_tensors[0].dtype,
op_info.input_tensors[1].dtype,
op_info.output_tensors[0].dtype)
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
self._snippet = MatMulOpSnippet(inputs, output,
x_dtype, w_dtype, out_dtype,
ref_count, to_eval)
@classmethod
def build_op_info(cls, ugraph, name, tensor_x, tensor_w, **kwargs):
dtype_x = tensor_x.dtype
dtype_w = tensor_w.dtype
out_dtype = np.promote_types(dtype_x, dtype_w)
if tensor_x.shape[-1] != tensor_w.shape[0]:
raise ValueError(
'dimension mismatch: {},{}'.format(tensor_x.shape, tensor_w.shape)
)
return OperationInfo(
name=name,
input_tensors=[
tensor_x, tensor_w
],
output_tensors=[
TensorInfo(
name='{}:0'.format(name),
op_name=name,
dtype=out_dtype,
shape=tensor_x.shape[:-1]+tensor_w.shape[1:],
ugraph=ugraph
)
],
op_type=cls.op_type,
op_attr={
'T': AttrValueConverter.__utensor_generic_type__(
value_name='type',
value=DataTypeConverter.get_tf_value(out_dtype)
),
'transpose_a': AttrValueConverter.__utensor_generic_type__(
value_name='b',
value=kwargs.get('transpose_x', False)
),
'transpose_b': AttrValueConverter.__utensor_generic_type__(
value_name='b',
          value=kwargs.get('transpose_w', False)
)
},
ugraph=ugraph,
backend=kwargs.get('backend', 'tensorflow')
)
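# Hypothetical illustration (not part of the original module): the output shape
# above is tensor_x.shape[:-1] + tensor_w.shape[1:], so x of shape (N, K) and
# w of shape (K, M) give (N, M); mismatching inner dimensions raise the
# ValueError shown above.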
@OperatorFactory.register
class _QuantizedMatMulOperator(_Operator):
op_type = "QuantizedMatMul"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
in_tensor_info = op_info.input_tensors[0]
x_dtype, w_dtype, out_dtype = (op_info.input_tensors[0].dtype,
op_info.input_tensors[1].dtype,
op_info.output_tensors[0].dtype)
data_manager = kwargs['data_manager']
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
self._snippet = QuantizedMatMulOpSnippet(inputs, outputs,
x_dtype, w_dtype, out_dtype,
ref_counts, to_eval, address)
@OperatorFactory.register
class _ReluOperator(_Operator):
op_type = "Relu"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
in_dtype, out_dtype = (op_info.input_tensors[0].dtype,
op_info.output_tensors[0].dtype) #NT: why separate this out?
#DB: I don't know, it's in the uTensor C code
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
self._snippet = ReluOpSnippet(inputs, output, in_dtype,
out_dtype,
ref_count, to_eval)
@classmethod
def build_op_info(cls, ugraph, name, tensor, **kwargs):
return OperationInfo(
name=name,
input_tensors=[tensor],
output_tensors=[
TensorInfo(
name='{}:0'.format(name),
op_name=name,
dtype=tensor.dtype,
shape=tensor.shape[:],
ugraph=ugraph
)
],
op_type=cls.op_type,
op_attr={
'T': AttrValueConverter.__utensor_generic_type__(
value_name='type',
value=DataTypeConverter.get_tf_value(tensor.dtype)
)
},
ugraph=ugraph,
backend=kwargs.get('backend', 'tensorflow')
)
@OperatorFactory.register
class _QuantizedReluOperator(_Operator):
op_type = "QuantizedRelu"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
in_dtype, qout_dtype = (op_info.input_tensors[0].dtype,
op_info.output_tensors[0].dtype) #NT: why separate this out?
#DB: I don't know, it's in the uTensor C code
data_manager = kwargs['data_manager']
out_dtypes = [tensor_info.dtype for tensor_info in op_info.output_tensors[1:]]
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
self._snippet = QuantizedReluOpSnippet(inputs, outputs, in_dtype,
out_dtypes, qout_dtype,
ref_counts, to_eval, address)
@OperatorFactory.register
class _QuantizedAddOperator(_Operator):
op_type = "QuantizedAdd"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
x_dtype, w_dtype, out_dtype = (op_info.input_tensors[0].dtype,
op_info.input_tensors[1].dtype,
op_info.output_tensors[0].dtype)
data_manager = kwargs['data_manager']
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
self._snippet = QuantizedAddOpSnippet(inputs, outputs,
x_dtype, w_dtype, out_dtype,
ref_counts, to_eval, address)
@OperatorFactory.register
class _QuantizedMulOperator(_Operator):
op_type = "QuantizedMul"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
x_dtype, w_dtype, out_dtype = (op_info.input_tensors[0].dtype,
op_info.input_tensors[1].dtype,
op_info.output_tensors[0].dtype)
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
self._snippet = QuantizedMulOpSnippet(inputs, outputs,
x_dtype, w_dtype, out_dtype,
ref_counts, to_eval)
@OperatorFactory.register
class _RequantizationRangeOperator(_Operator):
op_type = "RequantizationRange"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
out_dtype = op_info.output_tensors[0].dtype
data_manager = kwargs['data_manager']
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
self._snippet = RequantizationRangeOpSnippet(inputs, outputs, out_dtype,
ref_counts, to_eval, address)
@OperatorFactory.register
class _RequantizeOperator(_Operator):
op_type = "Requantize"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
qout_dtype = op_info.output_tensors[0].dtype
range_dtype = op_info.output_tensors[1].dtype
data_manager = kwargs['data_manager']
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
self._snippet = RequantizeOpSnippet(inputs, outputs,
qout_dtype, range_dtype,
ref_counts, to_eval, address)
@OperatorFactory.register
class _ReshapeOperator(_Operator):
op_type = "Reshape"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
data_manager = kwargs['data_manager']
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr,
data_manager,
op_info)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
address = parser.get('address', [])
dtype = op_info.input_tensors[0].dtype
self._snippet = ReshapeOpSnippet(inputs, output, dtype, ref_count, to_eval, address)
@OperatorFactory.register
class _QuantizedReshapeOperator(_Operator):
op_type = "QuantizedReshape"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
self._snippet = QuantizedReshapeOpSnippet(inputs=inputs,
outputs=outputs,
ref_counts=ref_counts,
to_eval=to_eval)
@OperatorFactory.register
class _CMSIS_NN_FCOperator(_Operator):
op_type="CMSIS_NN_FC"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
#import pdb; pdb.set_trace()
# Note order of inputs/outputs is preserved
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
out_dtype = op_info.output_tensors[0].dtype
in_dtypes = [tensor_info.dtype for tensor_info in op_info.input_tensors]
    assert (op_info.input_tensors[0].shape[1] is None or op_info.input_tensors[0].shape[1] == 1)
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
self._snippet = CMSISNNFCOpSnippet(inputs=inputs,
output=output,
ref_counts=ref_counts,
in_dtypes=in_dtypes,
out_dtype=out_dtype,
to_eval=to_eval)
@OperatorFactory.register
class _Conv2DOperator(_Operator):
op_type = "Conv2D"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
in_dtype, filter_dtype = (op_info.input_tensors[0].dtype,
op_info.input_tensors[1].dtype)
out_dtype = op_info.output_tensors[0].dtype
strides = op_info.op_attr["strides"].value.ints_value
padding = op_info.op_attr["padding"].value.decode('utf8')
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
self._snippet = Conv2DOpSnippet(inputs, output, strides, padding,
in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtype=out_dtype,
ref_count=ref_count, to_eval=to_eval)
@classmethod
def build_op_info(cls, ugraph, name, tensor_x, tensor_w, stride_height, stride_width, padding='SAME', **kwargs):
# dboy: I'm too lazy to implement the padding algorithm again
# simply call tf to find out the output shape
dummy_x = np.empty(tensor_x.shape, dtype=tensor_x.dtype)
dummy_w = np.empty(tensor_w.shape, dtype=tensor_w.dtype)
graph = tf.Graph()
with graph.as_default():
dummy_out = tf.nn.conv2d(
dummy_x,
dummy_w,
strides=[1, stride_height, stride_width, 1],
padding=padding,
name='dummy'
)
node_def = [node for node in graph.as_graph_def().node if node.name == 'dummy'][0]
output_shape = dummy_out.shape.as_list()
output_dtype = np.promote_types(tensor_x.dtype, tensor_w.dtype)
op_attr = {
k: AttrValueConverter.get_generic_value(v)
for k, v in node_def.attr.items()
}
return OperationInfo(
name=name,
input_tensors=[tensor_x, tensor_w],
output_tensors=[
TensorInfo(
name='{}:0'.format(name),
op_name=name,
dtype=output_dtype,
shape=output_shape,
ugraph=ugraph,
)
],
op_type=cls.op_type,
op_attr=op_attr,
ugraph=ugraph,
backend=kwargs.get('backend', 'tensorflow'),
)
@OperatorFactory.register
class _FusedConv2DMaxpoolOperator(_Operator):
op_type = "FusedConv2DMaxpool"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
in_dtype, filter_dtype = (op_info.input_tensors[0].dtype,
op_info.input_tensors[1].dtype)
out_dtype = op_info.output_tensors[0].dtype
strides = op_info.op_attr["strides"].value.ints_value
ksize = op_info.op_attr["ksize"].value.ints_value
padding = op_info.op_attr["padding"].value.decode('utf8')
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
self._snippet = FusedConv2DMaxpoolOpSnippet(inputs, output, strides, ksize, padding,
in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtype=out_dtype,
ref_count=ref_count, to_eval=to_eval)
@OperatorFactory.register
class _QuantizedFusedConv2DMaxpoolOperator(_Operator):
op_type = "QuantizedFusedConv2DMaxpool"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
in_dtype, filter_dtype = (op_info.input_tensors[0].dtype,
op_info.input_tensors[1].dtype)
out_dtypes = [tensor_info.dtype for tensor_info in op_info.output_tensors]
strides = op_info.op_attr['_utensor_conv']["strides"].value.ints_value
ksize = op_info.op_attr['_utensor_pool']["ksize"].value.ints_value
padding = op_info.op_attr['_utensor_conv']["padding"].value.decode('utf8')
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_counts = parser.get('ref_counts', None)
to_eval = parser.get('to_eval', False)
self._snippet = QuantizedFusedConv2DMaxpoolOpSnippet(
inputs, outputs, strides, ksize, padding,
in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtypes=out_dtypes,
ref_counts=ref_counts, to_eval=to_eval
)
@OperatorFactory.register
class _Conv2DQuantOperator(_Operator):
op_type = "QuantizedConv2D"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
in_dtype, filter_dtype = (op_info.input_tensors[0].dtype,
op_info.input_tensors[1].dtype)
out_dtypes = [tensor_info.dtype for tensor_info in op_info.output_tensors]
strides = op_info.op_attr["strides"].value.ints_value
padding = op_info.op_attr["padding"].value.decode('utf8')
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
self._snippet = Conv2DQuantOpSnippet(inputs, outputs, strides, padding,
in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtypes=out_dtypes,
ref_counts=ref_counts, to_eval=to_eval)
@OperatorFactory.register
class _Uint8Q7OriginOperator(_Operator):
op_type = "Uint8Q7OriginOp"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
self._snippet = Uint8Q7OriginSnippet(inputs, output, ref_count, to_eval)
#hard coding to uint8_t uint8_t int32_t for now
@OperatorFactory.register
class _QuantRangeForMultiplication_u8_u8_int32_Operator(_Operator):
op_type = "QuantRangeForMultiplicationu8u8int32Op"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
outputs = [tensor_info.name for tensor_info in op_info.output_tensors]
    assert op_info.output_tensors[0].dtype == op_info.output_tensors[1].dtype, \
      "output tensors must have the same data type"
#output_type = op_info.output_tensors[0].dtype
#FIXME: hard coding the output to int32 for now
output_type = np.dtype([('qint32', '<i4')])
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_counts = parser.get('ref_counts', [])
to_eval = parser.get('to_eval', False)
self._snippet = QuantRangeForMultiplicationSnippet(inputs, outputs, output_type, ref_counts, to_eval)
@OperatorFactory.register
@OpEqualityDelegate.is_compatible_with("Const", _morphism.Inline2ConstMorphism)
class _InlineOperator(_Operator):
op_type = "Inline"
def __init__(self, op_info, **kwargs):
out_tensor_info = op_info.output_tensors[0]
out_tname, out_dtype, tensor_shape = (out_tensor_info.name,
out_tensor_info.dtype,
out_tensor_info.shape)
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
pre_tname = self._prepare_tensor_name(out_tname)
inline_tname = self._prepare_inline_array_name(out_tname)
value = op_info.op_attr['value'].value.np_array.flatten()
self._snippet = CreateTensorBinarySnippet(out_tname, tensor_shape=tensor_shape,
tf_dtype=out_dtype,
sptr_name=pre_tname,
inline_name=inline_tname,
ref_count=ref_count)
weight_snippet = WeightSnippet(inline_tname,
out_dtype,
tensor_shape,
value)
weight_container = kwargs['weight_container']
weight_container.add_snippet(weight_snippet)
def _prepare_tensor_name(self, tensor_name):
prepared = tensor_name.replace(":", "_").replace("/", "_")
return prepared
def _prepare_inline_array_name(self, tensor_name):
inline = tensor_name.replace(":", "_").replace("/", "_")
preapred = "inline_{}".format(inline)
return preapred
@OperatorFactory.register
class _RamOperator(_Operator):
op_type = "Ram"
def __init__(self, op_info, **kwargs):
out_tensor_info = op_info.output_tensors[0]
out_tname, out_dtype, tensor_shape = (out_tensor_info.name,
out_tensor_info.dtype,
out_tensor_info.shape)
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
pre_tname = self._prepare_tensor_name(out_tname)
#inline_tname = self._prepare_inline_array_name(out_tname)
#value = op_info.op_attr['value'].value.np_array.flatten()
self._snippet = CreateTensorRamSnippet(out_tname, tensor_shape=tensor_shape,
tf_dtype=out_dtype,
sptr_name=pre_tname,
ref_count=ref_count)
def _prepare_tensor_name(self, tensor_name):
prepared = tensor_name.replace(":", "_").replace("/", "_")
return prepared
@OperatorFactory.register
class _ShapeOperator(_Operator):
op_type = "Shape"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', True)
out_dtype = op_info.output_tensors[0].dtype
self._snippet = ShapeOpSnippet(inputs, output, out_dtype, ref_count, to_eval)
@OperatorFactory.register
class _StridedSliceOperator(_Operator):
op_type = "StridedSlice"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', True)
dtype = op_info.input_tensors[0].dtype
out_dtype = op_info.output_tensors[0].dtype
begin_mask = op_info.op_attr['begin_mask'].value
ellipsis_mask = op_info.op_attr['ellipsis_mask'].value
end_mask = op_info.op_attr['end_mask'].value
    new_axis_mask = op_info.op_attr['new_axis_mask'].value
shrink_axis_mask = op_info.op_attr['shrink_axis_mask'].value
self._snippet = StridedSliceOpSnippet(inputs, output, dtype, out_dtype,
begin_mask, ellipsis_mask, end_mask,
new_axis_mask, shrink_axis_mask,
ref_count, to_eval)
@OperatorFactory.register
class _PackOperator(_Operator):
op_type = "Pack"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', True)
dtype = op_info.input_tensors[0].dtype
out_dtype = op_info.output_tensors[0].dtype
N = op_info.op_attr['N'].value
axis = op_info.op_attr['axis'].value
self._snippet = PackOpSnippet(inputs, output, dtype, out_dtype, N, axis, ref_count, to_eval)
@OperatorFactory.register
class _SoftmaxOperator(_Operator):
# NOTE: softmax in tf is a composite op, no trivial way
# to construct the op_info if we want to support
# tf quantization for softmax op. We simply just
# support uTensor softmax only.
op_type = "Softmax"
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
input_tname = op_info.input_tensors[0].name
output_tname = op_info.output_tensors[0].name
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', True)
out_dtype = op_info.output_tensors[0].dtype
in_dtype = op_info.input_tensors[0].dtype
self._snippet = SoftmaxOpSnippet(
input_tname,
output_tname,
in_dtype,
out_dtype,
ref_count,
to_eval
)
@OperatorFactory.register
class _GatherOperator(_Operator):
op_type = "Gather" # tf op type
def __init__(self, op_info, **kwargs):
_Operator.__init__(self)
inputs = [tensor_info.name for tensor_info in op_info.input_tensors]
output = op_info.output_tensors[0].name
tf_dtype = op_info.input_tensors[0].dtype
parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE,
op_info.op_attr)
ref_count = parser.get('ref_counts', [0])[0]
to_eval = parser.get('to_eval', False)
self._snippet = GatherOpSnippet(inputs, output, tf_dtype, ref_count, to_eval)
| 40,455
| 3,736
| 817
|
f1f76e0897cbb9b84dcbbc0fc3e50e81b0c75e9b
| 2,993
|
py
|
Python
|
ndg/security/server/test/unit/wsgi/authn/test_httpbasicauth.py
|
cedadev/ndg_security_server
|
6873cc0de1a01ad05ddcbeb3f074a33923dc1ca1
|
[
"BSD-3-Clause"
] | null | null | null |
ndg/security/server/test/unit/wsgi/authn/test_httpbasicauth.py
|
cedadev/ndg_security_server
|
6873cc0de1a01ad05ddcbeb3f074a33923dc1ca1
|
[
"BSD-3-Clause"
] | null | null | null |
ndg/security/server/test/unit/wsgi/authn/test_httpbasicauth.py
|
cedadev/ndg_security_server
|
6873cc0de1a01ad05ddcbeb3f074a33923dc1ca1
|
[
"BSD-3-Clause"
] | 1
|
2017-12-05T17:31:08.000Z
|
2017-12-05T17:31:08.000Z
|
#!/usr/bin/env python
"""Unit tests for WSGI HTTP Basic Auth handler
NERC DataGrid Project
"""
__author__ = "P J Kershaw"
__date__ = "13/10/09"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
import urllib.request, urllib.error, urllib.parse
import base64
import paste.fixture
from paste.httpexceptions import HTTPUnauthorized
from ndg.security.server.test.base import BaseTestCase
from ndg.security.server.wsgi.httpbasicauth import HttpBasicAuthMiddleware
class TestAuthnApp(object):
'''Test Application for the Authentication handler to protect'''
response = b"Test HTTP Basic Authentication application"
if __name__ == "__main__":
unittest.main()
| 32.182796
| 80
| 0.636151
|
#!/usr/bin/env python
"""Unit tests for WSGI HTTP Basic Auth handler
NERC DataGrid Project
"""
__author__ = "P J Kershaw"
__date__ = "13/10/09"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
import urllib.request, urllib.error, urllib.parse
import base64
import paste.fixture
from paste.httpexceptions import HTTPUnauthorized
from ndg.security.server.test.base import BaseTestCase
from ndg.security.server.wsgi.httpbasicauth import HttpBasicAuthMiddleware
class TestAuthnApp(object):
'''Test Application for the Authentication handler to protect'''
response = b"Test HTTP Basic Authentication application"
def __init__(self, app_conf, **local_conf):
pass
def __call__(self, environ, start_response):
if environ['PATH_INFO'] == '/test_200':
status = "200 OK"
else:
status = "404 Not found"
start_response(status,
[('Content-length',
str(len(TestAuthnApp.response))),
('Content-type', 'text/plain')])
return [TestAuthnApp.response]
class HttpBasicAuthPluginMiddleware(object):
USERNAME = b'testuser'
PASSWORD = b'password'
def __init__(self, app):
self._app = app
def __call__(self, environ, start_response):
def authenticate(environ, username, password):
if username == HttpBasicAuthPluginMiddleware.USERNAME and \
password == HttpBasicAuthPluginMiddleware.PASSWORD:
return
else:
raise HTTPUnauthorized("Invalid credentials")
environ['authenticate'] = authenticate
return self._app(environ, start_response)
class HttpBasicAuthMiddlewareTestCase(BaseTestCase):
SERVICE_PORTNUM = 10443
def __init__(self, *args, **kwargs):
app = TestAuthnApp({})
app = HttpBasicAuthMiddleware.filter_app_factory(app, {}, prefix='',
authnFunc='authenticate')
self.wsgiapp = HttpBasicAuthPluginMiddleware(app)
self.app = paste.fixture.TestApp(self.wsgiapp)
BaseTestCase.__init__(self, *args, **kwargs)
def test01PasteFixture(self):
username = HttpBasicAuthPluginMiddleware.USERNAME
password = HttpBasicAuthPluginMiddleware.PASSWORD
        base64String = base64.b64encode(b'%s:%s' % (username, password)).decode('ascii')
        authHeader = "Basic %s" % base64String
headers = {'Authorization': authHeader}
url = '/test_200'
response = self.app.get(url, headers=headers, status=200)
print(response)
if __name__ == "__main__":
unittest.main()
| 1,708
| 260
| 116
|
0c2450da8b64ba940ddbb47766c3145d609c2f49
| 4,725
|
py
|
Python
|
src/tankoh2/contour.py
|
sfreund-DLR/tankoh2
|
92ff080f7034a7eb1cdabed5089c79fd01af4d11
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
src/tankoh2/contour.py
|
sfreund-DLR/tankoh2
|
92ff080f7034a7eb1cdabed5089c79fd01af4d11
|
[
"MIT",
"BSD-3-Clause"
] | 27
|
2021-11-03T19:53:00.000Z
|
2022-03-28T12:43:30.000Z
|
src/tankoh2/contour.py
|
sfreund-DLR/tankoh2
|
92ff080f7034a7eb1cdabed5089c79fd01af4d11
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
"""methods for liners and domes"""
import numpy as np
from tankoh2 import pychain
from tankoh2.service import log
from tankoh2.exception import Tankoh2Error
from tankoh2.utilities import updateName, copyAsJson
# #########################################################################################
# Create Liner
# #########################################################################################
def domeContourLength(dome):
"""Returns the contour length of a dome"""
contourCoords = np.array([dome.getXCoords(), dome.getRCoords()]).T
contourDiffs = contourCoords[1:,:] - contourCoords[:-1]
contourLength = np.sum(np.linalg.norm(contourDiffs, axis=1))
return contourLength
def getDome(cylinderRadius, polarOpening, domeType=None,
x=None, r=None):
"""
:param cylinderRadius: radius of the cylinder
:param polarOpening: polar opening radius
:param domeType: pychain.winding.DOME_TYPES.ISOTENSOID or pychain.winding.DOME_TYPES.CIRCLE
:param x: x-coordinates of a custom dome contour
:param r: radius-coordinates of a custom dome contour. r[0] starts at cylinderRadius
"""
if domeType is None:
domeType = pychain.winding.DOME_TYPES.ISOTENSOID
elif isinstance(domeType, str):
domeType = domeType.lower()
if domeType == 'isotensoid':
domeType = pychain.winding.DOME_TYPES.ISOTENSOID
elif domeType == 'circle':
domeType = pychain.winding.DOME_TYPES.CIRCLE
else:
raise Tankoh2Error(f'wrong dome type "{domeType}". Valid dome types: [isotensoid, circle]')
# build dome
dome = pychain.winding.Dome()
dome.buildDome(cylinderRadius, polarOpening, domeType)
if x is not None and r is not None:
if not np.allclose(r[0], cylinderRadius):
raise Tankoh2Error('cylinderRadius and r-vector do not fit')
if not np.allclose(r[-1], polarOpening):
raise Tankoh2Error('polarOpening and r-vector do not fit')
dome.setPoints(x, r)
return dome
def getLiner(dome, length, linerFilename=None, linerName=None, dome2 = None, nodeNumber = 500):
"""Creates a liner
:param dome: dome instance
    :param length: cylindrical length of liner
:param linerFilename: if given, the liner is saved to this file for visualization in µChainWind
:param linerName: name of the liner written to the file
:return:
"""
# create a symmetric liner with dome information and cylinder length
liner = pychain.winding.Liner()
# spline for winding calculation is left on default of 1.0
if dome2:
contourLength = length + domeContourLength(dome) + domeContourLength(dome2)
else:
contourLength = length / 2 + domeContourLength(dome) # use half model (one dome, half cylinder)
deltaLengthSpline = contourLength / nodeNumber # just use half side
if dome2 is not None:
log.info("Creat unsymmetric vessel")
liner.buildFromDomes(dome, dome2, length, deltaLengthSpline)
else:
log.info("Create symmetric vessel")
liner.buildFromDome(dome, length, deltaLengthSpline)
if linerFilename:
liner.saveToFile(linerFilename)
updateName(linerFilename, linerName, ['liner'])
copyAsJson(linerFilename, 'liner')
liner.loadFromFile(linerFilename)
return liner
| 37.8
| 116
| 0.631534
|
"""methods for liners and domes"""
import numpy as np
from tankoh2 import pychain
from tankoh2.service import log
from tankoh2.exception import Tankoh2Error
from tankoh2.utilities import updateName, copyAsJson
# #########################################################################################
# Create Liner
# #########################################################################################
def getReducedDomePoints(contourFilename, spacing, contourOutFilename=None):
# load contour from file
Data = np.loadtxt(contourFilename)
if 1:
contourPoints = np.abs(Data)
contourPoints[:, 0] -= contourPoints[0, 0]
# reduce points
redContourPoints = contourPoints[::spacing, :]
if not np.allclose(redContourPoints[-1, :], contourPoints[-1, :]):
redContourPoints = np.append(redContourPoints, [contourPoints[-1, :]], axis=0)
if contourOutFilename:
np.savetxt(contourOutFilename, redContourPoints, delimiter=',')
Xvec, rVec = redContourPoints[:, 0], redContourPoints[:, 1]
else:
Xvec = abs(Data[:, 0])
Xvec = Xvec - Xvec[0]
rVec = abs(Data[:, 1])
# reduce data points
log.info(len(Xvec) - 1)
        index = np.linspace(0, spacing * int((len(Xvec) / spacing)), int((len(Xvec) / spacing)) + 1, dtype=np.int16)
arr = [len(Xvec) - 1]
index = np.append(index, arr)
Xvec = Xvec[index]
rVec = rVec[index]
# save liner contour for loading in mikroWind
        with open(contourOutFilename, "w") as contour:
for i in range(len(Xvec)):
contour.write(str(Xvec[i]) + ',' + str(rVec[i]) + '\n')
return Xvec, rVec
def domeContourLength(dome):
"""Returns the contour length of a dome"""
contourCoords = np.array([dome.getXCoords(), dome.getRCoords()]).T
contourDiffs = contourCoords[1:,:] - contourCoords[:-1]
contourLength = np.sum(np.linalg.norm(contourDiffs, axis=1))
return contourLength
def getDome(cylinderRadius, polarOpening, domeType=None,
x=None, r=None):
"""
:param cylinderRadius: radius of the cylinder
:param polarOpening: polar opening radius
:param domeType: pychain.winding.DOME_TYPES.ISOTENSOID or pychain.winding.DOME_TYPES.CIRCLE
:param x: x-coordinates of a custom dome contour
:param r: radius-coordinates of a custom dome contour. r[0] starts at cylinderRadius
"""
if domeType is None:
domeType = pychain.winding.DOME_TYPES.ISOTENSOID
elif isinstance(domeType, str):
domeType = domeType.lower()
if domeType == 'isotensoid':
domeType = pychain.winding.DOME_TYPES.ISOTENSOID
elif domeType == 'circle':
domeType = pychain.winding.DOME_TYPES.CIRCLE
else:
raise Tankoh2Error(f'wrong dome type "{domeType}". Valid dome types: [isotensoid, circle]')
# build dome
dome = pychain.winding.Dome()
dome.buildDome(cylinderRadius, polarOpening, domeType)
if x is not None and r is not None:
if not np.allclose(r[0], cylinderRadius):
raise Tankoh2Error('cylinderRadius and r-vector do not fit')
if not np.allclose(r[-1], polarOpening):
raise Tankoh2Error('polarOpening and r-vector do not fit')
dome.setPoints(x, r)
return dome
def getLiner(dome, length, linerFilename=None, linerName=None, dome2 = None, nodeNumber = 500):
"""Creates a liner
:param dome: dome instance
    :param length: cylindrical length of liner
:param linerFilename: if given, the liner is saved to this file for visualization in µChainWind
:param linerName: name of the liner written to the file
:return:
"""
# create a symmetric liner with dome information and cylinder length
liner = pychain.winding.Liner()
# spline for winding calculation is left on default of 1.0
if dome2:
contourLength = length + domeContourLength(dome) + domeContourLength(dome2)
else:
contourLength = length / 2 + domeContourLength(dome) # use half model (one dome, half cylinder)
deltaLengthSpline = contourLength / nodeNumber # just use half side
if dome2 is not None:
log.info("Creat unsymmetric vessel")
liner.buildFromDomes(dome, dome2, length, deltaLengthSpline)
else:
log.info("Create symmetric vessel")
liner.buildFromDome(dome, length, deltaLengthSpline)
if linerFilename:
liner.saveToFile(linerFilename)
updateName(linerFilename, linerName, ['liner'])
copyAsJson(linerFilename, 'liner')
liner.loadFromFile(linerFilename)
return liner
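# A minimal usage sketch (not part of the original module): build an isotensoid dome
# and a liner from it. The radii, length and liner name below are invented
# illustration values, not data from any real tank design.
if __name__ == '__main__':
    _dome = getDome(cylinderRadius=500., polarOpening=50., domeType='isotensoid')
    _liner = getLiner(_dome, length=2000., linerName='demo_liner')
    log.info(f'demo liner built; dome contour length: {domeContourLength(_dome):.1f}')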
| 1,298
| 0
| 23
|
d9f39d299ff10da061d80b7dd42549838fdc0966
| 206
|
py
|
Python
|
test/test_local_grid_client.py
|
mari-linhares/Grid
|
e06a13f24667160b91cd5f682983453072877f30
|
[
"Apache-2.0"
] | null | null | null |
test/test_local_grid_client.py
|
mari-linhares/Grid
|
e06a13f24667160b91cd5f682983453072877f30
|
[
"Apache-2.0"
] | null | null | null |
test/test_local_grid_client.py
|
mari-linhares/Grid
|
e06a13f24667160b91cd5f682983453072877f30
|
[
"Apache-2.0"
] | null | null | null |
import syft as sy
import torch as th
from grid.client import GridClient
| 18.727273
| 56
| 0.718447
|
import syft as sy
import torch as th
from grid.client import GridClient
def test_local_grid_client():
hook = sy.TorchHook(th)
gr_client = GridClient(addr="http://127.0.0.1:5000")
assert True
| 109
| 0
| 23
|
f11ef309eb4e3feab167a5e7a7494a790d48b331
| 7,450
|
py
|
Python
|
tests/test_arg_check.py
|
lizeyan/tensorkit
|
2997a5914ec3c3ec72f91eb5906b5ee878fdc020
|
[
"MIT"
] | null | null | null |
tests/test_arg_check.py
|
lizeyan/tensorkit
|
2997a5914ec3c3ec72f91eb5906b5ee878fdc020
|
[
"MIT"
] | null | null | null |
tests/test_arg_check.py
|
lizeyan/tensorkit
|
2997a5914ec3c3ec72f91eb5906b5ee878fdc020
|
[
"MIT"
] | 2
|
2020-10-15T06:41:32.000Z
|
2021-01-27T12:55:11.000Z
|
import unittest
import pytest
import tensorkit as tk
from tensorkit import tensor as T
from tensorkit.arg_check import *
from tests.helper import *
| 38.402062
| 82
| 0.475168
|
import unittest
import pytest
import tensorkit as tk
from tensorkit import tensor as T
from tensorkit.arg_check import *
from tests.helper import *
class ArgCheckTestCase(TestCase):
def test_validate_positive_int(self):
for v in [1, 2, 3]:
self.assertEqual(validate_positive_int('v', v), v)
with pytest.raises(ValueError,
match='`v` must be a positive int: '
'got -1'):
_ = validate_positive_int('v', -1)
def test_validate_layer(self):
layer = tk.layers.Linear(5, 3)
for v in [layer, tk.layers.jit_compile(layer)]:
self.assertIs(validate_layer('v', v), v)
with pytest.raises(TypeError,
match='`v` is required to be a layer: got 123'):
_ = validate_layer('v', 123)
def test_validate_layer_factory(self):
for v in [tk.layers.Linear, (lambda: tk.layers.Linear(5, 3))]:
self.assertIs(validate_layer_factory('v', v), v)
with pytest.raises(TypeError,
match='`v` is required to be a layer factory: '
'got 123'):
_ = validate_layer_factory('v', 123)
def test_get_layer_from_layer_or_factory(self):
factory = lambda in_features, out_features: \
tk.layers.Linear(in_features, out_features)
layer = factory(5, 3)
for v in [layer, tk.layers.jit_compile(layer),
tk.layers.Linear, factory]:
out = get_layer_from_layer_or_factory(
'v', v, args=(5,), kwargs=dict(out_features=3))
if isinstance(v, T.Module):
self.assertIs(out, v)
else:
self.assertIsInstance(out, tk.layers.Linear)
self.assertEqual(out.in_features, 5)
self.assertEqual(out.out_features, 3)
with pytest.raises(TypeError,
match='`v` is required to be a layer or a layer '
'factory: got 123'):
_ = get_layer_from_layer_or_factory('v', 123)
def test_validate_conv_size(self):
for spatial_ndims in (1, 2, 3):
self.assertEqual(
validate_conv_size('v', 2, spatial_ndims),
[2] * spatial_ndims
)
self.assertEqual(
validate_conv_size('v', [1, 2, 3][:spatial_ndims], spatial_ndims),
[1, 2, 3][:spatial_ndims]
)
self.assertEqual(
validate_conv_size('v', (1, 2, 3)[:spatial_ndims], spatial_ndims),
[1, 2, 3][:spatial_ndims]
)
with pytest.raises(ValueError,
match=r'`v` must be either a positive integer, or '
r'a sequence of positive integers of length '
r'`3`: got \[1, 2\]'):
_ = validate_conv_size('v', [1, 2], 3),
with pytest.raises(ValueError,
match=r'`v` must be either a positive integer, or '
r'a sequence of positive integers of length '
r'`3`: got \[1, 2, 0\]'):
_ = validate_conv_size('v', [1, 2, 0], 3)
def test_validate_padding(self):
for spatial_ndims in (1, 2, 3):
self.assertEqual(
validate_padding(
'none',
kernel_size=[5, 6, 7][:spatial_ndims],
dilation=[1, 2, 3][:spatial_ndims],
spatial_ndims=spatial_ndims,
),
[(0, 0)] * spatial_ndims
)
self.assertEqual(
validate_padding(
'full',
kernel_size=[5, 6, 7][:spatial_ndims],
dilation=[1, 2, 3][:spatial_ndims],
spatial_ndims=spatial_ndims,
),
[(4, 4), (10, 10), (18, 18)][:spatial_ndims]
)
self.assertEqual(
validate_padding(
'half',
kernel_size=[4, 5, 6][:spatial_ndims],
dilation=[1, 2, 3][:spatial_ndims],
spatial_ndims=spatial_ndims,
),
[(1, 2), (4, 4), (7, 8)][:spatial_ndims]
)
self.assertEqual(
validate_padding(
0,
kernel_size=[5, 6, 7][:spatial_ndims],
dilation=[1, 2, 3][:spatial_ndims],
spatial_ndims=spatial_ndims,
),
[(0, 0)] * spatial_ndims
)
self.assertEqual(
validate_padding(
[(3, 4), 4, (4, 5)][:spatial_ndims],
kernel_size=[5, 6, 7][:spatial_ndims],
dilation=[1, 2, 3][:spatial_ndims],
spatial_ndims=spatial_ndims,
),
[(3, 4), (4, 4), (4, 5)][:spatial_ndims]
)
self.assertEqual(
validate_padding(
(3, 4, 5)[:spatial_ndims],
kernel_size=[5, 6, 7][:spatial_ndims],
dilation=[1, 2, 3][:spatial_ndims],
spatial_ndims=spatial_ndims,
),
[(3, 3), (4, 4), (5, 5)][:spatial_ndims]
)
msg_prefix = (
r'`padding` must be a non-negative integer, a '
r'sequence of non-negative integers of length '
r'`3`, "none", "half" or "full": got '
)
with pytest.raises(ValueError, match=msg_prefix + r'-1'):
_ = validate_padding(-1, [1] * 3, [1] * 3, 3)
with pytest.raises(ValueError, match=msg_prefix + r'\[1, 2\]'):
_ = validate_padding([1, 2], [1] * 3, [1] * 3, 3)
with pytest.raises(ValueError, match=msg_prefix + r'\[1, 2, -1\]'):
_ = validate_padding([1, 2, -1], [1] * 3, [1] * 3, 3)
def test_validate_output_padding(self):
for spatial_ndims in (1, 2, 3):
self.assertEqual(
validate_output_padding(
0,
stride=[1, 2, 3][: spatial_ndims],
dilation=[1, 2, 3][:spatial_ndims],
spatial_ndims=spatial_ndims,
),
[0] * spatial_ndims
)
self.assertEqual(
validate_output_padding(
[1, 2, 3][:spatial_ndims],
stride=[4, 5, 6][: spatial_ndims],
dilation=[3, 4, 5][:spatial_ndims],
spatial_ndims=spatial_ndims,
),
[1, 2, 3][:spatial_ndims],
)
err_msg = (
r'`output_padding` must be a non-negative integer, or a sequence '
r'of non-negative integers, and must be smaller than either '
r'`stride` or `dilation`'
)
with pytest.raises(ValueError, match=err_msg):
_ = validate_output_padding(-1, [4] * 3, [4] * 3, 3)
with pytest.raises(ValueError, match=err_msg):
_ = validate_output_padding([1, 2], [4] * 3, [4] * 3, 3)
with pytest.raises(ValueError, match=err_msg):
_ = validate_output_padding([1, 2, -1], [4] * 3, [4] * 3, 3)
| 7,075
| 12
| 212
|
9928c1c459906e3027e54332db9b7b9f35517297
| 930
|
py
|
Python
|
levels/chock_a_block.py
|
ungood/stockfighter-py
|
8fd6cc70177227164f3984670d352a89d9f0b0d5
|
[
"MIT"
] | null | null | null |
levels/chock_a_block.py
|
ungood/stockfighter-py
|
8fd6cc70177227164f3984670d352a89d9f0b0d5
|
[
"MIT"
] | null | null | null |
levels/chock_a_block.py
|
ungood/stockfighter-py
|
8fd6cc70177227164f3984670d352a89d9f0b0d5
|
[
"MIT"
] | null | null | null |
#from stockfighter import Stockfighter
import os, time
# sf = Stockfighter()
#
# level = sf.levels['chock_a_block']
# info = level.start()
# print(info)
#
# sf = Stockfighter()
# print(sf.heartbeat())
#
# venue = sf.venues['PVIEX']
#
# stock = venue.stocks['SOF']
# for stock in venue.stocks:
# print(stock)
#
# ORDER_SIZE = 50
# remaining = 100000 - 42823
# goal = 9103
#
# def run():
# while(remaining > 0):
# quote = stock.quote()
# size = quote['askSize']
# if(size < 1):
# continue
# time.sleep(1)
# ask = quote['ask']
# if(ask > goal):
# continue
# time.sleep(1)
# order = min(remaining, size, ORDER_SIZE)
# if order > 0:
# print('Placing order for {} at {}. Remaining: {}'.format(order, ask, remaining))
# stock.buy(ACCOUNT, ask, order)
# remaining -= order
#(venue='CENOEX', account='SAS22786391')
| 22.142857
| 90
| 0.588172
|
#from stockfighter import Stockfighter
import os, time
def create_parser(parent):
pass
# sf = Stockfighter()
#
# level = sf.levels['chock_a_block']
# info = level.start()
# print(info)
#
# sf = Stockfighter()
# print(sf.heartbeat())
#
# venue = sf.venues['PVIEX']
#
# stock = venue.stocks['SOF']
# for stock in venue.stocks:
# print(stock)
#
# ORDER_SIZE = 50
# remaining = 100000 - 42823
# goal = 9103
#
# def run():
# while(remaining > 0):
# quote = stock.quote()
# size = quote['askSize']
# if(size < 1):
# continue
# time.sleep(1)
# ask = quote['ask']
# if(ask > goal):
# continue
# time.sleep(1)
# order = min(remaining, size, ORDER_SIZE)
# if order > 0:
# print('Placing order for {} at {}. Remaining: {}'.format(order, ask, remaining))
# stock.buy(ACCOUNT, ask, order)
# remaining -= order
#(venue='CENOEX', account='SAS22786391')
| 14
| 0
| 23
|
f7e3107009c02c524f668486aee1490c29db37a3
| 3,471
|
py
|
Python
|
matmul.py
|
zangkaiqiang/pyaam
|
3c59026df17fb0b4588797026d5a2fe64d05fca9
|
[
"MIT"
] | 2
|
2020-07-06T18:18:25.000Z
|
2021-01-20T08:05:21.000Z
|
matmul.py
|
zangkaiqiang/pyaam
|
3c59026df17fb0b4588797026d5a2fe64d05fca9
|
[
"MIT"
] | null | null | null |
matmul.py
|
zangkaiqiang/pyaam
|
3c59026df17fb0b4588797026d5a2fe64d05fca9
|
[
"MIT"
] | 3
|
2021-01-11T07:16:42.000Z
|
2021-07-28T11:37:01.000Z
|
########################################################################
#
# License: BSD
# Created: October 11, 2013
# Author: Francesc Alted
#
########################################################################
"""
Implementation of an out of core matrix-matrix multiplication for PyTables.
"""
import sys, math
import numpy as np
import tables as tb
_MB = 2**20
OOC_BUFFER_SIZE = 32*_MB
"""The buffer size for out-of-core operations.
"""
def dot(a, b, out=None):
"""
Matrix multiplication of two 2-D arrays.
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : array_like, optional
Output argument. This must have the exact kind that would be
returned if it was not used.
Returns
-------
output : CArray or scalar
Returns the dot product of `a` and `b`. If `a` and `b` are
both scalars or both 1-D arrays then a scalar is returned;
otherwise a new CArray (in file dot.h5:/out) is returned. If
`out` parameter is provided, then it is returned instead.
Raises
------
ValueError
If the last dimension of `a` is not the same size as the
second-to-last dimension of `b`.
"""
if len(a.shape) != 2 or len(b.shape) != 2:
raise (ValueError, "only 2-D matrices supported")
if a.shape[1] != b.shape[0]:
        raise ValueError(
            "last dimension of `a` does not match first dimension of `b`")
l, m, n = a.shape[0], a.shape[1], b.shape[1]
if out is not None:
if out.shape != (l, n):
raise (ValueError, "`out` array does not have the correct shape")
else:
f = tb.openFile('dot.h5', 'w')
filters = tb.Filters(complevel=5, complib='blosc')
out = f.createCArray(f.root, 'out', tb.Atom.from_dtype(a.dtype),
shape=(l, n), filters=filters)
# Compute a good block size
buffersize = OOC_BUFFER_SIZE
bl = math.sqrt(buffersize / out.dtype.itemsize)
bl = 2**int(math.log(bl, 2))
for i in range(0, l, bl):
for j in range(0, n, bl):
for k in range(0, m, bl):
a0 = a[i:min(i+bl, l), k:min(k+bl, m)]
b0 = b[k:min(k+bl, m), j:min(j+bl, n)]
out[i:i+bl, j:j+bl] += np.dot(a0, b0)
return out
if __name__ == "__main__":
"""Small benchmark for comparison against numpy.dot() speed"""
from time import time
# Matrix dimensions
L, M, N = 1000, 100, 2000
print "Multiplying (%d, %d) x (%d, %d) matrices" % (L, M, M, N)
a = np.linspace(0, 1, L*M).reshape(L, M)
b = np.linspace(0, 1, M*N).reshape(M, N)
t0 = time()
cdot = np.dot(a,b)
print "Time for np.dot->", round(time()-t0, 3), cdot.shape
f = tb.openFile('matrix-pt.h5', 'w')
l, m, n = a.shape[0], a.shape[1], b.shape[1]
filters = tb.Filters(complevel=5, complib='blosc')
ad = f.createCArray(f.root, 'a', tb.Float64Atom(), (l,m),
filters=filters)
ad[:] = a
bd = f.createCArray(f.root, 'b', tb.Float64Atom(), (m,n),
filters=filters)
bd[:] = b
cd = f.createCArray(f.root, 'c', tb.Float64Atom(), (l,n),
filters=filters)
t0 = time()
dot(a, b, out=cd)
print "Time for ooc dot->", round(time()-t0, 3), cd.shape
np.testing.assert_almost_equal(cd, cdot)
f.close()
| 28.925
| 77
| 0.53414
|
########################################################################
#
# License: BSD
# Created: October 11, 2013
# Author: Francesc Alted
#
########################################################################
"""
Implementation of an out of core matrix-matrix multiplication for PyTables.
"""
import sys, math
import numpy as np
import tables as tb
_MB = 2**20
OOC_BUFFER_SIZE = 32*_MB
"""The buffer size for out-of-core operations.
"""
def dot(a, b, out=None):
"""
Matrix multiplication of two 2-D arrays.
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : array_like, optional
Output argument. This must have the exact kind that would be
returned if it was not used.
Returns
-------
output : CArray or scalar
Returns the dot product of `a` and `b`. If `a` and `b` are
both scalars or both 1-D arrays then a scalar is returned;
otherwise a new CArray (in file dot.h5:/out) is returned. If
`out` parameter is provided, then it is returned instead.
Raises
------
ValueError
If the last dimension of `a` is not the same size as the
second-to-last dimension of `b`.
"""
if len(a.shape) != 2 or len(b.shape) != 2:
raise (ValueError, "only 2-D matrices supported")
if a.shape[1] != b.shape[0]:
        raise ValueError(
            "last dimension of `a` does not match first dimension of `b`")
l, m, n = a.shape[0], a.shape[1], b.shape[1]
if out is not None:
if out.shape != (l, n):
raise (ValueError, "`out` array does not have the correct shape")
else:
f = tb.openFile('dot.h5', 'w')
filters = tb.Filters(complevel=5, complib='blosc')
out = f.createCArray(f.root, 'out', tb.Atom.from_dtype(a.dtype),
shape=(l, n), filters=filters)
# Compute a good block size
buffersize = OOC_BUFFER_SIZE
bl = math.sqrt(buffersize / out.dtype.itemsize)
bl = 2**int(math.log(bl, 2))
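    # Worked example of the block size: with float64 items (8 bytes) and the default
    # 32 MB buffer, sqrt(32*2**20 / 8) = 2048, so bl = 2**11 = 2048 rows/columns per block.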
for i in range(0, l, bl):
for j in range(0, n, bl):
for k in range(0, m, bl):
a0 = a[i:min(i+bl, l), k:min(k+bl, m)]
b0 = b[k:min(k+bl, m), j:min(j+bl, n)]
out[i:i+bl, j:j+bl] += np.dot(a0, b0)
return out
if __name__ == "__main__":
"""Small benchmark for comparison against numpy.dot() speed"""
from time import time
# Matrix dimensions
L, M, N = 1000, 100, 2000
print "Multiplying (%d, %d) x (%d, %d) matrices" % (L, M, M, N)
a = np.linspace(0, 1, L*M).reshape(L, M)
b = np.linspace(0, 1, M*N).reshape(M, N)
t0 = time()
cdot = np.dot(a,b)
print "Time for np.dot->", round(time()-t0, 3), cdot.shape
f = tb.openFile('matrix-pt.h5', 'w')
l, m, n = a.shape[0], a.shape[1], b.shape[1]
filters = tb.Filters(complevel=5, complib='blosc')
ad = f.createCArray(f.root, 'a', tb.Float64Atom(), (l,m),
filters=filters)
ad[:] = a
bd = f.createCArray(f.root, 'b', tb.Float64Atom(), (m,n),
filters=filters)
bd[:] = b
cd = f.createCArray(f.root, 'c', tb.Float64Atom(), (l,n),
filters=filters)
t0 = time()
dot(a, b, out=cd)
print "Time for ooc dot->", round(time()-t0, 3), cd.shape
np.testing.assert_almost_equal(cd, cdot)
f.close()
| 0
| 0
| 0
|
4bfa331a60755af393427cbeb23700e010675ae3
| 3,487
|
py
|
Python
|
English/CdeC.py
|
ehultee/glacio-CdeC
|
2cab0a2593abe9e23ced8704be9c794fbbce0576
|
[
"MIT"
] | 2
|
2019-07-01T16:42:35.000Z
|
2021-11-07T20:03:30.000Z
|
English/CdeC.py
|
ehultee/glacio-CdeC
|
2cab0a2593abe9e23ced8704be9c794fbbce0576
|
[
"MIT"
] | 18
|
2019-07-02T16:46:54.000Z
|
2019-08-02T18:19:15.000Z
|
English/CdeC.py
|
ehultee/glacio-CdeC
|
2cab0a2593abe9e23ced8704be9c794fbbce0576
|
[
"MIT"
] | 2
|
2019-07-02T16:20:33.000Z
|
2021-01-27T11:47:14.000Z
|
## Helper functions to clean up Clubes de Ciencia notebooks
## 5 July 2019 EHU
import xarray as xr
import pandas as pd
import numpy as np
from oggm import utils
def ice_to_freshwater(icevol, rho_ice=900, rho_water=1000):
"""Cleanly convert volume of glacial ice (km3) to equivalent volume fresh water (liter).
Arguments:
icevol = volume of ice to convert, in km3
rho_ice = density of glacial ice (default 900 kg/m3)
rho_water = density of freshwater (default 1000 kg/m3)
"""
km3_to_ltr = 1E12
water_vol_km3 = icevol * rho_ice / rho_water
return water_vol_km3 * km3_to_ltr
def read_run_results(gdir, filesuffix=None):
"""Reads the output diagnostics of a simulation and puts the data in a pandas dataframe.
Parameters
----------
gdir : the glacier directory
filesuffix : the file identifier
Returns
-------
a pandas Dataframe with monthly temp and precip
"""
with xr.open_dataset(gdir.get_filepath('model_diagnostics', filesuffix=filesuffix)) as ds:
ds = ds.load()
    # Length needs filtering
ts = ds.length_m.to_series()
ts = ts.rolling(12*3).min()
ts.iloc[0:12*3] = ts.iloc[12*3]
# Volume change
delta_vol = np.append(ds.volume_m3.data[1:] - ds.volume_m3.data[0:-1], [0])
if ds.calendar_month[0] == 10 and gdir.cenlat < 0:
# this is to cover up a bug in OGGM
_, m = utils.hydrodate_to_calendardate(ds.hydro_year.data, ds.hydro_month.data, start_month=4)
ds.calendar_month[:] = m
odf = pd.DataFrame()
odf['length_m'] = ts
odf['volume_m3'] = ds.volume_m3
odf['delta_water_m3'] = delta_vol * 0.9
odf['month'] = ds.calendar_month
return odf
def read_climate_statistics(gdir):
"""Reads the annual cycle of climate for [1985-2015] at the glacier terminus elevation.
Parameters
----------
gdir : the glacier directory
Returns
-------
a pandas Dataframe with monthly average temp and precip
"""
with xr.open_dataset(gdir.get_filepath('climate_monthly')) as ds:
ds = ds.load()
ds = ds.sel(time=slice('1985', '2015'))
dsm = ds.groupby('time.month').mean(dim='time')
odf = pd.DataFrame()
odf['temp_celcius'] = dsm.temp.to_series()
odf['prcp_mm_mth'] = dsm.prcp.to_series()
# We correct for altitude difference
d = utils.glacier_statistics(gdir)
odf['temp_celcius'] += (ds.ref_hgt - d['flowline_min_elev']) * 0.0065
return odf
def plot_xz_bed(x, bed, ax=None, ylim=None):
"""This function implements a glacier bed, prepared axes and a legend in
altitude vs. distance along a glacier plot. Based on function of the same
name in OGGM-Edu, but adds explicit axes argument.
Parameters
----------
x : ndarray
distance along glacier (all steps in km)
bed : ndarray
bed rock
Parameters (Optional)
----------
ax : matplotlib axes instance on which to plot
If None, calls plt.gca()
ylim : tuple, y-limits of plot
If None, calls ax.get_ylim()
"""
if ax is None:
ax = plt.gca()
if ylim is None:
ylim = ax.get_ylim()
ax.plot(x, bed, color='k', label='Bedrock', linestyle=':', linewidth=1.5)
ax.set_xlabel('Distance along glacier [km]')
ax.set_ylabel('Altitude [m]')
ax.set_ylim(ylim)
ax.legend(loc='best', frameon=False)
| 30.587719
| 102
| 0.629481
|
## Helper functions to clean up Clubes de Ciencia notebooks
## 5 July 2019 EHU
import xarray as xr
import pandas as pd
import numpy as np
from oggm import utils
def ice_to_freshwater(icevol, rho_ice=900, rho_water=1000):
"""Cleanly convert volume of glacial ice (km3) to equivalent volume fresh water (liter).
Arguments:
icevol = volume of ice to convert, in km3
rho_ice = density of glacial ice (default 900 kg/m3)
rho_water = density of freshwater (default 1000 kg/m3)
"""
km3_to_ltr = 1E12
water_vol_km3 = icevol * rho_ice / rho_water
return water_vol_km3 * km3_to_ltr
def read_run_results(gdir, filesuffix=None):
"""Reads the output diagnostics of a simulation and puts the data in a pandas dataframe.
Parameters
----------
gdir : the glacier directory
filesuffix : the file identifier
Returns
-------
a pandas Dataframe with monthly temp and precip
"""
with xr.open_dataset(gdir.get_filepath('model_diagnostics', filesuffix=filesuffix)) as ds:
ds = ds.load()
    # Length needs filtering
ts = ds.length_m.to_series()
ts = ts.rolling(12*3).min()
ts.iloc[0:12*3] = ts.iloc[12*3]
# Volume change
delta_vol = np.append(ds.volume_m3.data[1:] - ds.volume_m3.data[0:-1], [0])
if ds.calendar_month[0] == 10 and gdir.cenlat < 0:
# this is to cover up a bug in OGGM
_, m = utils.hydrodate_to_calendardate(ds.hydro_year.data, ds.hydro_month.data, start_month=4)
ds.calendar_month[:] = m
odf = pd.DataFrame()
odf['length_m'] = ts
odf['volume_m3'] = ds.volume_m3
odf['delta_water_m3'] = delta_vol * 0.9
odf['month'] = ds.calendar_month
return odf
def read_climate_statistics(gdir):
"""Reads the annual cycle of climate for [1985-2015] at the glacier terminus elevation.
Parameters
----------
gdir : the glacier directory
Returns
-------
a pandas Dataframe with monthly average temp and precip
"""
with xr.open_dataset(gdir.get_filepath('climate_monthly')) as ds:
ds = ds.load()
ds = ds.sel(time=slice('1985', '2015'))
dsm = ds.groupby('time.month').mean(dim='time')
odf = pd.DataFrame()
odf['temp_celcius'] = dsm.temp.to_series()
odf['prcp_mm_mth'] = dsm.prcp.to_series()
# We correct for altitude difference
d = utils.glacier_statistics(gdir)
odf['temp_celcius'] += (ds.ref_hgt - d['flowline_min_elev']) * 0.0065
return odf
def plot_xz_bed(x, bed, ax=None, ylim=None):
"""This function implements a glacier bed, prepared axes and a legend in
altitude vs. distance along a glacier plot. Based on function of the same
name in OGGM-Edu, but adds explicit axes argument.
Parameters
----------
x : ndarray
distance along glacier (all steps in km)
bed : ndarray
bed rock
Parameters (Optional)
----------
ax : matplotlib axes instance on which to plot
If None, calls plt.gca()
ylim : tuple, y-limits of plot
If None, calls ax.get_ylim()
"""
if ax is None:
ax = plt.gca()
if ylim is None:
ylim = ax.get_ylim()
ax.plot(x, bed, color='k', label='Bedrock', linestyle=':', linewidth=1.5)
ax.set_xlabel('Distance along glacier [km]')
ax.set_ylabel('Altitude [m]')
ax.set_ylim(ylim)
ax.legend(loc='best', frameon=False)
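# Usage sketch for ice_to_freshwater (illustrative numbers only, not from any
# glacier inventory): 2 km3 of glacial ice at rho_ice=900 kg/m3 corresponds to
# 2 * 900/1000 km3 of water, i.e. ice_to_freshwater(2.0) == 1.8e12 litres.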
| 0
| 0
| 0
|
891a6a2c0dd55610ffc22400d6c2676a3191cb6f
| 1,543
|
py
|
Python
|
skoleintra/schildren.py
|
svalgaard/fskintra
|
3ccf656ef1450e541c902d4c00ea1dadcf82085c
|
[
"BSD-2-Clause-FreeBSD"
] | 9
|
2015-08-12T09:54:04.000Z
|
2021-06-21T08:35:39.000Z
|
skoleintra/schildren.py
|
svalgaard/fskintra
|
3ccf656ef1450e541c902d4c00ea1dadcf82085c
|
[
"BSD-2-Clause-FreeBSD"
] | 29
|
2015-01-03T21:13:20.000Z
|
2020-11-12T08:23:56.000Z
|
skoleintra/schildren.py
|
svalgaard/fskintra
|
3ccf656ef1450e541c902d4c00ea1dadcf82085c
|
[
"BSD-2-Clause-FreeBSD"
] | 11
|
2015-02-25T20:24:56.000Z
|
2018-11-16T07:37:37.000Z
|
# -*- coding: utf-8 -*-
import re
import config
import surllib
# Map of children => urlPrefix
# 'Andrea 0A' => '/parent/1234/Andrea/'
_children = None
def getChildren():
    '''Returns a list of "available" children in the system'''
global _children
if not _children:
_children = dict()
seen = set()
config.log(u'Henter liste af børn')
data = surllib.skoleLogin()
# Name of "First child"
fst = data.find(id="sk-personal-menu-button").text.strip()
for a in data.findAll('a', href=re.compile('^(/[^/]*){3}/Index$')):
url = a['href'].rsplit('/', 1)[0].rstrip('/')
if url in seen:
continue
seen.add(url)
name = a.text.strip() or fst
if name not in _children:
config.log(u'Barn %s => %s' % (name, url), 2)
_children[name] = url
cns = sorted(_children.keys(), key=ckey)
config.log(u'Følgende børn blev fundet: ' + u', '.join(cns))
return sorted(_children.keys(), key=ckey)
| 27.553571
| 75
| 0.596889
|
# -*- coding: utf-8 -*-
import re
import config
import surllib
# Map of children => urlPrefix
# 'Andrea 0A' => '/parent/1234/Andrea/'
_children = None
def getChildren():
    '''Returns a list of "available" children in the system'''
global _children
def ckey(n): return tuple(n.rsplit(' ', 1)[::-1])
if not _children:
_children = dict()
seen = set()
config.log(u'Henter liste af børn')
data = surllib.skoleLogin()
# Name of "First child"
fst = data.find(id="sk-personal-menu-button").text.strip()
for a in data.findAll('a', href=re.compile('^(/[^/]*){3}/Index$')):
url = a['href'].rsplit('/', 1)[0].rstrip('/')
if url in seen:
continue
seen.add(url)
name = a.text.strip() or fst
if name not in _children:
config.log(u'Barn %s => %s' % (name, url), 2)
_children[name] = url
cns = sorted(_children.keys(), key=ckey)
config.log(u'Følgende børn blev fundet: ' + u', '.join(cns))
return sorted(_children.keys(), key=ckey)
def getChildURLPrefix(cname):
getChildren()
assert(cname in _children)
return surllib.absurl(_children[cname])
def getChildURL(cname, suffix):
# Guessing a bug in forældre intra as weekplan urls have the following
# format: parent/CHILD_ID/CHILD_NAMEitem/weeklyplansandhomework/list
assert(suffix.startswith('/') or suffix.startswith('item/'))
return getChildURLPrefix(cname) + suffix
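# Usage sketch (child name and suffix are invented examples): after getChildren()
# has populated the prefix map, getChildURL('Andrea 0A', 'item/weeklyplansandhomework/list')
# returns the absolute weekly-plan URL for that child.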
| 399
| 0
| 73
|
b6fea9e2c246a5af265492ae8abcfc853cc92e50
| 297
|
py
|
Python
|
aula01/par_impar.py
|
Doni-zete/Praticas-Python
|
36a877a9f22f9992550fb6e3bdb89c751d6299ef
|
[
"MIT"
] | null | null | null |
aula01/par_impar.py
|
Doni-zete/Praticas-Python
|
36a877a9f22f9992550fb6e3bdb89c751d6299ef
|
[
"MIT"
] | null | null | null |
aula01/par_impar.py
|
Doni-zete/Praticas-Python
|
36a877a9f22f9992550fb6e3bdb89c751d6299ef
|
[
"MIT"
] | null | null | null |
"""
Determine whether a number is even or odd
"""
print(25*"-")
while True:
numero = int(input("Digite um numero: "))
if (numero % 2) == 0:
print(f"Numero digitado, {numero} é PAR: ")
elif(numero % 2) != 0:
print(f"Numero digitado, {numero} é IMPAR: ")
print(25*"-")
| 21.214286
| 53
| 0.555556
|
"""
Determine whether a number is even or odd
"""
print(25*"-")
while True:
numero = int(input("Digite um numero: "))
if (numero % 2) == 0:
print(f"Numero digitado, {numero} é PAR: ")
elif(numero % 2) != 0:
print(f"Numero digitado, {numero} é IMPAR: ")
print(25*"-")
| 0
| 0
| 0
|
0167fb62df138c757952327fadf399fc12f32100
| 3,435
|
py
|
Python
|
tests/python/test_plotting.py
|
MichaelChirico/xgboost
|
028bdc174086d22dcda4130ca5955efca9a0eed7
|
[
"Apache-2.0"
] | 1
|
2022-01-04T23:38:14.000Z
|
2022-01-04T23:38:14.000Z
|
tests/python/test_plotting.py
|
Nihilitior/xgboost
|
7366d3b20cad8e28ecef67d5130c71e81bb0b088
|
[
"Apache-2.0"
] | 40
|
2021-09-10T06:17:11.000Z
|
2022-03-19T19:30:56.000Z
|
tests/python/test_plotting.py
|
Nihilitior/xgboost
|
7366d3b20cad8e28ecef67d5130c71e81bb0b088
|
[
"Apache-2.0"
] | 1
|
2018-12-09T14:30:38.000Z
|
2018-12-09T14:30:38.000Z
|
import json
import numpy as np
import xgboost as xgb
import testing as tm
import pytest
try:
import matplotlib
matplotlib.use('Agg')
from matplotlib.axes import Axes
from graphviz import Source
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(),
tm.no_graphviz()))
dpath = 'demo/data/agaricus.txt.train'
| 35.05102
| 77
| 0.560116
|
import json
import numpy as np
import xgboost as xgb
import testing as tm
import pytest
try:
import matplotlib
matplotlib.use('Agg')
from matplotlib.axes import Axes
from graphviz import Source
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(),
tm.no_graphviz()))
dpath = 'demo/data/agaricus.txt.train'
class TestPlotting:
def test_plotting(self):
m = xgb.DMatrix(dpath)
booster = xgb.train({'max_depth': 2, 'eta': 1,
'objective': 'binary:logistic'}, m,
num_boost_round=2)
ax = xgb.plot_importance(booster)
assert isinstance(ax, Axes)
assert ax.get_title() == 'Feature importance'
assert ax.get_xlabel() == 'F score'
assert ax.get_ylabel() == 'Features'
assert len(ax.patches) == 4
ax = xgb.plot_importance(booster, color='r',
title='t', xlabel='x', ylabel='y')
assert isinstance(ax, Axes)
assert ax.get_title() == 't'
assert ax.get_xlabel() == 'x'
assert ax.get_ylabel() == 'y'
assert len(ax.patches) == 4
for p in ax.patches:
assert p.get_facecolor() == (1.0, 0, 0, 1.0) # red
ax = xgb.plot_importance(booster, color=['r', 'r', 'b', 'b'],
title=None, xlabel=None, ylabel=None)
assert isinstance(ax, Axes)
assert ax.get_title() == ''
assert ax.get_xlabel() == ''
assert ax.get_ylabel() == ''
assert len(ax.patches) == 4
assert ax.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[1].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[2].get_facecolor() == (0, 0, 1.0, 1.0) # blue
assert ax.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # blue
g = xgb.to_graphviz(booster, num_trees=0)
assert isinstance(g, Source)
ax = xgb.plot_tree(booster, num_trees=0)
assert isinstance(ax, Axes)
def test_importance_plot_lim(self):
np.random.seed(1)
dm = xgb.DMatrix(np.random.randn(100, 100), label=[0, 1] * 50)
bst = xgb.train({}, dm)
assert len(bst.get_fscore()) == 71
ax = xgb.plot_importance(bst)
assert ax.get_xlim() == (0., 11.)
assert ax.get_ylim() == (-1., 71.)
ax = xgb.plot_importance(bst, xlim=(0, 5), ylim=(10, 71))
assert ax.get_xlim() == (0., 5.)
assert ax.get_ylim() == (10., 71.)
def run_categorical(self, tree_method: str) -> None:
X, y = tm.make_categorical(1000, 31, 19, onehot=False)
reg = xgb.XGBRegressor(
enable_categorical=True, n_estimators=10, tree_method=tree_method
)
reg.fit(X, y)
trees = reg.get_booster().get_dump(dump_format="json")
for tree in trees:
j_tree = json.loads(tree)
assert "leaf" in j_tree.keys() or isinstance(
j_tree["split_condition"], list
)
graph = xgb.to_graphviz(reg, num_trees=len(j_tree) - 1)
assert isinstance(graph, Source)
ax = xgb.plot_tree(reg, num_trees=len(j_tree) - 1)
assert isinstance(ax, Axes)
@pytest.mark.skipif(**tm.no_pandas())
def test_categorical(self) -> None:
self.run_categorical("approx")
| 2,845
| 147
| 23
|
982d1343458738175e9ac9c51f09078a5d70f3fc
| 1,386
|
py
|
Python
|
tests/components/test_branch.py
|
haowen-xu/tfsnippet-pre-alpha
|
31eb2cf692ac25b95cc815aaca53754d6db42d9f
|
[
"MIT"
] | null | null | null |
tests/components/test_branch.py
|
haowen-xu/tfsnippet-pre-alpha
|
31eb2cf692ac25b95cc815aaca53754d6db42d9f
|
[
"MIT"
] | null | null | null |
tests/components/test_branch.py
|
haowen-xu/tfsnippet-pre-alpha
|
31eb2cf692ac25b95cc815aaca53754d6db42d9f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
import six
import tensorflow as tf
from tfsnippet.components import DictMapper, Linear, Dense
from tests.helper import TestCase
if __name__ == '__main__':
unittest.main()
| 30.130435
| 71
| 0.565657
|
# -*- coding: utf-8 -*-
import unittest
import six
import tensorflow as tf
from tfsnippet.components import DictMapper, Linear, Dense
from tests.helper import TestCase
class DictMapperTestCase(TestCase):
def test_construction(self):
net = DictMapper({
'a': Linear(2),
'b': Dense(2),
'c': lambda x: x * tf.get_variable('y', initializer=0.)
})
inputs = tf.placeholder(dtype=tf.float32, shape=[None, 2])
output = net(inputs)
self.assertIsInstance(output, dict)
self.assertEqual(sorted(output.keys()), ['a', 'b', 'c'])
for v in six.itervalues(output):
self.assertIsInstance(v, tf.Tensor)
_ = net(inputs)
self.assertEqual(
sorted(v.name for v in tf.global_variables()),
['dense/fully_connected/biases:0',
'dense/fully_connected/weights:0',
'dict_mapper/c/y:0',
'linear/fully_connected/biases:0',
'linear/fully_connected/weights:0']
)
def test_invalid_key(self):
for k in ['.', '', '90ab', 'abc.def']:
with self.assertRaisesRegex(
ValueError, 'The key for `DictMapper` must be a valid '
'Python identifier.*'):
_ = DictMapper({k: lambda x: x})
if __name__ == '__main__':
unittest.main()
| 1,075
| 14
| 77
|
dc034e6d00493ce71c20bb1a4fa5cb0e5ade07f9
| 1,076
|
py
|
Python
|
server.py
|
MusaTamzid05/simple_image_api
|
ea7a2b255c47732d7f79f7f9c59576eced23f3c6
|
[
"MIT"
] | null | null | null |
server.py
|
MusaTamzid05/simple_image_api
|
ea7a2b255c47732d7f79f7f9c59576eced23f3c6
|
[
"MIT"
] | null | null | null |
server.py
|
MusaTamzid05/simple_image_api
|
ea7a2b255c47732d7f79f7f9c59576eced23f3c6
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_restful import Resource
from flask_restful import Api
import numpy as np
import cv2
import werkzeug
from flask_restful import reqparse
parser = reqparse.RequestParser()
parser.add_argument("file", type = werkzeug.datastructures.FileStorage, location = "files")
app = Flask(__name__)
api = Api(app)
import base64
api.add_resource(ImageServer, "/")
if __name__ == "__main__":
app.run(debug = True, port = 5000)
| 19.925926
| 91
| 0.672862
|
from flask import Flask
from flask_restful import Resource
from flask_restful import Api
import numpy as np
import cv2
import werkzeug
from flask_restful import reqparse
parser = reqparse.RequestParser()
parser.add_argument("file", type = werkzeug.datastructures.FileStorage, location = "files")
app = Flask(__name__)
api = Api(app)
import base64
class ImageServer(Resource):
def _decode(self, image):
        image = np.frombuffer(image, np.uint8)
image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)
return image
def _encode(self, image):
_, encoded_image = cv2.imencode(".jpg", image)
image_data = base64.b64encode(encoded_image).decode("utf-8")
return f"data:image/jpeg;base64,{image_data}"
def post(self):
data = parser.parse_args()
image = data["file"].read()
image = self._decode(image = image)
image = self._encode(image = image)
return {"image" : image}
api.add_resource(ImageServer, "/")
if __name__ == "__main__":
app.run(debug = True, port = 5000)
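# Client-side usage sketch (assumes the `requests` package and a local test image;
# not part of the original server):
#   import requests
#   with open("photo.jpg", "rb") as fh:
#       r = requests.post("http://127.0.0.1:5000/", files={"file": fh})
#   r.json()["image"] is then a base64 data URI suitable for an <img src=...> tag.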
| 502
| 7
| 104
|
762c4a4873f93d04b2ca0adffa5772acd9975495
| 1,428
|
py
|
Python
|
access-analyzer/step-functions-archive-findings/functions/evaluate-access-analyzer-finding/app.py
|
lulukelu/aws-iam-permissions-guardrails
|
cae485e3d8589c85f55c50c442ce47916345e00d
|
[
"Apache-2.0"
] | 88
|
2020-04-02T02:56:27.000Z
|
2022-03-18T13:22:02.000Z
|
access-analyzer/step-functions-archive-findings/functions/evaluate-access-analyzer-finding/app.py
|
lulukelu/aws-iam-permissions-guardrails
|
cae485e3d8589c85f55c50c442ce47916345e00d
|
[
"Apache-2.0"
] | 45
|
2020-06-26T11:11:28.000Z
|
2021-08-17T15:31:47.000Z
|
access-analyzer/step-functions-archive-findings/functions/evaluate-access-analyzer-finding/app.py
|
lulukelu/aws-iam-permissions-guardrails
|
cae485e3d8589c85f55c50c442ce47916345e00d
|
[
"Apache-2.0"
] | 32
|
2020-04-02T02:56:28.000Z
|
2021-12-20T18:53:04.000Z
|
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import boto3
#Evaluate Risk Level
#Return True to raise alert if risk level exceeds threshold
#Return False to Archive finding
| 31.733333
| 111
| 0.740196
|
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import boto3
#Evaluate Risk Level
#Return True to raise alert if risk level exceeds threshold
#Return False to Archive finding
def should_raise_alert(finding_details, tags, additional_context):
if "error" in finding_details:
logger.error(f"Error in finding {finding_details['error']} for resource {finding_details['resource']}")
return True
if (
finding_details["isPublic"]
and not is_allowed_public(finding_details, tags, additional_context)
):
return True
elif (
"IsAllowedToShare" in tags and tags["IsAllowedToShare"]=="true"
and "Environment" in tags and tags["Environment"]=="development"
and "key_aliases" in additional_context and "alias/DevelopmentKey" in additional_context["key_aliases"]
):
return False
return True
def is_allowed_public(finding_details, tags, additional_context):
#customize logic
#for example, Data Classification is Confidential, return False
if "Data Classification" in tags and tags["Data Classification"]=="Confidential":
return False
return True
def handler(event,context):
finding_details=event["detail"]
tags=event["guid"]["tags"]
additional_context=event["guid"]["additional_context"]
if should_raise_alert(finding_details,tags,additional_context):
return {"status":"NOTIFY"}
else:
return {"status":"ARCHIVE"}
| 1,157
| 0
| 68
|
2e3fe88d47f02aa45547236534d02086aa6a58e7
| 3,372
|
py
|
Python
|
MobileRevelator/python/android_gls.py
|
ohunecker/MR
|
b0c93436c7964d87a0b8154f8b7662b1731124b9
|
[
"MIT"
] | 98
|
2019-02-03T22:50:24.000Z
|
2022-03-17T12:50:56.000Z
|
MobileRevelator/python/android_gls.py
|
cewatkins/MR
|
5ba553fd0eb4c1d80842074a553119486f005822
|
[
"MIT"
] | 10
|
2019-03-14T20:12:10.000Z
|
2020-05-23T10:37:54.000Z
|
MobileRevelator/python/android_gls.py
|
cewatkins/MR
|
5ba553fd0eb4c1d80842074a553119486f005822
|
[
"MIT"
] | 30
|
2019-02-03T22:50:27.000Z
|
2022-03-30T12:37:30.000Z
|
#Pluginname="GLS Tracking (Android)"
#Type=App
import os
import json
import tempfile
| 42.15
| 148
| 0.472123
|
#Pluginname="GLS Tracking (Android)"
#Type=App
import os
import json
import tempfile
def convertdata(filenames):
zfields=[]
row=0
for fsname in filenames:
filename=tempfile.gettempdir()+"/"+fsname[fsname.rfind("/")+1:]
if ctx.fs_file_extract(fsname,filename):
print("Running GLS conversion: "+filename[filename.rfind("/")+1:])
with open(filename,'rb') as rt:
dat=json.loads(rt.read().decode())
desc=""
if ("innerResult") in dat:
zfield={}
root=dat["innerResult"]
if "expeditionDate" in root:
desc+="ExpeditionDate:"+root["expeditionDate"]+";"
if "recipient" in root:
desc+="Recipient:"+root["recipient"]+";"
if "recipient" in root:
desc+="Sender:"+root["sender"]+";"
if "parcelNumber" in root:
desc+="ParcelNumber:"+root["parcelNumber"]+";"
if "stepList" in root:
st=root["stepList"]
for child in st:
if "dateStep" in child:
desc+="[DateStep:"+child["dateStep"]+";"
if "note" in child:
desc+="Note:"+child["note"]+";"
if "place" in child:
desc+="Place:"+child["place"]+";"
if "timeStep" in child:
desc+="TimeStep:"+child["timeStep"]+";"
if "statusTitle" in child:
desc+="Status:"+child["statusTitle"]+"]"
zfield["ID"]=str(row)
zfield["Type"]=""
zfield["Package"]=""
zfield["Duration"]=""
zfield["Filename"]=fsname
zfield["Timestamp"]=""
zfield["Other content"]=desc
row+=1
zfields.append(zfield)
os.remove(filename)
rows=len(zfields)
#print(zfields)
for i in range(0,rows):
zfield=zfields[i]
oldpos=0
newpos=int(i/rows*100)
if (oldpos<newpos):
oldpos=newpos
ctx.gui_setMainProgressBar(oldpos)
ctx.gui_set_data(i,0,zfield["ID"])
ctx.gui_set_data(i,1,zfield["Type"])
ctx.gui_set_data(i,2,zfield["Package"])
ctx.gui_set_data(i,3,zfield["Timestamp"])
ctx.gui_set_data(i,4,zfield["Duration"])
ctx.gui_set_data(i,5,zfield["Other content"])
ctx.gui_set_data(i,6,zfield["Filename"])
def main():
ctx.gui_setMainLabel("GLS: Parsing Parcels");
ctx.gui_setMainProgressBar(0)
headers=["rowid (int)","Type (QString)", "Package (QString)","Timestamp (int)","Duration (int)","Other_Content (QString)","Filename (QString)"]
ctx.gui_set_headers(headers)
filenames=ctx.pluginfilenames()
convertdata(filenames)
ctx.gui_update()
ctx.gui_setMainLabel("Status: Idle.")
ctx.gui_setMainProgressBar(0)
return "Finished running plugin."
| 3,230
| 0
| 49
|
1129bbca49af3c3ae848c54738eec81269b88739
| 983
|
py
|
Python
|
utf7.py
|
CthUlhUzzz/unicode_crafter
|
b1b6b54e13d9afdf20c58abffe7a4986e35628d0
|
[
"WTFPL"
] | null | null | null |
utf7.py
|
CthUlhUzzz/unicode_crafter
|
b1b6b54e13d9afdf20c58abffe7a4986e35628d0
|
[
"WTFPL"
] | null | null | null |
utf7.py
|
CthUlhUzzz/unicode_crafter
|
b1b6b54e13d9afdf20c58abffe7a4986e35628d0
|
[
"WTFPL"
] | null | null | null |
# Module for UTF-7 encoding
from base64 import b64encode
from utf16 import utf16_encode, UTF16_MAXIMUM_CODEPOINT
DIRECT_CHARACTERS = '\'(),-./:?'
UTF7_MAXIMUM_CODEPOINT = UTF16_MAXIMUM_CODEPOINT
| 31.709677
| 95
| 0.603255
|
# Module for UTF-7 encoding
from base64 import b64encode
from utf16 import utf16_encode, UTF16_MAXIMUM_CODEPOINT
DIRECT_CHARACTERS = '\'(),-./:?'
UTF7_MAXIMUM_CODEPOINT = UTF16_MAXIMUM_CODEPOINT
def utf7_encode(codepoints, direct_characters=DIRECT_CHARACTERS):
    assert '+' not in direct_characters
    result_str = b''
    to_encode_str = []
    for i in codepoints:
        # ASCII direct characters (and '+', as in the original logic) are emitted
        # literally; everything else is collected into a base64-encoded run.
        if i <= 0x7f and (i == 0x2b or chr(i) in direct_characters):
            if to_encode_str:
                result_str += b'+' + b64encode(utf16_encode(to_encode_str)).rstrip(b'=') + b'-'
                to_encode_str = []
            result_str += chr(i).encode('ascii')
        else:
            to_encode_str.append(i)
    # flush any trailing run
    if to_encode_str:
        result_str += b'+' + b64encode(utf16_encode(to_encode_str)).rstrip(b'=') + b'-'
    return result_str
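# Usage sketch (relies on the sibling utf16 module from this repo being importable;
# the sample codepoints are arbitrary):
if __name__ == '__main__':
    sample = [ord('a'), ord(':'), ord('b'), 0x263A]
    # ':' is a direct character and is emitted literally; the other codepoints end up
    # in base64-encoded '+...-' runs.
    print(utf7_encode(sample))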
| 761
| 0
| 23
|
48337410cf4a7f2042310786ee08e0b9f74679d0
| 6,939
|
py
|
Python
|
ping_me/ping.py
|
harshcrop/ping-me
|
274a07fd07356255763f516a47b37d9472041dcf
|
[
"Apache-2.0"
] | null | null | null |
ping_me/ping.py
|
harshcrop/ping-me
|
274a07fd07356255763f516a47b37d9472041dcf
|
[
"Apache-2.0"
] | null | null | null |
ping_me/ping.py
|
harshcrop/ping-me
|
274a07fd07356255763f516a47b37d9472041dcf
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Command line execution listener module of ping-me"""
from __future__ import print_function
from dateutil import parser
import argparse
import datetime
import getpass
import hashlib
import os
import parsedatetime
import sys
import time
import ping_me.authenticate
import ping_me.engine
home = os.path.expanduser("~")
cal = parsedatetime.Calendar()
def main():
"""Parse the arguments using argparse package"""
argparser = argparse.ArgumentParser(description='ping-me')
argparser.add_argument("-e", action="store_true", default=False)
argparser.add_argument("-V", "--version", action="store_true",
default=False)
argparser.add_argument("-d", "--date", action="store", dest="DATE",
default=None, nargs="+")
argparser.add_argument("-t", "--time", action="store", dest="TIME",
default=None, nargs="+")
argparser.add_argument("message", action="store", help="Message",
default=None, nargs="*")
argparser.add_argument("-v", action="store_true", default=False)
args = argparser.parse_args()
process(args)
def process(args):
"""Process the arguments. Call engine if flags are used."""
if args.e:
detailed_usage()
sys.exit(2)
if args.version:
import release
print(release.__version__)
sys.exit(2)
if args.DATE is not None and args.TIME is not None:
message = ' '.join(args.message).lstrip('to ')
date_time = parser.parse(' '.join(args.DATE) +
' ' + ' '.join(args.TIME))
if len(message) == 0:
print("What is the message of your reminder?\n")
print("Use ping-me -h for help\n")
sys.exit(2)
ping_me.engine.engine(message, date_time.year, date_time.month,
date_time.day, date_time.hour, date_time.minute,
args.v)
elif args.TIME is not None:
m_time = parser.parse(' '.join(args.TIME))
c_time = datetime.datetime.now()
if (m_time - c_time).days == -1:
m_time += datetime.timedelta(1)
message = ' '.join(args.message).lstrip('to ')
if len(message) == 0:
print("What is the message of your reminder?\n")
print("Use ping-me -h for help\n")
sys.exit(2)
ping_me.engine.engine(message, m_time.year, m_time.month,
m_time.day, m_time.hour, m_time.minute, args.v)
elif args.DATE is not None:
c_time = repr(time.localtime().tm_hour) + ":" + \
repr(time.localtime().tm_min)
m_date = parser.parse(' '.join(args.DATE) + ' ' + c_time)
message = ' '.join(args.message).lstrip('to ')
if len(message) == 0:
print("What is the message of your reminder?\n")
print("Use ping-me -h for help\n")
sys.exit(2)
ping_me.engine.engine(message, m_date.year, m_date.month,
m_date.day, m_date.hour, m_date.minute, args.v)
else:
if len(args.message) == 0:
sys.stderr.write("Use ping-me -h for help\n")
sys.exit(2)
elif len(args.message) == 1 and args.message == ['config']:
ping_me.authenticate.newuser()
elif len(args.message) == 1 and args.message == ['reconfig']:
reconfig()
else:
nlp_process(args)
def nlp_process(args):
"""Process arguments using Natural Language Processing."""
# If there is something like "to do something in 2 mins"
try:
mins_index = args.message.index('mins')
args.message[mins_index] = 'minutes'
except ValueError:
pass
to_parse = ' '.join(args.message)
try:
m_date = cal.nlp(to_parse)[0][0]
except TypeError:
print("Sorry, couldn't understand your message. Try again.")
sys.exit(2)
# Remove the keywords
keywords = cal.nlp(to_parse)[0][-1].split()
for word in keywords:
args.message.remove(word)
# Remove redundant word 'this'
try:
args.message.remove('this')
except ValueError:
pass
if 'to' in args.message:
args.message.remove('to')
message = ' '.join(args.message)
ping_me.engine.engine(message, m_date.year, m_date.month,
m_date.day, m_date.hour, m_date.minute,
args.v)
def detailed_usage():
"""Detailed documentation of ping-me."""
print("Welcome to the detailed documentation of ping-me !")
# Inspired from 'import this'
s = " "; l = "_ "; r = " _"; f = "/"; b = "\\"; p = "|"; d = "— "
print(s*6 + l*5 + s + l*4 + r + s*12 + l + r*5 + s*2 + r + s*8 + l +
s*7 + l*4)
print(s*5 + f + s*8 + f + s*5 + f + s*4 + f + s + b + s*10 + f + s + f +
s*12 + f + s + b + s*6 + f + s + p + s*6 + f + s*7)
print(s*4 + f + s*8 + f + s*5 + f + s*4 + f + s*3 + b + s*8 + f + s + f +
s*12 + f + s*3 + b + s*4 + f + s*2 + p + s*5 + f + s*7)
print(s*3 + f + r*4 + f + s*5 + f + s*4 + f + s*5 + b + s*6 + f + s + f +
s*2 + r*4 + s*2 + f + 5*s + b + s*2 + f + s*3 + p + s*4 + f + l*4)
print(s*2 + f + s*14 + f + s*4 + f + s*7 + b + s*4 + f + s + f + s*9 + f +
s*2 + f + s*7 + b + f + s*4 + p + s*3 + f + s*7)
print(s + f + s*14 + f + s*4 + f + s*9 + b + s*2 + f + s + f + s*9 + f +
s*2 + f + s*14 + p + s*2 + f + s*7)
print(f + s*11 + d*4 + f + s*11 + b + f + s + f + (r*5)[1:] + f + s*2 +
f + s*15 + p + s + f + (r*4)[1:])
print("")
print("ping-me works well with time and date flags already. " +
"Use 'ping-me -h' for that option. " +
"However, ping-me is smart enough to work without flags.\n")
print("Examples : ")
print("\t\t1. ping-me to call mom tonight")
print("\t\t2. ping-me to buy milk early today")
print("\t\t3. ping-me to go home seven days from now")
print("\t\t4. ping-me to take a nap this afternoon")
print("\t\t5. ping-me to go workout next month")
print("")
print("Report (and track process on fixing) bugs on " +
"https://github.com/OrkoHunter/ping-me. Or simply write a mail " +
"to Himanshu Mishra at himanshumishra[at]iitkgp[dot]ac[dot]in")
def reconfig():
"""Reconfigure the user. Removes all the information of existing one."""
if not os.path.exists(home + "/.pingmeconfig"):
ping_me.authenticate.newuser()
else:
old_pass = hashlib.md5(getpass.getpass("Old Password : " +
"").rstrip()).hexdigest()
if old_pass == ping_me.authenticate.extract_password():
ping_me.authenticate.newuser()
else:
print("Authentication failed.")
sys.exit(2)
if __name__ == "__main__":
main()
| 39.651429
| 78
| 0.542729
|
# -*- coding: utf-8 -*-
"""Command line execution listener module of ping-me"""
from __future__ import print_function
from dateutil import parser
import argparse
import datetime
import getpass
import hashlib
import os
import parsedatetime
import sys
import time
import ping_me.authenticate
import ping_me.engine
home = os.path.expanduser("~")
cal = parsedatetime.Calendar()
def main():
"""Parse the arguments using argparse package"""
argparser = argparse.ArgumentParser(description='ping-me')
argparser.add_argument("-e", action="store_true", default=False)
argparser.add_argument("-V", "--version", action="store_true",
default=False)
argparser.add_argument("-d", "--date", action="store", dest="DATE",
default=None, nargs="+")
argparser.add_argument("-t", "--time", action="store", dest="TIME",
default=None, nargs="+")
argparser.add_argument("message", action="store", help="Message",
default=None, nargs="*")
argparser.add_argument("-v", action="store_true", default=False)
args = argparser.parse_args()
process(args)
def process(args):
"""Process the arguments. Call engine if flags are used."""
if args.e:
detailed_usage()
sys.exit(2)
if args.version:
import release
print(release.__version__)
sys.exit(2)
if args.DATE is not None and args.TIME is not None:
message = ' '.join(args.message).lstrip('to ')
date_time = parser.parse(' '.join(args.DATE) +
' ' + ' '.join(args.TIME))
if len(message) == 0:
print("What is the message of your reminder?\n")
print("Use ping-me -h for help\n")
sys.exit(2)
ping_me.engine.engine(message, date_time.year, date_time.month,
date_time.day, date_time.hour, date_time.minute,
args.v)
elif args.TIME is not None:
m_time = parser.parse(' '.join(args.TIME))
c_time = datetime.datetime.now()
if (m_time - c_time).days == -1:
m_time += datetime.timedelta(1)
message = ' '.join(args.message).lstrip('to ')
if len(message) == 0:
print("What is the message of your reminder?\n")
print("Use ping-me -h for help\n")
sys.exit(2)
ping_me.engine.engine(message, m_time.year, m_time.month,
m_time.day, m_time.hour, m_time.minute, args.v)
elif args.DATE is not None:
c_time = repr(time.localtime().tm_hour) + ":" + \
repr(time.localtime().tm_min)
m_date = parser.parse(' '.join(args.DATE) + ' ' + c_time)
message = ' '.join(args.message).lstrip('to ')
if len(message) == 0:
print("What is the message of your reminder?\n")
print("Use ping-me -h for help\n")
sys.exit(2)
ping_me.engine.engine(message, m_date.year, m_date.month,
m_date.day, m_date.hour, m_date.minute, args.v)
else:
if len(args.message) == 0:
sys.stderr.write("Use ping-me -h for help\n")
sys.exit(2)
elif len(args.message) == 1 and args.message == ['config']:
ping_me.authenticate.newuser()
elif len(args.message) == 1 and args.message == ['reconfig']:
reconfig()
else:
nlp_process(args)
def nlp_process(args):
"""Process arguments using Natural Language Processing."""
# If there is something like "to do something in 2 mins"
try:
mins_index = args.message.index('mins')
args.message[mins_index] = 'minutes'
except ValueError:
pass
to_parse = ' '.join(args.message)
try:
m_date = cal.nlp(to_parse)[0][0]
except TypeError:
print("Sorry, couldn't understand your message. Try again.")
sys.exit(2)
# Remove the keywords
keywords = cal.nlp(to_parse)[0][-1].split()
for word in keywords:
args.message.remove(word)
# Remove redundant word 'this'
try:
args.message.remove('this')
except ValueError:
pass
if 'to' in args.message:
args.message.remove('to')
message = ' '.join(args.message)
ping_me.engine.engine(message, m_date.year, m_date.month,
m_date.day, m_date.hour, m_date.minute,
args.v)
def detailed_usage():
"""Detailed documentation of ping-me."""
print("Welcome to the detailed documentation of ping-me !")
# Inspired from 'import this'
s = " "; l = "_ "; r = " _"; f = "/"; b = "\\"; p = "|"; d = "— "
print(s*6 + l*5 + s + l*4 + r + s*12 + l + r*5 + s*2 + r + s*8 + l +
s*7 + l*4)
print(s*5 + f + s*8 + f + s*5 + f + s*4 + f + s + b + s*10 + f + s + f +
s*12 + f + s + b + s*6 + f + s + p + s*6 + f + s*7)
print(s*4 + f + s*8 + f + s*5 + f + s*4 + f + s*3 + b + s*8 + f + s + f +
s*12 + f + s*3 + b + s*4 + f + s*2 + p + s*5 + f + s*7)
print(s*3 + f + r*4 + f + s*5 + f + s*4 + f + s*5 + b + s*6 + f + s + f +
s*2 + r*4 + s*2 + f + 5*s + b + s*2 + f + s*3 + p + s*4 + f + l*4)
print(s*2 + f + s*14 + f + s*4 + f + s*7 + b + s*4 + f + s + f + s*9 + f +
s*2 + f + s*7 + b + f + s*4 + p + s*3 + f + s*7)
print(s + f + s*14 + f + s*4 + f + s*9 + b + s*2 + f + s + f + s*9 + f +
s*2 + f + s*14 + p + s*2 + f + s*7)
print(f + s*11 + d*4 + f + s*11 + b + f + s + f + (r*5)[1:] + f + s*2 +
f + s*15 + p + s + f + (r*4)[1:])
print("")
print("ping-me works well with time and date flags already. " +
"Use 'ping-me -h' for that option. " +
"However, ping-me is smart enough to work without flags.\n")
print("Examples : ")
print("\t\t1. ping-me to call mom tonight")
print("\t\t2. ping-me to buy milk early today")
print("\t\t3. ping-me to go home seven days from now")
print("\t\t4. ping-me to take a nap this afternoon")
print("\t\t5. ping-me to go workout next month")
print("")
print("Report (and track process on fixing) bugs on " +
"https://github.com/OrkoHunter/ping-me. Or simply write a mail " +
"to Himanshu Mishra at himanshumishra[at]iitkgp[dot]ac[dot]in")
def reconfig():
"""Reconfigure the user. Removes all the information of existing one."""
if not os.path.exists(home + "/.pingmeconfig"):
ping_me.authenticate.newuser()
else:
old_pass = hashlib.md5(getpass.getpass("Old Password : " +
"").rstrip()).hexdigest()
if old_pass == ping_me.authenticate.extract_password():
ping_me.authenticate.newuser()
else:
print("Authentication failed.")
sys.exit(2)
if __name__ == "__main__":
main()
| 0
| 0
| 0
|
5cb7d5f6b4498ffbcd46a0a7bcd0f240d3cdfa19
| 443
|
py
|
Python
|
test/forecast.py
|
sambacha/pyblock
|
f8f207de36a2f91dfe5f61681eba0e371cb0c552
|
[
"MIT"
] | null | null | null |
test/forecast.py
|
sambacha/pyblock
|
f8f207de36a2f91dfe5f61681eba0e371cb0c552
|
[
"MIT"
] | null | null | null |
test/forecast.py
|
sambacha/pyblock
|
f8f207de36a2f91dfe5f61681eba0e371cb0c552
|
[
"MIT"
] | null | null | null |
"""Test workflow of forecasting model."""
import sys
sys.path.append("../Forecasting")
import model
def test_forecast():
"""Optimize an ARIMA model and predict a few data points."""
START = 5
END = 10
print("Forecasting...")
f = model.Forecast("../Forecasting/blockchain.csv")
f.optimizeARIMA(range(5), range(5), range(5), f.endog, f.exog)
pred = f.predictARIMA(START, END)
assert len(pred) == (END - START)
| 26.058824
| 66
| 0.650113
|
"""Test workflow of forecasting model."""
import sys
sys.path.append("../Forecasting")
import model
def test_forecast():
"""Optimize an ARIMA model and predict a few data points."""
START = 5
END = 10
print("Forecasting...")
f = model.Forecast("../Forecasting/blockchain.csv")
f.optimizeARIMA(range(5), range(5), range(5), f.endog, f.exog)
pred = f.predictARIMA(START, END)
assert len(pred) == (END - START)
| 0
| 0
| 0
|
6286afe34c783abf3fab1f6006078941470e0023
| 6,221
|
py
|
Python
|
prototxt_basic.py
|
niekai1982/MobileNet-SSD
|
0fb307ff049865338017627d4f082a8fd8b12016
|
[
"MIT"
] | null | null | null |
prototxt_basic.py
|
niekai1982/MobileNet-SSD
|
0fb307ff049865338017627d4f082a8fd8b12016
|
[
"MIT"
] | null | null | null |
prototxt_basic.py
|
niekai1982/MobileNet-SSD
|
0fb307ff049865338017627d4f082a8fd8b12016
|
[
"MIT"
] | null | null | null |
# prototxt_basic
# ----------------------------------------------------------------
| 36.380117
| 100
| 0.561968
|
# prototxt_basic
import sys  # needed for sys.exit() in write_node() below
def data(txt_file, info):
txt_file.write('name: "mxnet-mdoel"\n')
txt_file.write('layer {\n')
txt_file.write(' name: "data"\n')
txt_file.write(' type: "Input"\n')
txt_file.write(' top: "data"\n')
txt_file.write(' input_param {\n')
txt_file.write(' shape: { dim: 10 dim: 3 dim: 224 dim: 224 }\n') # TODO
txt_file.write(' }\n')
txt_file.write('}\n')
txt_file.write('\n')
def Convolution(txt_file, info):
print('test')
if info['attr']['no_bias'] == 'True':
bias_term = 'false'
else:
bias_term = 'true'
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "Convolution"\n')
txt_file.write(' convolution_param {\n')
txt_file.write(' num_output: %s\n' % info['attr']['num_filter'])
txt_file.write(' kernel_size: %s\n' % info['attr']['kernel'].split('(')[1].split(',')[0]) # TODO
txt_file.write(' pad: %s\n' % info['attr']['pad'].split('(')[1].split(',')[0]) # TODO
txt_file.write(' group: %s\n' % info['attr']['num_group'])
txt_file.write(' stride: %s\n' % info['attr']['stride'].split('(')[1].split(',')[0])
txt_file.write(' bias_term: %s\n' % bias_term)
txt_file.write(' }\n')
if 'share' in info.keys() and info['share']:
txt_file.write(' param {\n')
txt_file.write(' name: "%s"\n' % info['params'][0])
txt_file.write(' }\n')
txt_file.write('}\n')
txt_file.write('\n')
def ChannelwiseConvolution(txt_file, info):
Convolution(txt_file, info)
def BatchNorm(txt_file, info):
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "BatchNorm"\n')
txt_file.write(' batch_norm_param {\n')
txt_file.write(' use_global_stats: true\n') # TODO
txt_file.write(' moving_average_fraction: 0.9\n') # TODO
txt_file.write(' eps: 0.001\n') # TODO
txt_file.write(' }\n')
txt_file.write('}\n')
# if info['fix_gamma'] is "False": # TODO
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['top'])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s_scale"\n' % info['top'])
txt_file.write(' type: "Scale"\n')
txt_file.write(' scale_param { bias_term: true }\n')
txt_file.write('}\n')
txt_file.write('\n')
pass
def Activation(txt_file, info):
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "ReLU"\n') # TODO
txt_file.write('}\n')
txt_file.write('\n')
pass
def Concat(txt_file, info):
txt_file.write('layer {\n')
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "Concat"\n')
for bottom_i in info['bottom']:
txt_file.write(' bottom: "%s"\n' % bottom_i)
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write('}\n')
txt_file.write('\n')
pass
def ElementWiseSum(txt_file, info):
txt_file.write('layer {\n')
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "Eltwise"\n')
for bottom_i in info['bottom']:
txt_file.write(' bottom: "%s"\n' % bottom_i)
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write('}\n')
txt_file.write('\n')
pass
def Pooling(txt_file, info):
pool_type = 'AVE' if info['param']['pool_type'] == 'avg' else 'MAX'
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "Pooling"\n')
txt_file.write(' pooling_param {\n')
txt_file.write(' pool: %s\n' % pool_type) # TODO
txt_file.write(' kernel_size: %s\n' % info['param']['kernel'].split('(')[1].split(',')[0])
txt_file.write(' stride: %s\n' % info['param']['stride'].split('(')[1].split(',')[0])
txt_file.write(' pad: %s\n' % info['param']['pad'].split('(')[1].split(',')[0])
txt_file.write(' }\n')
txt_file.write('}\n')
txt_file.write('\n')
pass
def FullyConnected(txt_file, info):
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "InnerProduct"\n')
txt_file.write(' inner_product_param {\n')
txt_file.write(' num_output: %s\n' % info['param']['num_hidden'])
txt_file.write(' }\n')
txt_file.write('}\n')
txt_file.write('\n')
pass
def Flatten(txt_file, info):
pass
def SoftmaxOutput(txt_file, info):
pass
# ----------------------------------------------------------------
def write_node(txt_file, info):
if 'label' in info['name']:
return
if info['op'] == 'null' and info['name'] == 'data':
data(txt_file, info)
elif info['op'] == 'Convolution':
Convolution(txt_file, info)
elif info['op'] == 'ChannelwiseConvolution':
ChannelwiseConvolution(txt_file, info)
elif info['op'] == 'BatchNorm':
BatchNorm(txt_file, info)
elif info['op'] == 'Activation':
Activation(txt_file, info)
elif info['op'] == 'ElementWiseSum':
ElementWiseSum(txt_file, info)
elif info['op'] == '_Plus':
ElementWiseSum(txt_file, info)
elif info['op'] == 'Concat':
Concat(txt_file, info)
elif info['op'] == 'Pooling':
Pooling(txt_file, info)
elif info['op'] == 'Flatten':
Flatten(txt_file, info)
elif info['op'] == 'FullyConnected':
FullyConnected(txt_file, info)
elif info['op'] == 'SoftmaxOutput':
SoftmaxOutput(txt_file, info)
else:
sys.exit("Warning! Unknown mxnet op:{}".format(info['op']))
| 5,849
| 0
| 281
|
0255284ab4263a817a1bbf24a0fc9d68856eaa0f
| 3,769
|
py
|
Python
|
tests/test_gregorian_calendar.py
|
ibalagurov/laughing-train
|
24c345b22230e695ddd461368f118d1f2bb8d379
|
[
"MIT"
] | null | null | null |
tests/test_gregorian_calendar.py
|
ibalagurov/laughing-train
|
24c345b22230e695ddd461368f118d1f2bb8d379
|
[
"MIT"
] | null | null | null |
tests/test_gregorian_calendar.py
|
ibalagurov/laughing-train
|
24c345b22230e695ddd461368f118d1f2bb8d379
|
[
"MIT"
] | null | null | null |
"""
Tests for checking gregorian calendar date.
Astronomical year contains 365,2425 days:
365 for usual year and 366 for leap
Leap years:
0.2425 is 97 / 400 or 1/4 - 1/100 + 1/400
It means:
- each 4th year is leap, except 3 of 4 round dates
- 2004, 2008, 2012 and etc are leap
- 2000, 2400, 2800 and etc. is leap
- 2100, 2200, 2300, 2500, 2600, 2700, 2900 and etc. are NOT leap
- for 400 years 97 are leap
"""
import pytest
from src.calendar import date_to_gregorian, GregorianDate
@pytest.mark.parametrize(
"day, month, year",
[
pytest.param(1, 1, 1, id="Minimum supported date"),
pytest.param(31, 12, 9999, id="Maximum supported date"),
pytest.param(29, 2, 4, id="First supported usual 4th leap year"),
pytest.param(29, 2, 2020, id="Usual 4th leap year"),
pytest.param(29, 2, 9996, id="Last supported usual 4th leap year"),
pytest.param(29, 2, 400, id="First supported round leap year"),
pytest.param(29, 2, 2000, id="Usual round leap year"),
pytest.param(29, 2, 9600, id="Last supported round leap year"),
],
)
def test_correct_date_format(day, month, year):
"""Check correct date"""
result: GregorianDate = date_to_gregorian(day=day, month=month, year=year)
assert (
result.correct
), f"Correct date '{day}-{month}-{year}'(day-month-year) is recognized as incorrect"
def test_400_years_contain_97_leap_years():
"""Check property of count leap years for 400 consecutive years"""
start_year = 2000
leap_years = [
year
for year in range(start_year, start_year + 400)
if date_to_gregorian(year=year, month=2, day=29).correct
]
actual_count = len(leap_years)
expected_count = 97
assert actual_count == expected_count, (
f"For 400 consecutive years '{expected_count}' should be leap. "
f"But actual count: '{actual_count}'. "
f"Years, recognized as leap:\n{leap_years}"
)
@pytest.mark.parametrize(
"day, month, year",
[
# 29th february for not leap years
pytest.param(29, 2, 1, id="First supported usual year"),
pytest.param(29, 2, 2021, id="Usual year"),
pytest.param(29, 2, 9999, id="Last supported usual year"),
pytest.param(29, 2, 100, id="First supported round usual year"),
pytest.param(29, 2, 2100, id="Usual round year"),
pytest.param(29, 2, 9900, id="Last supported round usual year"),
# day format
        pytest.param(32, 1, 1900, id="Nonexistent 32nd day"),
        pytest.param(31, 4, 1900, id="Nonexistent 31st day"),
        pytest.param(0, 1, 1900, id="Nonexistent 0th day"),
        pytest.param(-1, 1, 1900, id="Negative day"),
        # month format
        pytest.param(1, 0, 1900, id="Nonexistent 0th month"),
pytest.param(1, 13, 1900, id="Nonexistent 13th month"),
pytest.param(1, -1, 1900, id="Negative month"),
],
)
def test_incorrect_date_format(day, month, year):
"""Check incorrect date"""
result: GregorianDate = date_to_gregorian(day=day, month=month, year=year)
assert (
not result.correct
), f"Incorrect date '{day}-{month}-{year}'(day-month-year) is recognized as correct"
@pytest.mark.parametrize(
"day, month, year",
[
pytest.param(31, 1, 0, id="Unsupported bottom boundary year"),
pytest.param(1, 1, 10_000, id="Unsupported top boundary year"),
pytest.param(31, 1, -1, id="Negative year"),
],
)
def test_unsupported_date_format(day, month, year):
"""Check unsupported date"""
result: GregorianDate = date_to_gregorian(day=day, month=month, year=year)
assert (
not result.supported
), f"Unsupported date '{day}-{month}-{year}'(day-month-year) is recognized as supported"
| 36.592233
| 92
| 0.645795
|
"""
Tests for checking gregorian calendar date.
Astronomical year contains 365,2425 days:
365 for usual year and 366 for leap
Leap years:
0.2425 is 97 / 400 or 1/4 - 1/100 + 1/400
It means:
- each 4th year is leap, except 3 of 4 round dates
- 2004, 2008, 2012 and etc are leap
- 2000, 2400, 2800 and etc. is leap
- 2100, 2200, 2300, 2500, 2600, 2700, 2900 and etc. are NOT leap
- for 400 years 97 are leap
"""
import pytest
from src.calendar import date_to_gregorian, GregorianDate
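# Illustrative sketch (not part of src.calendar): the leap-year rule from the
# module docstring, written out directly. The helper name is hypothetical and
# exists only to make the 1/4 - 1/100 + 1/400 rule concrete.
def _is_leap_sketch(year: int) -> bool:
    """Every 4th year is leap, except round centuries not divisible by 400."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)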
@pytest.mark.parametrize(
"day, month, year",
[
pytest.param(1, 1, 1, id="Minimum supported date"),
pytest.param(31, 12, 9999, id="Maximum supported date"),
pytest.param(29, 2, 4, id="First supported usual 4th leap year"),
pytest.param(29, 2, 2020, id="Usual 4th leap year"),
pytest.param(29, 2, 9996, id="Last supported usual 4th leap year"),
pytest.param(29, 2, 400, id="First supported round leap year"),
pytest.param(29, 2, 2000, id="Usual round leap year"),
pytest.param(29, 2, 9600, id="Last supported round leap year"),
],
)
def test_correct_date_format(day, month, year):
"""Check correct date"""
result: GregorianDate = date_to_gregorian(day=day, month=month, year=year)
assert (
result.correct
), f"Correct date '{day}-{month}-{year}'(day-month-year) is recognized as incorrect"
def test_400_years_contain_97_leap_years():
"""Check property of count leap years for 400 consecutive years"""
start_year = 2000
leap_years = [
year
for year in range(start_year, start_year + 400)
if date_to_gregorian(year=year, month=2, day=29).correct
]
actual_count = len(leap_years)
expected_count = 97
assert actual_count == expected_count, (
f"For 400 consecutive years '{expected_count}' should be leap. "
f"But actual count: '{actual_count}'. "
f"Years, recognized as leap:\n{leap_years}"
)
@pytest.mark.parametrize(
"day, month, year",
[
# 29th february for not leap years
pytest.param(29, 2, 1, id="First supported usual year"),
pytest.param(29, 2, 2021, id="Usual year"),
pytest.param(29, 2, 9999, id="Last supported usual year"),
pytest.param(29, 2, 100, id="First supported round usual year"),
pytest.param(29, 2, 2100, id="Usual round year"),
pytest.param(29, 2, 9900, id="Last supported round usual year"),
# day format
        pytest.param(32, 1, 1900, id="Nonexistent 32nd day"),
        pytest.param(31, 4, 1900, id="Nonexistent 31st day"),
        pytest.param(0, 1, 1900, id="Nonexistent 0th day"),
        pytest.param(-1, 1, 1900, id="Negative day"),
        # month format
        pytest.param(1, 0, 1900, id="Nonexistent 0th month"),
pytest.param(1, 13, 1900, id="Nonexistent 13th month"),
pytest.param(1, -1, 1900, id="Negative month"),
],
)
def test_incorrect_date_format(day, month, year):
"""Check incorrect date"""
result: GregorianDate = date_to_gregorian(day=day, month=month, year=year)
assert (
not result.correct
), f"Incorrect date '{day}-{month}-{year}'(day-month-year) is recognized as correct"
@pytest.mark.parametrize(
"day, month, year",
[
pytest.param(31, 1, 0, id="Unsupported bottom boundary year"),
pytest.param(1, 1, 10_000, id="Unsupported top boundary year"),
pytest.param(31, 1, -1, id="Negative year"),
],
)
def test_unsupported_date_format(day, month, year):
"""Check unsupported date"""
result: GregorianDate = date_to_gregorian(day=day, month=month, year=year)
assert (
not result.supported
), f"Unsupported date '{day}-{month}-{year}'(day-month-year) is recognized as supported"
| 0
| 0
| 0
|
f317684016dd3310bb8a93c672100e2f58bb2b7f
| 159
|
py
|
Python
|
arithmetic10.py
|
indraputra147/belajarpython
|
13ed3e73a75f25cc6c2c0e1fc7af17ffa53e5760
|
[
"MIT"
] | null | null | null |
arithmetic10.py
|
indraputra147/belajarpython
|
13ed3e73a75f25cc6c2c0e1fc7af17ffa53e5760
|
[
"MIT"
] | null | null | null |
arithmetic10.py
|
indraputra147/belajarpython
|
13ed3e73a75f25cc6c2c0e1fc7af17ffa53e5760
|
[
"MIT"
] | null | null | null |
import math
a = int(input("a = "))
b = int(input("b = "))
c = a + b
d = a - b
e = a * b
f = a / b
g = a % b
h = math.log10(a)
i = a**b
print(c,d,e,f,g,h,i)
| 10.6
| 22
| 0.446541
|
import math
a = int(input("a = "))
b = int(input("b = "))
c = a + b
d = a - b
e = a * b
f = a / b
g = a % b
h = math.log10(a)
i = a**b
print(c,d,e,f,g,h,i)
| 0
| 0
| 0
|
feda14de5a8d251a97859d636e823d8fc7099816
| 193
|
py
|
Python
|
section_3/3_decorators_and_context_mgrs/decos.py
|
hgohel/Python-for-Everyday-Life
|
957963e67dca8c2d20a86fc7e66e818c80d013aa
|
[
"MIT"
] | 43
|
2018-04-09T11:59:11.000Z
|
2022-01-29T14:27:37.000Z
|
section_3/3_decorators_and_context_mgrs/decos.py
|
hgohel/Python-for-Everyday-Life
|
957963e67dca8c2d20a86fc7e66e818c80d013aa
|
[
"MIT"
] | 12
|
2019-11-03T16:50:39.000Z
|
2021-09-07T23:52:37.000Z
|
section_3/3_decorators_and_context_mgrs/decos.py
|
hgohel/Python-for-Everyday-Life
|
957963e67dca8c2d20a86fc7e66e818c80d013aa
|
[
"MIT"
] | 45
|
2018-05-10T21:40:46.000Z
|
2022-03-01T05:50:07.000Z
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python3
| 24.125
| 40
| 0.595855
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python3
def greet(func):
def decorated_func(*args, **kwargs):
print('Hello!')
return func(*args, **kwargs)
return decorated_func
| 123
| 0
| 23
|
38a8181ed5c0474062640cb8571e1aa5db1c0d30
| 7,383
|
py
|
Python
|
server/generateconfig.py
|
ehackify/hnp
|
ba0e10e9ca390616dfa3888ceafc94672f41d26d
|
[
"MIT"
] | 2
|
2020-04-29T09:58:21.000Z
|
2020-05-08T20:23:33.000Z
|
server/generateconfig.py
|
ehackify/hnp
|
ba0e10e9ca390616dfa3888ceafc94672f41d26d
|
[
"MIT"
] | 1
|
2020-05-01T11:00:58.000Z
|
2020-05-01T11:00:58.000Z
|
server/generateconfig.py
|
ehackify/hnp
|
ba0e10e9ca390616dfa3888ceafc94672f41d26d
|
[
"MIT"
] | null | null | null |
"""
This is a helper script meant to generate a
working config.py file from the config template.
"""
from getpass import getpass
import json
import os.path
from random import choice
import string
import sys
from urllib2 import urlopen
import argparse
el = string.ascii_letters + string.digits
rand_str = lambda n: ''.join(choice(el) for _ in range(n))
if __name__ == '__main__':
generate_config()
| 39.908108
| 111
| 0.629554
|
"""
This is a helper script meant to generate a
working config.py file from the config template.
"""
from getpass import getpass
import json
import os.path
from random import choice
import string
import sys
from urllib2 import urlopen
import argparse
el = string.ascii_letters + string.digits
rand_str = lambda n: ''.join(choice(el) for _ in range(n))
def get_pub_ip():
sock = urlopen('http://icanhazip.com/')
ip = sock.read().rstrip()
sock.close()
return ip
def generate_config():
# Check if config file already exists
if os.path.isfile('config.py'):
print('config.py already exists')
sys.exit()
pub_ip = get_pub_ip()
default_base_url = 'http://{}'.format(pub_ip)
default_honeymap_url = '{}:3000'.format(default_base_url)
default_log_path = '/var/log/hnp/hnp.log'
localconfig = {}
localconfig['SECRET_KEY'] = rand_str(32)
localconfig['DEPLOY_KEY'] = rand_str(8)
is_unattended = False
# Get and parse args for command unattended install
parser_description = 'This is a help script to generate a working config.py file from the config template.'
parser = argparse.ArgumentParser(description=parser_description)
subparsers = parser.add_subparsers(help='commands')
parser_generate = subparsers.add_parser('generate', help='Generate a config.py and prompt for options')
parser_generate.set_defaults(which='generate')
parser_unatt = subparsers.add_parser('unattended', help='Unattended install')
parser_unatt.set_defaults(which='unattended')
parser_unatt.add_argument('-e', '--email', type=str, required=True,
help='Superuser email address')
parser_unatt.add_argument('-p', '--password', type=str, required=True,
help='Superuser password')
parser_unatt.add_argument('-b', '--base_url', type=str, default=default_base_url,
help='Server base url')
parser_unatt.add_argument('-y', '--honeymap_url', type=str, default=default_honeymap_url,
help='Honeymap url')
parser_unatt.add_argument('-m', '--mail_server', type=str, default='localhost',
help='Mail server address')
parser_unatt.add_argument('-s', '--mail_port', type=int, default=25,
help='Mail server port')
parser_unatt.add_argument('--mail_tls', action='store_true',
help='Use TLS for mail')
parser_unatt.add_argument('--mail_ssl', action='store_true',
help='Use SSL for mail')
parser_unatt.add_argument('--mail_user', type=str, default='',
help='Mail username')
parser_unatt.add_argument('--mail_pass', type=str, default='',
help='Mail password')
parser_unatt.add_argument('--mail_sender', type=str, default='',
help='Mail sender')
parser_unatt.add_argument('-l', '--log_file_path', type=str, default=default_log_path,
help='Log file path')
parser_unatt.add_argument('-d', '--debug', action='store_true',
help='Run in debug mode')
if (len(sys.argv) < 2):
args = parser.parse_args(['generate'])
else:
args = parser.parse_args(sys.argv[1:])
# check for unattended install
    if args.which == 'unattended':
is_unattended = True
if is_unattended:
# Collect values from arguments
debug = args.debug
email = args.email
password = args.password
server_base_url= args.base_url
honeymap_url = args.honeymap_url
mail_server = args.mail_server
mail_port = args.mail_port
mail_tls = args.mail_tls
mail_ssl = args.mail_ssl
mail_username = args.mail_user
mail_password = args.mail_pass
default_mail_sender = args.mail_sender
log_file_path = args.log_file_path
else:
# Collect values from user
debug = raw_input('Do you wish to run in Debug mode?: y/n ')
while debug not in ['y', 'n']:
debug = raw_input('Please y or n ')
debug = True if debug == 'y' else False
email = raw_input('Superuser email: ')
while '@' not in email:
email = raw_input('Superuser email (must be valid): ')
while True:
password = getpass('Superuser password: ')
while not password:
password = getpass('Superuser password (cannot be blank): ')
password2 = getpass('Superuser password: (again): ')
while not password2:
password2 = getpass('Superuser password (again; cannot be blank): ')
if password == password2:
break
else:
print "Passwords did not match. Try again"
server_base_url = raw_input('Server base url ["{}"]: '.format(default_base_url))
if server_base_url.endswith('/'):
server_base_url = server_base_url[:-1]
server_base_url = server_base_url if server_base_url.strip() else default_base_url
default_honeymap_url = '{}:3000'.format(server_base_url)
honeymap_url = raw_input('Honeymap url ["{}"]: '.format(default_honeymap_url))
if honeymap_url.endswith('/'):
honeymap_url = honeymap_url[:-1]
mail_server = raw_input('Mail server address ["localhost"]: ')
mail_port = raw_input('Mail server port [25]: ')
mail_tls = raw_input('Use TLS for email?: y/n ')
while mail_tls not in ['y', 'n']:
mail_tls = raw_input('Please y or n ')
mail_ssl = raw_input('Use SSL for email?: y/n ')
while mail_ssl not in ['y', 'n']:
mail_ssl = raw_input('Please y or n ')
mail_username = raw_input('Mail server username [""]: ')
mail_password = getpass('Mail server password [""]: ')
default_mail_sender = raw_input('Mail default sender [""]: ')
log_file_path = raw_input('Path for log file ["{}"]: '.format(default_log_path))
honeymap_url = honeymap_url if honeymap_url.strip() else default_honeymap_url
log_file_path = log_file_path if log_file_path else default_log_path
localconfig['DEBUG'] = debug
localconfig['SUPERUSER_EMAIL'] = email
localconfig['SUPERUSER_PASSWORD'] = password
localconfig['SERVER_BASE_URL'] = server_base_url
localconfig['HONEYMAP_URL'] = honeymap_url
localconfig['MAIL_SERVER'] = mail_server if mail_server else "localhost"
localconfig['MAIL_PORT'] = mail_port if mail_port else 25
    # mail_tls / mail_ssl are 'y'/'n' strings in interactive mode but booleans
    # when passed on the command line in unattended mode; handle both.
    localconfig['MAIL_USE_TLS'] = mail_tls if isinstance(mail_tls, bool) else mail_tls == 'y'
    localconfig['MAIL_USE_SSL'] = mail_ssl if isinstance(mail_ssl, bool) else mail_ssl == 'y'
localconfig['MAIL_USERNAME'] = mail_username if mail_username else ''
localconfig['MAIL_PASSWORD'] = mail_password if mail_password else ''
localconfig['DEFAULT_MAIL_SENDER'] = default_mail_sender if default_mail_sender else ""
localconfig['LOG_FILE_PATH'] = log_file_path
with open('config.py.template', 'r') as templfile,\
open('config.py', 'w') as confile:
templ = templfile.read()
for key, setting in localconfig.iteritems():
templ = templ.replace('{{' + key + '}}', str(setting))
confile.write(templ)
if __name__ == '__main__':
generate_config()
| 6,930
| 0
| 46
|
080aa373a3a2c4c389161bee26616e1cb5da3628
| 12,129
|
py
|
Python
|
cogs/admin.py
|
fossabot/Pawbot
|
6fb5d6c16adc02b155a70df91a44c930eddb493f
|
[
"MIT"
] | null | null | null |
cogs/admin.py
|
fossabot/Pawbot
|
6fb5d6c16adc02b155a70df91a44c930eddb493f
|
[
"MIT"
] | null | null | null |
cogs/admin.py
|
fossabot/Pawbot
|
6fb5d6c16adc02b155a70df91a44c930eddb493f
|
[
"MIT"
] | null | null | null |
import time
import aiohttp
import traceback
import discord
import textwrap
import io
import json
from dhooks import Webhook
from utils.chat_formatting import pagify
from contextlib import redirect_stdout
from copy import copy
from typing import Union
from utils import repo, default, http, dataIO
from discord.ext import commands
| 36.423423
| 236
| 0.574986
|
import time
import aiohttp
import traceback
import discord
import textwrap
import io
import json
from dhooks import Webhook
from utils.chat_formatting import pagify
from contextlib import redirect_stdout
from copy import copy
from typing import Union
from utils import repo, default, http, dataIO
from discord.ext import commands
class Admin:
def __init__(self, bot):
self.bot = bot
self.config = default.get("config.json")
self._last_result = None
self.sessions = set()
@staticmethod
def cleanup_code(content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# remove `foo`
return content.strip('` \n')
@staticmethod
def get_syntax_error(e):
if e.text is None:
return f'```py\n{e.__class__.__name__}: {e}\n```'
return f'```py\n{e.text}{"^":>{e.offset}}\n{e.__class__.__name__}: {e}```'
@commands.command()
async def amiadmin(self, ctx):
""" Are you admin? """
if ctx.author.id in self.config.owners:
await ctx.send(f"Yes **{ctx.author.name}** you are admin! ✅")
elif ctx.author.id in self.config.contributors:
await ctx.send(f"No, but you're a contributor **{ctx.author.name}** 💙")
elif ctx.author.id in self.config.friends:
await ctx.send(f"No, but you're a friend of Paws **{ctx.author.name}** 💜")
else:
await ctx.send(f"No, heck off **{ctx.author.name}**.")
@commands.command()
@commands.check(repo.is_owner)
async def reload(self, ctx, name: str):
""" Reloads an extension. """
try:
self.bot.unload_extension(f"cogs.{name}")
self.bot.load_extension(f"cogs.{name}")
except FileNotFoundError as e:
return await ctx.send(f"```\n{e}```")
await ctx.send(f"Reloaded extension **{name}.py**")
@commands.command()
@commands.check(repo.is_owner)
async def reboot(self, ctx):
""" Reboot the bot """
await ctx.send('Rebooting now...')
time.sleep(1)
await self.bot.logout()
@commands.command()
@commands.check(repo.is_owner)
async def load(self, ctx, name: str):
""" Reloads an extension. """
try:
self.bot.load_extension(f"cogs.{name}")
except FileNotFoundError as e:
await ctx.send(f"```diff\n- {e}```")
return
await ctx.send(f"Loaded extension **{name}.py**")
@commands.command()
@commands.check(repo.is_owner)
async def unload(self, ctx, name: str):
""" Reloads an extension. """
try:
self.bot.unload_extension(f"cogs.{name}")
except FileNotFoundError as e:
await ctx.send(f"```diff\n- {e}```")
return
await ctx.send(f"Unloaded extension **{name}.py**")
@commands.group()
@commands.check(repo.is_owner)
async def change(self, ctx):
if ctx.invoked_subcommand is None:
_help = await ctx.bot.formatter.format_help_for(ctx, ctx.command)
for page in _help:
await ctx.send(page)
@change.command(name="playing")
@commands.check(repo.is_owner)
async def change_playing(self, ctx, *, playing: str):
""" Change playing status. """
try:
await self.bot.change_presence(
activity=discord.Game(type=0, name=playing),
status=discord.Status.online
)
dataIO.change_value("config.json", "playing", playing)
await ctx.send(f"Successfully changed playing status to **{playing}**")
except discord.InvalidArgument as err:
await ctx.send(err)
except Exception as e:
await ctx.send(e)
@change.command(name="username")
@commands.check(repo.is_owner)
async def change_username(self, ctx, *, name: str):
""" Change username. """
try:
await self.bot.user.edit(username=name)
await ctx.send(f"Successfully changed username to **{name}**")
except discord.HTTPException as err:
await ctx.send(err)
@change.command(name="nickname")
@commands.check(repo.is_owner)
async def change_nickname(self, ctx, *, name: str = None):
""" Change nickname. """
try:
await ctx.guild.me.edit(nick=name)
if name:
await ctx.send(f"Successfully changed nickname to **{name}**")
else:
await ctx.send("Successfully removed nickname")
except Exception as err:
await ctx.send(err)
@change.command(name="avatar")
@commands.check(repo.is_owner)
async def change_avatar(self, ctx, url: str = None):
""" Change avatar. """
if url is None and len(ctx.message.attachments) == 1:
url = ctx.message.attachments[0].url
else:
url = url.strip('<>')
try:
bio = await http.get(url, res_method="read")
await self.bot.user.edit(avatar=bio)
await ctx.send(f"Successfully changed the avatar. Currently using:\n{url}")
except aiohttp.InvalidURL:
await ctx.send("The URL is invalid...")
except discord.InvalidArgument:
await ctx.send("This URL does not contain a usable image")
except discord.HTTPException as err:
await ctx.send(err)
@commands.command()
@commands.check(repo.is_owner)
async def steal(self, ctx, emojiname, url: str = None):
"""Steals emojis"""
if emojiname is None or "http" in emojiname:
return await ctx.send("No emoji name provided")
if url is None and len(ctx.message.attachments) == 1:
url = ctx.message.attachments[0].url
else:
url = url.strip('<>')
try:
botguild = self.bot.get_guild(423879867457863680)
bio = await http.get(url, res_method="read")
await botguild.create_custom_emoji(name=emojiname, image=bio)
await ctx.message.delete()
await ctx.send(f"Successfully stolen emoji.")
except aiohttp.InvalidURL:
await ctx.send("The URL is invalid...")
except discord.InvalidArgument:
await ctx.send("This URL does not contain a usable image")
except discord.HTTPException as err:
await ctx.send(err)
@commands.command(pass_context=True, name='eval')
@commands.check(repo.is_owner)
async def _eval(self, ctx, *, body: str):
"""Evaluates a code"""
env = {
'bot': self.bot,
'ctx': ctx,
'channel': ctx.channel,
'author': ctx.author,
'guild': ctx.guild,
'message': ctx.message,
'_': self._last_result
}
if "bot.http.token" in body:
return await ctx.send(f"You can't take my token {ctx.author.name}")
env.update(globals())
body = self.cleanup_code(body)
stdout = io.StringIO()
to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
try:
exec(to_compile, env)
except Exception as e:
return await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```')
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
else:
value = stdout.getvalue()
reactiontosend = self.bot.get_emoji(508388437661843483)
await ctx.message.add_reaction(reactiontosend)
if ret is None:
if value:
await ctx.send(f'```py\n{value}\n```')
else:
                if isinstance(ret, str) and self.config.token in ret:
ret = self.config.realtoken
self._last_result = ret
await ctx.send(f'Inputted code:\n```py\n{body}\n```\n\nOutputted Code:\n```py\n{value}{ret}\n```')
@commands.group(aliases=["as"])
@commands.check(repo.is_owner)
async def sudo(self, ctx):
"""Run a cmd under an altered context
"""
if ctx.invoked_subcommand is None:
await ctx.send("...")
@sudo.command(aliases=["u", "--u", "--user", "user"])
@commands.check(repo.is_owner)
async def sudo_user(self, ctx, who: Union[discord.Member, discord.User], *, command: str):
"""Run a cmd under someone else's name
"""
msg = copy(ctx.message)
msg.author = who
msg.content = ctx.prefix + command
new_ctx = await self.bot.get_context(msg)
await self.bot.invoke(new_ctx)
@sudo.command(aliases=["c", "--c", "--channel", "channel"])
@commands.check(repo.is_owner)
async def sudo_channel(self, ctx, chid: int, *, command: str):
"""Run a command as another user."""
cmd = copy(ctx.message)
cmd.channel = self.bot.get_channel(chid)
cmd.content = ctx.prefix + command
new_ctx = await self.bot.get_context(cmd)
await self.bot.invoke(new_ctx)
@commands.command()
@commands.check(repo.is_owner)
async def cogs(self, ctx):
mod = ", ".join(list(self.bot.cogs))
await ctx.send(f"The current modules are:\n```\n{mod}\n```")
@commands.command(aliases=['gsi'])
@commands.check(repo.is_owner)
async def getserverinfo(self, ctx, *, guild_id: int):
""" Makes me get the information from a guild id"""
guild = self.bot.get_guild(guild_id)
if guild is None:
return await ctx.send("Hmph.. I got nothing..")
members = set(guild.members)
bots = filter(lambda m: m.bot, members)
bots = set(bots)
members = len(members) - len(bots)
if guild == ctx.guild:
roles = " ".join([x.mention for x in guild.roles != "@everyone"])
else:
roles = ", ".join([x.name for x in guild.roles if x.name != "@everyone"])
info = discord.Embed(title="Guild info", description=f"» Name: {guild.name}\n» Members/Bots: `{members}:{len(bots)}`"f"\n» Owner: {guild.owner}\n» Created at: {guild.created_at}"f"\n» Roles: {roles}", color=discord.Color.blue())
info.set_thumbnail(url=guild.icon_url)
await ctx.send(embed=info)
@commands.command(alisases=['bsl'])
@commands.check(repo.is_owner)
async def botservers(self, ctx):
"""Lists servers"""
owner = ctx.author
guilds = sorted(list(self.bot.guilds),
key=lambda s: s.name.lower())
msg = ""
for i, guild in enumerate(guilds, 1):
members = set(guild.members)
bots = filter(lambda m: m.bot, members)
bots = set(bots)
members = len(members) - len(bots)
msg += "`{}:` {}, `{}` `{} members, {} bots` \n".format(i, guild.name, guild.id, members, len(bots))
for page in pagify(msg, ['\n']):
await ctx.send(page)
@commands.command(aliases=["webhooktest"])
@commands.check(repo.is_owner)
async def whtest(self, ctx, whlink: str, *, texttosend):
try:
await ctx.message.delete()
hook = Webhook(whlink, is_async=True)
await hook.send(texttosend)
await hook.close()
except ValueError:
return await ctx.send("I couldn't send the message..")
@commands.command()
@commands.check(repo.is_owner)
async def blacklist(self, ctx, uid: int):
with open("blacklist.json", "r+") as file:
content = json.load(file)
content["blacklist"].append(uid)
file.seek(0)
json.dump(content, file)
file.truncate()
await ctx.send(f"I have successfully blacklisted the id **{uid}**")
def setup(bot):
bot.add_cog(Admin(bot))
| 1,273
| 10,490
| 46
|
ff3989ef53fcbd5d6a133b5ce43e595a8a5131de
| 9,683
|
py
|
Python
|
SIX_DOF.py
|
HarrisonLeece/Circinus
|
d5934f9d59f6b63635d5d053e48339292394c106
|
[
"MIT"
] | null | null | null |
SIX_DOF.py
|
HarrisonLeece/Circinus
|
d5934f9d59f6b63635d5d053e48339292394c106
|
[
"MIT"
] | null | null | null |
SIX_DOF.py
|
HarrisonLeece/Circinus
|
d5934f9d59f6b63635d5d053e48339292394c106
|
[
"MIT"
] | null | null | null |
'''
@Authors: Harrison Leece, James Hribal, Max Fung, Nils Heidenreich
@Purpose: Explore 6DOF rocket trajectory, especially quaternion rotation
Learning resources: https://eater.net/quaternions
'''
import numpy as np
import oyaml as yaml
import math
class Rotator:
'''
https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Using_quaternions_as_rotations
This function should take inputs: 'Cartesian, unit rotation-axis (Vector),
Rotation Angle in radians (Theta) and form a quaternion vector
'''
'''
https://math.stackexchange.com/questions/40164/how-do-you-rotate-a-vector-by-a-unit-quaternion
'''
'''
https://en.wikipedia.org/wiki/Quaternion#Hamilton_product
https://math.stackexchange.com/questions/40164/how-do-you-rotate-a-vector-by-a-unit-quaternion
'''
'''
Convert some arbitrary vector to a unit vector (divide components by the magnitude)
'''
'''
Checker function to verify a vector of arbitrary length is a unit vector
Tolerance variable to allow 'close enough' cases to succeed
'''
'''
https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Using_quaternions_as_rotations
q' = q2q1
q1 first, then q2
USE QUATERNION MULTIPLICATION RULE:
v*w where v and w are both quaternions with no real part
v*w = v x w - v * w
v*w where v and w are both quaternions with real part s and t (see wikipedia)
v*w = (s + v)*(t + w) = (st - v * w)+(sw + tv + v x w)
'''
class Rocket(Rotator):
'''
Calculate the angle of attack (alpha) in radians using the rocket's velocity direction
and rotation state.
Return alpha
'''
'''
Use environmental data regarding gust velocity and rocket geometry to estimate
the rotation axis and rotation magnitude (radians) of a rocket
Return a rotation quaternion for this axis and magnitude
'''
'''
Use the angle of attack, drag+lift coefficients and rocket geometry to
estimate the rotation axis and rotation magnitude (radians) of a rocket
Return a rotation quaternion for this axis and magnitude
'''
'''
    Placeholder - calculates tvc rotation
Not needed for final version
Return a rotation quaternion for this axis and magnitude
'''
'''
lock rotation of the craft despite forces acting on the body
useful for constraining rocket to a rail at launch for example
(Prevents integration of accelerations to velocities)
'''
'''
Unlocks rotation
'''
class Environment():
'''
The environment object helps compartmentalize environmental data (atmospheric
temperature, pressure, gusts etc...). The object can then be accessed
to fetch atmospheric or environmental data for the rotator object desired
'''
'''
For these be sure to check which altitude you are working with. For now I have
it as altitude relative to center of the earth
'''
'''
The Reference object is a Fixed Earth, centered at 0,0,0 with no rotation
'''
'''
Inherits the Reference (Non-rotating earth) and creates a rotating earth
'''
class Launchpad(RotatingEarth):
'''
Turn the coordinates of the launch site into spherical coordinates and
set as the position of the object
RRS coordinates:
fmt=dms
35 degrees, 21 minutes, 2 seconds North
117 degrees, 48 minutes, 30 seconds West
fmts:>> 'dd' << decimal degree, >> 'dmm' << degree + decimal minutes
>> dms << degrees, minutes, and seconds
Format input as nested lists, North first, then west
list = [[35,21,2],[117,48,39]]
'''
if __name__ == '__main__':
with open('rocket_info.yaml') as rocket_info:
rocket_data = yaml.load(rocket_info, Loader=yaml.FullLoader)
rot_tester = Rotator()
rot_tester.report_body_vector()
rot_quaternion = np.array([[-.707],[0], [.707],[0]])
rot_tester.rotate_body(rot_quaternion)
rot_tester.report_body_vector()
rocenv = Environment(None, None)
rocket = Rocket(rocket_data, rocenv)
| 35.996283
| 115
| 0.627388
|
'''
@Authors: Harrison Leece, James Hribal, Max Fung, Nils Heidenreich
@Purpose: Explore 6DOF rocket trajectory, especially quaternion rotation
Learning resources: https://eater.net/quaternions
'''
import numpy as np
import oyaml as yaml
import math
class Rotator:
def __init__(self):
self.re = 0; self.i = 0; self.j = 0; self.k = 1
self.body_vector = np.array([[0],[1],[0],[0]])
'''
https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Using_quaternions_as_rotations
This function should take inputs: 'Cartesian, unit rotation-axis (Vector),
Rotation Angle in radians (Theta) and form a quaternion vector
'''
def form_quaternion(self, vector, theta):
assert self.vector_is_unit(vector), 'Class: Rotator, Fxn: form_quaternion, vector is not a unit quaternion'
r = np.cos(theta/2)
i = -1*np.sin(theta/2)*vector[0]
j = -1*np.sin(theta/2)*vector[1]
k = -1*np.sin(theta/2)*vector[2]
quaternion = np.array([[r],[i],[j],[k]])
return quaternion
'''
https://math.stackexchange.com/questions/40164/how-do-you-rotate-a-vector-by-a-unit-quaternion
'''
def rotate_body(self, quaternion):
left = quaternion
        right = np.array([quaternion[0], -quaternion[1], -quaternion[2], -quaternion[3]])  # conjugate quaternion
h1 = self.hamilton_product(left, self.body_vector)
print('H1: {}'.format(h1))
self.body_vector = self.hamilton_product(h1,right)
'''
https://en.wikipedia.org/wiki/Quaternion#Hamilton_product
https://math.stackexchange.com/questions/40164/how-do-you-rotate-a-vector-by-a-unit-quaternion
'''
def hamilton_product(self, vec1, vec2):
a1 = vec1[0]; a2 = vec2[0]
b1 = vec1[1]; b2 = vec2[1]
c1 = vec1[2]; c2 = vec2[2]
d1 = vec1[3]; d2 = vec2[3]
r = float(a1*a2 - b1*b2 - c1*c2 - d1*d2)
x = float(a1*b2 + b1*a2 + c1*d2 - d1*c2)
y = float(a1*c2 - b1*d2 + c1*a2 + d1*b2)
        z = float(a1*d2 + b1*c2 - c1*b2 + d1*a2)  # k component of the Hamilton product
return np.array([[r],[x],[y],[z]])
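    # Quick illustrative check of the Hamilton product above (assumes the
    # column-vector layout [[re], [i], [j], [k]] used throughout this class):
    # i * j should equal k, i.e.
    #   hamilton_product([[0],[1],[0],[0]], [[0],[0],[1],[0]]) -> [[0],[0],[0],[1]]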
def report_body_vector(self):
print(self.body_vector)
'''
Convert some arbitrary vector to a unit vector (divide components by the magnitude)
'''
def unitify_vector(self, vector):
mag = np.linalg.norm(vector)
return vector/mag
'''
Checker function to verify a vector of arbitrary length is a unit vector
Tolerance variable to allow 'close enough' cases to succeed
'''
    def vector_is_unit(self, vec):
        squares = [x*x for x in vec]
        vec_sum = np.sum(squares)
        norm = np.sqrt(vec_sum)
        tolerance = .01
        if abs(norm-1) < tolerance:
            return True
        return False
'''
https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Using_quaternions_as_rotations
q' = q2q1
q1 first, then q2
USE QUATERNION MULTIPLICATION RULE:
v*w where v and w are both quaternions with no real part
v*w = v x w - v * w
v*w where v and w are both quaternions with real part s and t (see wikipedia)
v*w = (s + v)*(t + w) = (st - v * w)+(sw + tv + v x w)
'''
def combine_quaternions(self, q1, q2):
re1 = q1[0]; re2 = q2[0]
        q1 = q1[1:4]; q2 = q2[1:4]  # vector (i, j, k) parts
cross = np.cross(q2, q1); dot = np.dot(q2, q1)
re_prime = (re1*re2 - dot)
temp = re1*q2 + re2*q1 + cross
q_prime = np.array([[re_prime],[temp[0]],[temp[1]],[temp[2]]])
return q_prime
class Rocket(Rotator):
def __init__(self, rocket_data, environment_obj, reference=None, units='english'):
super().__init__()
'''
abs_orientation is a (unit) vector representing the orientation of the rocket
relative to the launch location axis or global axis
'''
self.abs_orientation = np.array([[0],[0],[1]])
'''
rel_orientation is a (unit) vector representing the orientation of the rocket
relative to the velocity vector (defined as [0,0,1] if velocity is zero)
'''
self.rel_orientation = np.array([[0],[0],[1]])
self.velocity = np.array([[0],[0],[0]])
self.abs_position = np.array([[0],[0],[0]])
'''
        TODO: Get relative position and velocities, this object compared to reference
object
'''
self.relative_position = np.array([[None],[None],[None]])
self.relative_velocity = np.array([[None],[None],[None]])
'''
Calculate the angle of attack (alpha) in radians using the rocket's velocity direction
and rotation state.
Return alpha
'''
def calculate_alpha(self):
pass
'''
Use environmental data regarding gust velocity and rocket geometry to estimate
the rotation axis and rotation magnitude (radians) of a rocket
Return a rotation quaternion for this axis and magnitude
'''
def calculate_gust_rotation(self):
pass
'''
Use the angle of attack, drag+lift coefficients and rocket geometry to
estimate the rotation axis and rotation magnitude (radians) of a rocket
Return a rotation quaternion for this axis and magnitude
'''
def calculate_drag_rotation(self):
pass
'''
    Placeholder - calculates tvc rotation
Not needed for final version
Return a rotation quaternion for this axis and magnitude
'''
def calculate_tvc_rotation(self):
pass
'''
lock rotation of the craft despite forces acting on the body
useful for constraining rocket to a rail at launch for example
(Prevents integration of accelerations to velocities)
'''
def lock_rotation(self):
pass
'''
Unlocks rotation
'''
def unlock_rotation(self):
pass
class Environment():
'''
The environment object helps compartmentalize environmental data (atmospheric
temperature, pressure, gusts etc...). The object can then be accessed
to fetch atmospheric or environmental data for the rotator object desired
'''
def __init__(self, rocket_position, seed, units='english'):
self.EARTH_MASS_SLUG = 4.0948607276025*10**23
self.EARTH_MASS_KG = 5.972 * 10**24
self.EARTH_RADIUS_MI = 3958.8
self.EARTH_RADIUS_FT = 20902000
self.EARTH_RADIUS_M = 6371000
self.units = units
'''
Use the arguments to fetch data from functions
'''
pass
'''
For these be sure to check which altitude you are working with. For now I have
it as altitude relative to center of the earth
'''
def calc_gravity(self, altitude):
if self.units == 'english':
altitude = altitude * 0.3048
g = (self.EARTH_MASS_KG*6.67408*10**(-11))/(altitude**2)
return g
def fetch_atm_pressure(self, altitude):
pass
def fetch_atm_temperature(self, altitude):
pass
def fetch_atm_density(self, altitude):
pass
'''
The Reference object is a Fixed Earth, centered at 0,0,0 with no rotation
'''
class Reference():
def __init__(self):
self.position = np.array([[0],[0],[0]])
'''
Inherits the Reference (Non-rotating earth) and creates a rotating earth
'''
class RotatingEarth(Reference):
def __init__(self):
self.position= np.array([[0],[0],[0]])
#Angular velocity in radians/s
self.angular_velocity = 7.2921159 * 10**-5
#Angular displacement around the z axis
self.angular_dispalcement = 0
def rotate_step(self, step):
self.angular_dispalcement = self.angular_dispalcement + self.angular_velocity*step
class Launchpad(RotatingEarth):
def __init__(self, units='english'):
self.EARTH_RADIUS_FT = 20902000
self.EARTH_RADIUS_M = 6371000
#set units for this object
self.units = units
#Using spherical coordinates for this (r,theta,phi)
self.position= np.array([[self.get_radius],[0],[0]])
#Angular velocity in radians/s
self.angular_velocity = 7.2921159 * 10**-5
#Angular displacement around the z axis
self.angular_dispalcement = 0
def get_radius(self):
if self.units == 'english':
return self.EARTH_RADIUS_FT
return self.EARTH_RADIUS_M
'''
Turn the coordinates of the launch site into spherical coordinates and
set as the position of the object
RRS coordinates:
fmt=dms
35 degrees, 21 minutes, 2 seconds North
117 degrees, 48 minutes, 30 seconds West
fmts:>> 'dd' << decimal degree, >> 'dmm' << degree + decimal minutes
>> dms << degrees, minutes, and seconds
Format input as nested lists, North first, then west
list = [[35,21,2],[117,48,39]]
'''
def resolve_coordinates(self, input, fmt='dms'):
#parse the input list into radians in format: [North Radians], [West Radians]
radians_list = []
for list in input:
degrees = list[0]
if (fmt == 'dmm' or fmt == 'dms'):
decimals = list[1]*1/60
if (fmt == 'dms'):
decimals = decimals + list[2]*1/3600
degrees = degrees + decimals
#convert degrees to radian
radians_list.append(degrees*np.pi/180)
#
if __name__ == '__main__':
with open('rocket_info.yaml') as rocket_info:
rocket_data = yaml.load(rocket_info, Loader=yaml.FullLoader)
rot_tester = Rotator()
rot_tester.report_body_vector()
rot_quaternion = np.array([[-.707],[0], [.707],[0]])
rot_tester.rotate_body(rot_quaternion)
rot_tester.report_body_vector()
rocenv = Environment(None, None)
rocket = Rocket(rocket_data, rocenv)
| 4,917
| 7
| 725
|
480b454c4f3f99e6503621c21cc4b0ccfce7fa43
| 1,683
|
py
|
Python
|
warrior/Actions/DevActions/dev_actions.py
|
pavithra-gowda/warrior
|
19b153310552b986b86b5470fcfea9547a74c3a9
|
[
"Apache-2.0"
] | null | null | null |
warrior/Actions/DevActions/dev_actions.py
|
pavithra-gowda/warrior
|
19b153310552b986b86b5470fcfea9547a74c3a9
|
[
"Apache-2.0"
] | 1
|
2021-12-13T20:04:13.000Z
|
2021-12-13T20:04:13.000Z
|
warrior/Actions/DevActions/dev_actions.py
|
pavithra-gowda/warrior
|
19b153310552b986b86b5470fcfea9547a74c3a9
|
[
"Apache-2.0"
] | null | null | null |
import Framework.Utils as Utils
from Framework.Utils import data_utils
from Framework.Utils.testcase_Utils import pNote
class MyActions(object):
"""" Default __init__ field must be used when using classes for keywords """
def full_name(self, student, first_name= 'first', last_name= 'last', full_name= 'first last'):
"""
combine first and last name
"""
        # status will be used to record whether the test passed or failed
status = True
# we will return the dictionary of keys and value to maintain the logs
log_dic={}
wdesc= 'combine first and last name'
full_name = None
        data = data_utils.get_credentials(self.datafile, student, [first_name, last_name, full_name])
if first_name and last_name:
pNote("first name is {0}".format(first_name))
pNote("last name is {0}".format(last_name))
temp_full_name = first_name + ' ' + last_name
if temp_full_name != full_name:
status= False
pNote('full name is {0}'.format(full_name))
else:
pNote("names are not provided")
status = False
log_dic["student"]= student
log_dic["first_names"]= first_name
log_dic["second_name"]= second_name
log_dic["full_name"]= full_name
return status, log_dic
| 36.586957
| 101
| 0.641711
|
import Framework.Utils as Utils
from Framework.Utils import data_utils
from Framework.Utils.testcase_Utils import pNote
class MyActions(object):
"""" Default __init__ field must be used when using classes for keywords """
def __init__(self):
self.resultfile = Utils.config_Utils.resultfile
self.datafile = Utils.config_Utils.datafile
self.logsdir = Utils.config_Utils.logsdir
self.filename = Utils.config_Utils.filename
self.logfile = Utils.config_Utils.logfile
def full_name(self, student, first_name= 'first', last_name= 'last', full_name= 'first last'):
"""
combine first and last name
"""
        # status will be used to record whether the test passed or failed
status = True
# we will return the dictionary of keys and value to maintain the logs
log_dic={}
wdesc= 'combine first and last name'
full_name = None
        data = data_utils.get_credentials(self.datafile, student, [first_name, last_name, full_name])
if first_name and last_name:
pNote("first name is {0}".format(first_name))
pNote("last name is {0}".format(last_name))
temp_full_name = first_name + ' ' + last_name
if temp_full_name != full_name:
status= False
pNote('full name is {0}'.format(full_name))
else:
pNote("names are not provided")
status = False
log_dic["student"]= student
log_dic["first_names"]= first_name
log_dic["second_name"]= second_name
log_dic["full_name"]= full_name
return status, log_dic
| 258
| 0
| 26
|
51104acb8b2c3ea9fc29d8399745835746c384e2
| 413
|
py
|
Python
|
scripts/avsb/visualize.py
|
maryprimary/frg
|
e789439f599eb884a6220ae5b471cf610b0c2b2a
|
[
"MIT"
] | null | null | null |
scripts/avsb/visualize.py
|
maryprimary/frg
|
e789439f599eb884a6220ae5b471cf610b0c2b2a
|
[
"MIT"
] | 12
|
2021-02-04T06:46:36.000Z
|
2021-07-01T00:43:38.000Z
|
scripts/avsb/visualize.py
|
maryprimary/frg
|
e789439f599eb884a6220ae5b471cf610b0c2b2a
|
[
"MIT"
] | null | null | null |
'''显示结果'''
import numpy
from helpers.drawer import draw_heatmap
def main():
'''入口'''
lval = 5.20
rpath = 'heatmap8/avsb'
uval = numpy.load('{0}/{1:.2f}U.npy'.format(rpath, lval))
draw_heatmap(uval[0, 0, 0, 0, :, :, 0])
draw_heatmap(uval[1, 1, 1, 1, :, :, 0])
draw_heatmap(uval[1, 0, 0, 1, :, :, 0])
draw_heatmap(uval[0, 1, 1, 0, :, :, 0])
if __name__ == '__main__':
main()
| 21.736842
| 61
| 0.544794
|
'''显示结果'''
import numpy
from helpers.drawer import draw_heatmap
def main():
'''入口'''
lval = 5.20
rpath = 'heatmap8/avsb'
uval = numpy.load('{0}/{1:.2f}U.npy'.format(rpath, lval))
draw_heatmap(uval[0, 0, 0, 0, :, :, 0])
draw_heatmap(uval[1, 1, 1, 1, :, :, 0])
draw_heatmap(uval[1, 0, 0, 1, :, :, 0])
draw_heatmap(uval[0, 1, 1, 0, :, :, 0])
if __name__ == '__main__':
main()
| 0
| 0
| 0
|
d2c57943c6d4a8aba77f42d9392defc82e3aa234
| 1,509
|
py
|
Python
|
tests/test_j2sparql.py
|
vliz-be-opsci/pykg2tbl
|
0455c5b58a0bde5e3453cd2242e89f7870d49d68
|
[
"MIT"
] | null | null | null |
tests/test_j2sparql.py
|
vliz-be-opsci/pykg2tbl
|
0455c5b58a0bde5e3453cd2242e89f7870d49d68
|
[
"MIT"
] | null | null | null |
tests/test_j2sparql.py
|
vliz-be-opsci/pykg2tbl
|
0455c5b58a0bde5e3453cd2242e89f7870d49d68
|
[
"MIT"
] | null | null | null |
import unittest
import pytest
import sys
import os
from util4tests import enable_test_logging, run_single_test, log
from pykg2tbl import KG2TblService, KGFileSource, KG2EndpointSource, J2SparqlBuilder
ALL_TRIPLES_SPARQL = "SELECT * WHERE { ?s ?p ?o. } LIMIT 10"
BODC_ENDPOINT = "http://vocab.nerc.ac.uk/sparql/sparql"
if __name__ == "__main__":
run_single_test(__file__)
| 36.804878
| 102
| 0.71173
|
import unittest
import pytest
import sys
import os
from util4tests import enable_test_logging, run_single_test, log
from pykg2tbl import KG2TblService, KGFileSource, KG2EndpointSource, J2SparqlBuilder
ALL_TRIPLES_SPARQL = "SELECT * WHERE { ?s ?p ?o. } LIMIT 10"
BODC_ENDPOINT = "http://vocab.nerc.ac.uk/sparql/sparql"
class TestBuilder(unittest.TestCase):
def test_basic_query_sparql(self):
template_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sparql_templates')
j2sqb = J2SparqlBuilder(template_folder)
qry = j2sqb.build_sparql_query("all.sparql")
self.assertIsNotNone(qry, "result qry should exist")
self.assertEqual('''SELECT *
WHERE {
?s ?p ?o.
}
LIMIT 10''',qry,'unexpected qry result')
def test_get_variables_sparql_query(self):
#TODO write test to get all the variables from a sparql template
template_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sparql_templates')
log.debug(f"template folder = {template_folder}")
j2sqb = J2SparqlBuilder(template_folder)
variables = j2sqb.variables_in_query(name="bodc_find.sparql")
log.info(f"all variables {variables}")
self.assertIsNotNone(variables,'variables should exist')
def test_injested_query_sparql(self):
#test a sparql template who uses variables to make a sparql query to see if it works
pass
if __name__ == "__main__":
run_single_test(__file__)
| 1,003
| 16
| 111
|
acc786eefab7f0810d55f87c6a7ce8c231f23c01
| 18,029
|
py
|
Python
|
tutorials/evoked/30_eeg_erp.py
|
ts2-lescot/mne-python
|
e4b16dc57a6a188aa06332b73d911e8131972522
|
[
"BSD-3-Clause"
] | null | null | null |
tutorials/evoked/30_eeg_erp.py
|
ts2-lescot/mne-python
|
e4b16dc57a6a188aa06332b73d911e8131972522
|
[
"BSD-3-Clause"
] | 1
|
2021-04-24T05:21:19.000Z
|
2021-04-27T07:47:52.000Z
|
tutorials/evoked/30_eeg_erp.py
|
ts2-lescot/mne-python
|
e4b16dc57a6a188aa06332b73d911e8131972522
|
[
"BSD-3-Clause"
] | 1
|
2021-01-07T23:08:52.000Z
|
2021-01-07T23:08:52.000Z
|
"""
.. _tut-erp:
EEG processing and Event Related Potentials (ERPs)
==================================================
This tutorial shows how to perform standard ERP analyses in MNE-Python. Most of
the material here is covered in other tutorials too, but for convenience the
functions and methods most useful for ERP analyses are collected here, with
links to other tutorials where more detailed information is given.
As usual we'll start by importing the modules we need and loading some example
data. Instead of parsing the events from the raw data's :term:`stim channel`
(like we do in :ref:`this tutorial <tut-events-vs-annotations>`), we'll load
the events from an external events file. Finally, to speed up computations so
our documentation server can handle them, we'll crop the raw data from ~4.5
minutes down to 90 seconds.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, preload=False)
sample_data_events_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw-eve.fif')
events = mne.read_events(sample_data_events_file)
raw.crop(tmax=90) # in seconds; happens in-place
# discard events >90 seconds (not strictly necessary: avoids some warnings)
events = events[events[:, 0] <= raw.last_samp]
###############################################################################
# The file that we loaded has already been partially processed: 3D sensor
# locations have been saved as part of the ``.fif`` file, the data have been
# low-pass filtered at 40 Hz, and a common average reference is set for the
# EEG channels, stored as a projector (see :ref:`section-avg-ref-proj` in the
# :ref:`tut-set-eeg-ref` tutorial for more info about when you may want to do
# this). We'll discuss how to do each of these below.
#
# Since this is a combined EEG+MEG dataset, let's start by restricting the data
# to just the EEG and EOG channels. This will cause the other projectors saved
# in the file (which apply only to magnetometer channels) to be removed. By
# looking at the measurement info we can see that we now have 59 EEG channels
# and 1 EOG channel.
raw.pick(['eeg', 'eog']).load_data()
raw.info
###############################################################################
# Channel names and types
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# In practice it's quite common to have some channels labelled as EEG that are
# actually EOG channels. `~mne.io.Raw` objects have a
# `~mne.io.Raw.set_channel_types` method that you can use to change a channel
# that is labeled as ``eeg`` into an ``eog`` type. You can also rename channels
# using the `~mne.io.Raw.rename_channels` method. Detailed examples of both of
# these methods can be found in the tutorial :ref:`tut-raw-class`. In this data
# the channel types are all correct already, so for now we'll just rename the
# channels to remove a space and a leading zero in the channel names, and
# convert to lowercase:
channel_renaming_dict = {name: name.replace(' 0', '').lower()
for name in raw.ch_names}
_ = raw.rename_channels(channel_renaming_dict) # happens in-place
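###############################################################################
# As a hedged sketch (not needed for this dataset, where the channel types are
# already correct): re-typing a channel that had been mislabeled as EEG would
# look like the block below; the channel name is purely illustrative.
#
# .. code-block:: python
#
#     raw.set_channel_types({'eog61': 'eog'})  # mark the channel as EOG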
###############################################################################
# Channel locations
# ^^^^^^^^^^^^^^^^^
#
# The tutorial :ref:`tut-sensor-locations` describes MNE-Python's handling of
# sensor positions in great detail. To briefly summarize: MNE-Python
# distinguishes :term:`montages <montage>` (which contain sensor positions in
# 3D: ``x``, ``y``, ``z``, in meters) from :term:`layouts <layout>` (which
# define 2D arrangements of sensors for plotting approximate overhead diagrams
# of sensor positions). Additionally, montages may specify *idealized* sensor
# positions (based on, e.g., an idealized spherical headshape model) or they
# may contain *realistic* sensor positions obtained by digitizing the 3D
# locations of the sensors when placed on the actual subject's head.
#
# This dataset has realistic digitized 3D sensor locations saved as part of the
# ``.fif`` file, so we can view the sensor locations in 2D or 3D using the
# `~mne.io.Raw.plot_sensors` method:
raw.plot_sensors(show_names=True)
fig = raw.plot_sensors('3d')
###############################################################################
# If you're working with a standard montage like the `10-20 <ten_twenty_>`_
# system, you can add sensor locations to the data like this:
# ``raw.set_montage('standard_1020')``. See :ref:`tut-sensor-locations` for
# info on what other standard montages are built-in to MNE-Python.
#
# If you have digitized realistic sensor locations, there are dedicated
# functions for loading those digitization files into MNE-Python; see
# :ref:`reading-dig-montages` for discussion and :ref:`dig-formats` for a list
# of supported formats. Once loaded, the digitized sensor locations can be
# added to the data by passing the loaded montage object to
# ``raw.set_montage()``.
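#
# A hedged sketch of that last step (the file name and the specific reader
# function are illustrative; pick the ``read_dig_*`` reader that matches your
# digitizer's format, see :ref:`dig-formats`):
#
# .. code-block:: python
#
#     montage = mne.channels.read_dig_captrak('my_digitization.bvct')
#     raw.set_montage(montage)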
#
#
# Setting the EEG reference
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# As mentioned above, this data already has an EEG common average reference
# added as a :term:`projector`. We can view the effect of this on the raw data
# by plotting with and without the projector applied:
for proj in (False, True):
fig = raw.plot(n_channels=5, proj=proj, scalings=dict(eeg=50e-6))
fig.subplots_adjust(top=0.9) # make room for title
ref = 'Average' if proj else 'No'
fig.suptitle(f'{ref} reference', size='xx-large', weight='bold')
###############################################################################
# The referencing scheme can be changed with the function
# `mne.set_eeg_reference` (which by default operates on a *copy* of the data)
# or the `raw.set_eeg_reference() <mne.io.Raw.set_eeg_reference>` method (which
# always modifies the data in-place). The tutorial :ref:`tut-set-eeg-ref` shows
# several examples of this.
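#
# For instance, a hedged sketch of re-referencing to an explicit channel pair
# (the pair below is chosen only for illustration; with the default
# ``copy=True`` the ``raw`` object used in the rest of this tutorial is left
# untouched):
#
# .. code-block:: python
#
#     rereferenced, _ = mne.set_eeg_reference(
#         raw, ref_channels=['eeg10', 'eeg21'])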
#
#
# Filtering
# ^^^^^^^^^
#
# MNE-Python has extensive support for different ways of filtering data. For a
# general discussion of filter characteristics and MNE-Python defaults, see
# :ref:`disc-filtering`. For practical examples of how to apply filters to your
# data, see :ref:`tut-filter-resample`. Here, we'll apply a simple high-pass
# filter for illustration:
raw.filter(l_freq=0.1, h_freq=None)
###############################################################################
# Evoked responses: epoching and averaging
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The general process for extracting evoked responses from continuous data is
# to use the `~mne.Epochs` constructor, and then average the resulting epochs
# to create an `~mne.Evoked` object. In MNE-Python, events are represented as
# a :class:`NumPy array <numpy.ndarray>` of sample numbers and integer event
# codes. The event codes are stored in the last column of the events array:
np.unique(events[:, -1])
###############################################################################
# The :ref:`tut-event-arrays` tutorial discusses event arrays in more detail.
# Integer event codes are mapped to more descriptive text using a Python
# :class:`dictionary <dict>` usually called ``event_id``. This mapping is
# determined by your experiment code (i.e., it reflects which event codes you
# chose to use to represent different experimental events or conditions). The
# :ref:`sample-dataset` data has the following mapping:
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'face': 5, 'buttonpress': 32}
###############################################################################
# Now we can extract epochs from the continuous data. An interactive plot
# allows you to click on epochs to mark them as "bad" and drop them from the
# analysis (it is not interactive on the documentation website, but will be
# when you run `epochs.plot() <mne.Epochs.plot>` in a Python console).
epochs = mne.Epochs(raw, events, event_id=event_dict, tmin=-0.3, tmax=0.7,
preload=True)
fig = epochs.plot()
###############################################################################
# It is also possible to automatically drop epochs, when first creating them or
# later on, by providing maximum peak-to-peak signal value thresholds (pass to
# the `~mne.Epochs` constructor as the ``reject`` parameter; see
# :ref:`tut-reject-epochs-section` for details). You can also do this after
# the epochs are already created, using the `~mne.Epochs.drop_bad` method:
reject_criteria = dict(eeg=100e-6, # 100 µV
eog=200e-6) # 200 µV
_ = epochs.drop_bad(reject=reject_criteria)
###############################################################################
# Next we generate a barplot of which channels contributed most to epochs
# getting rejected. If one channel is responsible for lots of epoch rejections,
# it may be worthwhile to mark that channel as "bad" in the `~mne.io.Raw`
# object and then re-run epoching (fewer channels w/ more good epochs may be
# preferable to keeping all channels but losing many epochs). See
# :ref:`tut-bad-channels` for more info.
epochs.plot_drop_log()
###############################################################################
# Another way in which epochs can be automatically dropped is if the
# `~mne.io.Raw` object they're extracted from contains :term:`annotations` that
# begin with either ``bad`` or ``edge`` ("edge" annotations are automatically
# inserted when concatenating two separate `~mne.io.Raw` objects together). See
# :ref:`tut-reject-data-spans` for more information about annotation-based
# epoch rejection.
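#
# A hedged sketch of marking a time span as bad with annotations (the onset
# and duration below are arbitrary, purely illustrative values):
#
# .. code-block:: python
#
#     annot = mne.Annotations(onset=[30.0], duration=[2.0],
#                             description=['bad_example_span'])
#     raw.set_annotations(annot)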
#
# Now that we've dropped the bad epochs, let's look at our evoked responses for
# some conditions we care about. Here the `~mne.Epochs.average` method will
# create an `~mne.Evoked` object, which we can then plot. Notice that we
# select which condition we want to average using the square-bracket indexing
# (like a :class:`dictionary <dict>`); that returns a smaller epochs object
# containing just the epochs from that condition, to which we then apply the
# `~mne.Epochs.average` method:
l_aud = epochs['auditory/left'].average()
l_vis = epochs['visual/left'].average()
###############################################################################
# These `~mne.Evoked` objects have their own interactive plotting method
# (though again, it won't be interactive on the documentation website):
# click-dragging a span of time will generate a scalp field topography for that
# time span. Here we also demonstrate built-in color-coding the channel traces
# by location:
fig1 = l_aud.plot()
fig2 = l_vis.plot(spatial_colors=True)
###############################################################################
# Scalp topographies can also be obtained non-interactively with the
# `~mne.Evoked.plot_topomap` method. Here we display topomaps of the average
# field in 50 ms time windows centered at -200 ms, 100 ms, and 400 ms.
l_aud.plot_topomap(times=[-0.2, 0.1, 0.4], average=0.05)
###############################################################################
# Considerable customization of these plots is possible, see the docstring of
# `~mne.Evoked.plot_topomap` for details.
#
# There is also a built-in method for combining "butterfly" plots of the
# signals with scalp topographies, called `~mne.Evoked.plot_joint`. Like
# `~mne.Evoked.plot_topomap` you can specify times for the scalp topographies
# or you can let the method choose times automatically, as is done here:
l_aud.plot_joint()
###############################################################################
# Global field power (GFP)
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# Global field power :footcite:`Lehmann1980,Lehmann1984,Murray2008` is,
# generally speaking, a measure of agreement of the signals picked up by all
# sensors across the entire scalp: if all sensors have the same value at a
# given time point, the GFP will be zero at that time point; if the signals
# differ, the GFP will be non-zero at that time point. GFP
# peaks may reflect "interesting" brain activity, warranting further
# investigation. Mathematically, the GFP is the population standard
# deviation across all sensors, calculated separately for every time point.
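#
# Written as a formula (the standard definition, with :math:`N` channels,
# channel signals :math:`x_i(t)`, and their mean :math:`\bar{x}(t)` at each
# time point):
#
# .. math::
#
#     \mathrm{GFP}(t) = \sqrt{\frac{1}{N} \sum_{i=1}^{N}
#                             \bigl(x_i(t) - \bar{x}(t)\bigr)^2}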
#
# You can plot the GFP using `evoked.plot(gfp=True) <mne.Evoked.plot>`. The GFP
# trace will be black if ``spatial_colors=True`` and green otherwise. The EEG
# reference does not affect the GFP:
# sphinx_gallery_thumbnail_number=11
for evk in (l_aud, l_vis):
evk.plot(gfp=True, spatial_colors=True, ylim=dict(eeg=[-12, 12]))
###############################################################################
# To plot the GFP by itself you can pass ``gfp='only'`` (this makes it easier
# to read off the GFP data values, because the scale is aligned):
l_aud.plot(gfp='only')
###############################################################################
# As stated above, the GFP is the population standard deviation of the signal
# across channels. To compute it manually, we can leverage the fact that
# `evoked.data <mne.Evoked.data>` is a :class:`NumPy array <numpy.ndarray>`,
# and verify by plotting it using matplotlib commands:
gfp = l_aud.data.std(axis=0, ddof=0)
# Reproducing the MNE-Python plot style seen above
fig, ax = plt.subplots()
ax.plot(l_aud.times, gfp * 1e6, color='lime')
ax.fill_between(l_aud.times, gfp * 1e6, color='lime', alpha=0.2)
ax.set(xlabel='Time (s)', ylabel='GFP (µV)', title='EEG')
###############################################################################
# Analyzing regions of interest (ROIs): averaging across channels
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Since our sample data is responses to left and right auditory and visual
# stimuli, we may want to compare left versus right ROIs. To average across
# channels in a region of interest, we first find the channel indices we want.
# Looking back at the 2D sensor plot above, we might choose the following for
# left and right ROIs:
left = ['eeg17', 'eeg18', 'eeg25', 'eeg26']
right = ['eeg23', 'eeg24', 'eeg34', 'eeg35']
left_ix = mne.pick_channels(l_aud.info['ch_names'], include=left)
right_ix = mne.pick_channels(l_aud.info['ch_names'], include=right)
###############################################################################
# Now we can create a new Evoked with 2 virtual channels (one for each ROI):
roi_dict = dict(left_ROI=left_ix, right_ROI=right_ix)
roi_evoked = mne.channels.combine_channels(l_aud, roi_dict, method='mean')
print(roi_evoked.info['ch_names'])
roi_evoked.plot()
###############################################################################
# Comparing conditions
# ^^^^^^^^^^^^^^^^^^^^
#
# If we wanted to compare our auditory and visual stimuli, a useful function is
# `mne.viz.plot_compare_evokeds`. By default this will combine all channels in
# each evoked object using global field power (or RMS for MEG channels); here
# instead we specify to combine by averaging, and restrict it to a subset of
# channels by passing ``picks``:
evokeds = dict(auditory=l_aud, visual=l_vis)
picks = [f'eeg{n}' for n in range(10, 15)]
mne.viz.plot_compare_evokeds(evokeds, picks=picks, combine='mean')
###############################################################################
# We can also easily get confidence intervals by treating each epoch as a
# separate observation using the `~mne.Epochs.iter_evoked` method. A confidence
# interval across subjects could also be obtained, by passing a list of
# `~mne.Evoked` objects (one per subject) to the
# `~mne.viz.plot_compare_evokeds` function.
evokeds = dict(auditory=list(epochs['auditory/left'].iter_evoked()),
visual=list(epochs['visual/left'].iter_evoked()))
mne.viz.plot_compare_evokeds(evokeds, combine='mean', picks=picks)
###############################################################################
# We can also compare conditions by subtracting one `~mne.Evoked` object from
# another using the `mne.combine_evoked` function (this function also allows
# pooling of epochs without subtraction).
aud_minus_vis = mne.combine_evoked([l_aud, l_vis], weights=[1, -1])
aud_minus_vis.plot_joint()
###############################################################################
# .. warning::
#
# The code above yields an **equal-weighted difference**. If you have
# imbalanced trial numbers, you might want to equalize the number of events
# per condition first by using `epochs.equalize_event_counts()
# <mne.Epochs.equalize_event_counts>` before averaging.
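#
# A hedged sketch of that equalization step (done on a copy so the epochs
# used elsewhere in this tutorial are unaffected):
#
# .. code-block:: python
#
#     epochs_eq = epochs.copy()
#     epochs_eq.equalize_event_counts(['auditory/left', 'visual/left'])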
#
#
# Grand averages
# ^^^^^^^^^^^^^^
#
# To compute grand averages across conditions (or subjects), you can pass a
# list of `~mne.Evoked` objects to `mne.grand_average`. The result is another
# `~mne.Evoked` object.
grand_average = mne.grand_average([l_aud, l_vis])
print(grand_average)
###############################################################################
# For combining *conditions* it is also possible to make use of :term:`HED`
# tags in the condition names when selecting which epochs to average. For
# example, we have the condition names:
list(event_dict)
###############################################################################
# We can select the auditory conditions (left and right together) by passing:
epochs['auditory'].average()
###############################################################################
# see :ref:`tut-section-subselect-epochs` for details.
#
# The tutorials :ref:`tut-epochs-class` and :ref:`tut-evoked-class` have many
# more details about working with the `~mne.Epochs` and `~mne.Evoked` classes.
#
# .. _ten_twenty: https://en.wikipedia.org/wiki/10%E2%80%9320_system_(EEG)
#
#
# References
# ----------
# .. footbibliography::
| 47.32021
| 79
| 0.645349
|
"""
.. _tut-erp:
EEG processing and Event Related Potentials (ERPs)
==================================================
This tutorial shows how to perform standard ERP analyses in MNE-Python. Most of
the material here is covered in other tutorials too, but for convenience the
functions and methods most useful for ERP analyses are collected here, with
links to other tutorials where more detailed information is given.
As usual we'll start by importing the modules we need and loading some example
data. Instead of parsing the events from the raw data's :term:`stim channel`
(like we do in :ref:`this tutorial <tut-events-vs-annotations>`), we'll load
the events from an external events file. Finally, to speed up computations so
our documentation server can handle them, we'll crop the raw data from ~4.5
minutes down to 90 seconds.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, preload=False)
sample_data_events_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw-eve.fif')
events = mne.read_events(sample_data_events_file)
raw.crop(tmax=90) # in seconds; happens in-place
# discard events >90 seconds (not strictly necessary: avoids some warnings)
events = events[events[:, 0] <= raw.last_samp]
###############################################################################
# The file that we loaded has already been partially processed: 3D sensor
# locations have been saved as part of the ``.fif`` file, the data have been
# low-pass filtered at 40 Hz, and a common average reference is set for the
# EEG channels, stored as a projector (see :ref:`section-avg-ref-proj` in the
# :ref:`tut-set-eeg-ref` tutorial for more info about when you may want to do
# this). We'll discuss how to do each of these below.
#
# Since this is a combined EEG+MEG dataset, let's start by restricting the data
# to just the EEG and EOG channels. This will cause the other projectors saved
# in the file (which apply only to magnetometer channels) to be removed. By
# looking at the measurement info we can see that we now have 59 EEG channels
# and 1 EOG channel.
raw.pick(['eeg', 'eog']).load_data()
raw.info
###############################################################################
# Channel names and types
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# In practice it's quite common to have some channels labelled as EEG that are
# actually EOG channels. `~mne.io.Raw` objects have a
# `~mne.io.Raw.set_channel_types` method that you can use to change a channel
# that is labeled as ``eeg`` into an ``eog`` type. You can also rename channels
# using the `~mne.io.Raw.rename_channels` method. Detailed examples of both of
# these methods can be found in the tutorial :ref:`tut-raw-class`. In this data
# the channel types are all correct already, so for now we'll just rename the
# channels to remove a space and a leading zero in the channel names, and
# convert to lowercase:
channel_renaming_dict = {name: name.replace(' 0', '').lower()
for name in raw.ch_names}
_ = raw.rename_channels(channel_renaming_dict) # happens in-place
###############################################################################
# Channel locations
# ^^^^^^^^^^^^^^^^^
#
# The tutorial :ref:`tut-sensor-locations` describes MNE-Python's handling of
# sensor positions in great detail. To briefly summarize: MNE-Python
# distinguishes :term:`montages <montage>` (which contain sensor positions in
# 3D: ``x``, ``y``, ``z``, in meters) from :term:`layouts <layout>` (which
# define 2D arrangements of sensors for plotting approximate overhead diagrams
# of sensor positions). Additionally, montages may specify *idealized* sensor
# positions (based on, e.g., an idealized spherical headshape model) or they
# may contain *realistic* sensor positions obtained by digitizing the 3D
# locations of the sensors when placed on the actual subject's head.
#
# This dataset has realistic digitized 3D sensor locations saved as part of the
# ``.fif`` file, so we can view the sensor locations in 2D or 3D using the
# `~mne.io.Raw.plot_sensors` method:
raw.plot_sensors(show_names=True)
fig = raw.plot_sensors('3d')
###############################################################################
# If you're working with a standard montage like the `10-20 <ten_twenty_>`_
# system, you can add sensor locations to the data like this:
# ``raw.set_montage('standard_1020')``. See :ref:`tut-sensor-locations` for
# info on what other standard montages are built-in to MNE-Python.
#
# If you have digitized realistic sensor locations, there are dedicated
# functions for loading those digitization files into MNE-Python; see
# :ref:`reading-dig-montages` for discussion and :ref:`dig-formats` for a list
# of supported formats. Once loaded, the digitized sensor locations can be
# added to the data by passing the loaded montage object to
# ``raw.set_montage()``.
#
#
# Setting the EEG reference
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# As mentioned above, this data already has an EEG common average reference
# added as a :term:`projector`. We can view the effect of this on the raw data
# by plotting with and without the projector applied:
for proj in (False, True):
fig = raw.plot(n_channels=5, proj=proj, scalings=dict(eeg=50e-6))
fig.subplots_adjust(top=0.9) # make room for title
ref = 'Average' if proj else 'No'
fig.suptitle(f'{ref} reference', size='xx-large', weight='bold')
###############################################################################
# The referencing scheme can be changed with the function
# `mne.set_eeg_reference` (which by default operates on a *copy* of the data)
# or the `raw.set_eeg_reference() <mne.io.Raw.set_eeg_reference>` method (which
# always modifies the data in-place). The tutorial :ref:`tut-set-eeg-ref` shows
# several examples of this.
#
#
# Filtering
# ^^^^^^^^^
#
# MNE-Python has extensive support for different ways of filtering data. For a
# general discussion of filter characteristics and MNE-Python defaults, see
# :ref:`disc-filtering`. For practical examples of how to apply filters to your
# data, see :ref:`tut-filter-resample`. Here, we'll apply a simple high-pass
# filter for illustration:
raw.filter(l_freq=0.1, h_freq=None)
###############################################################################
# Evoked responses: epoching and averaging
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The general process for extracting evoked responses from continuous data is
# to use the `~mne.Epochs` constructor, and then average the resulting epochs
# to create an `~mne.Evoked` object. In MNE-Python, events are represented as
# a :class:`NumPy array <numpy.ndarray>` of sample numbers and integer event
# codes. The event codes are stored in the last column of the events array:
np.unique(events[:, -1])
###############################################################################
# The :ref:`tut-event-arrays` tutorial discusses event arrays in more detail.
# Integer event codes are mapped to more descriptive text using a Python
# :class:`dictionary <dict>` usually called ``event_id``. This mapping is
# determined by your experiment code (i.e., it reflects which event codes you
# chose to use to represent different experimental events or conditions). The
# :ref:`sample-dataset` data has the following mapping:
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'face': 5, 'buttonpress': 32}
###############################################################################
# Now we can extract epochs from the continuous data. An interactive plot
# allows you to click on epochs to mark them as "bad" and drop them from the
# analysis (it is not interactive on the documentation website, but will be
# when you run `epochs.plot() <mne.Epochs.plot>` in a Python console).
epochs = mne.Epochs(raw, events, event_id=event_dict, tmin=-0.3, tmax=0.7,
preload=True)
fig = epochs.plot()
###############################################################################
# It is also possible to automatically drop epochs, when first creating them or
# later on, by providing maximum peak-to-peak signal value thresholds (pass to
# the `~mne.Epochs` constructor as the ``reject`` parameter; see
# :ref:`tut-reject-epochs-section` for details). You can also do this after
# the epochs are already created, using the `~mne.Epochs.drop_bad` method:
reject_criteria = dict(eeg=100e-6, # 100 µV
eog=200e-6) # 200 µV
_ = epochs.drop_bad(reject=reject_criteria)
###############################################################################
# Next we generate a barplot of which channels contributed most to epochs
# getting rejected. If one channel is responsible for lots of epoch rejections,
# it may be worthwhile to mark that channel as "bad" in the `~mne.io.Raw`
# object and then re-run epoching (fewer channels w/ more good epochs may be
# preferable to keeping all channels but losing many epochs). See
# :ref:`tut-bad-channels` for more info.
epochs.plot_drop_log()
###############################################################################
# Another way in which epochs can be automatically dropped is if the
# `~mne.io.Raw` object they're extracted from contains :term:`annotations` that
# begin with either ``bad`` or ``edge`` ("edge" annotations are automatically
# inserted when concatenating two separate `~mne.io.Raw` objects together). See
# :ref:`tut-reject-data-spans` for more information about annotation-based
# epoch rejection.
#
# Now that we've dropped the bad epochs, let's look at our evoked responses for
# some conditions we care about. Here the `~mne.Epochs.average` method will
# create an `~mne.Evoked` object, which we can then plot. Notice that we
# select which condition we want to average using the square-bracket indexing
# (like a :class:`dictionary <dict>`); that returns a smaller epochs object
# containing just the epochs from that condition, to which we then apply the
# `~mne.Epochs.average` method:
l_aud = epochs['auditory/left'].average()
l_vis = epochs['visual/left'].average()
###############################################################################
# These `~mne.Evoked` objects have their own interactive plotting method
# (though again, it won't be interactive on the documentation website):
# click-dragging a span of time will generate a scalp field topography for that
# time span. Here we also demonstrate built-in color-coding the channel traces
# by location:
fig1 = l_aud.plot()
fig2 = l_vis.plot(spatial_colors=True)
###############################################################################
# Scalp topographies can also be obtained non-interactively with the
# `~mne.Evoked.plot_topomap` method. Here we display topomaps of the average
# field in 50 ms time windows centered at -200 ms, 100 ms, and 400 ms.
l_aud.plot_topomap(times=[-0.2, 0.1, 0.4], average=0.05)
###############################################################################
# Considerable customization of these plots is possible, see the docstring of
# `~mne.Evoked.plot_topomap` for details.
#
# There is also a built-in method for combining "butterfly" plots of the
# signals with scalp topographies, called `~mne.Evoked.plot_joint`. Like
# `~mne.Evoked.plot_topomap` you can specify times for the scalp topographies
# or you can let the method choose times automatically, as is done here:
l_aud.plot_joint()
###############################################################################
# Global field power (GFP)
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# Global field power :footcite:`Lehmann1980,Lehmann1984,Murray2008` is,
# generally speaking, a measure of agreement of the signals picked up by all
# sensors across the entire scalp: if all sensors have the same value at a
# given time point, the GFP will be zero at that time point; if the signals
# differ, the GFP will be non-zero at that time point. GFP
# peaks may reflect "interesting" brain activity, warranting further
# investigation. Mathematically, the GFP is the population standard
# deviation across all sensors, calculated separately for every time point.
#
# You can plot the GFP using `evoked.plot(gfp=True) <mne.Evoked.plot>`. The GFP
# trace will be black if ``spatial_colors=True`` and green otherwise. The EEG
# reference does not affect the GFP:
# sphinx_gallery_thumbnail_number=11
for evk in (l_aud, l_vis):
evk.plot(gfp=True, spatial_colors=True, ylim=dict(eeg=[-12, 12]))
###############################################################################
# To plot the GFP by itself you can pass ``gfp='only'`` (this makes it easier
# to read off the GFP data values, because the scale is aligned):
l_aud.plot(gfp='only')
###############################################################################
# As stated above, the GFP is the population standard deviation of the signal
# across channels. To compute it manually, we can leverage the fact that
# `evoked.data <mne.Evoked.data>` is a :class:`NumPy array <numpy.ndarray>`,
# and verify by plotting it using matplotlib commands:
gfp = l_aud.data.std(axis=0, ddof=0)
# Reproducing the MNE-Python plot style seen above
fig, ax = plt.subplots()
ax.plot(l_aud.times, gfp * 1e6, color='lime')
ax.fill_between(l_aud.times, gfp * 1e6, color='lime', alpha=0.2)
ax.set(xlabel='Time (s)', ylabel='GFP (µV)', title='EEG')
###############################################################################
# Analyzing regions of interest (ROIs): averaging across channels
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Since our sample data is responses to left and right auditory and visual
# stimuli, we may want to compare left versus right ROIs. To average across
# channels in a region of interest, we first find the channel indices we want.
# Looking back at the 2D sensor plot above, we might choose the following for
# left and right ROIs:
left = ['eeg17', 'eeg18', 'eeg25', 'eeg26']
right = ['eeg23', 'eeg24', 'eeg34', 'eeg35']
left_ix = mne.pick_channels(l_aud.info['ch_names'], include=left)
right_ix = mne.pick_channels(l_aud.info['ch_names'], include=right)
###############################################################################
# Now we can create a new Evoked with 2 virtual channels (one for each ROI):
roi_dict = dict(left_ROI=left_ix, right_ROI=right_ix)
roi_evoked = mne.channels.combine_channels(l_aud, roi_dict, method='mean')
print(roi_evoked.info['ch_names'])
roi_evoked.plot()
###############################################################################
# Comparing conditions
# ^^^^^^^^^^^^^^^^^^^^
#
# If we wanted to compare our auditory and visual stimuli, a useful function is
# `mne.viz.plot_compare_evokeds`. By default this will combine all channels in
# each evoked object using global field power (or RMS for MEG channels); here
# instead we specify to combine by averaging, and restrict it to a subset of
# channels by passing ``picks``:
evokeds = dict(auditory=l_aud, visual=l_vis)
picks = [f'eeg{n}' for n in range(10, 15)]
mne.viz.plot_compare_evokeds(evokeds, picks=picks, combine='mean')
###############################################################################
# We can also easily get confidence intervals by treating each epoch as a
# separate observation using the `~mne.Epochs.iter_evoked` method. A confidence
# interval across subjects could also be obtained, by passing a list of
# `~mne.Evoked` objects (one per subject) to the
# `~mne.viz.plot_compare_evokeds` function.
evokeds = dict(auditory=list(epochs['auditory/left'].iter_evoked()),
visual=list(epochs['visual/left'].iter_evoked()))
mne.viz.plot_compare_evokeds(evokeds, combine='mean', picks=picks)
###############################################################################
# We can also compare conditions by subtracting one `~mne.Evoked` object from
# another using the `mne.combine_evoked` function (this function also allows
# pooling of epochs without subtraction).
aud_minus_vis = mne.combine_evoked([l_aud, l_vis], weights=[1, -1])
aud_minus_vis.plot_joint()
###############################################################################
# .. warning::
#
# The code above yields an **equal-weighted difference**. If you have
# imbalanced trial numbers, you might want to equalize the number of events
# per condition first by using `epochs.equalize_event_counts()
# <mne.Epochs.equalize_event_counts>` before averaging.
#
#
# Grand averages
# ^^^^^^^^^^^^^^
#
# To compute grand averages across conditions (or subjects), you can pass a
# list of `~mne.Evoked` objects to `mne.grand_average`. The result is another
# `~mne.Evoked` object.
grand_average = mne.grand_average([l_aud, l_vis])
print(grand_average)
###############################################################################
# For combining *conditions* it is also possible to make use of :term:`HED`
# tags in the condition names when selecting which epochs to average. For
# example, we have the condition names:
list(event_dict)
###############################################################################
# We can select the auditory conditions (left and right together) by passing:
epochs['auditory'].average()
###############################################################################
# see :ref:`tut-section-subselect-epochs` for details.
#
# The tutorials :ref:`tut-epochs-class` and :ref:`tut-evoked-class` have many
# more details about working with the `~mne.Epochs` and `~mne.Evoked` classes.
#
# .. _ten_twenty: https://en.wikipedia.org/wiki/10%E2%80%9320_system_(EEG)
#
#
# References
# ----------
# .. footbibliography::
| 0
| 0
| 0
|
e3bdde5ca15cdab92e7ceb01b1e2eb574d075a1d
| 164
|
py
|
Python
|
problemsets/Codeforces/Python/A1095.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
problemsets/Codeforces/Python/A1095.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
problemsets/Codeforces/Python/A1095.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
n,k=int(input()),0
s,x,t='',1,input()
# Take t[0], t[1], t[3], t[6], ... (the gap grows by 1 each step) until the index reaches n
while k<n: s+=t[k];k+=x;x+=1
print(s)
| 16.4
| 38
| 0.579268
|
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
n,k=int(input()),0
s,x,t='',1,input()
while k<n: s+=t[k];k+=x;x+=1
print(s)
| 0
| 0
| 0
|
13523c859982e146b74d5680ca538f2bf774a03e
| 2,746
|
py
|
Python
|
axonius_api_client/cli/grp_system/grp_central_core/cmd_restore_from_aws_s3.py
|
rwils83/axonius_api_client
|
1990ed4d1287482a4648dc51edcaa5eb08255f5b
|
[
"MIT"
] | null | null | null |
axonius_api_client/cli/grp_system/grp_central_core/cmd_restore_from_aws_s3.py
|
rwils83/axonius_api_client
|
1990ed4d1287482a4648dc51edcaa5eb08255f5b
|
[
"MIT"
] | 3
|
2021-05-18T14:28:30.000Z
|
2021-09-06T20:01:56.000Z
|
axonius_api_client/cli/grp_system/grp_central_core/cmd_restore_from_aws_s3.py
|
rwils83/axonius_api_client
|
1990ed4d1287482a4648dc51edcaa5eb08255f5b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Command line interface for Axonius API Client."""
from ....tools import json_dump
from ...context import CONTEXT_SETTINGS, click
from ...options import AUTH, add_options
KEY_NAME = click.option(
"--key-name",
"-kn",
"key_name",
help="Key name of file object in [bucket_name] to restore",
required=True,
show_envvar=True,
show_default=True,
)
BUCKET_NAME = click.option(
"--bucket-name",
"-bn",
"bucket_name",
default=None,
help="Name of bucket in S3 to get [key_name] from",
show_envvar=True,
show_default=True,
)
ACCESS_KEY_ID = click.option(
"--access-key-id",
"-aki",
"access_key_id",
default=None,
help="AWS Access Key Id to use to access [bucket_name]",
show_envvar=True,
show_default=True,
)
SECRET_ACCESS_KEY = click.option(
"--secret-access-key",
"-sak",
"secret_access_key",
default=None,
help="AWS Secret Access Key to use to access [bucket_name]",
show_envvar=True,
show_default=True,
)
PRESHARED_KEY = click.option(
"--preshared-key",
"-pk",
"preshared_key",
default=None,
help="Password to use to decrypt [key_name]",
show_envvar=True,
show_default=True,
)
ALLOW_RE_RESTORE = click.option(
"--allow-re-restore/--no-allow-re-restore",
"-arr/-narr",
"allow_re_restore",
help="Restore [key_name] even if it has already been restored",
is_flag=True,
default=False,
show_envvar=True,
show_default=True,
)
DELETE_BACKUPS = click.option(
"--delete-backups/--no-delete-backups",
"-db/-ndb",
"delete_backups",
help="Delete [key_name] from [bucket_name] after restore has finished",
is_flag=True,
default=None,
show_envvar=True,
show_default=True,
)
OPTIONS = [
*AUTH,
ACCESS_KEY_ID,
SECRET_ACCESS_KEY,
PRESHARED_KEY,
ALLOW_RE_RESTORE,
DELETE_BACKUPS,
BUCKET_NAME,
KEY_NAME,
]
EPILOG = """
If values for these options are not provided, they will default to
the settings under Global Settings > Amazon S3 Settings:
\b
* bucket-name: Amazon S3 bucket name
* access-key-id: AWS Access Key Id
* secret-access-key: AWS Secret Access Key
* preshared-key: Backup encryption passphrase
"""
@click.command(
name="restore-from-aws-s3",
context_settings=CONTEXT_SETTINGS,
epilog=EPILOG,
)
@add_options(OPTIONS)
@click.pass_context
def cmd(ctx, url, key, secret, **kwargs):
"""Perform a manual restore of a backup in AWS S3."""
client = ctx.obj.start_client(url=url, key=key, secret=secret)
with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
data = client.system.central_core.restore_from_aws_s3(**kwargs)
click.secho(json_dump(data))
| 23.878261
| 75
| 0.671158
|
# -*- coding: utf-8 -*-
"""Command line interface for Axonius API Client."""
from ....tools import json_dump
from ...context import CONTEXT_SETTINGS, click
from ...options import AUTH, add_options
KEY_NAME = click.option(
"--key-name",
"-kn",
"key_name",
help="Key name of file object in [bucket_name] to restore",
required=True,
show_envvar=True,
show_default=True,
)
BUCKET_NAME = click.option(
"--bucket-name",
"-bn",
"bucket_name",
default=None,
help="Name of bucket in S3 to get [key_name] from",
show_envvar=True,
show_default=True,
)
ACCESS_KEY_ID = click.option(
"--access-key-id",
"-aki",
"access_key_id",
default=None,
help="AWS Access Key Id to use to access [bucket_name]",
show_envvar=True,
show_default=True,
)
SECRET_ACCESS_KEY = click.option(
"--secret-access-key",
"-sak",
"secret_access_key",
default=None,
help="AWS Secret Access Key to use to access [bucket_name]",
show_envvar=True,
show_default=True,
)
PRESHARED_KEY = click.option(
"--preshared-key",
"-pk",
"preshared_key",
default=None,
help="Password to use to decrypt [key_name]",
show_envvar=True,
show_default=True,
)
ALLOW_RE_RESTORE = click.option(
"--allow-re-restore/--no-allow-re-restore",
"-arr/-narr",
"allow_re_restore",
help="Restore [key_name] even if it has already been restored",
is_flag=True,
default=False,
show_envvar=True,
show_default=True,
)
DELETE_BACKUPS = click.option(
"--delete-backups/--no-delete-backups",
"-db/-ndb",
"delete_backups",
help="Delete [key_name] from [bucket_name] after restore has finished",
is_flag=True,
default=None,
show_envvar=True,
show_default=True,
)
OPTIONS = [
*AUTH,
ACCESS_KEY_ID,
SECRET_ACCESS_KEY,
PRESHARED_KEY,
ALLOW_RE_RESTORE,
DELETE_BACKUPS,
BUCKET_NAME,
KEY_NAME,
]
EPILOG = """
If values for these options are not provided, they will default to
the settings under Global Settings > Amazon S3 Settings:
\b
* bucket-name: Amazon S3 bucket name
* access-key-id: AWS Access Key Id
* secret-access-key: AWS Secret Access Key
* preshared-key: Backup encryption passphrase
"""
@click.command(
name="restore-from-aws-s3",
context_settings=CONTEXT_SETTINGS,
epilog=EPILOG,
)
@add_options(OPTIONS)
@click.pass_context
def cmd(ctx, url, key, secret, **kwargs):
"""Perform a manual restore of a backup in AWS S3."""
client = ctx.obj.start_client(url=url, key=key, secret=secret)
with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
data = client.system.central_core.restore_from_aws_s3(**kwargs)
click.secho(json_dump(data))
| 0
| 0
| 0
|
f20965bf4a26da4bfb598e4cc77e1c9347070578
| 496
|
py
|
Python
|
algoanim/sorts/yslow.py
|
Gaming32/Python-AlgoAnim
|
c6df06e263f52d57ca91471830ff8fa14f1d85db
|
[
"MIT"
] | null | null | null |
algoanim/sorts/yslow.py
|
Gaming32/Python-AlgoAnim
|
c6df06e263f52d57ca91471830ff8fa14f1d85db
|
[
"MIT"
] | null | null | null |
algoanim/sorts/yslow.py
|
Gaming32/Python-AlgoAnim
|
c6df06e263f52d57ca91471830ff8fa14f1d85db
|
[
"MIT"
] | null | null | null |
from algoanim.array import Array
from algoanim.sort import Sort
SORT_CLASS = YSlowSort
| 18.37037
| 40
| 0.461694
|
from algoanim.array import Array
from algoanim.sort import Sort
# Deliberately inefficient recursive sort: order the endpoints, then recurse
# twice over each overlapping half and once over the interior.
def yslow(A, l, r):
if r - l > 0:
if A[r] < A[l]:
A[l], A[r] = A[r], A[l]
m = (r - l + 1) // 2
for _ in [0, 1]:
A = yslow(A, l, r-m)
A = yslow(A, l + m, r)
A = yslow(A, l + 1, r - 1)
return A
class YSlowSort(Sort):
name = 'YSlow Sort'
def run(self, array: Array) -> None:
yslow(array, 0, len(array) - 1)
SORT_CLASS = YSlowSort
| 307
| 52
| 46
|
5e317ecf395a2349de6f2351a075fe488ace4261
| 22,311
|
py
|
Python
|
tests/test_parser.py
|
shaljam/streaming-form-data
|
65f764fe521c38db681c3ef384d6b998496df79b
|
[
"MIT"
] | null | null | null |
tests/test_parser.py
|
shaljam/streaming-form-data
|
65f764fe521c38db681c3ef384d6b998496df79b
|
[
"MIT"
] | null | null | null |
tests/test_parser.py
|
shaljam/streaming-form-data
|
65f764fe521c38db681c3ef384d6b998496df79b
|
[
"MIT"
] | 1
|
2020-10-13T03:21:46.000Z
|
2020-10-13T03:21:46.000Z
|
from contextlib import contextmanager
from io import BytesIO
import hashlib
from numpy import random
import pytest
from requests_toolbelt import MultipartEncoder
from streaming_form_data import ParseFailedException, StreamingFormDataParser
from streaming_form_data.targets import (
BaseTarget,
FileTarget,
SHA256Target,
ValueTarget,
)
from streaming_form_data.validators import MaxSizeValidator, ValidationError
@contextmanager
# The following tests have been added from tornado's
# MultipartFormDataTestCase
# https://github.com/tornadoweb/tornado/blob/master/tornado/test/httputil_test.py
| 23.836538
| 81
| 0.640626
|
from contextlib import contextmanager
from io import BytesIO
import hashlib
from numpy import random
import pytest
from requests_toolbelt import MultipartEncoder
from streaming_form_data import ParseFailedException, StreamingFormDataParser
from streaming_form_data.targets import (
BaseTarget,
FileTarget,
SHA256Target,
ValueTarget,
)
from streaming_form_data.validators import MaxSizeValidator, ValidationError
@contextmanager
def local_seed(seed):
state = random.get_state()
try:
random.seed(seed)
yield
finally:
random.set_state(state)
def get_random_bytes(size, seed):
with local_seed(seed):
return random.bytes(size)
def open_dataset(filename):
if filename == 'file.txt':
filedata = b'this is a txt file\r\n' * 10
elif filename == 'image-600x400.png':
filedata = get_random_bytes(1780, 600)
elif filename == 'image-2560x1600.png':
filedata = get_random_bytes(11742, 2560)
elif filename == 'image-500k.png':
filedata = get_random_bytes(437814, 500)
elif filename == 'image-high-res.jpg':
filedata = get_random_bytes(9450866, 945)
elif filename == 'empty.html':
filedata = b''
elif filename == 'hyphen-hyphen.txt':
filedata = b'--'
elif filename == 'LF.txt':
filedata = b'\n'
elif filename == 'CRLF.txt':
filedata = b'\r\n'
elif filename == '1M.dat':
filedata = get_random_bytes(1024 * 1024, 1024)
elif filename == '1M-1.dat':
filedata = get_random_bytes(1024 * 1024 - 1, 1024 - 1)
elif filename == '1M+1.dat':
filedata = get_random_bytes(1024 * 1024 + 1, 1024 + 1)
else:
raise Exception('Unknown file name: ' + filename)
return BytesIO(filedata)
def encoded_dataset(filename):
with open_dataset(filename) as dataset_:
fields = {filename: (filename, dataset_, 'text/plain')}
encoder = MultipartEncoder(fields=fields)
return (encoder.content_type, encoder.to_string())
def test_smoke():
encoder = MultipartEncoder(fields={'name': 'hello'})
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.data_received(encoder.to_string())
def test_basic_single():
target = ValueTarget()
encoder = MultipartEncoder(fields={'value': 'hello world'})
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register('value', target)
parser.data_received(encoder.to_string())
assert target.value == b'hello world'
assert target._started
assert target._finished
def test_case_insensitive_content_type():
content_type_header = 'Content-Type'
for header_key in (
content_type_header,
content_type_header.lower(),
content_type_header.upper(),
'cOnTeNt-tYPe',
):
target = ValueTarget()
encoder = MultipartEncoder(fields={'value': 'hello world'})
parser = StreamingFormDataParser(
headers={header_key: encoder.content_type}
)
parser.register('value', target)
parser.data_received(encoder.to_string())
assert target.value == b'hello world'
def test_missing_content_type():
with pytest.raises(ParseFailedException):
StreamingFormDataParser({})
with pytest.raises(ParseFailedException):
StreamingFormDataParser({'key': 'value'})
def test_incorrect_content_type():
for value in (
'multipart/mixed; boundary=1234',
'multipart/form-data',
'multipart/form-data; delimiter=1234',
):
with pytest.raises(ParseFailedException):
StreamingFormDataParser({'Content-Type': value})
def test_basic_multiple():
first = ValueTarget()
second = ValueTarget()
third = ValueTarget()
encoder = MultipartEncoder(
fields={'first': 'foo', 'second': 'bar', 'third': 'baz'}
)
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register('first', first)
parser.register('second', second)
parser.register('third', third)
parser.data_received(encoder.to_string())
assert first.value == b'foo'
assert second.value == b'bar'
assert third.value == b'baz'
def test_chunked_single():
expected_value = 'hello world'
target = ValueTarget()
encoder = MultipartEncoder(fields={'value': expected_value})
body = encoder.to_string()
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register('value', target)
index = body.index(b'world')
parser.data_received(body[:index])
parser.data_received(body[index:])
assert target.value == expected_value.encode('utf-8')
def test_chunked_multiple():
expected_first_value = 'foo' * 1000
expected_second_value = 'bar' * 1000
expected_third_value = 'baz' * 1000
first = ValueTarget()
second = ValueTarget()
third = ValueTarget()
encoder = MultipartEncoder(
fields={
'first': expected_first_value,
'second': expected_second_value,
'third': expected_third_value,
}
)
body = encoder.to_string()
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register('first', first)
parser.register('second', second)
parser.register('third', third)
chunks = []
size = 100
while len(body):
chunks.append(body[:size])
body = body[size:]
for chunk in chunks:
parser.data_received(chunk)
assert first.value == expected_first_value.encode('utf-8')
assert second.value == expected_second_value.encode('utf-8')
assert third.value == expected_third_value.encode('utf-8')
def test_break_chunk_at_boundary():
expected_first_value = 'hello' * 500
expected_second_value = 'hello' * 500
first = ValueTarget()
second = ValueTarget()
encoder = MultipartEncoder(
fields={'first': 'hello' * 500, 'second': 'hello' * 500}
)
body = encoder.to_string()
boundary = encoder.boundary.encode('utf-8')
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register('first', first)
parser.register('second', second)
index = body[50:].index(boundary) + 5
parser.data_received(body[:index])
parser.data_received(body[index:])
assert first.value == expected_first_value.encode('utf-8')
assert second.value == expected_second_value.encode('utf-8')
def test_file_content_single():
filenames = (
'file.txt',
'image-600x400.png',
'image-2560x1600.png',
'empty.html',
'hyphen-hyphen.txt',
'LF.txt',
'CRLF.txt',
'1M.dat',
'1M-1.dat',
'1M+1.dat',
)
for filename in filenames:
with open_dataset(filename) as dataset_:
expected_value = dataset_.read()
content_type, body = encoded_dataset(filename)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': content_type}
)
parser.register(filename, target)
parser.data_received(body)
assert target.value == expected_value
def test_file_content_multiple():
with open_dataset('file.txt') as dataset_:
expected_value = dataset_.read()
content_type, body = encoded_dataset('file.txt')
txt = ValueTarget()
parser = StreamingFormDataParser(headers={'Content-Type': content_type})
parser.register('file.txt', txt)
size = 50
chunks = []
while body:
chunks.append(body[:size])
body = body[size:]
for chunk in chunks:
parser.data_received(chunk)
assert txt.value == expected_value
def test_file_content_varying_chunk_size():
with open_dataset('file.txt') as dataset_:
expected_value = dataset_.read()
content_type, body = encoded_dataset('file.txt')
for index in range(len(body)):
txt = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': content_type}
)
parser.register('file.txt', txt)
parser.data_received(body[:index])
parser.data_received(body[index:])
assert txt.value == expected_value
def test_mixed_content_varying_chunk_size():
with open_dataset('file.txt') as dataset_:
expected_value = dataset_.read()
with open_dataset('file.txt') as dataset_:
fields = {
'name': 'hello world',
'age': '10',
'cv.txt': ('file.txt', dataset_, 'text/plain'),
}
encoder = MultipartEncoder(fields=fields)
body = encoder.to_string()
content_type = encoder.content_type
for index in range(len(body)):
name = ValueTarget()
age = ValueTarget()
cv = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': content_type}
)
parser.register('name', name)
parser.register('age', age)
parser.register('cv.txt', cv)
parser.data_received(body[:index])
parser.data_received(body[index:])
assert name.value == b'hello world'
assert age.value == b'10'
assert cv.value == expected_value
def test_parameter_contains_crlf():
target = ValueTarget()
encoder = MultipartEncoder(fields={'value': 'hello\r\nworld'})
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register('value', target)
parser.data_received(encoder.to_string())
assert target.value == b'hello\r\nworld'
def test_parameter_ends_with_crlf():
target = ValueTarget()
encoder = MultipartEncoder(fields={'value': 'hello\r\n'})
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register('value', target)
parser.data_received(encoder.to_string())
assert target.value == b'hello\r\n'
def test_parameter_starts_with_crlf():
target = ValueTarget()
encoder = MultipartEncoder(fields={'value': '\r\nworld'})
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register('value', target)
parser.data_received(encoder.to_string())
assert target.value == b'\r\nworld'
def test_parameter_contains_part_of_delimiter():
data = b'''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--123
--1234--'''.replace(
b'\n', b'\r\n'
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
parser.data_received(data)
assert target.multipart_filename == 'ab.txt'
assert target.value == b'Foo\r\n--123'
assert target._started
assert target._finished
def test_multiple_files():
txt_filename = 'file.txt'
png_filename = 'image-600x400.png'
with open_dataset(txt_filename) as dataset_:
expected_txt = dataset_.read()
with open_dataset(png_filename) as dataset_:
expected_png = dataset_.read()
txt_target = ValueTarget()
png_target = ValueTarget()
with open_dataset(txt_filename) as txt_file, open_dataset(
png_filename
) as png_file:
encoder = MultipartEncoder(
fields={
txt_filename: (txt_filename, txt_file, 'application/plain'),
png_filename: (png_filename, png_file, 'image/png'),
}
)
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register(txt_filename, txt_target)
parser.register(png_filename, png_target)
parser.data_received(encoder.to_string())
assert txt_target.value == expected_txt
assert png_target.value == expected_png
def test_large_file():
for filename in [
'image-500k.png',
'image-2560x1600.png',
'image-600x400.png',
'image-high-res.jpg',
]:
with open_dataset(filename) as dataset_:
expected_value = dataset_.read()
content_type, body = encoded_dataset(filename)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': content_type}
)
parser.register(filename, target)
parser.data_received(body)
assert target.value == expected_value
# The following tests have been added from tornado's
# MultipartFormDataTestCase
# https://github.com/tornadoweb/tornado/blob/master/tornado/test/httputil_test.py
def test_file_upload():
data = b'''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--'''.replace(
b'\n', b'\r\n'
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
parser.data_received(data)
assert target.multipart_filename == 'ab.txt'
assert target.value == b'Foo'
assert target._started
assert target._finished
def test_unquoted_names():
data = b'''\
--1234
Content-Disposition: form-data; name=files; filename=ab.txt
Foo
--1234--'''.replace(
b'\n', b'\r\n'
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
parser.data_received(data)
assert target.value == b'Foo'
def test_special_filenames():
filenames = [
'a;b.txt',
'a"b.txt',
'a";b.txt',
'a;"b.txt',
'a";";.txt',
'a\\"b.txt',
'a\\b.txt',
]
for filename in filenames:
data = (
'''\
--1234
Content-Disposition: form-data; name=files; filename={}
Foo
--1234--'''.format(
filename
)
.replace('\n', '\r\n')
.encode('utf-8')
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
parser.data_received(data)
assert target.value == b'Foo'
def test_boundary_starts_and_ends_with_quotes():
data = b'''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--'''.replace(
b'\n', b'\r\n'
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary="1234"'}
)
parser.register('files', target)
parser.data_received(data)
assert target.multipart_filename == 'ab.txt'
assert target.value == b'Foo'
def test_missing_headers():
data = '''\
--1234
Foo
--1234--'''.replace(
'\n', '\r\n'
).encode(
'utf-8'
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
parser.data_received(data)
assert target.value == b''
def test_invalid_content_disposition():
data = b'''\
--1234
Content-Disposition: invalid; name="files"; filename="ab.txt"
Foo
--1234--'''.replace(
b'\n', b'\r\n'
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
with pytest.raises(ParseFailedException):
parser.data_received(data)
assert target.value == b''
def test_without_name_parameter():
data = b'''\
--1234
Content-Disposition: form-data; filename="ab.txt"
Foo
--1234--'''.replace(
b'\n', b'\r\n'
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
parser.data_received(data)
assert target.value == b''
def test_data_after_final_boundary():
data = b'''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--
'''.replace(
b'\n', b'\r\n'
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
parser.data_received(data)
assert target.value == b'Foo'
def test_register_after_data_received():
encoder = MultipartEncoder(fields={'name': 'hello'})
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.data_received(encoder.to_string())
with pytest.raises(ParseFailedException):
parser.register('name', ValueTarget())
def test_missing_filename_directive():
data = b'''\
--1234
Content-Disposition: form-data; name="files"
Foo
--1234--
'''.replace(
b'\n', b'\r\n'
)
target = ValueTarget()
assert not target.multipart_filename
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
parser.data_received(data)
assert target.value == b'Foo'
assert not target.multipart_filename
def test_filename_passed_to_target():
filename = 'file.txt'
content_type, body = encoded_dataset(filename)
target = ValueTarget()
assert not target.multipart_filename
parser = StreamingFormDataParser(headers={'Content-Type': content_type})
parser.register(filename, target)
parser.data_received(body)
assert target.multipart_filename == filename
def test_target_raises_exception():
filename = 'file.txt'
content_type, body = encoded_dataset(filename)
class BadTarget(BaseTarget):
def data_received(self, data):
raise ValueError()
target = BadTarget()
parser = StreamingFormDataParser(headers={'Content-Type': content_type})
parser.register(filename, target)
with pytest.raises(ValueError):
parser.data_received(body)
def test_target_exceeds_max_size():
data = b'''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--'''.replace(
b'\n', b'\r\n'
)
target = ValueTarget(validator=MaxSizeValidator(1))
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
with pytest.raises(ValidationError):
parser.data_received(data)
assert target._started
assert target._finished
def test_file_target_exceeds_max_size():
data = b'''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--'''.replace(
b'\n', b'\r\n'
)
target = FileTarget('/tmp/file.txt', validator=MaxSizeValidator(1))
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
with pytest.raises(ValidationError):
parser.data_received(data)
assert target._started
assert target._finished
def test_content_type_passed_to_target():
filename = 'image-600x400.png'
with open_dataset(filename) as dataset_:
expected_data = dataset_.read()
target = ValueTarget()
with open_dataset(filename) as file_:
encoder = MultipartEncoder(
fields={filename: (filename, file_, 'image/png')}
)
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register(filename, target)
parser.data_received(encoder.to_string())
assert target.value == expected_data
assert target.multipart_content_type == 'image/png'
def test_multiple_targets():
filename = 'image-600x400.png'
with open_dataset(filename) as dataset_:
expected_data = dataset_.read()
value_target = ValueTarget()
sha256_target = SHA256Target()
with open_dataset(filename) as file_:
encoder = MultipartEncoder(
fields={filename: (filename, file_, 'image/png')}
)
parser = StreamingFormDataParser(
headers={'Content-Type': encoder.content_type}
)
parser.register(filename, value_target)
parser.register(filename, sha256_target)
assert not value_target.value
assert sha256_target.value == hashlib.sha256(b'').hexdigest()
parser.data_received(encoder.to_string())
assert value_target.value == expected_data
assert sha256_target.value == hashlib.sha256(expected_data).hexdigest()
def test_extra_headers():
# example from https://tools.ietf.org/html/rfc2388
data = b'''\
--1234
Content-Disposition: form-data; name="files"
Content-Type: text/plain;charset=windows-1250
Content-Transfer-Encoding: quoted-printable
Joe owes =80100.
--1234--'''.replace(
b'\n', b'\r\n'
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
parser.data_received(data)
assert target.value == b'Joe owes =80100.'
def test_case_insensitive_content_disposition_header():
content_disposition_header = 'Content-Disposition'
for header in (
content_disposition_header,
content_disposition_header.lower(),
content_disposition_header.upper(),
):
data = b'''\
--1234
{header}: form-data; name="files"; filename="ab.txt"
Foo
--1234--'''.replace(
b'\n', b'\r\n'
).replace(
b'{header}', header.encode('utf-8')
)
target = ValueTarget()
parser = StreamingFormDataParser(
headers={'Content-Type': 'multipart/form-data; boundary=1234'}
)
parser.register('files', target)
parser.data_received(data)
assert target.value == b'Foo'
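# --- Illustrative usage sketch (not part of the original test module). ---
# It only uses the API already exercised by the tests above
# (StreamingFormDataParser, ValueTarget, register(), data_received());
# the field name and boundary value are hypothetical.
def example_parse_upload(chunks):
    target = ValueTarget()
    parser = StreamingFormDataParser(
        headers={'Content-Type': 'multipart/form-data; boundary=1234'}
    )
    parser.register('files', target)
    # Feed the multipart body to the parser chunk by chunk.
    for chunk in chunks:
        parser.data_received(chunk)
    return target.value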
| 20,716
| 0
| 942
|
6a4411330754e041753c15c87e2436746005b50d
| 1,998
|
py
|
Python
|
oshi/lme.py
|
netgroup/Dreamer-Management-Scripts
|
11fe627ff2fb601f0b1c41c42ae4a6f8a9f5cb21
|
[
"Apache-2.0"
] | null | null | null |
oshi/lme.py
|
netgroup/Dreamer-Management-Scripts
|
11fe627ff2fb601f0b1c41c42ae4a6f8a9f5cb21
|
[
"Apache-2.0"
] | null | null | null |
oshi/lme.py
|
netgroup/Dreamer-Management-Scripts
|
11fe627ff2fb601f0b1c41c42ae4a6f8a9f5cb21
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import re
import sys
import os
from subprocess import Popen,PIPE
if __name__ == '__main__':
push_rules()
| 25.615385
| 108
| 0.683183
|
#!/usr/bin/python
import re
import sys
import os
from subprocess import Popen,PIPE
def get_if_index(in_if_name):
output = Popen(['ovs-vsctl find Interface name=%s' % in_if_name], shell=True, stdout=PIPE).communicate()[0]
if output != None and output != "" :
return re.search( r'ofport(.*): (\d*)', output).group(2)
else:
print "Error Port Not Available"
sys.exit(-2)
def add_flow(rule):
output = Popen([rule], shell=True, stdout=PIPE).communicate()[0]
def translate_rule(rule):
# ports reg exp
out_port = re.compile('output:(.*?),')
in_port = re.compile('in_port=(.*?),')
out_port_end = ","
#test if rule has in_port
if 'in_port' in rule and not re.search(in_port, rule):
print "Error Wrong In Port"
sys.exit(-2)
elif 'in_port' in rule and re.search(in_port, rule):
in_if_name = in_port.search(rule).group(1)
in_if_index = get_if_index(in_if_name)
rule = re.sub(in_port, "in_port="+in_if_index+",", rule)
#test if rule has output_port
if 'output' in rule and not re.search(out_port, rule):
#print "output: not followed by comma, retry.."
out_port = re.compile('output:(.*?)\"(\Z)')
out_port_end = "\""
if not re.search(out_port, rule):
print "Error Wrong Output Port"
sys.exit(-2)
out_if_name = out_port.search(rule).group(1)
out_if_index = get_if_index(out_if_name)
rule = re.sub(out_port, "output:"+out_if_index+out_port_end, rule)
elif 'output' in rule and re.search(out_port, rule):
out_if_name = out_port.search(rule).group(1)
out_if_index = get_if_index(out_if_name)
rule = re.sub(out_port, "output:"+out_if_index+out_port_end, rule)
return rule
def push_rules():
path = "lmerules.sh"
if os.path.exists(path) == False:
print "Error Rules File Not Exists"
sys.exit(-2)
filesh = open(path, 'r')
lines = filesh.readlines()
for line in lines:
if "start" not in line and "end" not in line:
rule = line[:-1]
rule = translate_rule(rule)
add_flow(rule)
if __name__ == '__main__':
push_rules()
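# --- Illustrative sketch (not part of the original script). ---
# translate_rule() replaces interface names with the ofport numbers that
# get_if_index() reads via ovs-vsctl; the rule text and port numbers below
# are hypothetical:
#
#   before: ovs-ofctl add-flow br0 "in_port=eth1,actions=output:eth2"
#   after:  ovs-ofctl add-flow br0 "in_port=1,actions=output:2"
#
# push_rules() applies this to every non start/end line of lmerules.sh.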
| 1,764
| 0
| 92
|
f2d19c9aaacf2f6a53e0a55c017ae4dcf96f3238
| 3,034
|
py
|
Python
|
taxon/backends/memory.py
|
jdp/taxon
|
822ac9c92d3aa57484e328c99a5af8a8002991d6
|
[
"MIT"
] | 7
|
2015-01-10T07:25:24.000Z
|
2018-05-04T17:47:42.000Z
|
taxon/backends/memory.py
|
jdp/taxon
|
822ac9c92d3aa57484e328c99a5af8a8002991d6
|
[
"MIT"
] | null | null | null |
taxon/backends/memory.py
|
jdp/taxon
|
822ac9c92d3aa57484e328c99a5af8a8002991d6
|
[
"MIT"
] | null | null | null |
import operator
try:
from collections import Counter
except ImportError:
from ._counter import Counter
from .backend import Backend
from ..query import Query
| 31.604167
| 86
| 0.542518
|
import operator
try:
from collections import Counter
except ImportError:
from ._counter import Counter
from .backend import Backend
from ..query import Query
class MemoryBackend(Backend):
def __init__(self):
self.empty()
def tag_items(self, tag, *items):
if tag not in self.tags:
self.tags[tag] = 0
self.tagged[tag] = set()
new_items = set(items) - self.tagged[tag]
if len(new_items) == 0:
return []
self.tags[tag] += len(new_items)
self.tagged[tag].update(set(new_items))
self.items += Counter(new_items)
return list(new_items)
def untag_items(self, tag, *items):
old_items = set(items) & self.tagged[tag]
if len(old_items) == 0:
return []
self.tags[tag] -= len(old_items)
self.tagged[tag] -= set(old_items)
self.items -= Counter(old_items)
return list(old_items)
def remove_items(self, *items):
removed = []
for item in set(items):
if item not in self.items:
continue
for tag in self.all_tags():
if item not in self.tagged[tag]:
continue
self.tagged[tag] -= set([item])
self.tags[tag] -= 1
self.items[item] -= 1
removed.append(item)
return removed
def all_tags(self):
return [tag[0] for tag in self.tags.items() if tag[1] > 0]
def all_items(self):
return [item[0] for item in self.items.items() if item[1] > 0]
def query(self, q):
if isinstance(q, Query):
fn, args = q.freeze()
return self._raw_query(fn, args)
elif isinstance(q, tuple):
fn, args = q
return self._raw_query(fn, args)
else:
raise ValueError
def _raw_query(self, fn, args):
if fn == 'tag':
if len(args) == 1:
return None, self.tagged.get(args[0], [])
else:
groups = [self.tagged.get(tag, []) for tag in args]
return None, reduce(operator.add, groups)
elif fn == 'and':
results = [set(items) for _, items in [self._raw_query(*a) for a in args]]
return None, reduce(operator.__and__, results)
elif fn == 'or':
results = [set(items) for _, items in [self._raw_query(*a) for a in args]]
return None, reduce(operator.__or__, results)
elif fn == 'not':
results = [set(items) for _, items in [self._raw_query(*a) for a in args]]
results.insert(0, set(self.all_items()))
return None, reduce(operator.sub, results)
else:
raise ValueError
def empty(self):
self.tagged = dict()
self.items = Counter()
self.tags = Counter()
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return u"%s()" % (self.__class__.__name__)
| 2,539
| 8
| 319
|
0ac474e1e7533b08c8837044f8fe017e777d82e6
| 364
|
py
|
Python
|
experiment_data_and_analysis/AccuracyAnalysis/mayavi_example.py
|
JakeFountain/Spooky
|
e0a2d4ea878467d6bc7f385220f29c85fd65a190
|
[
"Apache-2.0"
] | null | null | null |
experiment_data_and_analysis/AccuracyAnalysis/mayavi_example.py
|
JakeFountain/Spooky
|
e0a2d4ea878467d6bc7f385220f29c85fd65a190
|
[
"Apache-2.0"
] | null | null | null |
experiment_data_and_analysis/AccuracyAnalysis/mayavi_example.py
|
JakeFountain/Spooky
|
e0a2d4ea878467d6bc7f385220f29c85fd65a190
|
[
"Apache-2.0"
] | 2
|
2019-03-12T02:06:32.000Z
|
2019-05-12T15:29:41.000Z
|
import numpy as np
from mayavi import mlab
n_mer, n_long = 6, 11
dphi = np.pi / 1000.0
phi = np.arange(0.0, 2 * np.pi + 0.5 * dphi, dphi)
mu = phi * n_mer
x = np.cos(mu) * (1 + np.cos(n_long * mu / n_mer) * 0.5)
y = np.sin(mu) * (1 + np.cos(n_long * mu / n_mer) * 0.5)
z = np.sin(n_long * mu / n_mer) * 0.5
t = np.sin(mu)
mlab.plot3d(x, y, z, t, tube_radius=0.025, colormap='Spectral')
| 33.090909
| 63
| 0.590659
|
import numpy as np
from mayavi import mlab
n_mer, n_long = 6, 11
dphi = np.pi / 1000.0
phi = np.arange(0.0, 2 * np.pi + 0.5 * dphi, dphi)
mu = phi * n_mer
x = np.cos(mu) * (1 + np.cos(n_long * mu / n_mer) * 0.5)
y = np.sin(mu) * (1 + np.cos(n_long * mu / n_mer) * 0.5)
z = np.sin(n_long * mu / n_mer) * 0.5
t = np.sin(mu)
mlab.plot3d(x, y, z, t, tube_radius=0.025, colormap='Spectral')
| 0
| 0
| 0
|
1274f1dac29488da85dd79cccab14802e253602b
| 689
|
py
|
Python
|
src/absolute_uri.py
|
nigma/djutil
|
85b7c21acbcd4d3e8cef4246cdb5049cbede8748
|
[
"MIT"
] | 1
|
2015-04-16T14:43:25.000Z
|
2015-04-16T14:43:25.000Z
|
src/absolute_uri.py
|
nigma/djutil
|
85b7c21acbcd4d3e8cef4246cdb5049cbede8748
|
[
"MIT"
] | null | null | null |
src/absolute_uri.py
|
nigma/djutil
|
85b7c21acbcd4d3e8cef4246cdb5049cbede8748
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from django.contrib.sites.models import get_current_site
| 27.56
| 66
| 0.718433
|
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from django.contrib.sites.models import get_current_site
def add_domain(path, domain, secure=False):
if path.startswith("http://") or path.startswith("https://"):
return path
domain = ("https://" if secure else "http://") + domain
return urljoin(domain, path)
def build_site_url(path, request=None):
current_site = get_current_site(request=request)
domain = current_site.domain
secure = request.is_secure() if request is not None else False
return add_domain(path, domain, secure=secure)
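# --- Illustrative usage sketch (not part of the original module). ---
# add_domain() is plain string handling; the domain and paths are hypothetical.
# build_site_url() additionally needs Django's sites framework configured.
#
#   add_domain('/media/logo.png', 'example.com')               -> 'http://example.com/media/logo.png'
#   add_domain('/media/logo.png', 'example.com', secure=True)  -> 'https://example.com/media/logo.png'
#   add_domain('https://cdn.example.com/x.png', 'example.com') -> returned unchanged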
| 423
| 0
| 46
|
f8369411bcf3950b58aef28e018e3ef6c0a4a4f1
| 367
|
py
|
Python
|
revolver/group.py
|
michaelcontento/revolver
|
bbae82df0804ff2708a82fd0016b776664ee2deb
|
[
"Apache-2.0"
] | 1
|
2015-05-16T17:55:26.000Z
|
2015-05-16T17:55:26.000Z
|
revolver/group.py
|
michaelcontento/revolver
|
bbae82df0804ff2708a82fd0016b776664ee2deb
|
[
"Apache-2.0"
] | null | null | null |
revolver/group.py
|
michaelcontento/revolver
|
bbae82df0804ff2708a82fd0016b776664ee2deb
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from cuisine import group_check as get
from cuisine import group_create as create
from cuisine import group_ensure as ensure
from cuisine import group_user_add as user_add
from cuisine import group_user_check as user_check
from cuisine import group_user_ensure as user_ensure
| 33.363636
| 64
| 0.836512
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from cuisine import group_check as get
from cuisine import group_create as create
from cuisine import group_ensure as ensure
from cuisine import group_user_add as user_add
from cuisine import group_user_check as user_check
from cuisine import group_user_ensure as user_ensure
| 0
| 0
| 0
|
29951beb8b1b68a35ede1131be3ff6ad070a45f5
| 3,777
|
py
|
Python
|
evaluation/consuming_service_generator.py
|
Peniac/NSE-CSC
|
7bb313b59f3b92c1349b03bc11e8fb17512e1518
|
[
"Apache-2.0"
] | null | null | null |
evaluation/consuming_service_generator.py
|
Peniac/NSE-CSC
|
7bb313b59f3b92c1349b03bc11e8fb17512e1518
|
[
"Apache-2.0"
] | null | null | null |
evaluation/consuming_service_generator.py
|
Peniac/NSE-CSC
|
7bb313b59f3b92c1349b03bc11e8fb17512e1518
|
[
"Apache-2.0"
] | null | null | null |
'''
Definitions
1. consuming service: a network service that requires the consumption of additional VNF(s) that pertain to a different service, i.e., providing services.
2. CSC engaged VNFs (w.r.t. the consuming service): exactly two distinct VNFs. One VNF forwards the traffic to the receiver CSC engaged VNF of the providing service, whereas the other VNF receives the traffic, which is now processed by (a subset of) the providing service.
General assumptions
1. Every network service is modelled as a directed graph. Nodes represent VNFs, and edges represent virtual links. VNFs require CPU, and edges require bandwidth.
2. We consider sequential services, e.g., 'A->B->C' is OK, whereas 'A->B,A->C,B->C' is NOT OK
Environment
1. CPU in [0.72, 1.44, 2.16, 2.88, 3.6GHz] (randomly)
2. bandwidth in [10Mbps, 200Mbps] (randomly)
3. length (i.e., number of VNFs per service) in [3,8] (randomly)
4. The two CSC engaged VNFs are randomly sampled (without replacement)
5. Duration in [3,10] time intervals
'''
# dependencies
import networkx as nx
import random
def initializeConsumingService(providing_services, service_index, time):
'''
Function that generates a consuming service, in the form of a graph. The consuming service pairs with a providing service, and the corresponding consuming service graph adds VNFs and edges for the CSC, also.
'''
# the providing service that the consuming service will pair with
providing_service = random.choice(providing_services)
# list of possible VNF CPU requirements
CPUs = [0.72, 1.44, 2.16, 2.88, 3.6]
CPUs = [round(cpu/7.2,2) for cpu in CPUs]
# the length of the network service
service_length = random.randint(3,8)
# list of CSC VNF indices
first_CSC_engaged_VNF = random.choice(range(service_length-1))
CSC_engaged_VNFs = [first_CSC_engaged_VNF, first_CSC_engaged_VNF+1]
# create empty directional graph
G = nx.DiGraph(id = service_index, type = 'consuming', provider_pair = providing_service.graph['id'], expires_in = time + random.randint(3,10))
# populate the consuming service graph with VNF nodes
for j in range(service_length):
if j not in CSC_engaged_VNFs:
VNF_type = 'VNF'
else:
VNF_type = 'C_CSC_VNF'
G.add_node("C{0}VNF{1}".format(service_index,j), type = VNF_type, cpu = random.choice(CPUs), serid = service_index, sertype = 'consumer')
nodes = list(G.nodes())
# add edges between VNFs sequentially
for j in range(service_length-1):
G.add_edge(nodes[j],nodes[j+1], source = nodes[j], dest= nodes[j+1], bandwidth = random.randrange(10,100), sertype = 'consuming')
# the corresponding CSC VNF indices of the providing service
CSC_engaged_VNFs_provider = [n for n in providing_service.nodes if providing_service.nodes[n]['type'] == 'CSC_VNF']
# add the CSC nodes of the providing service to the consuming service
for j in CSC_engaged_VNFs_provider:
G.add_node(j, type = 'P_CSC_VNF', sertype = 'provider')
# add the 2 CSC-engaged edges
# from consuming to providing
G.add_edge(nodes[CSC_engaged_VNFs[0]], CSC_engaged_VNFs_provider[0], source = nodes[CSC_engaged_VNFs[0]], dest = CSC_engaged_VNFs_provider[0], bandwidth = random.randrange(10,100), sertype = 'providing')
# from providing to consuming
if len(CSC_engaged_VNFs_provider) == 2:
G.add_edge(CSC_engaged_VNFs_provider[1], nodes[CSC_engaged_VNFs[1]], source = CSC_engaged_VNFs_provider[1], dest = nodes[CSC_engaged_VNFs[1]], bandwidth = random.randrange(10,100), sertype = 'providing')
else:
G.add_edge(CSC_engaged_VNFs_provider[0], nodes[CSC_engaged_VNFs[1]], source = CSC_engaged_VNFs_provider[0], dest = nodes[CSC_engaged_VNFs[1]], bandwidth = random.randrange(10,100), sertype = 'providing')
return G
| 53.197183
| 278
| 0.735769
|
'''
Definitions
1. consuming service: a network service that requires the consumption of additional VNF(s) that pertain to a different service, i.e., providing services.
2. CSC engaged VNFs (w.r.t. the consuming service): exactly two distinct VNFs. One VNF forwards the traffic to the receiver CSC engaged VNF of the providing service, whereas the other VNF receives the traffic, which is now processed by (a subset of) the providing service.
General assumptions
1. Every network service is modelled as a directed graph. Nodes represent VNFs, and edges represent virtual links. VNFs require CPU, and edges require bandwidth.
2. We consider sequential services, e.g., 'A->B->C' is OK, whereas 'A->B,A->C,B->C' is NOT OK
Environment
1. CPU in [0.72, 1.44, 2.16, 2.88, 3.6GHz] (randomly)
2. bandwidth in [10Mbps, 200Mbps] (randomly)
3. length (i.e., number of VNFs per service) in [3,8] (randomly)
4. The two CSC engaged VNFs are randomly sampled (without replacement)
5. Duration in [3,10] time intervals
'''
# dependencies
import networkx as nx
import random
def initializeConsumingService(providing_services, service_index, time):
'''
Function that generates a consuming service, in the form of a graph. The consuming service pairs with a providing service, and the corresponding consuming service graph adds VNFs and edges for the CSC, also.
'''
# the providing service that the consuming service will pair with
providing_service = random.choice(providing_services)
# list of possible VNF CPU requirements
CPUs = [0.72, 1.44, 2.16, 2.88, 3.6]
CPUs = [round(cpu/7.2,2) for cpu in CPUs]
# the length of the network service
service_length = random.randint(3,8)
# list of CSC VNF indices
first_CSC_engaged_VNF = random.choice(range(service_length-1))
CSC_engaged_VNFs = [first_CSC_engaged_VNF, first_CSC_engaged_VNF+1]
# create empty directional graph
G = nx.DiGraph(id = service_index, type = 'consuming', provider_pair = providing_service.graph['id'], expires_in = time + random.randint(3,10))
# populate the consuming service graph with VNF nodes
for j in range(service_length):
if j not in CSC_engaged_VNFs:
VNF_type = 'VNF'
else:
VNF_type = 'C_CSC_VNF'
G.add_node("C{0}VNF{1}".format(service_index,j), type = VNF_type, cpu = random.choice(CPUs), serid = service_index, sertype = 'consumer')
nodes = list(G.nodes())
# add edges between VNFs sequentially
for j in range(service_length-1):
G.add_edge(nodes[j],nodes[j+1], source = nodes[j], dest= nodes[j+1], bandwidth = random.randrange(10,100), sertype = 'consuming')
# the corresponding CSC VNF indices of the providing service
CSC_engaged_VNFs_provider = [n for n in providing_service.nodes if providing_service.nodes[n]['type'] == 'CSC_VNF']
# add the CSC nodes of the providing service to the consuming service
for j in CSC_engaged_VNFs_provider:
G.add_node(j, type = 'P_CSC_VNF', sertype = 'provider')
# add the 2 CSC-engaged edges
# from consuming to providing
G.add_edge(nodes[CSC_engaged_VNFs[0]], CSC_engaged_VNFs_provider[0], source = nodes[CSC_engaged_VNFs[0]], dest = CSC_engaged_VNFs_provider[0], bandwidth = random.randrange(10,100), sertype = 'providing')
# from providing to consuming
if len(CSC_engaged_VNFs_provider) == 2:
G.add_edge(CSC_engaged_VNFs_provider[1], nodes[CSC_engaged_VNFs[1]], source = CSC_engaged_VNFs_provider[1], dest = nodes[CSC_engaged_VNFs[1]], bandwidth = random.randrange(10,100), sertype = 'providing')
else:
G.add_edge(CSC_engaged_VNFs_provider[0], nodes[CSC_engaged_VNFs[1]], source = CSC_engaged_VNFs_provider[0], dest = nodes[CSC_engaged_VNFs[1]], bandwidth = random.randrange(10,100), sertype = 'providing')
return G
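# --- Illustrative usage sketch (not part of the original module). ---
# Builds a minimal providing-service graph by hand; node names and the id are
# hypothetical, but only the attributes read by initializeConsumingService()
# are set (graph['id'] and each node's 'type').
if __name__ == '__main__':
    provider = nx.DiGraph(id=0, type='providing')
    provider.add_node('P0VNF0', type='VNF')
    provider.add_node('P0VNF1', type='CSC_VNF')
    provider.add_edge('P0VNF0', 'P0VNF1', bandwidth=50)
    consumer = initializeConsumingService([provider], service_index=1, time=0)
    print(consumer.graph['provider_pair'], consumer.number_of_nodes())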
| 0
| 0
| 0
|
553ba126daefa4225f49cd1ed880ebdf7092f4f9
| 428
|
py
|
Python
|
quickstart/models.py
|
gladsonvm/drf-nested
|
e53cfa116c76ce573207401035edfac6a46cd1be
|
[
"MIT"
] | null | null | null |
quickstart/models.py
|
gladsonvm/drf-nested
|
e53cfa116c76ce573207401035edfac6a46cd1be
|
[
"MIT"
] | null | null | null |
quickstart/models.py
|
gladsonvm/drf-nested
|
e53cfa116c76ce573207401035edfac6a46cd1be
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
| 25.176471
| 70
| 0.752336
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
add_info = models.CharField(null=True, blank=True, max_length=100)
user = models.OneToOneField(User)
class nestedmodel(models.Model):
info = models.CharField(null=True, blank=True, max_length=100)
user = models.ForeignKey(User)
profile = models.ForeignKey(UserProfile)
| 0
| 278
| 46
|
f2c87a50078598a9e64acc7a01d1bb775a171f62
| 3,915
|
py
|
Python
|
check_backup_age.py
|
martialblog/check_backup_age
|
1203a458be6f27ed30fcdb49b1f8c8a80a695782
|
[
"MIT"
] | null | null | null |
check_backup_age.py
|
martialblog/check_backup_age
|
1203a458be6f27ed30fcdb49b1f8c8a80a695782
|
[
"MIT"
] | null | null | null |
check_backup_age.py
|
martialblog/check_backup_age
|
1203a458be6f27ed30fcdb49b1f8c8a80a695782
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import os
import argparse
import re
import datetime
import subprocess
class EXIT():
"""
Exit codes from:
https://docs.icinga.com/latest/en/pluginapi.html
"""
OK = 0
WARN = 1
CRIT = 2
UNKOWN = 3
def commandline(args):
"""
Settings for the commandline arguments.
Returns the parsed arguments.
"""
parser = argparse.ArgumentParser(description='Checks the timestamps for files in a directory.')
parser.add_argument("-p", "--path", required=True,
help="Path to offline backup list file or directory")
parser.add_argument("-w", "--warning",
help="Threshold for warnings in days. Default: 2 Days")
parser.add_argument("-c", "--critical",
help="Threshold for criticals in days. Default: 5 Days")
parser.add_argument("-f", "--format",
help="Format of the date in the file. Default: Y-m-d")
parser.add_argument("-r", "--regex",
help="Regular Expression to extract date from file. Default: [0-9]{4}-[0-9]{2}-[0-9]{2}")
parser.add_argument("-v", "--verbose",
help="Increase output verbosity",
action="store_true")
parser.set_defaults(verbose=False,
critical=5,
warning=2)
return parser.parse_args(args)
def readdata(path):
"""
Checks if the path exists, then reads the file or directory and returns the data.
"""
if not os.path.exists(path):
print('No such path {0}'.format(path))
sys.exit(EXIT.WARN)
if os.path.isfile(path):
with open(path) as f:
data = f.read()
elif os.path.isdir(path):
data = subprocess.check_output(['ls', '--full-time', path])
data = data.decode("utf-8").rstrip('\n')
return data
def extract_dates(data, date_format='%Y-%m-%d', date_regex='[0-9]{4}-[0-9]{2}-[0-9]{2}'):
"""
Extracts dates from a string using regular expressions, then converts the dates to datetime objects and returns a list.
"""
dates = []
regex = re.compile(date_regex)
date_strings = regex.findall(data)
for date_string in date_strings:
dates.append(datetime.datetime.strptime(date_string, date_format).date())
return sorted(dates)
def check_delta(delta, warn, crit):
"""
Checks the category of the calculated delta (OK, WARN, FAIL) and exits the program accordingly.
"""
last_backup = 'Last backup was {0} days ago'.format(delta.days)
isokay = delta.days < warn
iswarn = delta.days >= warn and delta.days < crit
iscrit = delta.days >= crit
if isokay:
print('OK - ' + last_backup)
sys.exit(EXIT.OK)
elif iswarn:
print('WARN - ' + last_backup)
sys.exit(EXIT.WARN)
elif iscrit:
print('CRIT - ' + last_backup)
sys.exit(EXIT.CRIT)
else:
print('UNKNOWN - Not really sure what is happening')
sys.exit(EXIT.UNKOWN)
def calculate_delta(dates):
"""
    Calculates how far the given dates deviate from today's date. Returns a datetime.timedelta object.
"""
today = datetime.datetime.today().date()
delta = 0
for i in range(0, len(dates)):
delta = -(dates[i] - today)
    # If there are no dates in the file, for example
if not isinstance(delta, datetime.timedelta):
print('UNKNOWN - Probably error while reading the file')
sys.exit(EXIT.UNKOWN)
return delta
if __name__ == "__main__":
args = commandline(sys.argv[1:])
main(args)
| 25.422078
| 123
| 0.603831
|
#!/usr/bin/env python3
import sys
import os
import argparse
import re
import datetime
import subprocess
class EXIT():
"""
Exit codes from:
https://docs.icinga.com/latest/en/pluginapi.html
"""
OK = 0
WARN = 1
CRIT = 2
UNKOWN = 3
def commandline(args):
"""
Settings for the commandline arguments.
Returns the parsed arguments.
"""
parser = argparse.ArgumentParser(description='Checks the timestamps for files in a directory.')
parser.add_argument("-p", "--path", required=True,
help="Path to offline backup list file or directory")
parser.add_argument("-w", "--warning",
help="Threshold for warnings in days. Default: 2 Days")
parser.add_argument("-c", "--critical",
help="Threshold for criticals in days. Default: 5 Days")
parser.add_argument("-f", "--format",
help="Format of the date in the file. Default: Y-m-d")
parser.add_argument("-r", "--regex",
help="Regular Expression to extract date from file. Default: [0-9]{4}-[0-9]{2}-[0-9]{2}")
parser.add_argument("-v", "--verbose",
help="Increase output verbosity",
action="store_true")
parser.set_defaults(verbose=False,
critical=5,
warning=2)
return parser.parse_args(args)
def readdata(path):
"""
Checks if the path exists, then reads the file or directory and returns the data.
"""
if not os.path.exists(path):
print('No such path {0}'.format(path))
sys.exit(EXIT.WARN)
if os.path.isfile(path):
with open(path) as f:
data = f.read()
elif os.path.isdir(path):
data = subprocess.check_output(['ls', '--full-time', path])
data = data.decode("utf-8").rstrip('\n')
return data
def extract_dates(data, date_format='%Y-%m-%d', date_regex='[0-9]{4}-[0-9]{2}-[0-9]{2}'):
"""
Extracts dates from a string using regular expressions, then converts the dates to datetime objects and returns a list.
"""
dates = []
regex = re.compile(date_regex)
date_strings = regex.findall(data)
for date_string in date_strings:
dates.append(datetime.datetime.strptime(date_string, date_format).date())
return sorted(dates)
def check_delta(delta, warn, crit):
"""
Checks the category of the calculated delta (OK, WARN, FAIL) and exits the program accordingly.
"""
last_backup = 'Last backup was {0} days ago'.format(delta.days)
isokay = delta.days < warn
iswarn = delta.days >= warn and delta.days < crit
iscrit = delta.days >= crit
if isokay:
print('OK - ' + last_backup)
sys.exit(EXIT.OK)
elif iswarn:
print('WARN - ' + last_backup)
sys.exit(EXIT.WARN)
elif iscrit:
print('CRIT - ' + last_backup)
sys.exit(EXIT.CRIT)
else:
print('UNKNOWN - Not really sure what is happening')
sys.exit(EXIT.UNKOWN)
def calculate_delta(dates):
"""
    Calculates how far the given dates deviate from today's date. Returns a datetime.timedelta object.
"""
today = datetime.datetime.today().date()
delta = 0
for i in range(0, len(dates)):
delta = -(dates[i] - today)
    # If there are no dates in the file, for example
if not isinstance(delta, datetime.timedelta):
print('UNKNOWN - Probably error while reading the file')
sys.exit(EXIT.UNKOWN)
return delta
def main(args):
path = str(args.path)
crit = int(args.critical)
warn = int(args.warning)
rdata = readdata(path)
dates = extract_dates(rdata)
delta = calculate_delta(dates)
check_delta(delta=delta, warn=warn, crit=crit)
if __name__ == "__main__":
args = commandline(sys.argv[1:])
main(args)
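# --- Illustrative usage sketch (not part of the original script). ---
# Command line (path and thresholds are hypothetical):
#   ./check_backup_age.py --path /var/backups --warning 2 --critical 5
# The same pipeline can also be exercised directly with the functions above:
#   dates = extract_dates('backup-2024-01-01.tar backup-2024-01-03.tar')
#   delta = calculate_delta(dates)      # timedelta from the newest date to today
#   check_delta(delta, warn=2, crit=5)  # prints OK/WARN/CRIT and exits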
| 228
| 0
| 23
|
5e8f943f19843c86705229924cd80deefbcb73fd
| 2,170
|
py
|
Python
|
ooobuild/lo/xml/sax/x_fast_token_handler.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/xml/sax/x_fast_token_handler.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/xml/sax/x_fast_token_handler.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.xml.sax
import typing
from abc import abstractmethod
from ...uno.x_interface import XInterface as XInterface_8f010a43
class XFastTokenHandler(XInterface_8f010a43):
"""
interface to translate XML strings to integer tokens.
An instance of this interface can be registered at a XFastParser. It should be able to translate all XML names (element local names, attribute local names and constant attribute values) to integer tokens.
A token value must be greater or equal to zero and less than FastToken.NAMESPACE. If a string identifier is not known to this instance, FastToken.DONTKNOW is returned.
See Also:
`API XFastTokenHandler <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1xml_1_1sax_1_1XFastTokenHandler.html>`_
"""
__ooo_ns__: str = 'com.sun.star.xml.sax'
__ooo_full_ns__: str = 'com.sun.star.xml.sax.XFastTokenHandler'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.xml.sax.XFastTokenHandler'
@abstractmethod
def getTokenFromUTF8(self, Identifier: 'typing.Tuple[int, ...]') -> int:
"""
returns an integer token for the given string
"""
@abstractmethod
def getUTF8Identifier(self, Token: int) -> 'typing.Tuple[int, ...]':
"""
returns an identifier for the given integer token as a byte sequence encoded in UTF-8.
"""
__all__ = ['XFastTokenHandler']
| 40.185185
| 208
| 0.731336
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.xml.sax
import typing
from abc import abstractmethod
from ...uno.x_interface import XInterface as XInterface_8f010a43
class XFastTokenHandler(XInterface_8f010a43):
"""
interface to translate XML strings to integer tokens.
An instance of this interface can be registered at a XFastParser. It should be able to translate all XML names (element local names, attribute local names and constant attribute values) to integer tokens.
A token value must be greater or equal to zero and less than FastToken.NAMESPACE. If a string identifier is not known to this instance, FastToken.DONTKNOW is returned.
See Also:
`API XFastTokenHandler <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1xml_1_1sax_1_1XFastTokenHandler.html>`_
"""
__ooo_ns__: str = 'com.sun.star.xml.sax'
__ooo_full_ns__: str = 'com.sun.star.xml.sax.XFastTokenHandler'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.xml.sax.XFastTokenHandler'
@abstractmethod
def getTokenFromUTF8(self, Identifier: 'typing.Tuple[int, ...]') -> int:
"""
returns an integer token for the given string
"""
@abstractmethod
def getUTF8Identifier(self, Token: int) -> 'typing.Tuple[int, ...]':
"""
returns an identifier for the given integer token as a byte sequence encoded in UTF-8.
"""
__all__ = ['XFastTokenHandler']
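# --- Illustrative sketch (not part of the generated file). ---
# A minimal concrete handler implementing the two abstract methods above;
# the token table is hypothetical, and a real handler would return
# FastToken.DONTKNOW for unknown identifiers.
class _ExampleTokenHandler(XFastTokenHandler):
    _tokens = {tuple(b'root'): 0, tuple(b'item'): 1}  # UTF-8 byte tuple -> token

    def getTokenFromUTF8(self, Identifier: 'typing.Tuple[int, ...]') -> int:
        return self._tokens.get(tuple(Identifier), -1)

    def getUTF8Identifier(self, Token: int) -> 'typing.Tuple[int, ...]':
        for name, token in self._tokens.items():
            if token == Token:
                return name
        return tuple()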
| 0
| 0
| 0
|
220fea1a916c09c767cc2366cd7504e5daae93e3
| 5,216
|
py
|
Python
|
cap2/pipeline/short_read/amrs.py
|
nanusefue/CAP2-1
|
670b343ac7629fe0e64e86263ae420b01952f427
|
[
"MIT"
] | 9
|
2020-07-10T15:45:12.000Z
|
2022-01-19T10:44:13.000Z
|
cap2/pipeline/short_read/amrs.py
|
nanusefue/CAP2-1
|
670b343ac7629fe0e64e86263ae420b01952f427
|
[
"MIT"
] | 14
|
2020-06-15T16:04:54.000Z
|
2022-03-12T01:05:47.000Z
|
cap2/pipeline/short_read/amrs.py
|
nanusefue/CAP2-1
|
670b343ac7629fe0e64e86263ae420b01952f427
|
[
"MIT"
] | 5
|
2021-01-05T01:26:48.000Z
|
2022-01-23T11:20:49.000Z
|
import luigi
import subprocess
from os.path import join, dirname, basename
from ..utils.cap_task import CapTask
from ..config import PipelineConfig
from ..utils.conda import CondaPackage
from ..preprocessing.clean_reads import CleanReads
from ..databases.amr_db import GrootDB, MegaResDB, CardDB
| 31.047619
| 96
| 0.568252
|
import luigi
import subprocess
from os.path import join, dirname, basename
from ..utils.cap_task import CapTask
from ..config import PipelineConfig
from ..utils.conda import CondaPackage
from ..preprocessing.clean_reads import CleanReads
from ..databases.amr_db import GrootDB, MegaResDB, CardDB
class GrootAMR(CapTask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pkg = CondaPackage(
package="groot==1.1.2",
executable="groot",
channel="bioconda",
config_filename=self.config_filename,
)
self.config = PipelineConfig(self.config_filename)
self.out_dir = self.config.out_dir
self.db = GrootDB(config_filename=self.config_filename)
self.reads = CleanReads(
sample_name=self.sample_name,
pe1=self.pe1,
pe2=self.pe2,
config_filename=self.config_filename
)
@classmethod
def _module_name(cls):
return 'groot'
def requires(self):
return self.pkg, self.db, self.reads
@classmethod
def version(cls):
return 'v1.0.0'
@classmethod
def dependencies(cls):
return ['groot==1.1.2', GrootDB, CleanReads]
def output(self):
return {
'alignment': self.get_target('alignment', 'bam'),
}
def _run(self):
align_cmd = f'{self.pkg.bin} align '
align_cmd += f'-i {self.db.groot_index} -f {self.reads.reads[0]},{self.reads.reads[1]} '
align_cmd += f'-p {self.cores} > {self.output()["alignment"].path}'
report_cmd = f'{self.pkg.bin} report -i {self.output()["alignment"].path} '
report_cmd += '--lowCov --plotCov'
rm_cmd = f'rm {self.output()["alignment"].path}'
subprocess.check_call(align_cmd + ' && ' + report_cmd + ' | ' + rm_cmd, shell=True)
class MegaRes(CapTask):
thresh = luigi.FloatParameter(default=80.0)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pkg = CondaPackage(
package="resistome_analyzer",
executable="resistome_analyzer",
channel="bioconda",
config_filename=self.config_filename,
)
self.aligner = CondaPackage(
package="bowtie2",
executable="bowtie2",
channel="bioconda",
config_filename=self.config_filename,
)
self.config = PipelineConfig(self.config_filename)
self.out_dir = self.config.out_dir
self.db = MegaResDB(config_filename=self.config_filename)
self.reads = CleanReads(
sample_name=self.sample_name,
pe1=self.pe1,
pe2=self.pe2,
config_filename=self.config_filename
)
def module_name(self):
return 'megares'
def requires(self):
return self.pkg, self.aligner, self.db, self.reads
def output(self):
mytarget = lambda el: luigi.LocalTarget(join(self.out_dir, el))
return {
'sam': self.get_target('sam', 'sam'),
'gene': self.get_target('gene', 'tsv'),
'group': self.get_target('group', 'tsv'),
'classus': self.get_target('classus', 'tsv'),
'mech': self.get_target('mech', 'tsv'),
}
def _run(self):
sam_file = self.output()["sam"].path
cmd1 = (
f'{self.aligner.bin} '
            f'-p {self.cores} '
'--very-sensitive '
f' -x {self.db.bowtie2_index} '
f' -1 {self.reads.reads[0]} '
f' -2 {self.reads.reads[1]} '
'| samtools view -F 4 '
f'> {sam_file} '
)
cmd2 = (
f'{self.pkg.bin} '
f'-ref_fp {self.db.fasta} '
f'-sam_fp {sam_file} '
f'-annot_fp {self.db.annotations} '
f'-gene_fp {self.output()["gene"].path} '
f'-group_fp {self.output()["group"].path} '
f'-class_fp {self.output()["classus"].path} '
f'-mech_fp {self.output()["mech"].path} '
f'-t {self.thresh}'
)
subprocess.call(cmd1 + ' && ' + cmd2, shell=True)
class CARD(CapTask):
sample_name = luigi.Parameter()
pe1 = luigi.Parameter()
pe2 = luigi.Parameter()
config_filename = luigi.Parameter()
cores = luigi.IntParameter(default=1)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pkg = CondaPackage(
package="megares",
executable="megares",
channel="bioconda",
config_filename=self.config_filename,
)
self.config = PipelineConfig(self.config_filename)
self.out_dir = self.config.out_dir
self.db = CardDB(config_filename=self.config_filename)
self.reads = CleanReads(
sample_name=self.sample_name,
pe1=self.pe1,
pe2=self.pe2,
config_filename=self.config_filename
)
def requires(self):
return self.pkg, self.db, self.reads
def output(self):
pass
def run(self):
cmd = (
)
subprocess.call(cmd, shell=True)
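# --- Illustrative usage sketch (not part of the original module). ---
# Scheduling one of the tasks above with luigi's local scheduler; every path
# and the sample name are hypothetical.
if __name__ == '__main__':
    luigi.build(
        [GrootAMR(
            sample_name='sample1',
            pe1='/data/sample1_R1.fastq.gz',
            pe2='/data/sample1_R2.fastq.gz',
            config_filename='/data/cap2_config.yaml',
        )],
        local_scheduler=True,
    )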
| 4,137
| 709
| 69
|
a94350db177c24901064b6f7d2929cd25fa5ae9d
| 3,981
|
py
|
Python
|
apps/users/managers.py
|
dlooto/driver-vision
|
676256891971df1d5eee990be54fb31f485d0ae3
|
[
"MIT"
] | 2
|
2020-06-16T01:52:47.000Z
|
2021-03-24T08:34:23.000Z
|
apps/users/managers.py
|
dlooto/driver-vision
|
676256891971df1d5eee990be54fb31f485d0ae3
|
[
"MIT"
] | 2
|
2020-02-12T03:09:54.000Z
|
2020-06-05T22:47:17.000Z
|
apps/users/managers.py
|
dlooto/driver-vision
|
676256891971df1d5eee990be54fb31f485d0ae3
|
[
"MIT"
] | null | null | null |
#coding=utf-8
#
# Created on Mar 21, 2014, by Junn
#
#
from django.contrib.auth.models import BaseUserManager
from django.utils import timezone
from utils import eggs, logs, http
from django.core.cache import cache
VALID_ATTRS = ('nickname', 'email', 'phone', 'gender', 'avatar')
| 32.900826
| 107
| 0.542828
|
#coding=utf-8
#
# Created on Mar 21, 2014, by Junn
#
#
from django.contrib.auth.models import BaseUserManager
from django.utils import timezone
from utils import eggs, logs, http
from django.core.cache import cache
VALID_ATTRS = ('nickname', 'email', 'phone', 'gender', 'avatar')
def mk_key(id):
return 'u%s' % id
class CustomUserManager(BaseUserManager):
def _create_user(self, username, password=None, is_active=True, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
user = self.model(username=username,
is_staff=False, is_active=is_active, is_superuser=False,
last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def get_by_phone(self, phone):
try:
return self.get(username=phone)
except self.model.DoesNotExist:
return None
def update_user(self, user, req):
data = req.DATA
        for attr in VALID_ATTRS:  # double loop; the algorithm should be improved later
if attr in data:
setattr(user, attr, data.get(attr))
user.save(using=self._db)
return user
def create_superuser(self, username, password, **extra_fields):
u = self._create_user(username, password, **extra_fields)
u.is_staff = True
u.is_superuser = True
u.save(using=self._db)
return u
def create_open_user(self, source_site, openid, access_token, expires_in, open_name='', avatar_url=''):
        '''Create a third-party login account
        @param source_site: name of the third-party platform
        @param openid: the user's account id on the third-party platform
        @param access_token: access token issued by the third-party platform
        @param expires_in: expiry time of the access_token
        @param open_name: the user's nickname on the third-party platform
        @param avatar_url: url of the user's avatar on the third-party platform
        '''
from auth.models import OpenAccount
try:
            ## users registered via a third-party platform may not log in directly unless the password has been reset (resetting requires binding a phone number first)
user = self._create_user(
username=eggs.gen_uuid1(), nickname=open_name, acct_type='O'
)
try:
                user.save_avatar(http.request_file(avatar_url))  # fetch the image from the remote url and save it
except Exception, e:
logs.err(__name__, eggs.lineno(), e)
pass
            user.update_pdu(1)  # set the avatar flag bit
            user.update_pdu(2)  # set the nickname flag bit
user.save()
user.cache()
open_acct = OpenAccount(user=user, source_site=source_site,
openid=openid, access_token=access_token, expires_in=int(expires_in)
)
open_acct.save()
return True, user
except Exception, e:
logs.err(__name__, eggs.lineno(), e)
return False, u'账号绑定异常'
############################################################ cache methods
def cache_all(self): #TODO: abstract these cache_xxx method into base class ...
users = self.all()
for u in users:
u.cache()
logs.info('====================================> All user entities cached.')
def get_cached(self, uid): #TODO: using cache later...
'''return cached user object'''
user = cache.get(mk_key(uid))
if not user:
try:
user = self.get(id=int(uid))
user.cache()
except self.model.DoesNotExist:
logs.err(__name__, eggs.lineno(), 'User not found: %s' % uid)
return None
except Exception, e:
logs.err(__name__, eggs.lineno(), 'get_cached user error: %s' % e)
return None
return user
| 836
| 3,055
| 50
|
9c6838ed116deb3a2770d9b6ad5a6c062dbeb8a7
| 430
|
py
|
Python
|
Python/WebCam GUI Filters/Gray.py
|
abhijeet007rocks8/Useful-Scripts
|
2c8bd8c1cca4960c2333806194af7341497269e1
|
[
"MIT"
] | 32
|
2021-10-02T07:30:48.000Z
|
2022-03-20T13:43:32.000Z
|
Python/WebCam GUI Filters/Gray.py
|
abhijeet007rocks8/Useful-Scripts
|
2c8bd8c1cca4960c2333806194af7341497269e1
|
[
"MIT"
] | 170
|
2021-10-02T07:13:00.000Z
|
2022-03-31T20:40:51.000Z
|
Python/WebCam GUI Filters/Gray.py
|
abhijeet007rocks8/Useful-Scripts
|
2c8bd8c1cca4960c2333806194af7341497269e1
|
[
"MIT"
] | 69
|
2021-10-02T07:30:53.000Z
|
2022-03-30T08:25:54.000Z
|
import cv2
import numpy as np
import mediapipe as mp
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('Gray Filter', gray)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
    if cv2.waitKey(10) & 0xFF == ord('s'):
        import email_sender
cap.release()
cv2.destroyAllWindows()
| 25.294118
| 50
| 0.64186
|
import cv2
import numpy as np
import mediapipe as mp
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('Gray Filter', gray)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
    if cv2.waitKey(10) & 0xFF == ord('s'):
        import email_sender
cap.release()
cv2.destroyAllWindows()
| 0
| 0
| 0
|
8bb0a1ae516f4e3eaeb925931a16ef163067e5bd
| 1,537
|
py
|
Python
|
tests/test_models.py
|
alexwlchan/docstore
|
dcffa76cb74e685e5ac027be6536d9662cad460f
|
[
"MIT"
] | 40
|
2019-01-13T18:46:18.000Z
|
2022-03-26T00:41:53.000Z
|
tests/test_models.py
|
alexwlchan/docstore
|
dcffa76cb74e685e5ac027be6536d9662cad460f
|
[
"MIT"
] | 48
|
2019-03-02T10:42:42.000Z
|
2022-03-21T08:26:11.000Z
|
tests/test_models.py
|
alexwlchan/docstore
|
dcffa76cb74e685e5ac027be6536d9662cad460f
|
[
"MIT"
] | 1
|
2021-07-13T22:46:36.000Z
|
2021-07-13T22:46:36.000Z
|
import datetime
import uuid
import pytest
from docstore.models import Dimensions, Document, File, Thumbnail, from_json, to_json
@pytest.mark.parametrize("documents", [[1, 2, 3], {"a", "b", "c"}])
| 25.196721
| 85
| 0.62069
|
import datetime
import uuid
import pytest
from docstore.models import Dimensions, Document, File, Thumbnail, from_json, to_json
def is_recent(ds):
return (datetime.datetime.now() - ds).seconds < 2
def test_document_defaults():
d1 = Document(title="My test document")
assert uuid.UUID(d1.id)
assert is_recent(d1.date_saved)
assert d1.tags == []
assert d1.files == []
d2 = Document(title="A different document")
assert d1.id != d2.id
def test_file_defaults():
f = File(
filename="cats.jpg",
path="files/c/cats.jpg",
size=100,
checksum="sha256:123",
thumbnail=Thumbnail(
path="thumbnails/c/cats.jpg",
dimensions=Dimensions(400, 300),
tint_color="#ffffff",
),
)
uuid.UUID(f.id)
assert is_recent(f.date_saved)
def test_can_serialise_document_to_json():
f = File(
filename="cats.jpg",
path="files/c/cats.jpg",
size=100,
checksum="sha256:123",
thumbnail=Thumbnail(
path="thumbnails/c/cats.jpg",
dimensions=Dimensions(400, 300),
tint_color="#ffffff",
),
)
documents = [Document(title="Another test document", files=[f])]
assert from_json(to_json(documents)) == documents
@pytest.mark.parametrize("documents", [[1, 2, 3], {"a", "b", "c"}])
def test_to_json_with_bad_list_is_typeerror(documents):
with pytest.raises(TypeError, match=r"Expected type List\[Document\]!"):
to_json(documents)
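# --- Illustrative usage sketch (not part of the original test module). ---
# Mirrors the constructors exercised above; file names and sizes are hypothetical.
def example_round_trip():
    doc = Document(
        title="Example document",
        files=[File(
            filename="dog.jpg",
            path="files/d/dog.jpg",
            size=123,
            checksum="sha256:abc",
            thumbnail=Thumbnail(
                path="thumbnails/d/dog.jpg",
                dimensions=Dimensions(400, 300),
                tint_color="#000000",
            ),
        )],
    )
    # to_json() expects a list of Document objects; from_json() reverses it.
    return from_json(to_json([doc])) == [doc]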
| 1,219
| 0
| 114
|
97bbb23607a0663fc7bb7eb63651800524c61af0
| 778
|
py
|
Python
|
setup.py
|
JanStgmnn/meta-labs-python
|
a05caba7e5eb0630b304e2aedf5d4a4aa3036f44
|
[
"MIT"
] | 1
|
2021-05-24T19:02:08.000Z
|
2021-05-24T19:02:08.000Z
|
setup.py
|
JanStgmnn/meta-labs-python
|
a05caba7e5eb0630b304e2aedf5d4a4aa3036f44
|
[
"MIT"
] | null | null | null |
setup.py
|
JanStgmnn/meta-labs-python
|
a05caba7e5eb0630b304e2aedf5d4a4aa3036f44
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="metalabs_sdk", # Replace with your own username
version="0.1.1",
author="Jeffrey Annaraj",
author_email="jannaraj@baffled.dev",
description="SDK for MetaLabs API ",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/JAnnaraj/meta-labs_sdk",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
keywords='metalabs',
install_requires=['urllib3']
)
| 32.416667
| 58
| 0.650386
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="metalabs_sdk", # Replace with your own username
version="0.1.1",
author="Jeffrey Annaraj",
author_email="jannaraj@baffled.dev",
description="SDK for MetaLabs API ",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/JAnnaraj/meta-labs_sdk",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
keywords='metalabs',
install_requires=['urllib3']
)
| 0
| 0
| 0
|
ed85d29525ec40eeaef723354135ec53e29dbcf5
| 1,415
|
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/models/file_system_application_logs_config.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
azure-mgmt-web/azure/mgmt/web/models/file_system_application_logs_config.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
azure-mgmt-web/azure/mgmt/web/models/file_system_application_logs_config.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FileSystemApplicationLogsConfig(Model):
"""
Application logs to file system configuration
:param level: Log level. Possible values include: 'Off', 'Verbose',
'Information', 'Warning', 'Error'
:type level: str or :class:`LogLevel <azure.mgmt.web.models.LogLevel>`
"""
_attribute_map = {
'level': {'key': 'level', 'type': 'LogLevel'},
}
| 35.375
| 76
| 0.640989
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FileSystemApplicationLogsConfig(Model):
"""
Application logs to file system configuration
:param level: Log level. Possible values include: 'Off', 'Verbose',
'Information', 'Warning', 'Error'
:type level: str or :class:`LogLevel <azure.mgmt.web.models.LogLevel>`
"""
_attribute_map = {
'level': {'key': 'level', 'type': 'LogLevel'},
}
def __init__(self, level=None):
self.level = level
| 37
| 0
| 27
|
b82ef50205094d9997859ea3617022b4bad7350f
| 63
|
py
|
Python
|
__init__.py
|
DavidJRobertson/kicad_scripts
|
4fbc687033260ea6ec919717e0d37ca0d7a9cf37
|
[
"BSD-3-Clause"
] | 368
|
2016-07-27T06:42:36.000Z
|
2022-03-31T23:11:25.000Z
|
__init__.py
|
mehmetcanbudak/kicad_scripts
|
7a874fd59d6162636062032e4d2c66b205b52fbe
|
[
"BSD-3-Clause"
] | 37
|
2017-06-01T08:15:20.000Z
|
2022-02-19T17:51:34.000Z
|
__init__.py
|
mehmetcanbudak/kicad_scripts
|
7a874fd59d6162636062032e4d2c66b205b52fbe
|
[
"BSD-3-Clause"
] | 114
|
2017-01-05T05:08:16.000Z
|
2022-03-28T06:03:15.000Z
|
from __future__ import absolute_import
from . import teardrops
| 21
| 38
| 0.857143
|
from __future__ import absolute_import
from . import teardrops
| 0
| 0
| 0
|
3e19b4d28b24361d206d389e5a2aa96dbbe59313
| 1,252
|
py
|
Python
|
ndk/definitions/contact.py
|
VunkLai/ndk
|
76894d2b81297ed0b7b48a35227d919d50e8fb64
|
[
"MIT"
] | 1
|
2020-10-23T07:02:52.000Z
|
2020-10-23T07:02:52.000Z
|
ndk/definitions/contact.py
|
VunkLai/ndk
|
76894d2b81297ed0b7b48a35227d919d50e8fb64
|
[
"MIT"
] | null | null | null |
ndk/definitions/contact.py
|
VunkLai/ndk
|
76894d2b81297ed0b7b48a35227d919d50e8fb64
|
[
"MIT"
] | null | null | null |
import attr
from ndk.construct import Construct
from ndk.directives import *
from ndk.options import contact as options
@attr.s
| 32.102564
| 70
| 0.746006
|
import attr
from ndk.construct import Construct
from ndk.directives import *
from ndk.options import contact as options
@attr.s
class ContactDirective(Construct):
__object_type__ = 'contact'
contact_name = PrimaryKey()
alias = StringField()
contactgroups = OneToMany('ContactGroup')
minimum_importance = IntegerField()
host_notifications_enabled = BooleanField(required=True)
service_notifications_enabled = BooleanField(required=True)
host_notifications_period = OneToOne(
'TimePeriod', required=True)
service_notifications_period = OneToOne(
'TimePeriod', required=True)
host_notifications_options = ChoiceField(
options.HostNotifications, required=True)
service_notifications_options = ChoiceField(
options.ServiceNotifications, required=True)
host_notification_commands = OneToOne('Command', required=True)
service_notification_commands = OneToOne('Command', required=True)
email = StringField()
pager = StringField()
addressx = StringField()
can_submit_commands = BooleanField()
retain_status_information = BooleanField()
retain_nonstatus_information = BooleanField()
@property
def pk(self):
return self.contact_name
| 25
| 1,073
| 22
|
45e2397e2dec44f82a2719f8b237ca83b5d8a294
| 17,416
|
py
|
Python
|
examples/task_seq2seq_autotitle.py
|
sijunx/bert4keras
|
eb2d7a5ccdf89d724a0e62d55a5292faaf01f395
|
[
"Apache-2.0"
] | null | null | null |
examples/task_seq2seq_autotitle.py
|
sijunx/bert4keras
|
eb2d7a5ccdf89d724a0e62d55a5292faaf01f395
|
[
"Apache-2.0"
] | null | null | null |
examples/task_seq2seq_autotitle.py
|
sijunx/bert4keras
|
eb2d7a5ccdf89d724a0e62d55a5292faaf01f395
|
[
"Apache-2.0"
] | null | null | null |
#! -*- coding: utf-8 -*-
# BERT for a Seq2Seq task, using the UNILM scheme
# Introduction: https://kexue.fm/archives/6933
from __future__ import print_function
import glob
import os
import numpy as np
import sys
from bert4keras.backend import keras, K
from bert4keras.layers import Loss
from bert4keras.models import build_transformer_model, tf
from bert4keras.tokenizers import Tokenizer, load_vocab
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, open
from bert4keras.snippets import DataGenerator, AutoRegressiveDecoder
from keras.models import Model
from examples import modeling
from examples.my_args import arg_dic
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from keras import backend as K
from tensorflow.python.platform import gfile
# parameter ==========================
wkdir = '/Users/xusijun/Documents/NLP009/bert4keras-master001/keras_to_tensorflow-master'
pb_filename = 'model070.pb'
# basic parameters
maxlen = 256
batch_size = 16
# steps_per_epoch = 1000
steps_per_epoch = 1000
# epochs = 10000
epochs = 10
# BERT configuration
# config_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/bert_config.json'
# checkpoint_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/bert_model.ckpt'
# dict_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/vocab.txt'
# config_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/chinese_wwm_L-12_H-768_A-12/bert_config.json'
# checkpoint_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/chinese_wwm_L-12_H-768_A-12/bert_model.ckpt'
# dict_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/chinese_wwm_L-12_H-768_A-12/vocab.txt'
config_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/albert_tiny_google_zh_489k/albert_config.json'
checkpoint_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/albert_tiny_google_zh_489k/albert_model.ckpt'
dict_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/albert_tiny_google_zh_489k/vocab.txt'
# Training samples: the THUCNews dataset, with each sample stored as a separate txt file.
# txts = glob.glob('/root/thuctc/THUCNews/*/*.txt')
# txts = glob.glob('/Users/xusijun/Documents/NLP009/bert4keras-master001/MyNews/*/*.txt')
txts = glob.glob('/Users/xusijun/Documents/NLP009/bert4keras-master001/THUCNews/*/*.txt')
# Load (and optionally slim down) the vocabulary and build the tokenizer
# token_dict, keep_tokens = load_vocab(
# dict_path=dict_path,
# simplified=True,
# startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
# )
token_dict = load_vocab(
dict_path=dict_path,
# startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
class data_generator(DataGenerator):
"""数据生成器
"""
class CrossEntropy(Loss):
"""交叉熵作为loss,并mask掉输入部分
"""
model = build_transformer_model(
config_path,
checkpoint_path,
application='unilm',
# keep_tokens=keep_tokens, # keep only the tokens in keep_tokens to slim down the vocabulary
keep_tokens=None, # keep only the tokens in keep_tokens to slim down the vocabulary
)
output = CrossEntropy(2)(model.inputs + model.outputs)
model = Model(model.inputs, output)
model.compile(optimizer=Adam(1e-5))
model.summary()
class AutoTitle(AutoRegressiveDecoder):
"""seq2seq解码器
"""
@AutoRegressiveDecoder.wraps(default_rtype='probas')
autotitle = AutoTitle(start_id=None, end_id=tokenizer._token_end_id, maxlen=32)
# save model to pb ====================
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition.
"""
graph = session.graph
with graph.as_default():
freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = convert_variables_to_constants(session, input_graph_def,
output_names, freeze_var_names)
return frozen_graph
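# A minimal usage sketch for freeze_session, kept as a comment so it does not
# run at definition time. It assumes a compiled Keras `model` is in scope (as
# it is elsewhere in this script) and writes the frozen graph to a .pb file:
#
#     frozen_graph = freeze_session(
#         K.get_session(),
#         output_names=[out.op.name for out in model.outputs])
#     tf.train.write_graph(frozen_graph, wkdir, pb_filename, as_text=False)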
class Evaluator(keras.callbacks.Callback):
"""评估与保存
"""
if __name__ == '__main__':
model.load_weights('./myFile70.h5')
just_show()
evaluator = Evaluator()
train_generator = data_generator(txts, batch_size)
model.fit(
train_generator.forfit(),
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model003.weights')
| 44.65641
| 1,517
| 0.676906
|
#! -*- coding: utf-8 -*-
# BERT for Seq2Seq tasks, using the UNILM scheme
# Write-up: https://kexue.fm/archives/6933
from __future__ import print_function
import glob
import os
import numpy as np
import sys
from bert4keras.backend import keras, K
from bert4keras.layers import Loss
from bert4keras.models import build_transformer_model, tf
from bert4keras.tokenizers import Tokenizer, load_vocab
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, open
from bert4keras.snippets import DataGenerator, AutoRegressiveDecoder
from keras.models import Model
from examples import modeling
from examples.my_args import arg_dic
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from keras import backend as K
from tensorflow.python.platform import gfile
# parameter ==========================
wkdir = '/Users/xusijun/Documents/NLP009/bert4keras-master001/keras_to_tensorflow-master'
pb_filename = 'model070.pb'
# Basic parameters
maxlen = 256
batch_size = 16
# steps_per_epoch = 1000
steps_per_epoch = 1000
# epochs = 10000
epochs = 10
# BERT configuration
# config_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/bert_config.json'
# checkpoint_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/bert_model.ckpt'
# dict_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/vocab.txt'
# config_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/chinese_wwm_L-12_H-768_A-12/bert_config.json'
# checkpoint_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/chinese_wwm_L-12_H-768_A-12/bert_model.ckpt'
# dict_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/chinese_wwm_L-12_H-768_A-12/vocab.txt'
config_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/albert_tiny_google_zh_489k/albert_config.json'
checkpoint_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/albert_tiny_google_zh_489k/albert_model.ckpt'
dict_path = '/Users/xusijun/Documents/NLP009/bert4keras-master001/albert_tiny_google_zh_489k/vocab.txt'
# Training samples: the THUCNews dataset, with each sample stored as a separate txt file.
# txts = glob.glob('/root/thuctc/THUCNews/*/*.txt')
# txts = glob.glob('/Users/xusijun/Documents/NLP009/bert4keras-master001/MyNews/*/*.txt')
txts = glob.glob('/Users/xusijun/Documents/NLP009/bert4keras-master001/THUCNews/*/*.txt')
# Load (and optionally slim down) the vocabulary and build the tokenizer
# token_dict, keep_tokens = load_vocab(
# dict_path=dict_path,
# simplified=True,
# startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
# )
token_dict = load_vocab(
dict_path=dict_path,
# startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
class data_generator(DataGenerator):
"""数据生成器
"""
def __iter__(self, random=False):
batch_token_ids, batch_segment_ids = [], []
for is_end, txt in self.sample(random):
text = open(txt, encoding='utf-8').read()
text = text.split('\n')
if len(text) > 1:
title = text[0]
content = '\n'.join(text[1:])
token_ids, segment_ids = tokenizer.encode(
content, title, maxlen=maxlen
)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
if len(batch_token_ids) == self.batch_size or is_end:
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
yield [batch_token_ids, batch_segment_ids], None
batch_token_ids, batch_segment_ids = [], []
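# Note on the expected sample format (inferred from __iter__ above): each txt
# file is assumed to hold the title on its first line and the article body on
# the remaining lines, e.g.
#
#     <title line>
#     <body line 1>
#     <body line 2>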
class CrossEntropy(Loss):
"""交叉熵作为loss,并mask掉输入部分
"""
def compute_loss(self, inputs, mask=None):
y_true, y_mask, y_pred = inputs
y_true = y_true[:, 1:] # target token_ids
y_mask = y_mask[:, 1:] # segment_ids, which conveniently mark the part to be predicted
y_pred = y_pred[:, :-1] # predicted sequence, shifted by one position
loss = K.sparse_categorical_crossentropy(y_true, y_pred)
loss = K.sum(loss * y_mask) / K.sum(y_mask)
return loss
model = build_transformer_model(
config_path,
checkpoint_path,
application='unilm',
# keep_tokens=keep_tokens, # keep only the tokens in keep_tokens to slim down the vocabulary
keep_tokens=None, # keep only the tokens in keep_tokens to slim down the vocabulary
)
output = CrossEntropy(2)(model.inputs + model.outputs)
model = Model(model.inputs, output)
model.compile(optimizer=Adam(1e-5))
model.summary()
class AutoTitle(AutoRegressiveDecoder):
"""seq2seq解码器
"""
@AutoRegressiveDecoder.wraps(default_rtype='probas')
def predict(self, inputs, output_ids, states):
print("--------------------- 开始 ---------------------")
print("prdict inputs:", inputs)
print("prdict output_ids:", output_ids)
print("prdict states:", states)
token_ids, segment_ids = inputs
token_ids = np.concatenate([token_ids, output_ids], 1)
segment_ids = np.concatenate([segment_ids, np.ones_like(output_ids)], 1)
print("predict token_ids:", token_ids)
print("predict segment_ids:", segment_ids)
topk = 1
proba = model.predict([token_ids, segment_ids])
print("proba:", proba)
log_proba = np.log(proba + 1e-6) # take the log for convenience of computation
print("log_proba:", log_proba)
icount =0
maxIndex = 0
maxValue = -9999.0
temp = 78
while(icount<len(proba[0][temp])):
if(proba[0][temp][icount] > maxValue):
maxValue = proba[0][temp][icount]
maxIndex = icount
icount = icount+1
print("maxIndex:", maxIndex, " maxValue:", maxValue)
# maxIndex: 8125 maxValue: 0.27502504
return self.last_token(model).predict([token_ids, segment_ids])
# print("result", scores)
# print("states", states)
# icount =0
# maxIndex = 0
# maxValue = -9999.0
# while(icount<len(scores[0])):
# if(scores[0][icount] > maxValue):
# maxValue = scores[0][icount]
# maxIndex = icount
# icount = icount+1
# print("maxIndex:", maxIndex, " maxValue:", maxValue)
# print("--------------------- 结束 ---------------------")
# return scores, states
def generate(self, text, topk=1):
max_c_len = maxlen - self.maxlen
token_ids, segment_ids = tokenizer.encode(text, maxlen=max_c_len)
# print('token_ids: ', len(token_ids), token_ids)
# print('segment_ids: ', len(segment_ids), segment_ids)
output_ids = self.beam_search([token_ids, segment_ids],
topk=topk) # based on beam search
x01 = output_ids[0]
# x02 = output_ids[1]
# x03 = output_ids[2]
# x04 = output_ids[3]
# x05 = output_ids[4]
y01 = tokenizer.decode(x01)
print("y01:", y01)
# y02 = tokenizer.decode(x02)
# print("y02:", y02)
# y03 = tokenizer.decode(x03)
# print("y03:", y03)
# y04 = tokenizer.decode(x04)
# print("y04:", y04)
# y05 = tokenizer.decode(x05)
# print("y05:", y05)
return tokenizer.decode(output_ids[0])
autotitle = AutoTitle(start_id=None, end_id=tokenizer._token_end_id, maxlen=32)
# save model to pb ====================
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition.
"""
graph = session.graph
with graph.as_default():
freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = convert_variables_to_constants(session, input_graph_def,
output_names, freeze_var_names)
return frozen_graph
def my_keras_to_pb():
# save keras model as tf pb files ===============
frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in model.outputs])
tf.train.write_graph(frozen_graph, wkdir, pb_filename, as_text=False)
# # load & inference the model ==================
with tf.Session() as sess:
# load model from pb file
with gfile.FastGFile(wkdir+'/'+pb_filename,'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
g_in = tf.import_graph_def(graph_def)
# write to tensorboard (check tensorboard for each op names)
writer = tf.summary.FileWriter(wkdir+'/log/')
writer.add_graph(sess.graph)
writer.flush()
writer.close()
# print all operation names
print('\n===== output operation names =====\n')
for op in sess.graph.get_operations():
print(op)
# inference by the model (op name must come with :0 to specify the index of its output)
tensor_output = sess.graph.get_tensor_by_name('cross_entropy_1/Identity:0')
# Input-Token:0
tensor_input = sess.graph.get_tensor_by_name('Input-Token:0')
# Input-Segment:0
seg_input = sess.graph.get_tensor_by_name('Input-Segment:0')
text = '夏天来临,皮肤在强烈紫外线的照射下,晒伤不可避免,因此,晒后及时修复显得尤为重要,否则可能会造成长期伤害。专家表示,选择晒后护肤品要慎重,芦荟凝胶是最安全,有效的一种选择,晒伤严重者,还请及 时 就医 。'
# max_c_len = maxlen - self.maxlen
# max_c_len = maxlen - 56 + 3
# token_ids, segment_ids = tokenizer.encode(text, maxlen=max_c_len)
#
# x = np.vstack((np.random.rand(1000,10),-np.random.rand(1000,10)))
# y = np.vstack((np.ones((1000,1)),np.zeros((1000,1))))
# x = [[2, 2352, 6702, 2234, 758, 5407, 2127, 578, 7404, 1642, 6269, 6293, 991, 670, 1399, 4393, 670, 5340, 1189, 731, 6707, 2666, 6512, 1119, 2590, 1301, 3]]
# y = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# token_ids, segment_ids = inputs
x = np.array([[2, 2352, 6702, 2234, 758, 5407, 2127, 578, 7404, 1642, 6269, 6293, 991, 670, 1399, 4393, 670, 5340, 1189, 731, 6707, 2666, 6512, 1119, 2590, 1301, 3]])
y = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
# token_ids = np.concatenate([token_ids, output_ids], 1)
# segment_ids = np.concatenate([segment_ids, np.ones_like(output_ids)], 1)
# print("predict token_ids:", token_ids)
# print("predict segment_ids:", segment_ids)
# print(x.shape)
# print(y.shape)
# predictions = sess.run(tensor_output, {tensor_input: x, seg_input: y})
# # print('\n===== output predicted results =====\n')
# print(predictions)
print('xxxxxxxxxx')
def my_test001():
text1 = '语言模型'
# text2 = "你好"
tokens1 = tokenizer.tokenize(text1)
print(tokens1)
# tokens2 = tokenizer.tokenize(text2)
# print(tokens2)
# indices_new, segments_new = tokenizer.encode(text1, text2, max_length=512)
indices_new, segments_new = tokenizer.encode(text1)
print(indices_new[:10])
# [101, 6427, 6241, 3563, 1798, 102, 0, 0, 0, 0]
print(segments_new[:10])
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# Extract features
predicts_new = model.predict([np.array([indices_new]), np.array([segments_new])])[0]
for i, token in enumerate(tokens1):
print(token, predicts_new[i].tolist()[:5])
# for i, token in enumerate(tokens2):
# print(token, predicts_new[i].tolist()[:5])
print("xxxxx")
def my_test002():
# Load the language model
#model = build_bert_model(config_path=config_path, checkpoint_path=checkpoint_path, with_mlm=True)
token_ids, segment_ids = tokenizer.encode(u'科学技术是第一生产力')
# mask out "技术" ("technology")
# token_ids[3] = token_ids[4] = token_dict['[MASK]']
token_ids[3] = token_ids[4] = token_dict['[UNK]']
# use the MLM model to predict the masked-out part
probas = model.predict([np.array([token_ids]), np.array([segment_ids])])[0]
mask01 = tokenizer.decode(probas[3:5].argmax(axis=1))
print(mask01) # the result is exactly "技术"
token_ids, segment_ids = tokenizer.encode(u'数学是利用符号语言研究数量、结构、变化以及空间等概念的一门学科')
# mask out "技术" ("technology")
#token_ids[1] = token_ids[2] = tokenizer._token_dict['[MASK]']
# use the MLM model to predict the masked-out part
probas = model.predict([np.array([token_ids]), np.array([segment_ids])])[0]
print(tokenizer.decode(probas[1:3].argmax(axis=1))) # the result is exactly "数学"
print("xxx")
def just_show():
# s1 = u'记者傅亚雨沈阳报道 来到沈阳,国奥队依然没有摆脱雨水的困扰。7月31日下午6点,国奥队的日常训练再度受到大雨的干扰,无奈之下队员们只慢跑了25分钟就草草收场。31日上午10点,国奥队在奥体中心外场训练的时候,天就是阴沉沉的,气象预报显示当天下午沈阳就有大雨,但幸好队伍上午的训练并没有受到任何干扰。 下午6点,当球队抵达训练场时,大雨已经下了几个小时,而且丝毫没有停下来的意思。抱着试一试的态度,球队开始了当天下午的例行训练,25分钟过去了,天气没有任何转好的迹象,为了保护球员们,国奥队决定中止当天的训练,全队立即返回酒店。 在雨中训练对足球队来说并不是什么稀罕事,但在奥运会即将开始之前,全队变得“娇贵”了。在沈阳最后一周的训练,国奥队首先要保证现有的球员不再出现意外的伤病情况以免影响正式比赛,因此这一阶段控制训练受伤、控制感冒等疾病的出现被队伍放在了相当重要的位置。而抵达沈阳之后,中后卫冯萧霆就一直没有训练,冯萧霆是7月27日在长春患上了感冒,因此也没有参加29日跟塞尔维亚的热身赛。队伍介绍说,冯萧霆并没有出现发烧症状,但为了安全起见,这两天还是让他静养休息,等感冒彻底好了之后再恢复训练。由于有了冯萧霆这个例子,因此国奥队对雨中训练就显得特别谨慎,主要是担心球员们受凉而引发感冒,造成非战斗减员。而女足队员马晓旭在热身赛中受伤导致无缘奥运的前科,也让在沈阳的国奥队现在格外警惕,“训练中不断嘱咐队员们要注意动作,我们可不能再出这样的事情了。”一位工作人员表示。 从长春到沈阳,雨水一路伴随着国奥队,“也邪了,我们走到哪儿雨就下到哪儿,在长春几次训练都被大雨给搅和了,没想到来沈阳又碰到这种事情。”一位国奥球员也对雨水的“青睐”有些不解。'
# s2 = u'新浪体育讯 主场战胜沈阳东进豪取主场六连胜的中甲劲旅延边队再传好消息。今日上午,延边州体育局与韩国认证农产品生产者协会达成赞助意向,该协会将赞助延边足球俱乐部10亿韩币(约合560万人民币),力助延足2011赛季实现冲超。 无偿赞助只因为延足感动此番,韩国认证农产品生产者协会为延足提供的10亿韩币赞助大单基本上都是无偿的,唯一的回报就是希望延边州体育局能够帮助该协会的产品打入延边市场做一些协调工作。说起无偿赞助延足,韩国认证农产品生产者协会中央会会长吴亨根(O Hyung-Kun)先生表示,只因延边足球让他感动。 据吴亨根介绍,在收到延边足球俱乐部赞助提议后,他很快就做出了赞助决定。“延边足球运动员很有天赋,只要在资金上能提供有力的支持,一定会成为一流球队。”在了解了延足球员目前的训练比赛状况后,今日吴亨根还以个人名义为延边队捐了三台全自动洗衣机。 其实,吴亨根也曾经是个足球人,他就是韩国全北现代俱乐部的创始人。1993年他创立了全北队,1994年韩国的汽车巨擘现代汽车正式入主全北队,而球队也更名成今日所用的全北现代。2006年全北现代战胜叙利亚卡马拉队夺得亚冠联赛冠军,中国球员冯潇霆目前就在这支球队效力。 除了这10亿韩币赞助,吴亨根还表示,中甲联赛结束后,他将把延边队带到韩国进行冬训,与全北的大学生球队进行训练比赛,通过以赛代练让延足充分为下赛季实现冲超夯实基础。 冲超早动手 经营更规范 联赛还剩三轮,目前延边队排名第三,极有望取得征战中甲六年来的最佳战绩(此前最好排名第六)。冲超是延边队一直的梦想,延边州体育局与俱乐部方面都希望在2011赛季完成冲超大业,让延边足球重新回归国内顶级行列。要想冲超就要未雨绸缪,本赛季尚未结束,延足冲超的各项准备工作便已展开。 本赛季延边队依然为资金所困,俱乐部经理李虎恩难辞其咎。今年7月,延边州体育局委托媒体人出身的郑宪哲先生先期运作经营延边足球俱乐部,为下赛季早作准备。年轻的郑宪哲接手后也为俱乐部经营带来了新思路,短短的两个月间,就为延足搞定了如此大单的韩国赞助意向。另外,下赛季延边队的比赛装备目前也已落实,韩国世达(STAR)体育用品公司将成为新的装备赞助商,为延足提供全套比赛训练装备,预计金额达100万人民币。 在未来延边足球俱乐部经营思路上,延边州体育局副局长于长龙表示,要对目前俱乐部的经营进行彻底改造,以求更加适应现代足球的发展理念,在政府支持的基础上,大胆尝试市场化运作,引进韩国足球俱乐部经营运作理念,在经营、服务、宣传等方面全方位提升俱乐部的整体水平。于长龙还透露,本赛季最后一轮客场同上海东亚比赛结束后,延边足球俱乐部将在上海举行招商会,向更多企业宣传推介延边足球,实现走出去招商。而接下来,延足还将陆续前往青岛、深圳、大连等地展开招商工作。 酝酿十年规划 打造血性之师 据悉,延边州体育局与延边足球俱乐部近期正在酝酿推出延足未来十年的一个中长期规划,其中最首要的任务就是要在未来三年在中超站稳脚跟。如果按照这一规划的设想,至少下赛季延足要完成冲超,此后再图站稳中超。 于长龙希望,能够在未来把延边队打造成一支文明之师、忠诚之师、血性之师、战斗之师,在继承朝鲜族球员勇猛顽强的优良传统基础上,更加彰显朝鲜族民族文化的底蕴和内涵,让延边队成为一支忠诚家乡,充满血性,真正为足球而战的足坛劲旅。 据悉,此番敲定赞助意向只是延足为冲超迈出的第一步,如何有效转变俱乐部经营理念、如何规范运作将是摆在延边州体育局面前的一个新课题。接下来,体育局与俱乐部还将推出一系列新动作,为冲超增添筹码。 (木木)'
# s3 = u'日前,江苏丹阳市延陵镇的一些西瓜种植户,因为滥用膨大剂又加上暴雨,导致不少西瓜纷纷“爆炸”,并因此影响了今年的西瓜总体销量。尽管专家一再强调膨大剂本身并无危害,但是滥用的话却易引发一连串问题。花果山CEO提问:辟谣的目的是什么?消除大众对膨化剂的恐惧?继续吃膨大剂西瓜?瓜农无疑是可悲的。根本不可能自己种西瓜的消费者呢?只能吃膨大剂西瓜么?谣言粉碎机:果壳网谣言粉碎机关心的不只是给大家一个“简单的答案”。我们通过对问题的梳理,给大家提供更全面的信息,让大家能更好的做出“自己的选择”。同时,果壳网谣言粉碎机也希望向大家提供一些理性看待问题的思路。这样的思路不仅是针对一事一人的,它关涉到的是我们的生活态度与看待世界的方法。CAPA-Real-柏志提问:氯吡脲使用后,会在水果中残留吗?人体食用后对人体有些什么影响?谣言粉碎机:会有一定的残留,一般在生长初期使用,时间越长残留越少。少量的接触,对人的影响很小。具体的毒理学实验数据,果壳的文章里有详细的说明。'
s1 = '针对最近我国赴比利时留学人员接连发生入境时被比海关拒绝或者办理身份证明时被比警方要求限期出境的事件,教育部提醒赴比利时留学人员应注意严格遵守比方相关规定。'
# s2 = u'程序员最爱'
# s3 = u'身体素质'
for s in [s1]:
print(u'生成标题:', autotitle.generate(s))
print()
class Evaluator(keras.callbacks.Callback):
"""评估与保存
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, epoch, logs=None):
# keep the weights with the lowest loss so far
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.weights')
# model.save_weights('/Users/xusijun/Documents/NLP009/bert4keras-master001/tensorflow-for-java-master/best_model03.weights')
model.save('./myFile70.h5')
# model.save('/Users/xusijun/Documents/NLP009/bert4keras-master001/tensorflow-for-java-master/myFile04')
# tf.saved_model.save(model, '/Users/xusijun/Documents/NLP009/bert4keras-master001/examples/')
# tf.keras.models.save_model(model, '/Users/xusijun/Documents/NLP009/bert4keras-master001/examples/')
# demo the current output
just_show()
# my export step
my_keras_to_pb()
# pb model
# save_PBmodel()
if __name__ == '__main__':
model.load_weights('./myFile70.h5')
just_show()
evaluator = Evaluator()
train_generator = data_generator(txts, batch_size)
model.fit(
train_generator.forfit(),
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model003.weights')
| 17,796
| 0
| 250
|
792aa9f0d16f371e267c4f2fcb7bdd16c329c1d1
| 706
|
py
|
Python
|
solutions/791/791-yongjoonseo.py
|
iknoom/LeetCode-Solutions
|
85c034dfaf1455bcd69c19a2009197934d83f08e
|
[
"MIT"
] | 4
|
2021-01-13T11:37:57.000Z
|
2021-01-17T04:56:46.000Z
|
solutions/791/791-yongjoonseo.py
|
iknoom/LeetCode-Solutions
|
85c034dfaf1455bcd69c19a2009197934d83f08e
|
[
"MIT"
] | 9
|
2021-01-21T11:16:29.000Z
|
2021-02-23T14:27:00.000Z
|
solutions/791/791-yongjoonseo.py
|
iknoom/LeetCode-Solutions
|
85c034dfaf1455bcd69c19a2009197934d83f08e
|
[
"MIT"
] | 14
|
2021-01-14T14:36:07.000Z
|
2021-02-05T09:17:10.000Z
|
# check
# lowercase letters
# count all the letters of T which S contains
# save indices of letters in T
| 27.153846
| 54
| 0.447592
|
# check
# lowercase letters
# count all the letters of T which S contains
# save indices of letters in T
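# Worked example (hypothetical input, traced through the code below):
# S = "cba", T = "abcd" -> the letters of T that appear in S are rewritten at
# indices [0, 1, 2] in S's order, 'd' stays where it was, giving "cbad".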
class Solution:
def customSortString(self, S: str, T: str) -> str:
result = [0] * len(T)
indices = []
counts = dict()
for i in range(len(T)):
if T[i] in S:
indices.append(i)
if T[i] in counts: counts[T[i]] += 1
else: counts[T[i]] = 1
else:
result[i] = T[i]
i = 0
for char in S:
left = counts.get(char)
while left:
result[indices[i]] = char
i += 1
left -= 1
return ''.join(result)
| 558
| -6
| 49
|
0773b38a64aebeeea57ebbff37f79409955ec330
| 2,699
|
py
|
Python
|
hata/ext/rpc/utils.py
|
albertopoljak/hata
|
96d0b3182eb4f5291eaf36bd23d521787c6b01f1
|
[
"0BSD"
] | null | null | null |
hata/ext/rpc/utils.py
|
albertopoljak/hata
|
96d0b3182eb4f5291eaf36bd23d521787c6b01f1
|
[
"0BSD"
] | null | null | null |
hata/ext/rpc/utils.py
|
albertopoljak/hata
|
96d0b3182eb4f5291eaf36bd23d521787c6b01f1
|
[
"0BSD"
] | 1
|
2020-09-17T20:10:15.000Z
|
2020-09-17T20:10:15.000Z
|
__all__ = ()
from sys import platform as PLATFORM
from os.path import join as join_paths
from os import listdir as list_directory, environ as ENVIRONMENTAL_VARIABLES
from tempfile import gettempdir as get_temporary_directory
from scarletio import set_docs
from .constants import PAYLOAD_KEY_EVENT, EVENT_ERROR, PAYLOAD_KEY_DATA
from .exceptions import DiscordRPCError
if PLATFORM in ('linux', 'darwin'):
TEMPORARY_DIRECTORY = ENVIRONMENTAL_VARIABLES.get('XDG_RUNTIME_DIR', None)
if (TEMPORARY_DIRECTORY is None):
TEMPORARY_DIRECTORY = ENVIRONMENTAL_VARIABLES.get('TMPDIR', None)
if (TEMPORARY_DIRECTORY is None):
TEMPORARY_DIRECTORY = ENVIRONMENTAL_VARIABLES.get('TMP', None)
if (TEMPORARY_DIRECTORY is None):
TEMPORARY_DIRECTORY = ENVIRONMENTAL_VARIABLES.get('TEMP', None)
if (TEMPORARY_DIRECTORY is None):
TEMPORARY_DIRECTORY = get_temporary_directory()
elif PLATFORM == 'win32':
TEMPORARY_DIRECTORY = '\\\\?\\pipe'
else:
set_docs(get_ipc_path,
"""
Gets Discord inter process communication path.
Parameters
----------
pipe : `None` or `str`
# TODO
Returns
-------
path : `None` or `str`
""")
def check_for_error(data):
"""
Checks whether the given data contains an error.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Data received from Discord.
Raises
------
DiscordRPCError
"""
try:
event = data[PAYLOAD_KEY_EVENT]
except KeyError:
pass
else:
if event == EVENT_ERROR:
error_data = data[PAYLOAD_KEY_DATA]
error_code = error_data['code']
error_message = error_data['message']
raise DiscordRPCError(error_code, error_message)
| 27.824742
| 79
| 0.614672
|
__all__ = ()
from sys import platform as PLATFORM
from os.path import join as join_paths
from os import listdir as list_directory, environ as ENVIRONMENTAL_VARIABLES
from tempfile import gettempdir as get_temporary_directory
from scarletio import set_docs
from .constants import PAYLOAD_KEY_EVENT, EVENT_ERROR, PAYLOAD_KEY_DATA
from .exceptions import DiscordRPCError
if PLATFORM in ('linux', 'darwin'):
TEMPORARY_DIRECTORY = ENVIRONMENTAL_VARIABLES.get('XDG_RUNTIME_DIR', None)
if (TEMPORARY_DIRECTORY is None):
TEMPORARY_DIRECTORY = ENVIRONMENTAL_VARIABLES.get('TMPDIR', None)
if (TEMPORARY_DIRECTORY is None):
TEMPORARY_DIRECTORY = ENVIRONMENTAL_VARIABLES.get('TMP', None)
if (TEMPORARY_DIRECTORY is None):
TEMPORARY_DIRECTORY = ENVIRONMENTAL_VARIABLES.get('TEMP', None)
if (TEMPORARY_DIRECTORY is None):
TEMPORARY_DIRECTORY = get_temporary_directory()
def get_ipc_path(pipe):
ipc = f'discord-ipc-{pipe}'
for path in (None, 'snap.discord', 'app/com.discordapp.Discord'):
if path is None:
full_path = TEMPORARY_DIRECTORY
else:
full_path = join_paths(TEMPORARY_DIRECTORY, path)
for node_name in list_directory(full_path):
if node_name.startswith(ipc):
return join_paths(full_path, node_name)
return None
elif PLATFORM == 'win32':
TEMPORARY_DIRECTORY = '\\\\?\\pipe'
def get_ipc_path(pipe):
ipc = f'discord-ipc-{pipe}'
for node_name in list_directory(TEMPORARY_DIRECTORY):
if node_name.startswith(ipc):
return join_paths(TEMPORARY_DIRECTORY, node_name)
return None
else:
def get_ipc_path(pipe):
return None
set_docs(get_ipc_path,
"""
Gets Discord inter process communication path.
Parameters
----------
pipe : `None` or `str`
# TODO
Returns
-------
path : `None` or `str`
""")
def check_for_error(data):
"""
Checks whether the given data contains an errors.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Data received from Discord.
Raises
------
DiscordRPCError
"""
try:
event = data[PAYLOAD_KEY_EVENT]
except KeyError:
pass
else:
if event == EVENT_ERROR:
error_data = data[PAYLOAD_KEY_DATA]
error_code = error_data['code']
error_message = error_data['message']
raise DiscordRPCError(error_code, error_message)
| 748
| 0
| 88
|
ce80efef261b5805f2ccac2cfca1a8c03c9fd576
| 31,202
|
py
|
Python
|
cdam_convert_twine.py
|
jerrytron/twine-story-export
|
72574627969e8fd79ad246b7532874f223e2c88f
|
[
"MIT"
] | 2
|
2017-08-26T12:25:31.000Z
|
2021-06-01T19:35:25.000Z
|
cdam_convert_twine.py
|
jerrytron/twine-story-export
|
72574627969e8fd79ad246b7532874f223e2c88f
|
[
"MIT"
] | null | null | null |
cdam_convert_twine.py
|
jerrytron/twine-story-export
|
72574627969e8fd79ad246b7532874f223e2c88f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding=utf8
import os
import re
import sys
import struct
import pprint
import random
import argparse
import datetime
import tiddlywiki as tiddly
import cdam_gen_files as gen
import importlib
import bitarray
importlib.reload(sys)
# sys.setdefaultencoding('utf8')
VERSION = "1.0"
BINARY_VER = "1.0.5"
# For holding binary variable keys and values.
VARIABLES = {}
FLAGS = {}
TITLE_MAP = {}
STORY_MAP = {}
PASSAGES = {}
STORY_TITLE = ""
STORY_AUTHOR = ""
STORY_SUBTITLE = ""
STORY_CREDITS = ""
STORY_VERSION = ""
STORY_CONTACT = ""
STORY_LANGUAGE = ""
REPORT = ""
OPERATION_TEST = bytearray()
TOTAL_OPS = 0
VERBOSE = False
LINEAR = False
HTML = False
SEED = None
PP = pprint.PrettyPrinter(indent = 4)
kAppend = "<append>"
kContinue = "<continue>"
kContinueCopy = '<continue>'
kGotoTempTag = "-GOTO-"
if __name__ == '__main__':
#global _UPDATE
#global _FORCE
main()
| 34.325633
| 196
| 0.56538
|
#!/usr/bin/env python
# encoding=utf8
import os
import re
import sys
import struct
import pprint
import random
import argparse
import datetime
import tiddlywiki as tiddly
import cdam_gen_files as gen
import importlib
import bitarray
importlib.reload(sys)
# sys.setdefaultencoding('utf8')
VERSION = "1.0"
BINARY_VER = "1.0.5"
# For holding binary variable keys and values.
VARIABLES = {}
FLAGS = {}
TITLE_MAP = {}
STORY_MAP = {}
PASSAGES = {}
STORY_TITLE = ""
STORY_AUTHOR = ""
STORY_SUBTITLE = ""
STORY_CREDITS = ""
STORY_VERSION = ""
STORY_CONTACT = ""
STORY_LANGUAGE = ""
REPORT = ""
OPERATION_TEST = bytearray()
TOTAL_OPS = 0
VERBOSE = False
LINEAR = False
HTML = False
SEED = None
PP = pprint.PrettyPrinter(indent = 4)
kAppend = "<append>"
kContinue = "<continue>"
kContinueCopy = '<continue>'
kGotoTempTag = "-GOTO-"
class CDAMParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
class CDAMTwine(tiddly.Tiddler):
def GetPassages(self):
return self.tiddlers
def main():
global STORY_TITLE
global STORY_AUTHOR
global STORY_SUBTITLE
global STORY_CREDITS
global STORY_CONTACT
global STORY_LANGUAGE
global STORY_VERSION
global LINEAR
global HTML
# To Make a Linear Story:
# python ./cdam_convert_twine.py --title
parser = CDAMParser(description='CDAM Twine Source Code Converter')
parser.add_argument('--dirname', default='NONE', action='store', help='Directory name for story on the file system.')
parser.add_argument('--title', default='Untitled', action='store', help='The story title.')
parser.add_argument('--subtitle', default='NONE', action='store', help='The story subtitle.')
parser.add_argument('--author', default='Anonymous', action='store', help='The author of the story.')
parser.add_argument('--pubdate', default='', action='store', help='The date this story was published')
parser.add_argument('--credits', default='', action='store', help='Additional story credits.')
parser.add_argument('--contact', default='Follow creator @j3rrytron online!', action='store', help='Misc contact info.')
parser.add_argument('--lang', default='eng', action='store', help='Up to four character language code.')
parser.add_argument('--ver', default='0.0.0', action='store', help='Story version in three parts, x.x.x')
parser.add_argument('--source', default='', action='store', help='The Twine source code file.')
parser.add_argument('--output', default='./', action='store', help='The location to create the output files.')
parser.add_argument('--filename', default='', action='store', help='The output filename.')
parser.add_argument('--json', action='store_true', help='Output as a JSON text file.')
parser.add_argument('--linear', action='store_true', help='Output as a linear text file for humans.')
parser.add_argument('--html', action='store_true', help='Output as html document.')
parser.add_argument('--randseed', default='', action='store', help='Optional seed to control random output.')
parser.add_argument('--binary', action='store_true', help='Output as a CDAM binary for the Choosatron v2.')
parser.add_argument('--verbose', action='store_true', help='Print additional info, including warnings.')
parser.add_argument('--operation', action='store_true', help='Output operations file too for debugging.')
parser.add_argument('--family', action='store_true', help='Mark this story as family friendly.')
parser.add_argument('--vartext', action='store_true', help='This story uses variable text logic.')
parser.add_argument('--mono', action='store_true', help='This story requires a monospaced font.')
#parser.add_argument('--update', action='store_true', help='Attempt to safely add to/update existing files without damaging existing data.')
#parser.add_argument('--force', action='store_true', help='Overwrite files that already exist.')
args = parser.parse_args()
STORY_SUBTITLE = args.subtitle
STORY_CREDITS = args.credits
STORY_CONTACT = args.contact
STORY_LANGUAGE = args.lang
STORY_VERSION = args.ver
print("--- " + args.title + " ---")
if args.randseed:
SEED = int(args.randseed)
random.seed(SEED)
else:
SEED = datetime.datetime.now().microsecond
#print "Random Seed for " + args.title + ": " + str(SEED)
random.seed(SEED)
LINEAR = args.linear
HTML = args.html
if HTML:
LINEAR = True
# Uncomment to override output and place wherever source was.
#args.output = os.path.dirname(args.source)
VERBOSE = args.verbose
if VERBOSE:
print(args.title)
FLAGS['family_friendly'] = args.family
FLAGS['variable_text'] = args.vartext
FLAGS['retry'] = True # Just default true for now.
FLAGS['monospace'] = args.mono
storyWiki = LoadSource(args.source)
if storyWiki == False:
return
result = BuildCDAMStory(storyWiki)
if result == False:
return
if args.dirname.upper() in PASSAGES:
print("[ERROR] Value of --dirname can't be the same as a passage title. Passage already exists named: " + args.dirname.upper())
return
SimplifyNaming()
genFile = gen.CDAMGenFiles()
if args.binary == True:
if args.title != "Untitled":
STORY_TITLE = args.title
if args.author != "Anonymous":
STORY_AUTHOR = args.author
# Generate Story Body
storyBody = genFile.GenerateBody(STORY_MAP, PASSAGES, VARIABLES)
if storyBody == False:
return
if len(VARIABLES) == 0:
FLAGS['logic'] = False
else:
FLAGS['logic'] = True
if 'ending_quality' not in FLAGS:
FLAGS['ending_quality'] = False
if 'points' not in FLAGS:
FLAGS['points'] = False
# Currently no images are supported.
FLAGS['images'] = False
# Generate Story Header
storyHeader = genFile.GenerateHeader(args.lang, args.title, args.subtitle, args.author, args.pubdate, args.credits, args.contact, BINARY_VER, args.ver, FLAGS, len(storyBody), len(VARIABLES))
if storyHeader == False:
return
bookPath = STORY_TITLE.lower().replace(" ", "_") + "_BIN.dam"
if args.filename != "":
bookPath = args.filename + "_BIN.dam"
bookPath = os.path.join(args.output, bookPath)
if os.path.exists(bookPath):
os.remove(bookPath)
genFile.WriteToFile(bookPath, storyHeader + storyBody)
if args.operation:
opPath = STORY_TITLE.lower().replace(" ", "_") + "_OPS.dam"
if args.filename != "":
opPath = args.filename + "_OPS.dam"
opPath = os.path.join(args.output, opPath)
opData = bytearray()
opData += bytearray(struct.pack('<H', TOTAL_OPS))
opData += OPERATION_TEST
genFile.WriteToFile(opPath, opData)
elif args.linear == True:
if args.title != "Untitled":
STORY_TITLE = args.title
if args.author != "Anonymous":
STORY_AUTHOR = args.author
if HTML:
bookPath = STORY_TITLE.lower().replace(" ", "_") + "_LINEAR.html"
if args.filename != "":
bookPath = args.filename + "_LINEAR.html"
else:
bookPath = STORY_TITLE.lower().replace(" ", "_") + "_LINEAR.txt"
if args.filename != "":
bookPath = args.filename + "_LINEAR.txt"
bookPath = os.path.join(args.output, bookPath)
book = ""
if HTML:
# Look for an HTML header to insert.
sourcePath = os.path.dirname(args.source)
headerPath = os.path.join(sourcePath, "header.txt")
try:
file = open(headerPath, 'r')
book += file.read()
except IOError:
print("[WARNING] No HTML header found at: " + headerPath)
book += "Title: " + STORY_TITLE + "\nSubtitle: " + STORY_SUBTITLE + "\nAuthor: " + STORY_AUTHOR
book += "\nCredits: " + STORY_CREDITS + "\nContact: " + STORY_CONTACT + "\nLanguage: " + STORY_LANGUAGE + "\nVersion: " + STORY_VERSION + "\nSeed: " + str(SEED) + "\n\n\n"
psgList = []
newMap = {}
allKeys = list(PASSAGES.keys())
key = "0"
p = PASSAGES[key]
psgList.append(p)
allKeys.remove(key)
newMap[key] = key
index = 0
while len(allKeys) > 0:
index += 1
if "cs" in p and len(p["cs"]) == 1 and p["cs"][0]["link"] in allKeys:
p = PASSAGES[p["cs"][0]["link"]]
key = p["key"]
# Map from old to new index.
newMap[key] = str(index)
if key in allKeys:
allKeys.remove(key)
psgList.append(p)
else:
key = random.choice(allKeys)
# If this passage has a single entrance, that passage should be
# put in first.
if "ik" in PASSAGES[key]:
while len(PASSAGES[key]["ik"]) == 1:
# Keep tracing back until we find the first passage in a series
# of single paths, or until we hit a passage already used.
if PASSAGES[key]["ik"][0] in allKeys:
key = PASSAGES[key]["ik"][0]
else:
break
if key in allKeys:
allKeys.remove(key)
p = PASSAGES[key]
newMap[key] = str(index)
psgList.append(p)
index = 0
for psg in psgList:
book += linearPassageText(psg, newMap)
index += 1
if index < len(psgList):
book += "\n\n\n"
# Look for an HTML footer to insert.
if HTML:
sourcePath = os.path.dirname(args.source)
footerPath = os.path.join(sourcePath, "footer.txt")
try:
file = open(footerPath, 'r')
book += file.read()
#print book
except IOError:
print("[WARNING] No HTML footer found at: " + footerPath)
if os.path.exists(bookPath):
os.remove(bookPath)
genFile.WriteToFile(bookPath, book)
else:
result = False
if args.json == False:
result = genFile.UpdateManifest(args.output, args.title, args.dirname, args.author, args.json)
if result == False:
print("[ERROR] Failed to update manifest.")
else:
result = args.dirname
result = genFile.BuildCDAMStory(result, STORY_MAP, PASSAGES, args.output, args.title, args.author, args.json)
if result == False:
print("[ERROR] Failed to build story.")
print("--- Complete! ---\n")
#print STORY_MAP
#print PASSAGES
def linearPassageText(aPassage, aMap):
global HTML
psgText = ""
goto = " (go to "
key = aMap[aPassage["key"]]
if HTML:
psgText += "<p class='paragraph'><span class='number'>" + "[" + key + "] </span>" + aPassage['pt'] + "</p>"
psgText += "\n"
else:
psgText += "[" + key + "] " + aPassage['pt']
if aPassage['en'] == True:
psgText += "\n--- THE END ---"
#if aPassage['eq'] == 1:
# psgText += "\n* - THE END"#* Oh no! Better luck next adventure. * - THE END"
#elif aPassage['eq'] == 2:
# psgText += "\n** - THE END"#** I'm sure you can do better. ** - THE END"
#elif aPassage['eq'] == 3:
# psgText += "\n*** - THE END"#*** You win some, you lose some. *** - THE END"
#elif aPassage['eq'] == 4:
# psgText += "\n**** - THE END"#**** Not too bad! **** - THE END"
#elif aPassage['eq'] == 5:
# psgText += "\n***** - THE END"#***** Congratulations! You sure know your stuff. ***** - THE END"
else:
choiceText = ""
if HTML == False:
# Add a delimiter so we know it is done
choiceText += "\n---"
for choice in aPassage['choices']:
m = re.search(kGotoTempTag, psgText)
if HTML:
if psgText[m.start() - 1] == '\n':
choiceText += ("<span class='choice-title choice-standalone'>" + choice['text'] + "</span>" + "<span class='goto'>" + goto + aMap[choice['link']] + ")</span>")
else:
choiceText += ("<span class='choice-title'>" + choice['text'] + "</span>" + "<span class='goto'>" + goto + aMap[choice['link']] + ")</span>")
else:
choiceText += ("\n- " + choice['text'] + goto + aMap[choice['link']] + ")")
psgText = re.sub(kGotoTempTag, choiceText, psgText, 1)
choiceText = ""
return psgText
def linearPassageTextFull(aPassages, aStoryMap, aKey):
psgText = ""
goto = " (go to "
p = aPassages[aKey]
m = aStoryMap[aKey]
psgText += "[" + aKey + "] " + p['pt']
# Add a delimiter so we know it is done
psgText += "\n---"
if p['en'] == True:
if p['eq'] == 1:
psgText += "\n* - THE END"#* Oh no! Better luck next adventure. * - THE END"
elif p['eq'] == 2:
psgText += "\n** - THE END"#** I'm sure you can do better. ** - THE END"
elif p['eq'] == 3:
psgText += "\n*** - THE END"#*** You win some, you lose some. *** - THE END"
elif p['eq'] == 4:
psgText += "\n**** - THE END"#**** Not too bad! **** - THE END"
elif p['eq'] == 5:
psgText += "\n***** - THE END"#***** Congratulations! You sure know your stuff. ***** - THE END"
else:
if len(p['cs']) == 1:
psgText += ("\n- " + p['cs'][0] + goto + m[0] + ")")
else:
for index in range(0, len(p['cs'])):
psgText += ("\n- " + p['cs'][index] + goto + m[index] + ")")
return psgText
def twineBuild(storySource, path, storyDir, title, author):
STORY_MAP.clear()
PASSAGES.clear()
result = BuildCDAMStory(storySource)
if result == False:
return
SimplifyNaming()
genFile = gen.CDAMGenFiles()
result = genFile.UpdateManifest(path, title, storyDir, author)
if result == False:
print("[ERROR] Failed to update manifest.")
result = genFile.BuildCDAMStory(storyDir, STORY_MAP, PASSAGES, path, title, author)
if result == False:
print("[ERROR] Failed to build story.")
def LoadSource(path):
try:
file = open(path, 'r')
except IOError:
print("[ERROR] File not found: " + path)
return False
sourceStr = file.read()
file.close()
# Start reading from the first ':' character
index = 0
for char in sourceStr:
if char == ':':
break
index += 1
sourceStr = sourceStr[index:]
wiki = tiddly.TiddlyWiki()
wiki.addTwee(sourceStr)
return wiki
def BuildCDAMStory(wiki):
global STORY_TITLE
global STORY_AUTHOR
global LINEAR
for key in list(wiki.tiddlers.keys()):
upKey = key.strip().upper()
if upKey not in list(wiki.tiddlers.keys()):
wiki.tiddlers[upKey] = wiki.tiddlers[key]
del wiki.tiddlers[key]
for key in wiki.tiddlers:
if wiki.tiddlers[key].title == "StoryTitle":
if STORY_TITLE == "":
STORY_TITLE = wiki.tiddlers[key].text
continue
if wiki.tiddlers[key].title == "StorySubtitle":
continue
if wiki.tiddlers[key].title == "StoryAuthor":
if STORY_AUTHOR == "":
STORY_AUTHOR = wiki.tiddlers[key].text
continue
#print "Passage: " + key
passage = ParseForAttributes(wiki.tiddlers[key].tags)
if passage == False:
continue
# Is this the starting passage?
if key == "START":
if "ps" not in passage:
passage["ps"] = 0
if "cp" not in passage:
passage["cp"] = 0
if "sv" not in passage:
passage["sv"] = "1.0"
else:
if "ps" in passage:
if VERBOSE:
print("[WARNING] Only set perfect score ('ps' or 'perfect') in the story passage titled 'Start'.")
del passage["ps"]
if "cp" in passage:
if VERBOSE:
print("[WARNING] Only set continue penalty ('cp' or 'penalty') in the story passage titled 'Start'.")
del passage["cp"]
if "sv" in passage:
if VERBOSE:
print("[WARNING] Only set story version ('sv' or 'version') in the story passage titled 'Start'.")
del passage["sv"]
passage["pv"] = VERSION
if "pp" not in passage:
passage["pp"] = 0
else:
# Set the 'points' flag.
FLAGS['points'] = True
rss = wiki.tiddlers[key].toRss()
choicePairs = ParseForChoices(rss.description)
#PP.pprint(choicePairs) # Print pretty!
passage["pt"] = ParseForBody(rss.description)
if type(choicePairs) is bool:
# No choices in this passage.
if choicePairs == True:
if "eq" not in passage:
if VERBOSE:
print("[WARNING] Ending quality 'eq' not set for " + key)
# Default to average.
passage["eq"] = 3
else:
# Set the 'ending quality' flag.
FLAGS['ending_quality'] = True
STORY_MAP[key] = passage["eq"]
passage["en"] = True
if "cc" not in passage:
passage["cc"] = True
else:
print("[ERROR] Failed to parse for choices.")
return False
if type(choicePairs) is list:
nodes = []
choices = []
for item in choicePairs:
nodes.append(item['link'].strip().upper())
choices.append(item['text'])
if ValidateChoices(wiki.tiddlers, nodes) == False:
print("[ERROR] Failed to validate choices for node.")
return False
else:
STORY_MAP[key] = nodes
passage["en"] = False
#passage["cs"] = choices
#passage["ck"] = nodes
passage["cs"] = choicePairs
#print "Validating passage for node " + key
if ValidatePassage(passage) == False:
print("[ERROR] Failed to validate passage.")
return False
else:
PASSAGES[key] = passage
#print PASSAGES
def ParseOperation(opParts, iteration):
global REPORT
data = bytearray()
REPORT += "( "
types = ""
leftName = ""
rightName = ""
command = opParts.pop(0)
leftType = opParts.pop(0)
leftValue = bytearray()
rightValue = bytearray()
#print "Command: " + command
#print "LeftType: " + leftType
if leftType == "cmd":
types += "0011"
leftValue = ParseOperation(opParts, iteration + 1)
REPORT += " " + command + " "
else:
tempValue = opParts.pop(0)
if leftType == "var":
#print tempValue
leftName = tempValue
types += "0010"
if leftName not in VARIABLES:
VARIABLES[leftName] = len(VARIABLES)
REPORT += leftName + "[" + str(VARIABLES[leftName]) + "] " + command + " "
#print "Var #: " + str(VARIABLES[leftName])
leftValue = bytearray(struct.pack('<H', VARIABLES[leftName]))
else:
types += "0001"
leftValue = bytearray(struct.pack('<H', int(tempValue)))
REPORT += str(tempValue) + " " + command + " "
#print str(leftValue)
rightType = opParts.pop(0)
#print "RightType: " + rightType
rightPrintVal = 0
if rightType == "cmd":
types += "0011"
rightValue = ParseOperation(opParts, iteration + 1)
else:
tempValue = opParts.pop(0)
if rightType == "var":
#print tempValue
rightName = tempValue
types += "0010"
if rightName not in VARIABLES:
VARIABLES[rightName] = len(VARIABLES)
#print "Index: " + str(VARIABLES[rightName])
rightValue = bytearray(struct.pack('<H', VARIABLES[rightName]))
else:
types += "0001"
rightValue = bytearray(struct.pack('<H', int(tempValue)))
rightPrintVal = tempValue
#print str(rightValue)
data += bitarray.bitarray(types)
if command == "equal" or command == "==":
data += bytes.fromhex('01')
elif command == "not_equal" or command == "!=":
data += bytes.fromhex('02')
elif command == "greater" or command == ">":
data += bytes.fromhex('03')
elif command == "less" or command == "<":
data += bytes.fromhex('04')
elif command == "greater_equal" or command == ">=":
data += bytes.fromhex('05')
elif command == "less_equal" or command == "<=":
data += bytes.fromhex('06')
elif command == "and":
data += bytes.fromhex('07')
elif command == "or":
data += bytes.fromhex('08')
elif command == "xor":
data += bytes.fromhex('09')
elif command == "nand":
data += bytes.fromhex('0A')
elif command == "nor":
data += bytes.fromhex('0B')
elif command == "xnor":
data += bytes.fromhex('0C')
elif command == "visible":
data += bytes.fromhex('0D')
elif command == "mod" or command == "%":
data += bytes.fromhex('0E')
elif command == "set" or command == "=":
data += bytes.fromhex('0F')
elif command == "plus" or command == "add" or command == "+":
data += bytes.fromhex('10')
elif command == "minus" or command == "-":
data += bytes.fromhex('11')
elif command == "multiply" or command == "mult" or command == "*":
data += bytes.fromhex('12')
elif command == "divide" or command == "/":
data += bytes.fromhex('13')
elif command == "rand" or command == "random":
data += bytes.fromhex('14')
elif command == "dice" or command == "roll":
data += bytes.fromhex('15')
elif command == "if":
data += bytes.fromhex('16')
if rightType == "var":
REPORT += rightName + "[" + str(VARIABLES[rightName]) + "]"
elif rightType == "raw":
REPORT += str(rightPrintVal)
REPORT += " )"
data += leftValue
data += rightValue
return data
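# Illustrative encoding (hypothetical tag, traced through the code above): the
# Twine tag "vu:set:var:score:raw:5" reaches ParseOperation as
# ["set", "var", "score", "raw", "5"]. Assuming "score" is the first variable
# registered (index 0), the returned bytearray is 6 bytes:
#   0x21 (type nibbles: left=var 0010, right=raw 0001), 0x0F (set),
#   0x00 0x00 (variable index 0, little-endian), 0x05 0x00 (raw value 5).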
def ParseForAttributes(tags):
global REPORT
global OPERATION_TEST
global TOTAL_OPS
attributes = {}
attributes["vu"] = []
attributes["cvu"] = {}
#attributes["cvu"]["totalBytes"] = 0
attributes["cdc"] = {}
for attr in tags:
attr = attr.lower()
if attr == "ignore":
return False
#print attr
pair = attr.split(':')
#if pair[0] == "vars":
# pair.pop(0)
# for var in pair:
# varSet = var.split('|')
# VARIABLES[varSet[0]] = { "default" : varSet[1], "index" : len(VARIABLES) }
if pair[0] == "vu":
print(pair)
pair.pop(0)
REPORT = ""
data = bytearray()
data = ParseOperation(pair, 0)
print(":".join("{:02x}".format(ord(chr(c))) for c in data))
OPERATION_TEST += data
TOTAL_OPS += 1
print(REPORT)
#updates = { "operation" : pair[1], "leftType" : pair[2], "leftValue" : pair[3], "rightType" : pair[4], "rightValue" : pair[5] }
#if updates["leftType"] == "var":
# if updates["leftValue"] not in VARIABLES:
# print "New var added: " + updates["leftValue"]
# VARIABLES[updates["leftValue"]] = { "default" : 0, "index" : len(VARIABLES) }
#if updates["rightType"] == "var":
# if updates["rightValue"] not in VARIABLES:
# print "New var added: " + updates["rightValue"]
# VARIABLES[updates["rightValue"]] = { "default" : 0, "index" : len(VARIABLES) }
attributes["vu"].append(data)
elif pair[0] == "choice":
pair.pop(0)
index = int(pair.pop(0)) - 1
if attributes["cvu"].setdefault(index, None) == None:
attributes["cvu"][index] = { "data" : [], "totalBytes" : 0}
opType = pair.pop(0)
REPORT = ""
data = bytearray()
data = ParseOperation(pair, 0)
OPERATION_TEST += data
TOTAL_OPS += 1
#attributes["cvu"]["totalBytes"] = 0
#components = { "valueOne" : pair[3], "operation" : pair[4], "valueTwoType" : pair[5], "valueTwo" : pair[6] }
if opType == "vu": # Value updates
print("[VU] " + str(index) + " : " + REPORT)
#if attributes["cvu"].setdefault(index, None) == None:
#print "Fresh Choice: " + str(index)
#attributes["cvu"][index] = { "data" : [], "totalBytes" : 0}
#attributes["cvu"][index]["data"] = bytearray()
#attributes["cvu"][index]["totalBytes"] = 0
attributes["cvu"][index]["data"].append(data)
attributes["cvu"][index]["totalBytes"] += len(data)
elif opType == "dc": # Display conditionals
print("[DC] " + str(index) + " : " + REPORT)
attributes["cdc"].setdefault(index, []).append(data)
elif len(pair) == 2:
# Set Default Values
if pair[0] == "pp" or pair[0] == "points":
attributes["pp"] = int(pair[1])
elif pair[0] == "eq" or pair[0] == "quality":
attributes["eq"] = int(pair[1])
elif pair[0] == "cc" or pair[0] == "continue":
if pair[1] in ['true', '1', 't']:
attributes["cc"] = True
elif pair[1] in ['false', '0', 'f']:
attributes["cc"] = False
else:
if VERBOSE:
print("[WARNING] Invalid boolean value provided for tag: " + pair[0])
elif pair[0] == "ps" or pair[0] == "perfect":
attributes["ps"] = int(pair[1])
elif pair[0] == "cp" or pair[0] == "penalty":
attributes["cp"] = int(pair[1])
elif pair[0] == "lc" or pair[0] == "color":
if VERBOSE:
print("[WARNING] Color not currently supported.")
#attributes["lc"] = int(pair[1])
elif pair[0] == "sn" or pair[0] == "sound":
if VERBOSE:
print("[WARNING] Sound not currently supported.")
#attributes["sn"] = int(pair[1])
elif pair[0] == "sv" or pair[0] == "version":
attributes["sv"] = pair[1]
return attributes
def ParseForChoices(bodyText):
global LINEAR
global HTML
# Cleanse choices of carriage returns.
bodyText = bodyText.replace('\r', '\n')
if HTML:
bodyText = bodyText.replace('\n\n', '<br>\n')
#else:
#bodyText = bodyText.replace('\n\n', '\n')
choices = []
# Search for either [[Choice Text|Choice Key]] or [[Choice Key]] and warn about missing text.
matchCount = len(re.findall(r"\n*\[\[([^\[\]|]+)(?:\|([\w\d\s]+))?\]\]", bodyText))
for index in range(0, matchCount):
m = re.search(r"\n*\[\[([^\[\]|]+)(?:\|([\w\d\s]+))?\]\]", bodyText)
#for m in re.finditer(r"\[\[([^\[\]|]+)(?:\|([\w\d\s]+))?\]\]", text):
# For [[Run away.|1B]], m.group(0) is whole match, m.group(1) = 'Run away.', and m.group(2) = '1B'
# For [[Run away.]], same but there is no m.group(2)
choice = {}
choice['index'] = m.start()
choice['length'] = m.end() - m.start()
text = m.group(1)
link = m.group(2)
# No link means copy text & link text are the same.
if not link:
link = text
# Link is meant for auto-jumping.
if text.lower() == kAppend:
if len(choices) == 0:
# If only a choice key, label it for an auto jump to the passage.
if LINEAR:
text = "Continue..."
else:
text = "*"
else:
print("[ERROR] Can only have a single auto-jump choice per passage.")
return False
elif text.lower() == kContinue:
text = kContinueCopy # Set to <continue>
elif text.lower() == 'continue':
text = kContinueCopy # Set to <continue>
elif text.lower() == 'continue...':
text = kContinueCopy # Set to <continue>
choice['link'] = link.strip().upper()
choice['text'] = text.strip()
choices.append(choice)
replaceChoices = ""
if LINEAR:
replaceChoices = kGotoTempTag
bodyText = re.sub(r"\n*\s*\[\[([^\[\]|]+)(?:\|([\w\d\s]+))?\]\]\s*", replaceChoices, bodyText, 1)
if len(choices) == 0:
return True
return choices
def ParseForBody(text):
global LINEAR
global HTML
# Cleanse of carriage returns (but leave newlines!).
#
body = text
body = body.replace('\r', '\n')
if HTML:
body = body.replace('\n\n', '<br>\n')
#else:
#body = body.replace('\n\n', '\n')
replaceChoices = ""
if LINEAR:
replaceChoices = kGotoTempTag
body = re.sub(r"\n*\s*\[\[([^\[\]|]+)(?:\|([\w\d\s]+))?\]\]\s*", replaceChoices, text)
return body
def ValidateChoices(tiddlers, nodes):
#print tiddlers
for node in nodes:
if node not in tiddlers:
#print tiddlers
print("[ERROR] Choice key found without matching passage: " + node)
return False
return True
def ValidatePassage(passage):
if "cc" in passage:
if passage["cc"] == True and passage["en"] == False:
if VERBOSE:
print("[WARNING] Continue flag useless if a passage isn't an ending. Setting False.")
passage["cc"] = False
elif passage["cc"] == True and passage["eq"] == 5:
#print "[WARNING] Continue flag should be false if ending quality is 5."
passage["cc"] = False
if passage["en"] == True and "eq" not in passage:
print("[ERROR] Ending Quality (eq|quality) missing from ending passage.")
return False
if "eq" in passage:
if passage["eq"] > 5 or passage["eq"] < 1:
print("[ERROR] Ending Quality (eq|quality) value outside range of 1-5.")
return False
if passage["pp"] > 255 or passage["pp"] < 0:
print("[ERROR] Points (pp|points) value outside range of 0-255.")
return False
def SimplifyNaming():
i = 1
newMap = STORY_MAP.copy()
STORY_MAP.clear()
newPassages = PASSAGES.copy()
PASSAGES.clear()
for titleKey in newMap:
upTitleKey = titleKey.strip().upper()
if upTitleKey != "START":
# Create a map from all passage titles to its new numbered title.
TITLE_MAP[upTitleKey] = str(i)
i += 1
else:
TITLE_MAP["START"] = "0"
for titleKey in newMap:
upTitleKey = titleKey.strip().upper()
if type(newMap[upTitleKey]) is list:
i = 0
for val in newMap[upTitleKey]:
# Links always referenced in uppercase.
#print "HERE: " + titlekey + " : " + i
newMap[upTitleKey][i] = TITLE_MAP[val.strip().upper()]
i += 1
STORY_MAP[TITLE_MAP[upTitleKey]] = newMap[upTitleKey]
PASSAGES[TITLE_MAP[upTitleKey]] = newPassages[upTitleKey]
PASSAGES[TITLE_MAP[upTitleKey]]['key'] = TITLE_MAP[upTitleKey]
# Create array for all incoming links on a passage.
for key in PASSAGES:
psg = PASSAGES[key]
if "cs" in psg and len(psg["cs"]) > 0:
for choice in psg["cs"]:
choice["link"] = TITLE_MAP[choice["link"].strip().upper()]
psgKey = choice["link"].strip().upper()
if "ik" not in PASSAGES[psgKey]:
PASSAGES[psgKey]["ik"] = [""]
PASSAGES[psgKey]["ik"].append(psg["key"])
if __name__ == '__main__':
#global _UPDATE
#global _FORCE
main()
| 29,872
| 32
| 395
|
757f80125b8c8f5468871f3caa0abaecb1d48b89
| 3,847
|
py
|
Python
|
autolab_core/primitives.py
|
SnehalD14/autolab_core
|
c271f1f84283ab5d368618eb85754a549aeae4a3
|
[
"Apache-2.0"
] | 23
|
2021-04-02T09:02:04.000Z
|
2022-03-22T05:31:03.000Z
|
autolab_core/primitives.py
|
SnehalD14/autolab_core
|
c271f1f84283ab5d368618eb85754a549aeae4a3
|
[
"Apache-2.0"
] | 35
|
2021-04-12T09:41:05.000Z
|
2022-03-26T13:32:46.000Z
|
autolab_core/primitives.py
|
SnehalD14/autolab_core
|
c271f1f84283ab5d368618eb85754a549aeae4a3
|
[
"Apache-2.0"
] | 16
|
2021-03-30T11:55:45.000Z
|
2022-03-30T07:10:59.000Z
|
"""
Common geometric primitives.
Author: Jeff Mahler
"""
import numpy as np
class Box(object):
"""A 2D box or 3D rectangular prism.
Attributes
----------
dims : :obj:`numpy.ndarray` of float
Maximal extent in x, y, and (optionally) z.
width : float
Maximal extent in x.
height : float
Maximal extent in y.
area : float
Area of projection onto xy plane.
min_pt : :obj:`numpy.ndarray` of float
The minimum x, y, and (optionally) z points.
max_pt : :obj:`numpy.ndarray` of float
The maximum x, y, and (optionally) z points.
center : :obj:`numpy.ndarray` of float
The center of the box in 2 or 3D coords.
frame : :obj:`str`
The frame in which this box is placed.
"""
def __init__(self, min_pt, max_pt, frame='unspecified'):
"""Initialize a box.
Parameters
----------
min_pt : :obj:`numpy.ndarray` of float
The minimum x, y, and (optionally) z points.
max_pt : :obj:`numpy.ndarray` of float
The maximum x, y, and (optionally) z points.
frame : :obj:`str`
The frame in which this box is placed.
Raises
------
ValueError
If max_pt is not strictly larger than min_pt in all dims.
"""
if np.any((max_pt - min_pt) < 0):
raise ValueError('Min point must be smaller than max point')
self._min_pt = min_pt
self._max_pt = max_pt
self._frame = frame
@property
def dims(self):
""":obj:`numpy.ndarray` of float: Maximal extent in x, y, and (optionally) z
"""
return self._max_pt - self._min_pt
@property
def width(self):
"""float: Maximal extent in x.
"""
return int(np.round(self.dims[1]))
@property
def height(self):
"""float: Maximal extent in y.
"""
return int(np.round(self.dims[0]))
@property
def area(self):
"""float: Area of projection onto xy plane.
"""
return self.width * self.height
@property
def min_pt(self):
""":obj:`numpy.ndarray` of float: The minimum x, y, and (optionally) z points.
"""
return self._min_pt
@property
def max_pt(self):
""":obj:`numpy.ndarray` of float: The maximum x, y, and (optionally) z points.
"""
return self._max_pt
@property
def center(self):
""":obj:`numpy.ndarray` of float: The center of the box in 2 or 3D coords.
"""
return self.min_pt + self.dims / 2.0
@property
def ci(self):
"""float value of center i coordinate"""
return self.center[0]
@property
def cj(self):
"""float value of center j coordinate"""
return self.center[1]
@property
def frame(self):
""":obj:`str`: The frame in which this box is placed.
"""
return self._frame
class Contour(object):
""" A set of pixels forming the boundary of an object of interest in an image.
Attributes
----------
boundary_pixels : :obj:`numpy.ndarray`
Nx2 array of pixel coordinates on the boundary of a contour
bounding_box : :obj:`Box`
smallest box containing the contour
area : float
area of the contour
num_pixels : int
number of pixels along the boundary
"""
@property
| 26.531034
| 86
| 0.570315
|
"""
Common geometric primitives.
Author: Jeff Mahler
"""
import numpy as np
class Box(object):
"""A 2D box or 3D rectangular prism.
Attributes
----------
dims : :obj:`numpy.ndarray` of float
Maximal extent in x, y, and (optionally) z.
width : float
Maximal extent in x.
height : float
Maximal extent in y.
area : float
Area of projection onto xy plane.
min_pt : :obj:`numpy.ndarray` of float
The minimum x, y, and (optionally) z points.
max_pt : :obj:`numpy.ndarray` of float
The maximum x, y, and (optionally) z points.
center : :obj:`numpy.ndarray` of float
The center of the box in 2 or 3D coords.
frame : :obj:`str`
The frame in which this box is placed.
"""
def __init__(self, min_pt, max_pt, frame='unspecified'):
"""Initialize a box.
Parameters
----------
min_pt : :obj:`numpy.ndarray` of float
The minimum x, y, and (optionally) z points.
max_pt : :obj:`numpy.ndarray` of float
The maximum x, y, and (optionally) z points.
frame : :obj:`str`
The frame in which this box is placed.
Raises
------
ValueError
If max_pt is not strictly larger than min_pt in all dims.
"""
if np.any((max_pt - min_pt) < 0):
raise ValueError('Min point must be smaller than max point')
self._min_pt = min_pt
self._max_pt = max_pt
self._frame = frame
@property
def dims(self):
""":obj:`numpy.ndarray` of float: Maximal extent in x, y, and (optionally) z
"""
return self._max_pt - self._min_pt
@property
def width(self):
"""float: Maximal extent in x.
"""
return int(np.round(self.dims[1]))
@property
def height(self):
"""float: Maximal extent in y.
"""
return int(np.round(self.dims[0]))
@property
def area(self):
"""float: Area of projection onto xy plane.
"""
return self.width * self.height
@property
def min_pt(self):
""":obj:`numpy.ndarray` of float: The minimum x, y, and (optionally) z points.
"""
return self._min_pt
@property
def max_pt(self):
""":obj:`numpy.ndarray` of float: The maximum x, y, and (optionally) z points.
"""
return self._max_pt
@property
def center(self):
""":obj:`numpy.ndarray` of float: The center of the box in 2 or 3D coords.
"""
return self.min_pt + self.dims / 2.0
@property
def ci(self):
"""float value of center i coordinate"""
return self.center[0]
@property
def cj(self):
"""float value of center j coordinate"""
return self.center[1]
@property
def frame(self):
""":obj:`str`: The frame in which this box is placed.
"""
return self._frame
class Contour(object):
""" A set of pixels forming the boundary of an object of interest in an image.
Attributes
----------
boundary_pixels : :obj:`numpy.ndarray`
Nx2 array of pixel coordinates on the boundary of a contour
bounding_box : :obj:`Box`
smallest box containing the contour
area : float
area of the contour
num_pixels : int
number of pixels along the boundary
"""
def __init__(self, boundary_pixels, area=0.0, frame='unspecified'):
self.boundary_pixels = boundary_pixels.squeeze()
self.bounding_box = Box(np.min(self.boundary_pixels, axis=0),
np.max(self.boundary_pixels, axis=0),
frame)
self.area = area
@property
def num_pixels(self):
return self.boundary_pixels.shape[0]
| 352
| 0
| 52
|
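A minimal usage sketch of the Box and Contour primitives defined in the record above, assuming only numpy and those two class definitions; the coordinates, frame name and area value below are illustrative, not taken from the original file:
import numpy as np

# Illustrative only: a 2D box built from its corner points.
box = Box(min_pt=np.array([10.0, 20.0]), max_pt=np.array([50.0, 80.0]), frame='image')
print(box.width, box.height)   # integer extents derived from max_pt - min_pt
print(box.area)                # width * height
print(box.center)              # midpoint between min_pt and max_pt

# Illustrative only: a contour from a few boundary pixels; its bounding box is built by the constructor.
pixels = np.array([[12, 22], [12, 60], [40, 60], [40, 22]])
contour = Contour(pixels, area=4.0)
print(contour.num_pixels, contour.bounding_box.dims)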
f174753cba9198ba54b664cbc54c27b45c67aedf
| 1,699
|
py
|
Python
|
test/test_events.py
|
klevio/python-sparkpost
|
007fb26ff5d046a639a88273265fd0775573a8e2
|
[
"Apache-2.0"
] | null | null | null |
test/test_events.py
|
klevio/python-sparkpost
|
007fb26ff5d046a639a88273265fd0775573a8e2
|
[
"Apache-2.0"
] | null | null | null |
test/test_events.py
|
klevio/python-sparkpost
|
007fb26ff5d046a639a88273265fd0775573a8e2
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import responses
from sparkpost import SparkPost
from sparkpost.exceptions import SparkPostAPIException
@responses.activate
@responses.activate
@responses.activate
@responses.activate
| 25.742424
| 75
| 0.616245
|
import pytest
import responses
from sparkpost import SparkPost
from sparkpost.exceptions import SparkPostAPIException
@responses.activate
def test_success_events_message():
responses.add(
responses.GET,
'https://api.sparkpost.com/api/v1/events/message',
status=200,
content_type='application/json',
body='{"results": []}'
)
sp = SparkPost('fake-key')
results = sp.events.message.list()
assert results == []
@responses.activate
def test_fail_events_message():
responses.add(
responses.GET,
'https://api.sparkpost.com/api/v1/events/message',
status=500,
content_type='application/json',
body="""
{"errors": [{"message": "You failed", "description": "More Info"}]}
"""
)
with pytest.raises(SparkPostAPIException):
sp = SparkPost('fake-key')
sp.events.message.list()
@responses.activate
def test_success_events_ingest():
responses.add(
responses.GET,
'https://api.sparkpost.com/api/v1/events/ingest',
status=200,
content_type='application/json',
body='{"results": []}'
)
sp = SparkPost('fake-key')
results = sp.events.ingest.list()
assert results == []
@responses.activate
def test_fail_events_ingest():
responses.add(
responses.GET,
'https://api.sparkpost.com/api/v1/events/ingest',
status=500,
content_type='application/json',
body="""
{"errors": [{"message": "You failed", "description": "More Info"}]}
"""
)
with pytest.raises(SparkPostAPIException):
sp = SparkPost('fake-key')
sp.events.ingest.list()
| 1,404
| 0
| 88
|
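The tests above stub the two SparkPost events endpoints with the responses library. A sketch of the same client calls outside the mocked environment, assuming a python-sparkpost install; the API key is a placeholder:
from sparkpost import SparkPost

sp = SparkPost('my-api-key')                  # placeholder key
message_events = sp.events.message.list()     # GET /api/v1/events/message
ingest_events = sp.events.ingest.list()       # GET /api/v1/events/ingest
print(len(message_events), len(ingest_events))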
76276d63683f2ee0d6c3b6270c2b69939a2ed7ab
| 195
|
py
|
Python
|
src/util.py
|
sajtizsolt/dumas
|
4b7e307535bcc93a75784449bc44055d6dd0730b
|
[
"MIT"
] | 3
|
2021-08-17T08:14:40.000Z
|
2021-09-05T10:21:11.000Z
|
src/util.py
|
sajtizsolt/dumas
|
4b7e307535bcc93a75784449bc44055d6dd0730b
|
[
"MIT"
] | null | null | null |
src/util.py
|
sajtizsolt/dumas
|
4b7e307535bcc93a75784449bc44055d6dd0730b
|
[
"MIT"
] | null | null | null |
import sys
| 19.5
| 43
| 0.74359
|
import sys
def print_exception_and_exit():
print_message_and_exit(sys.exc_info()[1])
def print_message_and_exit(message):
print('\n Error:')
print(message, file=sys.stderr)
sys.exit()
| 138
| 0
| 46
|
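A sketch of how the two helpers above are typically used, assuming both functions are in scope; the failing operation is made up:
try:
    int("not-a-number")           # hypothetical failing operation
except ValueError:
    print_exception_and_exit()    # prints the exception message to stderr and exits via sys.exit()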
7f796b84e36ede142a0744e292e9d72736a1a043
| 1,984
|
py
|
Python
|
3.7.0/lldb-3.7.0.src/test/functionalities/plugins/commands/TestPluginCommands.py
|
androm3da/clang_sles
|
2ba6d0711546ad681883c42dfb8661b842806695
|
[
"MIT"
] | 3
|
2016-02-10T14:18:40.000Z
|
2018-02-05T03:15:56.000Z
|
3.7.0/lldb-3.7.0.src/test/functionalities/plugins/commands/TestPluginCommands.py
|
androm3da/clang_sles
|
2ba6d0711546ad681883c42dfb8661b842806695
|
[
"MIT"
] | 1
|
2016-02-10T15:40:03.000Z
|
2016-02-10T15:40:03.000Z
|
3.7.0/lldb-3.7.0.src/test/functionalities/plugins/commands/TestPluginCommands.py
|
androm3da/clang_sles
|
2ba6d0711546ad681883c42dfb8661b842806695
|
[
"MIT"
] | null | null | null |
"""
Test that plugins that load commands work correctly.
"""
import os, time
import re
import unittest2
import lldb
from lldbtest import *
import lldbutil
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| 29.176471
| 122
| 0.673387
|
"""
Test that plugins that load commands work correctly.
"""
import os, time
import re
import unittest2
import lldb
from lldbtest import *
import lldbutil
class PluginCommandTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
self.lib_dir = os.environ["LLDB_LIB_DIR"]
self.implib_dir = os.environ["LLDB_IMPLIB_DIR"]
@expectedFailureFreeBSD('llvm.org/pr17430')
@skipIfNoSBHeaders
@skipIfHostIncompatibleWithRemote # Requires a compatible arch and platform to link against the host's built lldb lib.
def test_load_plugin(self):
"""Test that plugins that load commands work correctly."""
plugin_name = "plugin"
if sys.platform.startswith("darwin"):
plugin_lib_name = "lib%s.dylib" % plugin_name
else:
plugin_lib_name = "lib%s.so" % plugin_name
# Invoke the library build rule.
self.buildLibrary("plugin.cpp", plugin_name)
debugger = lldb.SBDebugger.Create()
retobj = lldb.SBCommandReturnObject()
retval = debugger.GetCommandInterpreter().HandleCommand("plugin load %s" % plugin_lib_name, retobj)
retobj.Clear()
retval = debugger.GetCommandInterpreter().HandleCommand("plugin_loaded_command child abc def ghi",retobj)
if self.TraceOn():
print retobj.GetOutput()
self.expect(retobj,substrs = ['abc def ghi'], exe=False)
retobj.Clear()
# check that abbreviations work correctly in plugin commands.
retval = debugger.GetCommandInterpreter().HandleCommand("plugin_loaded_ ch abc def ghi",retobj)
if self.TraceOn():
print retobj.GetOutput()
self.expect(retobj,substrs = ['abc def ghi'], exe=False)
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| 162
| 1,485
| 23
|
d32e2039cde94b64ddb6c5940d4f74d989163e2b
| 1,923
|
py
|
Python
|
ros2/code_map_localization/code_map_localization/webcam.py
|
wzli/CodeMapLocalization
|
613c021cccbcb4c0f1d42252a9bcb6396b230bea
|
[
"MIT"
] | 2
|
2020-07-12T16:02:20.000Z
|
2020-09-06T14:08:43.000Z
|
ros2/code_map_localization/code_map_localization/webcam.py
|
wzli/CodeMapLocalization
|
613c021cccbcb4c0f1d42252a9bcb6396b230bea
|
[
"MIT"
] | null | null | null |
ros2/code_map_localization/code_map_localization/webcam.py
|
wzli/CodeMapLocalization
|
613c021cccbcb4c0f1d42252a9bcb6396b230bea
|
[
"MIT"
] | null | null | null |
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import PoseStamped
from code_map_localization_msgs.msg import Localization
from .convert_message import convert_to_ros_msgs
from codemap.webcam import WebCamLocalization
import ctypes
import time
libcodemap = ctypes.cdll.LoadLibrary('libcodemap.so')
if __name__ == '__main__':
main()
| 36.283019
| 78
| 0.702028
|
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import PoseStamped
from code_map_localization_msgs.msg import Localization
from .convert_message import convert_to_ros_msgs
from codemap.webcam import WebCamLocalization
import ctypes
import time
libcodemap = ctypes.cdll.LoadLibrary('libcodemap.so')
class CodeMapLocalizationWebcam(Node):
def __init__(self):
super().__init__('code_map_localization')
# create publisher
self.pose_publisher = self.create_publisher(PoseStamped, 'pose', 1)
self.localization_publisher = self.create_publisher(
Localization, 'localization', 1)
# parse serial device
capture_device = self.declare_parameter("capture_device")
capture_device = capture_device._value if capture_device else 0
# parse frame_id
self.frame_id = self.declare_parameter("frame_id")
self.frame_id = self.frame_id._value if self.frame_id else 'map'
# create webcam stream
self.webcam_stream = WebCamLocalization(libcodemap, capture_device)
# print start message
self.get_logger().info(
f'Webcam Localization started on capture device {capture_device}')
# poll every 30ms
self.timer = self.create_timer(0.03, self.rx_timer_callback)
self.start_time = time.time()
def rx_timer_callback(self):
loc_msg = self.webcam_stream.update()
if loc_msg is not None:
loc_msg['timestamp'] = int((time.time() - self.start_time) * 1000)
pose_msg, localization_msg = convert_to_ros_msgs(loc_msg)
pose_msg.header.frame_id = self.frame_id
self.pose_publisher.publish(pose_msg)
self.localization_publisher.publish(localization_msg)
def main(args=None):
rclpy.init(args=args)
node = CodeMapLocalizationWebcam()
rclpy.spin(node)
if __name__ == '__main__':
main()
| 1,450
| 17
| 99
|
598d337979ee892594fed1712e2db68a0df4498d
| 2,017
|
py
|
Python
|
snakypy/dotctrl/actions/unlink.py
|
williamcanin/dotctrl
|
c3d8f07efce777cf67c478e96a03afbe37c0107e
|
[
"MIT"
] | 6
|
2021-04-20T23:17:28.000Z
|
2022-01-29T21:17:00.000Z
|
snakypy/dotctrl/actions/unlink.py
|
williamcanin/dotctrl
|
c3d8f07efce777cf67c478e96a03afbe37c0107e
|
[
"MIT"
] | 5
|
2021-05-27T11:33:45.000Z
|
2021-06-28T08:03:00.000Z
|
snakypy/dotctrl/actions/unlink.py
|
williamcanin/dotctrl
|
c3d8f07efce777cf67c478e96a03afbe37c0107e
|
[
"MIT"
] | 1
|
2021-06-23T05:03:33.000Z
|
2021-06-23T05:03:33.000Z
|
from contextlib import suppress
from os import remove
from os.path import islink, join
from sys import exit
from snakypy.helpers import FG, printer
from snakypy.dotctrl.config.base import Base
from snakypy.dotctrl.utils import check_init, join_two, listing_files, rm_garbage_config
| 34.775862
| 88
| 0.530987
|
from contextlib import suppress
from os import remove
from os.path import islink, join
from sys import exit
from snakypy.helpers import FG, printer
from snakypy.dotctrl.config.base import Base
from snakypy.dotctrl.utils import check_init, join_two, listing_files, rm_garbage_config
class UnlinkCommand(Base):
def __init__(self, root, home):
Base.__init__(self, root, home)
def main(self, arguments: dict) -> bool:
"""Method to unlink point files from the repository
with their place of origin."""
check_init(self.ROOT)
rm_garbage_config(self.HOME, self.repo_path, self.config_path)
if arguments["--element"]:
file_home = join_two(self.HOME, arguments["--element"])
if islink(file_home):
with suppress(Exception):
remove(file_home)
return True
printer(
f'Element "{file_home}" not unlinked. Element not found.',
foreground=FG().ERROR,
)
return False
else:
objects = [
*listing_files(self.repo_path, only_rc_files=True),
*self.data,
]
for item in objects:
file_home = join(self.HOME, item)
if not islink(file_home) and not arguments["--force"]:
printer(
"Unlinked elements were found. Use the --element option "
"to unlink unique links or use --force.",
foreground=FG().WARNING,
)
exit(0)
if islink(file_home):
with suppress(Exception):
remove(file_home)
if len(objects) == 0:
printer(
"Nothing to unlinked, en masse. Empty list of elements.",
foreground=FG().WARNING,
)
return False
return True
| 50
| 1,659
| 23
|
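A sketch of driving UnlinkCommand directly with a docopt-style arguments dict, as its main() expects; the paths are hypothetical and an already-initialized dotctrl repository (so check_init passes) is assumed:
# Illustrative only: unlink a single element, then everything, from the home directory.
cmd = UnlinkCommand(root="/home/user/.dotctrl", home="/home/user")   # hypothetical paths
cmd.main({"--element": ".zshrc", "--force": False})
cmd.main({"--element": None, "--force": True})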
437e22c8435e0564c4897390e72feb8a3af89a11
| 1,191
|
py
|
Python
|
arhuaco/sensors/source/log_metrics.py
|
kuronosec/arhuaco
|
6eec1691dd03b2e3726ae8c2101588b45d58b6d7
|
[
"Apache-2.0"
] | 1
|
2020-08-08T02:17:34.000Z
|
2020-08-08T02:17:34.000Z
|
arhuaco/sensors/source/log_metrics.py
|
kuronosec/arhuaco
|
6eec1691dd03b2e3726ae8c2101588b45d58b6d7
|
[
"Apache-2.0"
] | null | null | null |
arhuaco/sensors/source/log_metrics.py
|
kuronosec/arhuaco
|
6eec1691dd03b2e3726ae8c2101588b45d58b6d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 Andres Gomez Ramirez.
# All Rights Reserved.
import sys
import time
import subprocess
import logging
import os.path
import time
from arhuaco.sensors.source.source import Source
| 29.775
| 68
| 0.598657
|
# Copyright (c) 2019 Andres Gomez Ramirez.
# All Rights Reserved.
import sys
import time
import subprocess
import logging
import os.path
import time
from arhuaco.sensors.source.source import Source
class LogMetrics(Source):
def __init__(self, dataPath):
# Initialize entities
super(LogMetrics, self).__init__()
self.dataPath = dataPath
def get_data_iterator(self):
# Collect data from log file
command_log = ("tail -f %s" % self.dataPath)
while not os.path.exists(self.dataPath):
time.sleep(1)
logging.info("Starting the log collection %s" % command_log)
proc_log = subprocess.Popen(command_log,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
# Extract data from the logs
while proc_log.poll() is None:
line = proc_log.stdout.readline()
yield line.decode('utf-8')
logging.info(proc_log.poll())
logging.info('Finalizing log collection.')
proc_log.terminate()
def get_data_source(self):
return None
| 883
| 4
| 104
|
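A sketch of consuming the LogMetrics source above, assuming the arhuaco Source base class initializes without extra arguments (as the super().__init__() call implies); the log path is hypothetical, and get_data_iterator blocks until that file exists:
metrics = LogMetrics("/var/log/containers/job.log")   # hypothetical path
for line in metrics.get_data_iterator():
    print(line.strip())   # each yielded item is one decoded log line
    break                 # stop after the first line in this sketch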
c5ccd7c06343a21e1742dea0fdd5652c02cb7257
| 4,685
|
py
|
Python
|
__init__.py
|
in4lio/yupp
|
38d4002d2f07c31940b2be572a1c205d6bf63546
|
[
"MIT"
] | 44
|
2015-09-15T17:14:05.000Z
|
2021-08-22T10:35:05.000Z
|
__init__.py
|
in4lio/yupp
|
38d4002d2f07c31940b2be572a1c205d6bf63546
|
[
"MIT"
] | null | null | null |
__init__.py
|
in4lio/yupp
|
38d4002d2f07c31940b2be572a1c205d6bf63546
|
[
"MIT"
] | 1
|
2015-09-22T22:27:28.000Z
|
2015-09-22T22:27:28.000Z
|
r"""
http://github.com/in4lio/yupp/
__ __ _____ _____
/\ \ /\ \ /\ _ \ _ \
\ \ \_\/ \_\/ \_\ \ \_\ \
\ \__ /\____/\ __/\ __/
\/_/\_\/___/\ \_\/\ \_\/
\/_/ \/_/ \/_/
Python 'yupp' Codec Support
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from future import standard_library
standard_library.install_aliases()
import codecs
from encodings import utf_8, search_function
from .pp.yulic import VERSION, DESCRIPTION, HOLDER, EMAIL
from .pp.yup import cli
from .pp.yup import proc_file as translate
# ---------------------------------------------------------------------------
__pp_name__ = 'yupp'
__version__ = VERSION
__description__ = DESCRIPTION
__author__ = HOLDER
__author_email__ = EMAIL
__url__ = 'http://github.com/in4lio/yupp/'
# ---------------------------------------------------------------------------
def read_header( fn ):
'''
Read shebang and magic comment from the source file.
'''
header = ''
try:
with open( fn, 'r' ) as f:
header = f.readline()
if 'coding:' not in header:
header += f.readline()
except:
pass
return header
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
codecs.register( yupp_search_function )
| 29.099379
| 85
| 0.522519
|
r"""
http://github.com/in4lio/yupp/
__ __ _____ _____
/\ \ /\ \ /\ _ \ _ \
\ \ \_\/ \_\/ \_\ \ \_\ \
\ \__ /\____/\ __/\ __/
\/_/\_\/___/\ \_\/\ \_\/
\/_/ \/_/ \/_/
Python 'yupp' Codec Support
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from future import standard_library
standard_library.install_aliases()
import codecs
from encodings import utf_8, search_function
from .pp.yulic import VERSION, DESCRIPTION, HOLDER, EMAIL
from .pp.yup import cli
from .pp.yup import proc_file as translate
# ---------------------------------------------------------------------------
__pp_name__ = 'yupp'
__version__ = VERSION
__description__ = DESCRIPTION
__author__ = HOLDER
__author_email__ = EMAIL
__url__ = 'http://github.com/in4lio/yupp/'
# ---------------------------------------------------------------------------
def read_header( fn ):
'''
Read shebang and magic comment from the source file.
'''
header = ''
try:
with open( fn, 'r' ) as f:
header = f.readline()
if 'coding:' not in header:
header += f.readline()
except:
pass
return header
# ---------------------------------------------------------------------------
def decode_stream( fn, _stream ):
from ast import parse
from .pp.yup import proc_stream
from .pylib import yutraceback
try:
ok, code, fn_o, shrink = proc_stream( _stream, fn )
except Exception:
yutraceback.print_exc( None )
ok = False
if not ok:
return ''
# -- replace the filename of source file in traceback
yutraceback.substitution( fn, fn_o, shrink )
# -- check syntax of the preprocessed code
try:
parse( code, fn_o )
except SyntaxError:
yutraceback.print_exc( 0 )
code = ''
return code
# -- or using a dirty hack
execfile( fn_o )
return ''
# ---------------------------------------------------------------------------
def decoder_factory( basecodec ):
# -----------------------------------
def decode( input, errors='strict' ):
from io import StringIO
from sys import argv
data, bytesencoded = basecodec.decode( input, errors )
fn = argv[ 0 ]
return decode_stream( fn, StringIO( read_header( fn ) + data )), bytesencoded
return decode
# ---------------------------------------------------------------------------
def incremental_decoder_factory( basecodec ):
# -----------------------------------
class IncrementalDecoder( codecs.BufferedIncrementalDecoder ):
def _buffer_decode( self, input, errors, final ):
if input and final:
return decoder_factory( basecodec )( input, errors )
# -- we don't support incremental decoding
return '', 0
return IncrementalDecoder
# ---------------------------------------------------------------------------
def stream_decoder_factory( basecodec ):
# -----------------------------------
class StreamReader( basecodec.StreamReader ):
def __init__( self, *args, **kwargs ):
from io import StringIO
basecodec.StreamReader.__init__( self, *args, **kwargs )
self.stream = StringIO( decode_stream( self.stream.name, self.stream ))
return StreamReader
# ---------------------------------------------------------------------------
def yupp_search_function( coding ):
if not coding.lower().startswith( __pp_name__ ):
return None
dot = coding.find( '.' )
if dot != -1:
# -- coding: yupp.<encoding>
if dot != len( __pp_name__ ):
# -- wrong coding format
return None
basecodec = search_function( coding[( dot + 1 ): ])
if basecodec is None:
# -- unknown <encoding>
return None
else:
if len( coding ) != len( __pp_name__ ):
# -- wrong coding format
return None
# -- default encoding: UTF-8
basecodec = utf_8
return codecs.CodecInfo(
name=__pp_name__,
encode=basecodec.encode,
decode=decoder_factory( basecodec ),
incrementalencoder=basecodec.IncrementalEncoder,
incrementaldecoder=incremental_decoder_factory( basecodec ),
streamwriter=basecodec.StreamWriter,
streamreader=stream_decoder_factory( basecodec )
)
# ---------------------------------------------------------------------------
codecs.register( yupp_search_function )
| 2,724
| 0
| 110
|
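Once the module above has been imported it registers yupp_search_function with codecs, and a source file opts into preprocessing through its coding declaration. A small sketch of the lookup behaviour, using only the functions shown above; the magic-comment forms are given as illustrations:
# Files opt in with a magic comment such as:
#     # -*- coding: yupp -*-         (default UTF-8 base encoding)
#     # -*- coding: yupp.utf_8 -*-   (explicit base encoding)
info = yupp_search_function("yupp")
print(info is not None)                         # True: default UTF-8 base codec is returned
print(yupp_search_function("utf-8") is None)    # True: other codings are left to the stdlib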
6f4fefcc76d7ffae881a56693f6ae63af3836838
| 1,430
|
py
|
Python
|
rtmplib/packet.py
|
genba2/pinybotbeta-enhanced
|
564ae7c363ee00ad2ae0e05d74e08e58de3d1d2f
|
[
"MIT"
] | null | null | null |
rtmplib/packet.py
|
genba2/pinybotbeta-enhanced
|
564ae7c363ee00ad2ae0e05d74e08e58de3d1d2f
|
[
"MIT"
] | null | null | null |
rtmplib/packet.py
|
genba2/pinybotbeta-enhanced
|
564ae7c363ee00ad2ae0e05d74e08e58de3d1d2f
|
[
"MIT"
] | null | null | null |
import time
HANDSHAKE_LENGTH = 1536
class Handshake(object):
"""
A handshake packet.
@ivar first: The first 4 bytes of the packet, represented as an unsigned
long.
@type first: 32bit unsigned int.
@ivar second: The second 4 bytes of the packet, represented as an unsigned
long.
@type second: 32bit unsigned int.
@ivar payload: A blob of data which makes up the rest of the packet. This
must be C{HANDSHAKE_LENGTH} - 8 bytes in length.
@type payload: C{str}
@ivar timestamp: Timestamp that this packet was created (in milliseconds).
@type timestamp: C{int}
"""
first = None
second = None
payload = None
timestamp = None
def encode(self, stream_buffer):
"""
Encodes this packet to a stream.
"""
stream_buffer.write_ulong(self.first or 0)
stream_buffer.write_ulong(self.second or 0)
stream_buffer.write(self.payload)
def decode(self, stream_buffer):
"""
Decodes this packet from a stream.
"""
self.first = stream_buffer.read_ulong()
self.second = stream_buffer.read_ulong()
self.payload = stream_buffer.read(HANDSHAKE_LENGTH - 8)
| 26.481481
| 78
| 0.633566
|
import time
HANDSHAKE_LENGTH = 1536
class Handshake(object):
"""
A handshake packet.
@ivar first: The first 4 bytes of the packet, represented as an unsigned
long.
@type first: 32bit unsigned int.
@ivar second: The second 4 bytes of the packet, represented as an unsigned
long.
@type second: 32bit unsigned int.
@ivar payload: A blob of data which makes up the rest of the packet. This
must be C{HANDSHAKE_LENGTH} - 8 bytes in length.
@type payload: C{str}
@ivar timestamp: Timestamp that this packet was created (in milliseconds).
@type timestamp: C{int}
"""
first = None
second = None
payload = None
timestamp = None
def __init__(self, **kwargs):
timestamp = kwargs.get('timestamp', None)
if timestamp is None:
kwargs['timestamp'] = int(time.time())
self.__dict__.update(kwargs)
def encode(self, stream_buffer):
"""
Encodes this packet to a stream.
"""
stream_buffer.write_ulong(self.first or 0)
stream_buffer.write_ulong(self.second or 0)
stream_buffer.write(self.payload)
def decode(self, stream_buffer):
"""
Decodes this packet from a stream.
"""
self.first = stream_buffer.read_ulong()
self.second = stream_buffer.read_ulong()
self.payload = stream_buffer.read(HANDSHAKE_LENGTH - 8)
| 178
| 0
| 27
|
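A self-contained sketch exercising Handshake.encode/decode above. The real rtmplib stream buffer is not part of this record, so a minimal stand-in with the four methods the packet uses (write_ulong, write, read_ulong, read) is stubbed here; the big-endian byte order is an assumption of the stub, not a claim about the library:
import struct
from io import BytesIO

class _FakeBuffer(object):
    # Minimal stand-in: big-endian unsigned longs plus raw bytes, matching what encode()/decode() call.
    def __init__(self, data=b''):
        self._io = BytesIO(data)
    def write_ulong(self, value):
        self._io.write(struct.pack('>I', value))
    def write(self, data):
        self._io.write(data)
    def read_ulong(self):
        return struct.unpack('>I', self._io.read(4))[0]
    def read(self, length):
        return self._io.read(length)

out = _FakeBuffer()
hs = Handshake(first=0, second=0, payload=b'\x00' * (HANDSHAKE_LENGTH - 8))
hs.encode(out)

parsed = Handshake()
parsed.decode(_FakeBuffer(out._io.getvalue()))
print(parsed.first, parsed.second, len(parsed.payload))   # 0 0 1528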
433a9cc460319ac1dc362de667e3e4fbb75f3448
| 1,052
|
py
|
Python
|
kits/python/mediocre/main.py
|
ppinchuk/Lux-Design-2021
|
8a04ad48c6749cafc9aca986f14e75daaa31c789
|
[
"Apache-2.0"
] | null | null | null |
kits/python/mediocre/main.py
|
ppinchuk/Lux-Design-2021
|
8a04ad48c6749cafc9aca986f14e75daaa31c789
|
[
"Apache-2.0"
] | null | null | null |
kits/python/mediocre/main.py
|
ppinchuk/Lux-Design-2021
|
8a04ad48c6749cafc9aca986f14e75daaa31c789
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict
import sys
from mediocre_agent import agent
if __name__ == "__main__":
def read_input():
"""
Reads input from stdin
"""
try:
return input()
except EOFError as eof:
raise SystemExit(eof)
step = 0
observation = Observation()
observation["updates"] = []
observation["step"] = 0
player_id = 0
while True:
inputs = read_input()
observation["updates"].append(inputs)
if step == 0:
player_id = int(observation["updates"][0])
observation.player = player_id
if inputs == "D_DONE":
actions = agent(observation, None)
observation["updates"] = []
step += 1
observation["step"] = step
print(",".join(actions))
print("D_FINISH")
| 26.974359
| 54
| 0.521863
|
from typing import Dict
import sys
from mediocre_agent import agent
if __name__ == "__main__":
def read_input():
"""
Reads input from stdin
"""
try:
return input()
except EOFError as eof:
raise SystemExit(eof)
step = 0
class Observation(Dict[str, any]):
def __init__(self, player=0) -> None:
self.player = player
# self.updates = []
# self.step = 0
observation = Observation()
observation["updates"] = []
observation["step"] = 0
player_id = 0
while True:
inputs = read_input()
observation["updates"].append(inputs)
if step == 0:
player_id = int(observation["updates"][0])
observation.player = player_id
if inputs == "D_DONE":
actions = agent(observation, None)
observation["updates"] = []
step += 1
observation["step"] = step
print(",".join(actions))
print("D_FINISH")
| 109
| 13
| 57
|
c0f7fbc32344fbe01dd0f5e9a00e97f8421dc665
| 650
|
py
|
Python
|
core/agent.py
|
ihgalis/queue_simulation
|
a49412417cedbdb1fe7943390a6f805489c33aaa
|
[
"MIT"
] | null | null | null |
core/agent.py
|
ihgalis/queue_simulation
|
a49412417cedbdb1fe7943390a6f805489c33aaa
|
[
"MIT"
] | null | null | null |
core/agent.py
|
ihgalis/queue_simulation
|
a49412417cedbdb1fe7943390a6f805489c33aaa
|
[
"MIT"
] | null | null | null |
class Agent(object):
"""
represents the agent who takes the calls from the queue
"""
def __init__(self, id, free, minutes_till_ready=0):
"""
constructor just sets the id
:param id: identifier of the agent
"""
self.id = id
self.free = free
self.minutes_till_ready = minutes_till_ready
@staticmethod
def consume(caller_list):
"""
consumes callers from the queue and chats with the
caller.
:param caller_list:
:return:
"""
temp_caller = caller_list.consume_caller()
print("agent consumes - " + str(temp_caller.chat()))
| 23.214286
| 60
| 0.578462
|
class Agent(object):
"""
represents the agent who takes the calls from the queue
"""
def __init__(self, id, free, minutes_till_ready=0):
"""
constructor just sets the id
:param id: identifier of the agent
"""
self.id = id
self.free = free
self.minutes_till_ready = minutes_till_ready
@staticmethod
def consume(caller_list):
"""
consumes callers from the queue and chats with the
caller.
:param caller_list:
:return:
"""
temp_caller = caller_list.consume_caller()
print("agent consumes - " + str(temp_caller.chat()))
| 0
| 0
| 0
|
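A self-contained sketch of the Agent class above. The queue project's real caller and caller-list types are not in this record, so throwaway stand-ins providing chat() and consume_caller() are defined here:
class _Caller(object):
    def chat(self):
        return "hello from caller 1"

class _CallerList(object):
    def consume_caller(self):
        return _Caller()

agent = Agent(id=1, free=True)    # minutes_till_ready defaults to 0
Agent.consume(_CallerList())      # staticmethod; prints "agent consumes - hello from caller 1"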
c262beba650fdb2c95f431d3157ae61c710ef51a
| 324
|
py
|
Python
|
config/wsgi.py
|
drixselecta/homebytwo
|
29d26ce9f5586943e3b64c95aa4ce9ea7263bd10
|
[
"MIT"
] | 7
|
2018-03-10T20:58:59.000Z
|
2021-08-22T17:18:09.000Z
|
config/wsgi.py
|
HomebyTwo/homebytwo
|
29d26ce9f5586943e3b64c95aa4ce9ea7263bd10
|
[
"MIT"
] | 69
|
2017-02-01T21:15:43.000Z
|
2022-02-26T09:33:27.000Z
|
config/wsgi.py
|
drixselecta/homebytwo
|
29d26ce9f5586943e3b64c95aa4ce9ea7263bd10
|
[
"MIT"
] | null | null | null |
from os import environ
from pathlib import Path
from django.core.wsgi import get_wsgi_application
from config import get_project_root_path, import_env_vars
import_env_vars(Path(get_project_root_path(), "envdir"))
environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.base")
application = get_wsgi_application()
| 24.923077
| 68
| 0.833333
|
from os import environ
from pathlib import Path
from django.core.wsgi import get_wsgi_application
from config import get_project_root_path, import_env_vars
import_env_vars(Path(get_project_root_path(), "envdir"))
environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.base")
application = get_wsgi_application()
| 0
| 0
| 0
|
592da9c69b2699031221663c32e6918ce48f5588
| 18
|
py
|
Python
|
acmicpc/15733/15733.py
|
love-adela/algorithm
|
4ccd02173c96f8369962f1fd4e5166a221690fa2
|
[
"MIT"
] | 3
|
2019-03-09T05:19:23.000Z
|
2019-04-06T09:26:36.000Z
|
acmicpc/15733/15733.py
|
love-adela/algorithm
|
4ccd02173c96f8369962f1fd4e5166a221690fa2
|
[
"MIT"
] | 1
|
2020-02-23T10:38:04.000Z
|
2020-02-23T10:38:04.000Z
|
acmicpc/15733/15733.py
|
love-adela/algorithm
|
4ccd02173c96f8369962f1fd4e5166a221690fa2
|
[
"MIT"
] | 2
|
2017-11-20T14:06:06.000Z
|
2020-07-20T00:17:47.000Z
|
print("I'm Sexy")
| 9
| 17
| 0.611111
|
print("I'm Sexy")
| 0
| 0
| 0
|
1f5d038875e16e0fe3a4bf4b7a051aa57494670c
| 1,343
|
py
|
Python
|
run_init_images.py
|
alchem0x2A/paper.ZnVO
|
b36839ee52867c6892177b6152daa7a5b4fd4109
|
[
"MIT"
] | null | null | null |
run_init_images.py
|
alchem0x2A/paper.ZnVO
|
b36839ee52867c6892177b6152daa7a5b4fd4109
|
[
"MIT"
] | null | null | null |
run_init_images.py
|
alchem0x2A/paper.ZnVO
|
b36839ee52867c6892177b6152daa7a5b4fd4109
|
[
"MIT"
] | null | null | null |
import sys
import os, os.path
# May need this for the path issue for gpaw-python
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from src.structure import get_structure
from src.supercell import make_super, add_adatom
from src.neb import neb, calc_img
import shutil
from ase.parallel import paropen, parprint, world, rank, broadcast
from ase.visualize import view
# Name=Zn, Co
if __name__ == "__main__":
assert len(sys.argv) == 3
mater = sys.argv[1]
imag = sys.argv[2]
main(name=mater, imag=imag)
| 27.979167
| 81
| 0.613552
|
import sys
import os, os.path
# May need this for the path issue for gpaw-python
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from src.structure import get_structure
from src.supercell import make_super, add_adatom
from src.neb import neb, calc_img
import shutil
from ase.parallel import paropen, parprint, world, rank, broadcast
from ase.visualize import view
# Name=Zn, Co
def main(name,
imag="init",
root="/cluster/scratch/ttian/ZnVO",
clean=False):
assert imag in ("init", "final")
if name not in ("Zn", "Co"):
return False
# Directory
if rank == 0:
base_dir = os.path.join(root, "{}V2O5".format(name))
if clean:
shutil.rmtree(base_dir, ignore_errors=True)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
world.barrier()
if clean:
return # on all ranks
base_dir = os.path.join(root, "{}V2O5".format(name))
if imag == "init":
calc_img(base_dir=base_dir, scaled_pos=(0, 0, 1 / 2), index=imag)
else:
calc_img(base_dir=base_dir, scaled_pos=(1 / 2, 1 / 2, 1 / 2), index=imag)
return 0
if __name__ == "__main__":
assert len(sys.argv) == 3
mater = sys.argv[1]
imag = sys.argv[2]
main(name=mater, imag=imag)
| 791
| 0
| 22
|
9d8dfad20f0cd219f29ef974ce7e0abe3aeec538
| 959
|
py
|
Python
|
360agent/plugins/gpu.py
|
vfuse/360agent
|
947e5ffe6a9e2ef22665f4b2b98c882e698fb201
|
[
"BSD-3-Clause"
] | 88
|
2017-01-26T14:26:37.000Z
|
2021-12-31T17:07:03.000Z
|
360agent/plugins/gpu.py
|
vfuse/360agent
|
947e5ffe6a9e2ef22665f4b2b98c882e698fb201
|
[
"BSD-3-Clause"
] | 26
|
2016-12-27T12:28:16.000Z
|
2022-02-24T08:11:45.000Z
|
360agent/plugins/gpu.py
|
vfuse/360agent
|
947e5ffe6a9e2ef22665f4b2b98c882e698fb201
|
[
"BSD-3-Clause"
] | 28
|
2017-04-11T08:40:00.000Z
|
2021-10-05T06:43:04.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import plugins
import sys
if __name__ == '__main__':
Plugin().execute()
| 28.205882
| 87
| 0.535975
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import plugins
import sys
class Plugin(plugins.BasePlugin):
__name__ = 'gpu'
def run(self, *unused):
'''
experimental plugin used to collect GPU load from OpenHardWareMonitor (Windows)
'''
data = {}
if sys.platform == "win32":
try:
import wmi
except:
return 'wmi module not installed.'
try:
w = wmi.WMI(namespace="root\OpenHardwareMonitor")
temperature_infos = w.Sensor()
for sensor in temperature_infos:
if sensor.SensorType==u'Load' and sensor.Name==u'GPU Core':
data[sensor.Parent.replace('/','-').strip('-')] = sensor.Value
except:
return 'Could not fetch GPU Load data from OpenHardwareMonitor.'
return data
if __name__ == '__main__':
Plugin().execute()
| 0
| 812
| 23
|
c08b1e1fb17569d5d4677af2fce155f334018648
| 3,794
|
py
|
Python
|
core/argo/core/network/ResEnc.py
|
szokejokepu/natural-rws
|
bb1ad4ca3ec714e6bf071d2136593dc853492b68
|
[
"MIT"
] | null | null | null |
core/argo/core/network/ResEnc.py
|
szokejokepu/natural-rws
|
bb1ad4ca3ec714e6bf071d2136593dc853492b68
|
[
"MIT"
] | null | null | null |
core/argo/core/network/ResEnc.py
|
szokejokepu/natural-rws
|
bb1ad4ca3ec714e6bf071d2136593dc853492b68
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import sonnet as snt
from .build_utils import residual_stack, maybe_set_l2_conv_contractive_regularizer
from .AbstractResNetLayer import AbstractResNetLayer
class ResEnc(AbstractResNetLayer):
"""
res enc used in VQ
"""
#TODO remove biases before batch norm, see if it makes any difference. Remove dropouts?
| 39.113402
| 114
| 0.495519
|
import tensorflow as tf
import sonnet as snt
from .build_utils import residual_stack, maybe_set_l2_conv_contractive_regularizer
from .AbstractResNetLayer import AbstractResNetLayer
class ResEnc(AbstractResNetLayer):
"""
res enc used in VQ
"""
#TODO remove biases before batch norm, see if it makes any difference. Remove dropouts?
def __init__(self, num_hiddens, num_residual_layers, num_residual_hiddens,
activation,
is_training,
name='ResEnc',
prob_drop=0.1,
bn_momentum=0.99,
bn_renormalization=True,
creg_scale=None,
**extra_params):
super().__init__(num_hiddens,
num_residual_layers,
num_residual_hiddens,
activation,
is_training,
name=name,
prob_drop=prob_drop,
bn_momentum=bn_momentum,
bn_renormalization=bn_renormalization,
creg_scale=creg_scale,
**extra_params)
def _build(self, x):
# h_pre = x
conv1 = snt.Conv2D(
output_channels=self._num_hiddens / 2,
kernel_shape=(4, 4),
stride=(2, 2),
# use_bias=False,
**self._extra_params,
name="enc_1")
h = conv1(x)
maybe_set_l2_conv_contractive_regularizer(conv1, h, self._activation, self._creg_scale, name="enc_1_creg")
h = self._dropout(h, training=self._is_training)
h = tf.layers.batch_normalization(h, training=self._is_training,
momentum=self._bn_momentum,
renorm=self._bn_renormalization,
renorm_momentum=self._bn_momentum,
renorm_clipping=self._renorm_clipping,
name="batch_norm_1")
h = self._activation(h)
conv2 = snt.Conv2D(
output_channels=self._num_hiddens,
kernel_shape=(4, 4),
stride=(2, 2),
# use_bias=False,
**self._extra_params,
name="enc_2")
h = conv2(h)
maybe_set_l2_conv_contractive_regularizer(conv2, h, self._activation, self._creg_scale, name="enc_2_creg")
h = self._dropout(h, training=self._is_training)
h = tf.layers.batch_normalization(h, training=self._is_training,
momentum=self._bn_momentum,
renorm=self._bn_renormalization,
renorm_momentum=self._bn_momentum,
renorm_clipping=self._renorm_clipping,
name="batch_norm_2")
h = self._activation(h)
h = residual_stack(
h,
self._num_hiddens,
self._num_residual_layers,
self._num_residual_hiddens,
activation=self._activation,
training=self._is_training,
prob_drop=self._prob_drop,
momentum=self._bn_momentum,
renorm=self._bn_renormalization,
renorm_momentum=self._bn_momentum,
renorm_clipping=self._renorm_clipping,
creg_scale = self._creg_scale,
**self._extra_params
)
return h
| 3,395
| 0
| 53
|
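A construction sketch for the ResEnc module above, in the TensorFlow 1.x / Sonnet 1 style its imports imply. The hyperparameters are placeholders, and it is assumed that AbstractResNetLayer accepts exactly the arguments ResEnc forwards to it; the commented call shows the intended use on an image batch:
encoder = ResEnc(num_hiddens=128,
                 num_residual_layers=2,
                 num_residual_hiddens=32,
                 activation=tf.nn.relu,
                 is_training=True)
# features = encoder(images)   # images: a [batch, height, width, channels] float tensor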
904a75d44462d0b84d10192273337b4d499672b8
| 1,366
|
py
|
Python
|
xrpl/models/transactions/payment_channel_fund.py
|
mDuo13/xrpl-py
|
70f927dcd2dbb8644b3e210b0a8de2a214e71e3d
|
[
"0BSD"
] | null | null | null |
xrpl/models/transactions/payment_channel_fund.py
|
mDuo13/xrpl-py
|
70f927dcd2dbb8644b3e210b0a8de2a214e71e3d
|
[
"0BSD"
] | null | null | null |
xrpl/models/transactions/payment_channel_fund.py
|
mDuo13/xrpl-py
|
70f927dcd2dbb8644b3e210b0a8de2a214e71e3d
|
[
"0BSD"
] | null | null | null |
"""
Represents a PaymentChannelFund transaction on the XRP Ledger.
A PaymentChannelFund transaction adds additional XRP to an open payment channel,
and optionally updates the expiration time of the channel. Only the source address
of the channel can use this transaction.
`See PaymentChannelFund <https://xrpl.org/paymentchannelfund.html>`_
"""
from dataclasses import dataclass, field
from typing import Optional
from xrpl.models.required import REQUIRED
from xrpl.models.transactions.transaction import Transaction, TransactionType
from xrpl.models.utils import require_kwargs_on_init
@require_kwargs_on_init
@dataclass(frozen=True)
class PaymentChannelFund(Transaction):
"""
Represents a PaymentChannelFund transaction on the XRP Ledger.
A PaymentChannelFund transaction adds additional XRP to an open payment channel,
and optionally updates the expiration time of the channel. Only the source address
of the channel can use this transaction.
`See PaymentChannelFund <https://xrpl.org/paymentchannelfund.html>`_
"""
#: This field is required.
channel: str = REQUIRED # type: ignore
#: This field is required.
amount: str = REQUIRED # type: ignore
expiration: Optional[int] = None
transaction_type: TransactionType = field(
default=TransactionType.PAYMENT_CHANNEL_FUND,
init=False,
)
| 35.947368
| 86
| 0.770864
|
"""
Represents a PaymentChannelFund transaction on the XRP Ledger.
A PaymentChannelFund transaction adds additional XRP to an open payment channel,
and optionally updates the expiration time of the channel. Only the source address
of the channel can use this transaction.
`See PaymentChannelFund <https://xrpl.org/paymentchannelfund.html>`_
"""
from dataclasses import dataclass, field
from typing import Optional
from xrpl.models.required import REQUIRED
from xrpl.models.transactions.transaction import Transaction, TransactionType
from xrpl.models.utils import require_kwargs_on_init
@require_kwargs_on_init
@dataclass(frozen=True)
class PaymentChannelFund(Transaction):
"""
Represents a PaymentChannelFund transaction on the XRP Ledger.
A PaymentChannelFund transaction adds additional XRP to an open payment channel,
and optionally updates the expiration time of the channel. Only the source address
of the channel can use this transaction.
`See PaymentChannelFund <https://xrpl.org/paymentchannelfund.html>`_
"""
#: This field is required.
channel: str = REQUIRED # type: ignore
#: This field is required.
amount: str = REQUIRED # type: ignore
expiration: Optional[int] = None
transaction_type: TransactionType = field(
default=TransactionType.PAYMENT_CHANNEL_FUND,
init=False,
)
| 0
| 0
| 0
|
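A construction sketch for the PaymentChannelFund model above; the account address, channel ID, amount and expiration are made-up placeholders, and it is assumed the inherited Transaction model only needs the sending account supplied in addition to the fields shown:
tx = PaymentChannelFund(
    account="rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn",   # placeholder classic address
    channel="C1AE6DDDEEC05CF2978C0BAD6FE302948E9533691DC749DCDD3B9E5992CA6198",   # placeholder channel ID
    amount="200000",        # drops of XRP to add to the channel
    expiration=543171558,   # optional new expiration time
)
print(tx.transaction_type)   # TransactionType.PAYMENT_CHANNEL_FUND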
65b2babb163c94ccd29863798b7089a565f8bf1e
| 13,980
|
py
|
Python
|
nlcpy/ufuncs/operations.py
|
SX-Aurora/nlcpy
|
0a53eec8778073bc48b12687b7ce37ab2bf2b7e0
|
[
"BSD-3-Clause"
] | 11
|
2020-07-31T02:21:55.000Z
|
2022-03-10T03:12:11.000Z
|
nlcpy/ufuncs/operations.py
|
SX-Aurora/nlcpy
|
0a53eec8778073bc48b12687b7ce37ab2bf2b7e0
|
[
"BSD-3-Clause"
] | null | null | null |
nlcpy/ufuncs/operations.py
|
SX-Aurora/nlcpy
|
0a53eec8778073bc48b12687b7ce37ab2bf2b7e0
|
[
"BSD-3-Clause"
] | null | null | null |
#
# * The source code in this file is developed independently by NEC Corporation.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy
from nlcpy.ufuncs import ufuncs
from nlcpy.ufuncs import casting
from nlcpy.ufuncs import err
from nlcpy.ufuncs import ufunc_docs
# ----------------------------------------------------------------------------
# ufunc operations
# see: https://docs.scipy.org/doc/numpy/reference/ufuncs.html
# ----------------------------------------------------------------------------
# math_operations
add = ufuncs.create_ufunc(
'nlcpy_add',
numpy.add.types,
err._add_error_check,
doc=ufunc_docs._add_doc
)
subtract = ufuncs.create_ufunc(
'nlcpy_subtract',
casting._subtract_types,
err._subtract_error_check,
doc=ufunc_docs._subtract_doc
)
multiply = ufuncs.create_ufunc(
'nlcpy_multiply',
numpy.multiply.types,
err._multiply_error_check,
doc=ufunc_docs._multiply_doc
)
true_divide = ufuncs.create_ufunc(
'nlcpy_true_divide',
casting._true_divide_types,
err._true_divide_error_check,
doc=ufunc_docs._true_divide_doc
)
# ufunc_operation(divide,orig_types,valid_error_check)dnl
divide = true_divide
logaddexp = ufuncs.create_ufunc(
'nlcpy_logaddexp',
numpy.logaddexp.types,
err._logaddexp_error_check,
doc=ufunc_docs._logaddexp_doc
)
logaddexp2 = ufuncs.create_ufunc(
'nlcpy_logaddexp2',
numpy.logaddexp2.types,
err._logaddexp2_error_check,
doc=ufunc_docs._logaddexp2_doc
)
floor_divide = ufuncs.create_ufunc(
'nlcpy_floor_divide',
numpy.floor_divide.types,
err._floor_divide_error_check,
doc=ufunc_docs._floor_divide_doc
)
negative = ufuncs.create_ufunc(
'nlcpy_negative',
casting._negative_types,
err._negative_error_check,
doc=ufunc_docs._negative_doc
)
positive = ufuncs.create_ufunc(
'nlcpy_positive',
casting._positive_types,
err._positive_error_check,
doc=ufunc_docs._positive_doc
)
power = ufuncs.create_ufunc(
'nlcpy_power',
numpy.power.types,
err._power_error_check,
doc=ufunc_docs._power_doc
)
remainder = ufuncs.create_ufunc(
'nlcpy_remainder',
casting._remainder_types,
err._remainder_error_check,
doc=ufunc_docs._remainder_doc
)
# ufunc_operation(mod,orig_types,valid_error_check)dnl
mod = remainder
fmod = ufuncs.create_ufunc(
'nlcpy_fmod',
casting._fmod_types,
err._fmod_error_check,
doc=ufunc_docs._fmod_doc
)
# ufunc_operation(divmod,numpy_types,valid_error_check)dnl
absolute = ufuncs.create_ufunc(
'nlcpy_absolute',
numpy.absolute.types,
err._absolute_error_check,
doc=ufunc_docs._absolute_doc
)
fabs = ufuncs.create_ufunc(
'nlcpy_fabs',
casting._fabs_types,
err._fabs_error_check,
doc=ufunc_docs._fabs_doc
)
rint = ufuncs.create_ufunc(
'nlcpy_rint',
numpy.rint.types,
err._rint_error_check,
doc=ufunc_docs._rint_doc
)
sign = ufuncs.create_ufunc(
'nlcpy_sign',
casting._sign_types,
err._sign_error_check,
doc=ufunc_docs._sign_doc
)
heaviside = ufuncs.create_ufunc(
'nlcpy_heaviside',
numpy.heaviside.types,
err._heaviside_error_check,
doc=ufunc_docs._heaviside_doc
)
conjugate = ufuncs.create_ufunc(
'nlcpy_conjugate',
numpy.conjugate.types,
err._conjugate_error_check,
doc=ufunc_docs._conjugate_doc
)
# ufunc_operation(conj,numpy_types,valid_error_check)dnl
conj = conjugate
exp = ufuncs.create_ufunc(
'nlcpy_exp',
numpy.exp.types,
err._exp_error_check,
doc=ufunc_docs._exp_doc
)
exp2 = ufuncs.create_ufunc(
'nlcpy_exp2',
numpy.exp2.types,
err._exp2_error_check,
doc=ufunc_docs._exp2_doc
)
log = ufuncs.create_ufunc(
'nlcpy_log',
numpy.log.types,
err._log_error_check,
doc=ufunc_docs._log_doc
)
log2 = ufuncs.create_ufunc(
'nlcpy_log2',
numpy.log2.types,
err._log2_error_check,
doc=ufunc_docs._log2_doc
)
log10 = ufuncs.create_ufunc(
'nlcpy_log10',
numpy.log10.types,
err._log10_error_check,
doc=ufunc_docs._log10_doc
)
expm1 = ufuncs.create_ufunc(
'nlcpy_expm1',
numpy.expm1.types,
err._expm1_error_check,
doc=ufunc_docs._expm1_doc
)
log1p = ufuncs.create_ufunc(
'nlcpy_log1p',
numpy.log1p.types,
err._log1p_error_check,
doc=ufunc_docs._log1p_doc
)
sqrt = ufuncs.create_ufunc(
'nlcpy_sqrt',
numpy.sqrt.types,
err._sqrt_error_check,
doc=ufunc_docs._sqrt_doc
)
square = ufuncs.create_ufunc(
'nlcpy_square',
numpy.square.types,
err._square_error_check,
doc=ufunc_docs._square_doc
)
cbrt = ufuncs.create_ufunc(
'nlcpy_cbrt',
casting._cbrt_types,
err._cbrt_error_check,
doc=ufunc_docs._cbrt_doc
)
reciprocal = ufuncs.create_ufunc(
'nlcpy_reciprocal',
numpy.reciprocal.types,
err._reciprocal_error_check,
doc=ufunc_docs._reciprocal_doc
)
# ufunc_operation(gcd)dnl
# ufunc_operation(lcm)dnl
# bit-twiddling functions
bitwise_and = ufuncs.create_ufunc(
'nlcpy_bitwise_and',
casting._bitwise_and_types,
err._bitwise_and_error_check,
doc=ufunc_docs._bitwise_and_doc
)
bitwise_or = ufuncs.create_ufunc(
'nlcpy_bitwise_or',
casting._bitwise_or_types,
err._bitwise_or_error_check,
doc=ufunc_docs._bitwise_or_doc
)
bitwise_xor = ufuncs.create_ufunc(
'nlcpy_bitwise_xor',
casting._bitwise_xor_types,
err._bitwise_xor_error_check,
doc=ufunc_docs._bitwise_xor_doc
)
invert = ufuncs.create_ufunc(
'nlcpy_invert',
casting._invert_types,
err._invert_error_check,
doc=ufunc_docs._invert_doc
)
left_shift = ufuncs.create_ufunc(
'nlcpy_left_shift',
casting._left_shift_types,
err._left_shift_error_check,
doc=ufunc_docs._left_shift_doc
)
right_shift = ufuncs.create_ufunc(
'nlcpy_right_shift',
casting._right_shift_types,
err._right_shift_error_check,
doc=ufunc_docs._right_shift_doc
)
# comparison functions
greater = ufuncs.create_ufunc(
'nlcpy_greater',
numpy.greater.types,
err._greater_error_check,
doc=ufunc_docs._greater_doc
)
greater_equal = ufuncs.create_ufunc(
'nlcpy_greater_equal',
numpy.greater_equal.types,
err._greater_equal_error_check,
doc=ufunc_docs._greater_equal_doc
)
less = ufuncs.create_ufunc(
'nlcpy_less',
numpy.less.types,
err._less_error_check,
doc=ufunc_docs._less_doc
)
less_equal = ufuncs.create_ufunc(
'nlcpy_less_equal',
numpy.less_equal.types,
err._less_equal_error_check,
doc=ufunc_docs._less_equal_doc
)
not_equal = ufuncs.create_ufunc(
'nlcpy_not_equal',
numpy.not_equal.types,
err._not_equal_error_check,
doc=ufunc_docs._not_equal_doc
)
equal = ufuncs.create_ufunc(
'nlcpy_equal',
numpy.equal.types,
err._equal_error_check,
doc=ufunc_docs._equal_doc
)
logical_and = ufuncs.create_ufunc(
'nlcpy_logical_and',
numpy.logical_and.types,
err._logical_and_error_check,
doc=ufunc_docs._logical_and_doc
)
logical_or = ufuncs.create_ufunc(
'nlcpy_logical_or',
numpy.logical_or.types,
err._logical_or_error_check,
doc=ufunc_docs._logical_or_doc
)
logical_xor = ufuncs.create_ufunc(
'nlcpy_logical_xor',
numpy.logical_xor.types,
err._logical_xor_error_check,
doc=ufunc_docs._logical_xor_doc
)
logical_not = ufuncs.create_ufunc(
'nlcpy_logical_not',
numpy.logical_not.types,
err._logical_not_error_check,
doc=ufunc_docs._logical_not_doc
)
minimum = ufuncs.create_ufunc(
'nlcpy_minimum',
numpy.minimum.types,
err._minimum_error_check,
doc=ufunc_docs._minimum_doc
)
maximum = ufuncs.create_ufunc(
'nlcpy_maximum',
numpy.maximum.types,
err._maximum_error_check,
doc=ufunc_docs._maximum_doc
)
fmax = ufuncs.create_ufunc(
'nlcpy_fmax',
numpy.fmax.types,
err._fmax_error_check,
doc=ufunc_docs._fmax_doc
)
fmin = ufuncs.create_ufunc(
'nlcpy_fmin',
numpy.fmin.types,
err._fmin_error_check,
doc=ufunc_docs._fmin_doc
)
# trigonometric functions
sin = ufuncs.create_ufunc(
'nlcpy_sin',
numpy.sin.types,
err._sin_error_check,
doc=ufunc_docs._sin_doc
)
cos = ufuncs.create_ufunc(
'nlcpy_cos',
numpy.cos.types,
err._cos_error_check,
doc=ufunc_docs._cos_doc
)
tan = ufuncs.create_ufunc(
'nlcpy_tan',
numpy.tan.types,
err._tan_error_check,
doc=ufunc_docs._tan_doc
)
arcsin = ufuncs.create_ufunc(
'nlcpy_arcsin',
numpy.arcsin.types,
err._arcsin_error_check,
doc=ufunc_docs._arcsin_doc
)
arccos = ufuncs.create_ufunc(
'nlcpy_arccos',
numpy.arccos.types,
err._arccos_error_check,
doc=ufunc_docs._arccos_doc
)
arctan = ufuncs.create_ufunc(
'nlcpy_arctan',
numpy.arctan.types,
err._arctan_error_check,
doc=ufunc_docs._arctan_doc
)
arctan2 = ufuncs.create_ufunc(
'nlcpy_arctan2',
casting._arctan2_types,
err._arctan2_error_check,
doc=ufunc_docs._arctan2_doc
)
hypot = ufuncs.create_ufunc(
'nlcpy_hypot',
casting._hypot_types,
err._hypot_error_check,
doc=ufunc_docs._hypot_doc
)
sinh = ufuncs.create_ufunc(
'nlcpy_sinh',
numpy.sinh.types,
err._sinh_error_check,
doc=ufunc_docs._sinh_doc
)
cosh = ufuncs.create_ufunc(
'nlcpy_cosh',
numpy.cosh.types,
err._cosh_error_check,
doc=ufunc_docs._cosh_doc
)
tanh = ufuncs.create_ufunc(
'nlcpy_tanh',
numpy.tanh.types,
err._tanh_error_check,
doc=ufunc_docs._tanh_doc
)
arcsinh = ufuncs.create_ufunc(
'nlcpy_arcsinh',
numpy.arcsinh.types,
err._arcsinh_error_check,
doc=ufunc_docs._arcsinh_doc
)
arccosh = ufuncs.create_ufunc(
'nlcpy_arccosh',
numpy.arccosh.types,
err._arccosh_error_check,
doc=ufunc_docs._arccosh_doc
)
arctanh = ufuncs.create_ufunc(
'nlcpy_arctanh',
numpy.arctanh.types,
err._arctanh_error_check,
doc=ufunc_docs._arctanh_doc
)
deg2rad = ufuncs.create_ufunc(
'nlcpy_deg2rad',
casting._deg2rad_types,
err._deg2rad_error_check,
doc=ufunc_docs._deg2rad_doc
)
rad2deg = ufuncs.create_ufunc(
'nlcpy_rad2deg',
casting._rad2deg_types,
err._rad2deg_error_check,
doc=ufunc_docs._rad2deg_doc
)
degrees = ufuncs.create_ufunc(
'nlcpy_degrees',
casting._degrees_types,
err._degrees_error_check,
doc=ufunc_docs._degrees_doc
)
radians = ufuncs.create_ufunc(
'nlcpy_radians',
casting._radians_types,
err._radians_error_check,
doc=ufunc_docs._radians_doc
)
# floating functions
isfinite = ufuncs.create_ufunc(
'nlcpy_isfinite',
numpy.isfinite.types,
err._isfinite_error_check,
doc=ufunc_docs._isfinite_doc
)
isinf = ufuncs.create_ufunc(
'nlcpy_isinf',
numpy.isinf.types,
err._isinf_error_check,
doc=ufunc_docs._isinf_doc
)
isnan = ufuncs.create_ufunc(
'nlcpy_isnan',
numpy.isnan.types,
err._isnan_error_check,
doc=ufunc_docs._isnan_doc
)
# ufunc_operation(isnat,numpy_types,valid_error_check)dnl
signbit = ufuncs.create_ufunc(
'nlcpy_signbit',
numpy.signbit.types,
err._signbit_error_check,
doc=ufunc_docs._signbit_doc
)
copysign = ufuncs.create_ufunc(
'nlcpy_copysign',
numpy.copysign.types,
err._copysign_error_check,
doc=ufunc_docs._copysign_doc
)
nextafter = ufuncs.create_ufunc(
'nlcpy_nextafter',
numpy.nextafter.types,
err._nextafter_error_check,
doc=ufunc_docs._nextafter_doc
)
spacing = ufuncs.create_ufunc(
'nlcpy_spacing',
numpy.spacing.types,
err._spacing_error_check,
doc=ufunc_docs._spacing_doc
)
# ufunc_operation(modf,numpy_types,valid_error_check)dnl
ldexp = ufuncs.create_ufunc(
'nlcpy_ldexp',
numpy.ldexp.types,
err._ldexp_error_check,
doc=ufunc_docs._ldexp_doc
)
# ufunc_operation(frexp)dnl
floor = ufuncs.create_ufunc(
'nlcpy_floor',
casting._floor_types,
err._floor_error_check,
doc=ufunc_docs._floor_doc
)
ceil = ufuncs.create_ufunc(
'nlcpy_ceil',
casting._ceil_types,
err._ceil_error_check,
doc=ufunc_docs._ceil_doc
)
trunc = ufuncs.create_ufunc(
'nlcpy_trunc',
numpy.trunc.types,
err._trunc_error_check,
doc=ufunc_docs._trunc_doc
)
# matmul
matmul = ufuncs.create_ufunc(
'nlcpy_matmul',
numpy.matmul.types,
None,
doc=ufunc_docs._matmul_doc
)
# end of operator functions
| 20
| 88
| 0.722318
|
#
# * The source code in this file is developed independently by NEC Corporation.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy
from nlcpy.ufuncs import ufuncs
from nlcpy.ufuncs import casting
from nlcpy.ufuncs import err
from nlcpy.ufuncs import ufunc_docs
# ----------------------------------------------------------------------------
# ufunc operations
# see: https://docs.scipy.org/doc/numpy/reference/ufuncs.html
# ----------------------------------------------------------------------------
# math_operations
add = ufuncs.create_ufunc(
'nlcpy_add',
numpy.add.types,
err._add_error_check,
doc=ufunc_docs._add_doc
)
subtract = ufuncs.create_ufunc(
'nlcpy_subtract',
casting._subtract_types,
err._subtract_error_check,
doc=ufunc_docs._subtract_doc
)
multiply = ufuncs.create_ufunc(
'nlcpy_multiply',
numpy.multiply.types,
err._multiply_error_check,
doc=ufunc_docs._multiply_doc
)
true_divide = ufuncs.create_ufunc(
'nlcpy_true_divide',
casting._true_divide_types,
err._true_divide_error_check,
doc=ufunc_docs._true_divide_doc
)
# ufunc_operation(divide,orig_types,valid_error_check)dnl
divide = true_divide
logaddexp = ufuncs.create_ufunc(
'nlcpy_logaddexp',
numpy.logaddexp.types,
err._logaddexp_error_check,
doc=ufunc_docs._logaddexp_doc
)
logaddexp2 = ufuncs.create_ufunc(
'nlcpy_logaddexp2',
numpy.logaddexp2.types,
err._logaddexp2_error_check,
doc=ufunc_docs._logaddexp2_doc
)
floor_divide = ufuncs.create_ufunc(
'nlcpy_floor_divide',
numpy.floor_divide.types,
err._floor_divide_error_check,
doc=ufunc_docs._floor_divide_doc
)
negative = ufuncs.create_ufunc(
'nlcpy_negative',
casting._negative_types,
err._negative_error_check,
doc=ufunc_docs._negative_doc
)
positive = ufuncs.create_ufunc(
'nlcpy_positive',
casting._positive_types,
err._positive_error_check,
doc=ufunc_docs._positive_doc
)
power = ufuncs.create_ufunc(
'nlcpy_power',
numpy.power.types,
err._power_error_check,
doc=ufunc_docs._power_doc
)
remainder = ufuncs.create_ufunc(
'nlcpy_remainder',
casting._remainder_types,
err._remainder_error_check,
doc=ufunc_docs._remainder_doc
)
# ufunc_operation(mod,orig_types,valid_error_check)dnl
mod = remainder
fmod = ufuncs.create_ufunc(
'nlcpy_fmod',
casting._fmod_types,
err._fmod_error_check,
doc=ufunc_docs._fmod_doc
)
# ufunc_operation(divmod,numpy_types,valid_error_check)dnl
absolute = ufuncs.create_ufunc(
'nlcpy_absolute',
numpy.absolute.types,
err._absolute_error_check,
doc=ufunc_docs._absolute_doc
)
fabs = ufuncs.create_ufunc(
'nlcpy_fabs',
casting._fabs_types,
err._fabs_error_check,
doc=ufunc_docs._fabs_doc
)
rint = ufuncs.create_ufunc(
'nlcpy_rint',
numpy.rint.types,
err._rint_error_check,
doc=ufunc_docs._rint_doc
)
sign = ufuncs.create_ufunc(
'nlcpy_sign',
casting._sign_types,
err._sign_error_check,
doc=ufunc_docs._sign_doc
)
heaviside = ufuncs.create_ufunc(
'nlcpy_heaviside',
numpy.heaviside.types,
err._heaviside_error_check,
doc=ufunc_docs._heaviside_doc
)
conjugate = ufuncs.create_ufunc(
'nlcpy_conjugate',
numpy.conjugate.types,
err._conjugate_error_check,
doc=ufunc_docs._conjugate_doc
)
# ufunc_operation(conj,numpy_types,valid_error_check)dnl
conj = conjugate
exp = ufuncs.create_ufunc(
'nlcpy_exp',
numpy.exp.types,
err._exp_error_check,
doc=ufunc_docs._exp_doc
)
exp2 = ufuncs.create_ufunc(
'nlcpy_exp2',
numpy.exp2.types,
err._exp2_error_check,
doc=ufunc_docs._exp2_doc
)
log = ufuncs.create_ufunc(
'nlcpy_log',
numpy.log.types,
err._log_error_check,
doc=ufunc_docs._log_doc
)
log2 = ufuncs.create_ufunc(
'nlcpy_log2',
numpy.log2.types,
err._log2_error_check,
doc=ufunc_docs._log2_doc
)
log10 = ufuncs.create_ufunc(
'nlcpy_log10',
numpy.log10.types,
err._log10_error_check,
doc=ufunc_docs._log10_doc
)
expm1 = ufuncs.create_ufunc(
'nlcpy_expm1',
numpy.expm1.types,
err._expm1_error_check,
doc=ufunc_docs._expm1_doc
)
log1p = ufuncs.create_ufunc(
'nlcpy_log1p',
numpy.log1p.types,
err._log1p_error_check,
doc=ufunc_docs._log1p_doc
)
sqrt = ufuncs.create_ufunc(
'nlcpy_sqrt',
numpy.sqrt.types,
err._sqrt_error_check,
doc=ufunc_docs._sqrt_doc
)
square = ufuncs.create_ufunc(
'nlcpy_square',
numpy.square.types,
err._square_error_check,
doc=ufunc_docs._square_doc
)
cbrt = ufuncs.create_ufunc(
'nlcpy_cbrt',
casting._cbrt_types,
err._cbrt_error_check,
doc=ufunc_docs._cbrt_doc
)
reciprocal = ufuncs.create_ufunc(
'nlcpy_reciprocal',
numpy.reciprocal.types,
err._reciprocal_error_check,
doc=ufunc_docs._reciprocal_doc
)
# ufunc_operation(gcd)dnl
# ufunc_operation(lcm)dnl
# bit-twiddling functions
bitwise_and = ufuncs.create_ufunc(
'nlcpy_bitwise_and',
casting._bitwise_and_types,
err._bitwise_and_error_check,
doc=ufunc_docs._bitwise_and_doc
)
bitwise_or = ufuncs.create_ufunc(
'nlcpy_bitwise_or',
casting._bitwise_or_types,
err._bitwise_or_error_check,
doc=ufunc_docs._bitwise_or_doc
)
bitwise_xor = ufuncs.create_ufunc(
'nlcpy_bitwise_xor',
casting._bitwise_xor_types,
err._bitwise_xor_error_check,
doc=ufunc_docs._bitwise_xor_doc
)
invert = ufuncs.create_ufunc(
'nlcpy_invert',
casting._invert_types,
err._invert_error_check,
doc=ufunc_docs._invert_doc
)
left_shift = ufuncs.create_ufunc(
'nlcpy_left_shift',
casting._left_shift_types,
err._left_shift_error_check,
doc=ufunc_docs._left_shift_doc
)
right_shift = ufuncs.create_ufunc(
'nlcpy_right_shift',
casting._right_shift_types,
err._right_shift_error_check,
doc=ufunc_docs._right_shift_doc
)
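# Note (assumption): the bit-twiddling ufuncs above take their type tables
# from the casting module rather than numpy.<op>.types, presumably because
# they are only defined for boolean and integer inputs.  Hedged usage sketch,
# assuming these names are re-exported from the top-level nlcpy namespace:
#
#   >>> nlcpy.bitwise_and(nlcpy.array([12, 10]), nlcpy.array([10, 12]))
#   array([8, 8])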
# comparison functions
greater = ufuncs.create_ufunc(
'nlcpy_greater',
numpy.greater.types,
err._greater_error_check,
doc=ufunc_docs._greater_doc
)
greater_equal = ufuncs.create_ufunc(
'nlcpy_greater_equal',
numpy.greater_equal.types,
err._greater_equal_error_check,
doc=ufunc_docs._greater_equal_doc
)
less = ufuncs.create_ufunc(
'nlcpy_less',
numpy.less.types,
err._less_error_check,
doc=ufunc_docs._less_doc
)
less_equal = ufuncs.create_ufunc(
'nlcpy_less_equal',
numpy.less_equal.types,
err._less_equal_error_check,
doc=ufunc_docs._less_equal_doc
)
not_equal = ufuncs.create_ufunc(
'nlcpy_not_equal',
numpy.not_equal.types,
err._not_equal_error_check,
doc=ufunc_docs._not_equal_doc
)
equal = ufuncs.create_ufunc(
'nlcpy_equal',
numpy.equal.types,
err._equal_error_check,
doc=ufunc_docs._equal_doc
)
logical_and = ufuncs.create_ufunc(
'nlcpy_logical_and',
numpy.logical_and.types,
err._logical_and_error_check,
doc=ufunc_docs._logical_and_doc
)
logical_or = ufuncs.create_ufunc(
'nlcpy_logical_or',
numpy.logical_or.types,
err._logical_or_error_check,
doc=ufunc_docs._logical_or_doc
)
logical_xor = ufuncs.create_ufunc(
'nlcpy_logical_xor',
numpy.logical_xor.types,
err._logical_xor_error_check,
doc=ufunc_docs._logical_xor_doc
)
logical_not = ufuncs.create_ufunc(
'nlcpy_logical_not',
numpy.logical_not.types,
err._logical_not_error_check,
doc=ufunc_docs._logical_not_doc
)
minimum = ufuncs.create_ufunc(
'nlcpy_minimum',
numpy.minimum.types,
err._minimum_error_check,
doc=ufunc_docs._minimum_doc
)
maximum = ufuncs.create_ufunc(
'nlcpy_maximum',
numpy.maximum.types,
err._maximum_error_check,
doc=ufunc_docs._maximum_doc
)
fmax = ufuncs.create_ufunc(
'nlcpy_fmax',
numpy.fmax.types,
err._fmax_error_check,
doc=ufunc_docs._fmax_doc
)
fmin = ufuncs.create_ufunc(
'nlcpy_fmin',
numpy.fmin.types,
err._fmin_error_check,
doc=ufunc_docs._fmin_doc
)
# trigonometric functions
sin = ufuncs.create_ufunc(
'nlcpy_sin',
numpy.sin.types,
err._sin_error_check,
doc=ufunc_docs._sin_doc
)
cos = ufuncs.create_ufunc(
'nlcpy_cos',
numpy.cos.types,
err._cos_error_check,
doc=ufunc_docs._cos_doc
)
tan = ufuncs.create_ufunc(
'nlcpy_tan',
numpy.tan.types,
err._tan_error_check,
doc=ufunc_docs._tan_doc
)
arcsin = ufuncs.create_ufunc(
'nlcpy_arcsin',
numpy.arcsin.types,
err._arcsin_error_check,
doc=ufunc_docs._arcsin_doc
)
arccos = ufuncs.create_ufunc(
'nlcpy_arccos',
numpy.arccos.types,
err._arccos_error_check,
doc=ufunc_docs._arccos_doc
)
arctan = ufuncs.create_ufunc(
'nlcpy_arctan',
numpy.arctan.types,
err._arctan_error_check,
doc=ufunc_docs._arctan_doc
)
arctan2 = ufuncs.create_ufunc(
'nlcpy_arctan2',
casting._arctan2_types,
err._arctan2_error_check,
doc=ufunc_docs._arctan2_doc
)
hypot = ufuncs.create_ufunc(
'nlcpy_hypot',
casting._hypot_types,
err._hypot_error_check,
doc=ufunc_docs._hypot_doc
)
sinh = ufuncs.create_ufunc(
'nlcpy_sinh',
numpy.sinh.types,
err._sinh_error_check,
doc=ufunc_docs._sinh_doc
)
cosh = ufuncs.create_ufunc(
'nlcpy_cosh',
numpy.cosh.types,
err._cosh_error_check,
doc=ufunc_docs._cosh_doc
)
tanh = ufuncs.create_ufunc(
'nlcpy_tanh',
numpy.tanh.types,
err._tanh_error_check,
doc=ufunc_docs._tanh_doc
)
arcsinh = ufuncs.create_ufunc(
'nlcpy_arcsinh',
numpy.arcsinh.types,
err._arcsinh_error_check,
doc=ufunc_docs._arcsinh_doc
)
arccosh = ufuncs.create_ufunc(
'nlcpy_arccosh',
numpy.arccosh.types,
err._arccosh_error_check,
doc=ufunc_docs._arccosh_doc
)
arctanh = ufuncs.create_ufunc(
'nlcpy_arctanh',
numpy.arctanh.types,
err._arctanh_error_check,
doc=ufunc_docs._arctanh_doc
)
deg2rad = ufuncs.create_ufunc(
'nlcpy_deg2rad',
casting._deg2rad_types,
err._deg2rad_error_check,
doc=ufunc_docs._deg2rad_doc
)
rad2deg = ufuncs.create_ufunc(
'nlcpy_rad2deg',
casting._rad2deg_types,
err._rad2deg_error_check,
doc=ufunc_docs._rad2deg_doc
)
degrees = ufuncs.create_ufunc(
'nlcpy_degrees',
casting._degrees_types,
err._degrees_error_check,
doc=ufunc_docs._degrees_doc
)
radians = ufuncs.create_ufunc(
'nlcpy_radians',
casting._radians_types,
err._radians_error_check,
doc=ufunc_docs._radians_doc
)
# floating functions
isfinite = ufuncs.create_ufunc(
'nlcpy_isfinite',
numpy.isfinite.types,
err._isfinite_error_check,
doc=ufunc_docs._isfinite_doc
)
isinf = ufuncs.create_ufunc(
'nlcpy_isinf',
numpy.isinf.types,
err._isinf_error_check,
doc=ufunc_docs._isinf_doc
)
isnan = ufuncs.create_ufunc(
'nlcpy_isnan',
numpy.isnan.types,
err._isnan_error_check,
doc=ufunc_docs._isnan_doc
)
# ufunc_operation(isnat, numpy_types, valid_error_check): template macro, no ufunc generated here
signbit = ufuncs.create_ufunc(
'nlcpy_signbit',
numpy.signbit.types,
err._signbit_error_check,
doc=ufunc_docs._signbit_doc
)
copysign = ufuncs.create_ufunc(
'nlcpy_copysign',
numpy.copysign.types,
err._copysign_error_check,
doc=ufunc_docs._copysign_doc
)
nextafter = ufuncs.create_ufunc(
'nlcpy_nextafter',
numpy.nextafter.types,
err._nextafter_error_check,
doc=ufunc_docs._nextafter_doc
)
spacing = ufuncs.create_ufunc(
'nlcpy_spacing',
numpy.spacing.types,
err._spacing_error_check,
doc=ufunc_docs._spacing_doc
)
# ufunc_operation(modf, numpy_types, valid_error_check): template macro, no ufunc generated here
ldexp = ufuncs.create_ufunc(
'nlcpy_ldexp',
numpy.ldexp.types,
err._ldexp_error_check,
doc=ufunc_docs._ldexp_doc
)
# ufunc_operation(frexp): template macro, no ufunc generated here
floor = ufuncs.create_ufunc(
'nlcpy_floor',
casting._floor_types,
err._floor_error_check,
doc=ufunc_docs._floor_doc
)
ceil = ufuncs.create_ufunc(
'nlcpy_ceil',
casting._ceil_types,
err._ceil_error_check,
doc=ufunc_docs._ceil_doc
)
trunc = ufuncs.create_ufunc(
'nlcpy_trunc',
numpy.trunc.types,
err._trunc_error_check,
doc=ufunc_docs._trunc_doc
)
# matmul
matmul = ufuncs.create_ufunc(
'nlcpy_matmul',
numpy.matmul.types,
None,
doc=ufunc_docs._matmul_doc
)
# end of operator functions
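# Hedged usage sketch (not part of the original source): the ufuncs defined
# above are intended to mirror their NumPy counterparts, e.g., assuming they
# are re-exported from the top-level nlcpy namespace:
#
#   >>> import nlcpy
#   >>> x = nlcpy.array([1.0, 4.0, 9.0])
#   >>> nlcpy.sqrt(x)        # elementwise square root
#   array([1., 2., 3.])
#   >>> nlcpy.square(x)      # elementwise square
#   array([ 1., 16., 81.])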