Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 – 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 4 – 247 |
| max_stars_repo_name | string | length 4 – 125 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 ⌀ | 1 – 368k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 – 247 |
| max_issues_repo_name | string | length 4 – 125 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 ⌀ | 1 – 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 – 247 |
| max_forks_repo_name | string | length 4 – 125 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 – 1.04M |
| avg_line_length | float64 | 1.77 – 618k |
| max_line_length | int64 | 1 – 1.02M |
| alphanum_fraction | float64 | 0 – 1 |
| original_content | string | length 7 – 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 – 942k |
| filtered:remove_class_no_docstring | int64 | -354 – 977k |
| filtered:remove_delete_markers | int64 | 0 – 60.1k |
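For orientation, here is a minimal sketch of how rows with this schema could be streamed with the Hugging Face `datasets` library. The dataset identifier is a placeholder, since this extract does not name the underlying dataset:

```python
# Hedged sketch: stream a few rows of a dataset with the schema above.
# "org/dataset-name" is a placeholder, not the real dataset identifier.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds.take(3):
    # Each row carries a raw file plus repo-level popularity metadata.
    print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])
```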
**Record 1: `models.py` (`Arnaud15/ProjectMSE226` @ `11db7260611242ba0e464c0fab266591afb3c1a6`)**
- size 565 · ext `py` · lang Python · licenses: MIT
- stars / issues / forks: null (path, repo, and head hexsha are identical across all three views)

content:

```python
import sklearn.linear_model as skl
```

avg_line_length 56.5 · max_line_length 118 · alphanum_fraction 0.723894

original_content:

```python
import sklearn.linear_model as skl
def linear_regression(type, penalty_log='l2', alpha=0, l1_ratio=0, fit_intercept=True, normalize=False):
dic = {"ols": skl.LinearRegression(fit_intercept, normalize), 'ridge': skl.Ridge(alpha, fit_intercept, normalize),
"lasso": skl.Lasso(alpha, fit_intercept, normalize),
"elasticNet": skl.ElasticNet(alpha, l1_ratio, fit_intercept, normalize, random_state=0),
"logistic": skl.LogisticRegression(penalty=penalty_log, C=alpha, fit_intercept=fit_intercept)}
reg = dic[type]
return reg
```

filtered: remove_function_no_docstring 507 · remove_class_no_docstring 0 · remove_delete_markers 23
**Record 2: `tests/unit/test_util.py` (`yougov/pmxbot` @ `7e13ec7dc9c1759a1d6a637b0424d7efbb6c62c1`)**
- size 403 · ext `py` · lang Python · licenses: MIT
- stars 17 (2016-01-27T12:10:03.000Z to 2019-08-28T23:02:51.000Z) · issues 79 (2015-12-02T16:02:01.000Z to 2020-02-09T01:51:05.000Z) · forks 8 (2016-06-27T11:07:42.000Z to 2019-01-24T20:21:42.000Z)

content:

```python
import pytest
from pmxbot import util
@pytest.mark.xfail(reason="Wordnik is unreliable")
@pytest.mark.xfail(reason="#97: Google is unreliable")
```

avg_line_length 22.388889 · max_line_length 54 · alphanum_fraction 0.784119

original_content:

```python
import pytest
from pmxbot import util
@pytest.mark.xfail(reason="Wordnik is unreliable")
def test_lookup(needs_wordnik):
assert util.lookup('dachshund') is not None
@pytest.mark.xfail(reason="#97: Google is unreliable")
def test_emergency_compliment(needs_internet):
assert util.load_emergency_compliments()
def test_acronym_lookup(needs_internet):
assert util.lookup_acronym('NSFW')
```

filtered: remove_function_no_docstring 186 · remove_class_no_docstring 0 · remove_delete_markers 67
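Both markers in this row use `pytest.mark.xfail` so that flaky network-dependent tests do not fail the suite. A self-contained sketch of the semantics (the helper is hypothetical, not pmxbot's API):

```python
# Minimal sketch of pytest's xfail semantics: a failing body is reported
# as XFAIL (expected failure); a passing body is reported as XPASS unless
# strict=True is set, in which case it becomes a real failure.
import pytest

def fetch_definition(word):
    return None  # stand-in for an unreliable network call

@pytest.mark.xfail(reason="external service is unreliable")
def test_lookup_definition():
    assert fetch_definition("dachshund") is not None
```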
**Record 3: `examples/statistics.py` (`chiluf/visvis.dev` @ `373846ea25044b7ca50f44c63dab4248e14deacd`)**
- size 3,824 · ext `py` · lang Python · licenses: BSD-3-Clause
- stars / issues / forks: null

content:

```python
#!/usr/bin/env python
""" This example a coupe of ways to visualize statistic data.
As an example I took the monthly temperature in the region where I live
(Twente, The Netherlands) over the period 1983-2010.
This data was extracted from publicly available data obtained from the
Royal Netherlands Meteorological Institute (KNMI).
"""
import visvis as vv
temp_data = """
1983: 8.00 2.73 9.00 13.02 14.63 21.10 25.03 23.73 17.72 13.34 9.31 5.35
1984: 4.78 4.06 7.38 12.24 14.14 16.86 19.19 22.24 15.99 14.15 10.44 5.81
1985: -1.87 2.28 6.50 12.06 17.67 17.36 21.03 19.73 17.49 13.55 4.06 6.92
1986: 3.55 -1.30 8.03 10.64 18.54 20.93 20.98 20.06 15.50 15.02 10.55 6.59
1987: -1.62 4.52 4.78 15.06 14.49 17.57 20.66 19.66 18.74 14.17 7.96 5.66
1988: 8.05 6.71 7.18 13.00 19.37 18.68 19.80 21.29 17.60 13.82 8.05 7.34
1989: 6.14 7.77 11.48 10.21 19.58 21.05 23.14 22.44 19.95 15.92 8.97 7.34
1990: 6.83 10.82 11.90 13.47 19.58 19.13 21.06 23.80 16.24 16.02 8.08 5.35
1991: 5.21 2.29 12.43 13.33 14.11 15.99 23.49 22.87 19.74 13.52 7.78 5.55
1992: 4.51 7.43 9.67 12.23 20.38 22.42 23.49 22.80 18.91 10.58 10.04 5.55
1993: 7.36 3.32 9.72 15.79 19.33 20.09 20.57 20.01 16.89 12.36 4.46 6.51
1994: 6.84 4.08 10.51 13.15 17.34 20.39 27.51 23.17 17.45 13.45 11.60 7.52
1995: 5.52 9.00 8.98 13.40 18.29 19.56 25.85 25.53 18.28 17.12 8.99 0.83
1996: 1.00 2.14 6.78 15.10 15.34 20.58 21.24 22.86 16.43 14.54 7.61 1.52
1997: 1.17 8.71 11.46 12.30 17.79 21.28 22.90 25.80 19.04 13.29 8.25 6.25
1998: 6.73 9.04 10.72 12.91 19.81 20.46 20.56 21.63 18.72 11.91 5.95 6.01
1999: 7.07 5.34 10.54 14.59 18.72 20.39 24.65 22.39 22.83 14.03 8.96 6.36
2000: 5.74 8.43 9.52 15.49 20.22 21.51 19.71 22.83 19.35 14.59 10.13 6.67
2001: 5.02 7.23 7.25 12.46 19.85 20.12 23.71 23.79 16.70 17.72 9.14 4.59
2002: 6.23 9.74 11.19 14.14 18.40 21.86 22.44 23.75 19.25 12.91 10.19 3.69
2003: 4.35 5.25 12.38 15.38 18.71 23.96 24.06 25.77 20.04 11.27 10.53 6.45
2004: 4.99 7.15 10.00 15.85 17.26 20.93 21.72 24.33 19.94 14.91 8.67 4.96
2005: 6.95 4.32 9.85 15.66 18.07 21.97 22.92 21.07 21.04 17.72 8.86 5.37
2006: 3.31 4.14 7.05 13.51 19.30 22.37 29.29 20.75 23.03 17.04 11.67 8.46
2007: 8.73 8.41 12.24 19.28 18.81 22.45 21.69 22.05 17.97 13.48 8.96 5.48
2008: 8.07 8.98 9.21 13.43 20.70 22.55 23.44 22.24 18.40 13.50 8.62 4.18
2009: 2.98 5.27 9.47 18.89 19.56 20.85 23.36 24.04 20.04 13.60 11.57 3.95
2010: 0.20 3.60 10.17 15.41 15.21 22.88 27.06 21.16 17.20 13.73 7.35 -0.62
"""
# Collect data per month (just put all years on a heap)
# Not the most readable code, but this example is about what we do with
# the data next.
temps_per_month = [[] for i in range(12)]
for line in temp_data.splitlines():
if ":" not in line:
continue
temps = [float(t) for t in line.split(': ')[1].split(' ')]
for i in range(12):
temps_per_month[i].append(temps[i])
# Calculate means
mean = lambda x: sum(x)/len(x)
mean_temps_per_month = [mean(tt) for tt in temps_per_month]
# Prepare figure
vv.figure(1); vv.clf()
# Show means in a normal bar chart
a1 = vv.subplot(221);
b2 = vv.bar(mean_temps_per_month)
b2.color = 'r'
# Show means in a 3D bar chart
a2 = vv.subplot(222);
b3 = vv.bar3(mean_temps_per_month)
b3.color = 'g'
a2.daspect = 1,1,0.3
# Show box plot
a3 = vv.subplot(223)
bp = vv.boxplot(temps_per_month)
bp.lc = 'b'
bp.lw = 2
# Show violin plot
a4 = vv.subplot(224)
vp = vv.boxplot(temps_per_month, whiskers='violin')
vp.lc = 'm'
vp.lw = 3
# Set legends and ticks for each axes
for a in [a1, a2, a3, a4]:
a.axis.xTicks = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
if a is a2:
a.axis.zLabel = 'Temperature [C^o]'
a.axis.showGridZ = True
else:
a.axis.yLabel = 'Temperature [C^o]'
a.axis.showGridY = True
a.axis.xTicksAngle = -30
app = vv.use()
app.Run()
```

avg_line_length 39.020408 · max_line_length 77 · alphanum_fraction 0.656642

original_content (identical to content; nothing was filtered):

```python
#!/usr/bin/env python
""" This example a coupe of ways to visualize statistic data.
As an example I took the monthly temperature in the region where I live
(Twente, The Netherlands) over the period 1983-2010.
This data was extracted from publicly available data obtained from the
Royal Netherlands Meteorological Institute (KNMI).
"""
import visvis as vv
temp_data = """
1983: 8.00 2.73 9.00 13.02 14.63 21.10 25.03 23.73 17.72 13.34 9.31 5.35
1984: 4.78 4.06 7.38 12.24 14.14 16.86 19.19 22.24 15.99 14.15 10.44 5.81
1985: -1.87 2.28 6.50 12.06 17.67 17.36 21.03 19.73 17.49 13.55 4.06 6.92
1986: 3.55 -1.30 8.03 10.64 18.54 20.93 20.98 20.06 15.50 15.02 10.55 6.59
1987: -1.62 4.52 4.78 15.06 14.49 17.57 20.66 19.66 18.74 14.17 7.96 5.66
1988: 8.05 6.71 7.18 13.00 19.37 18.68 19.80 21.29 17.60 13.82 8.05 7.34
1989: 6.14 7.77 11.48 10.21 19.58 21.05 23.14 22.44 19.95 15.92 8.97 7.34
1990: 6.83 10.82 11.90 13.47 19.58 19.13 21.06 23.80 16.24 16.02 8.08 5.35
1991: 5.21 2.29 12.43 13.33 14.11 15.99 23.49 22.87 19.74 13.52 7.78 5.55
1992: 4.51 7.43 9.67 12.23 20.38 22.42 23.49 22.80 18.91 10.58 10.04 5.55
1993: 7.36 3.32 9.72 15.79 19.33 20.09 20.57 20.01 16.89 12.36 4.46 6.51
1994: 6.84 4.08 10.51 13.15 17.34 20.39 27.51 23.17 17.45 13.45 11.60 7.52
1995: 5.52 9.00 8.98 13.40 18.29 19.56 25.85 25.53 18.28 17.12 8.99 0.83
1996: 1.00 2.14 6.78 15.10 15.34 20.58 21.24 22.86 16.43 14.54 7.61 1.52
1997: 1.17 8.71 11.46 12.30 17.79 21.28 22.90 25.80 19.04 13.29 8.25 6.25
1998: 6.73 9.04 10.72 12.91 19.81 20.46 20.56 21.63 18.72 11.91 5.95 6.01
1999: 7.07 5.34 10.54 14.59 18.72 20.39 24.65 22.39 22.83 14.03 8.96 6.36
2000: 5.74 8.43 9.52 15.49 20.22 21.51 19.71 22.83 19.35 14.59 10.13 6.67
2001: 5.02 7.23 7.25 12.46 19.85 20.12 23.71 23.79 16.70 17.72 9.14 4.59
2002: 6.23 9.74 11.19 14.14 18.40 21.86 22.44 23.75 19.25 12.91 10.19 3.69
2003: 4.35 5.25 12.38 15.38 18.71 23.96 24.06 25.77 20.04 11.27 10.53 6.45
2004: 4.99 7.15 10.00 15.85 17.26 20.93 21.72 24.33 19.94 14.91 8.67 4.96
2005: 6.95 4.32 9.85 15.66 18.07 21.97 22.92 21.07 21.04 17.72 8.86 5.37
2006: 3.31 4.14 7.05 13.51 19.30 22.37 29.29 20.75 23.03 17.04 11.67 8.46
2007: 8.73 8.41 12.24 19.28 18.81 22.45 21.69 22.05 17.97 13.48 8.96 5.48
2008: 8.07 8.98 9.21 13.43 20.70 22.55 23.44 22.24 18.40 13.50 8.62 4.18
2009: 2.98 5.27 9.47 18.89 19.56 20.85 23.36 24.04 20.04 13.60 11.57 3.95
2010: 0.20 3.60 10.17 15.41 15.21 22.88 27.06 21.16 17.20 13.73 7.35 -0.62
"""
# Collect data per month (just put all years on a heap)
# Not the most readable code, but this example is about what we do with
# the data next.
temps_per_month = [[] for i in range(12)]
for line in temp_data.splitlines():
if ":" not in line:
continue
temps = [float(t) for t in line.split(': ')[1].split(' ')]
for i in range(12):
temps_per_month[i].append(temps[i])
# Calculate means
mean = lambda x: sum(x)/len(x)
mean_temps_per_month = [mean(tt) for tt in temps_per_month]
# Prepare figure
vv.figure(1); vv.clf()
# Show means in a normal bar chart
a1 = vv.subplot(221);
b2 = vv.bar(mean_temps_per_month)
b2.color = 'r'
# Show means in a 3D bar chart
a2 = vv.subplot(222);
b3 = vv.bar3(mean_temps_per_month)
b3.color = 'g'
a2.daspect = 1,1,0.3
# Show box plot
a3 = vv.subplot(223)
bp = vv.boxplot(temps_per_month)
bp.lc = 'b'
bp.lw = 2
# Show violin plot
a4 = vv.subplot(224)
vp = vv.boxplot(temps_per_month, whiskers='violin')
vp.lc = 'm'
vp.lw = 3
# Set legends and ticks for each axes
for a in [a1, a2, a3, a4]:
a.axis.xTicks = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
if a is a2:
a.axis.zLabel = 'Temperature [C^o]'
a.axis.showGridZ = True
else:
a.axis.yLabel = 'Temperature [C^o]'
a.axis.showGridY = True
a.axis.xTicksAngle = -30
app = vv.use()
app.Run()
```

filtered: remove_function_no_docstring 0 · remove_class_no_docstring 0 · remove_delete_markers 0
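A small stdlib-only sketch that reuses `temps_per_month` from the script above to print the monthly means and medians the four panels visualize:

```python
# Sanity-check sketch (stdlib only): print what the plots show.
# Assumes temps_per_month from the script above is in scope.
import statistics

months = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
for name, temps in zip(months, temps_per_month):
    print(f"{name}: mean={statistics.mean(temps):5.2f}  "
          f"median={statistics.median(temps):5.2f}")
```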
**Record 4: `tests/terraform/checks/resource/gcp/test_CloudStorageLogging.py` (`kylelaker/checkov` @ `6eada26030a87f397a6bf1831827b3dc6c5dad2d`)**
- size 1,320 · ext `py` · lang Python · licenses: Apache-2.0
- stars 4,013 (2019-12-09T13:16:54.000Z to 2022-03-31T14:31:01.000Z) · issues 1,258 (2019-12-17T09:55:51.000Z to 2022-03-31T19:17:17.000Z) · forks 638 (2019-12-19T08:57:38.000Z to 2022-03-30T21:38:37.000Z)

content:

```python
import unittest
import hcl2
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.gcp.CloudStorageLogging import check
if __name__ == '__main__':
unittest.main()
```

avg_line_length 33 · max_line_length 82 · alphanum_fraction 0.625

original_content:

```python
import unittest
import hcl2
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.gcp.CloudStorageLogging import check
class TestCloudStorageLogging(unittest.TestCase):
def test_failure(self):
hcl_res = hcl2.loads("""
resource "google_storage_bucket" "logging" {
name = "jgwloggingbucket"
location = var.location
uniform_bucket_level_access = true
}
""")
resource_conf = hcl_res['resource'][0]['google_storage_bucket']['logging']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "google_storage_bucket" "logging" {
name = "jgwloggingbucket"
location = var.location
uniform_bucket_level_access = true
logging {
log_bucket = "mylovelybucket"
}
}
""")
resource_conf = hcl_res['resource'][0]['google_storage_bucket']['logging']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
```

filtered: remove_function_no_docstring 1,008 · remove_class_no_docstring 28 · remove_delete_markers 77
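The indexing `hcl_res['resource'][0]['google_storage_bucket']['logging']` reflects how python-hcl2 shapes its output: repeatable blocks come back as a list of single-key dicts, keyed first by resource type and then by resource name. A small sketch of just that parse step:

```python
# Hedged sketch of the parse shape the tests above index into (python-hcl2).
import hcl2

hcl_res = hcl2.loads('resource "google_storage_bucket" "b" { name = "x" }')
# "resource" maps to a list of blocks; each block is a single-key dict.
conf = hcl_res['resource'][0]['google_storage_bucket']['b']
print(conf)
```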
**Record 5: `antarest/study/storage/rawstudy/model/filesystem/root/input/hydro/prepro/area/prepro.py` (`AntaresSimulatorTeam/antaREST` @ `d686d2a86a52737c211ae67f3cee591f559909f2`)**
- size 540 · ext `py` · lang Python · licenses: Apache-2.0
- stars 2 (2020-09-30T11:40:22.000Z to 2020-11-09T09:06:30.000Z) · issues 542 (2021-01-11T13:23:47.000Z to 2022-03-31T15:38:10.000Z) · forks 1 (2020-10-01T12:18:15.000Z to 2020-10-01T12:18:15.000Z)

content:

```python
from antarest.study.storage.rawstudy.model.filesystem.config.model import (
FileStudyTreeConfig,
)
from antarest.study.storage.rawstudy.model.filesystem.context import (
ContextServer,
)
from antarest.study.storage.rawstudy.model.filesystem.ini_file_node import (
IniFileNode,
)
```

avg_line_length 33.75 · max_line_length 76 · alphanum_fraction 0.768519

original_content:

```python
from antarest.study.storage.rawstudy.model.filesystem.config.model import (
FileStudyTreeConfig,
)
from antarest.study.storage.rawstudy.model.filesystem.context import (
ContextServer,
)
from antarest.study.storage.rawstudy.model.filesystem.ini_file_node import (
IniFileNode,
)
class InputHydroPreproAreaPrepro(IniFileNode):
def __init__(self, context: ContextServer, config: FileStudyTreeConfig):
types = {"prepro": {"intermonthly-correlation": float}}
IniFileNode.__init__(self, context, config, types)
```

filtered: remove_function_no_docstring 174 · remove_class_no_docstring 25 · remove_delete_markers 49
**Record 6: `models/cifar/resnet.py` (`HRanWang/imagenet_whr` @ `fcfe830c7889a7239d3f791de192d6121f8c316f`)**
- size 26,166 · ext `py` · lang Python · licenses: MIT
- stars 1 (2019-10-19T15:30:02.000Z to 2019-10-19T15:30:02.000Z) · issues: null · forks: null

content:

```python
from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported from
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import math
import torch
import numpy as np
__all__ = ['resnet','resnet50']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def resnet(**kwargs):
"""
Constructs a ResNet model.
"""
return ResNet(**kwargs)
class Dconv_shuffle(nn.Module):
"""
Deformable convolution with random shuffling of the feature map.
Random shuffling only happened within each page independently.
The sampling locations are generated for each forward pass during the training.
"""
def resnet50(**kwargs):
"""
Constructs a ResNet model.
"""
return Resnet50(**kwargs)
```

avg_line_length 41.798722 · max_line_length 150 · alphanum_fraction 0.613697

original_content:

```python
from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported from
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import math
import torch
import numpy as np
__all__ = ['resnet','resnet50']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, depth, num_classes=1000, block_name='BasicBlock'):
super(ResNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 model
if block_name.lower() == 'basicblock':
assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
n = (depth - 2) // 6
block = BasicBlock
elif block_name.lower() == 'bottleneck':
assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
n = (depth - 2) // 9
block = Bottleneck
else:
            raise ValueError('block_name should be Basicblock or Bottleneck')
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, n)
self.layer2 = self._make_layer(block, 32, n, stride=2)
self.layer3 = self._make_layer(block, 64, n, stride=2)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x) # 32x32
x = self.layer1(x) # 32x32
x = self.layer2(x) # 16x16
x = self.layer3(x) # 8x8
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet(**kwargs):
"""
Constructs a ResNet model.
"""
return ResNet(**kwargs)
def conv_1_3x3():
return nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False), # 'SAME'
nn.BatchNorm2d(64),
nn.ReLU(inplace=True))
# TODO: nn.MaxPool2d(kernel_size=3, stride=2, padding=0)) # 'valid'
def conv_1_3x3_dconv():
return nn.Sequential(Dconv_shuffle(3, 64, 3, 1, 1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True))
# TODO: nn.MaxPool2d(kernel_size=3, stride=2, padding=0)) # 'valid'
class bottleneck(nn.Module):
def __init__(self, inplanes, planes, kernel_size, strides=(2, 2)):
super(bottleneck, self).__init__()
plane1, plane2, plane3 = planes
self.outchannels = plane3
self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(plane1)
self.conv2 = nn.Conv2d(plane1, plane2, kernel_size=kernel_size, stride=strides, padding=int((kernel_size - 1) / 2), bias=False)
self.bn2 = nn.BatchNorm2d(plane2)
self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(plane3)
self.conv4 = nn.Conv2d(inplanes, plane3, kernel_size=1, stride=strides, padding=0, bias=False)
self.bn4 = nn.BatchNorm2d(plane3)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_tensor):
out = self.conv1(input_tensor)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
shortcut = self.conv4(input_tensor)
shortcut = self.bn4(shortcut)
out += shortcut
out = self.relu(out)
return out
class identity_block3(nn.Module):
def __init__(self, inplanes, planes, kernel_size):
super(identity_block3, self).__init__()
plane1, plane2, plane3 = planes
self.outchannels = plane3
self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(plane1)
self.conv2 = nn.Conv2d(plane1, plane2, kernel_size=kernel_size, stride=1, padding=int((kernel_size - 1) / 2), bias=False)
self.bn2 = nn.BatchNorm2d(plane2)
self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(plane3)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_tensor, return_conv3_out=False): # return_conv3_out is only served for grad_cam.py
out = self.conv1(input_tensor)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out_conv3 = self.conv3(out)
out = self.bn3(out_conv3)
out += input_tensor
out = self.relu(out)
if return_conv3_out:
return out, out_conv3
else:
return out
class Dconv_shuffle(nn.Module):
"""
Deformable convolution with random shuffling of the feature map.
Random shuffling only happened within each page independently.
The sampling locations are generated for each forward pass during the training.
"""
def __init__(self, inplane, outplane, kernel_size, stride, padding):
super(Dconv_shuffle, self).__init__()
print('cifar Dconv_shuffle is used')
self.dilated_conv = nn.Conv2d(inplane, outplane, kernel_size=kernel_size, stride=stride, padding=padding,
bias=False)
self.indices = None
def _setup(self, inplane, spatial_size):
self.indices = np.empty((inplane, spatial_size), dtype=np.int64)
for i in range(inplane):
self.indices[i, :] = np.arange(self.indices.shape[1])+ i*self.indices.shape[1]
def forward(self, x):
x_shape = x.size() # [128, 3, 32, 32]
x = x.view(x_shape[0], -1)
if self.indices is None:
self._setup(x_shape[1], x_shape[2]*x_shape[3])
for i in range(x_shape[1]):
np.random.shuffle(self.indices[i])
x = x[:, torch.from_numpy(self.indices)].view(x_shape)
return self.dilated_conv(x)
class bottleneck_shuffle(nn.Module):
def __init__(self, inplanes, planes, kernel_size, strides=(2, 2), type='error'):
super(bottleneck_shuffle, self).__init__()
plane1, plane2, plane3 = planes
self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(plane1)
self.dconv1 = Dconv_shuffle(plane1, plane2, kernel_size=kernel_size, stride=strides, padding=1)
self.bn2 = nn.BatchNorm2d(plane2)
self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(plane3)
self.dconv2 = Dconv_shuffle(inplanes, plane3, kernel_size=1, stride=strides, padding=0)
self.bn4 = nn.BatchNorm2d(plane3)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_tensor):
out = self.conv1(input_tensor)
out = self.bn1(out)
out = self.relu(out)
out = self.dconv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
shortcut = self.dconv2(input_tensor)
shortcut = self.bn4(shortcut)
out += shortcut
out = self.relu(out)
return out
class identity_block3_shuffle(nn.Module):
def __init__(self, inplanes, planes, kernel_size, type='error'):
super(identity_block3_shuffle, self).__init__()
plane1, plane2, plane3 = planes
self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(plane1)
self.dconv = Dconv_shuffle(plane1, plane2, kernel_size=kernel_size, stride=1, padding=1)
self.bn2 = nn.BatchNorm2d(plane2)
self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(plane3)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_tensor):
out = self.conv1(input_tensor)
out = self.bn1(out)
out = self.relu(out)
out = self.dconv(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
x_shape = input_tensor.size() # [128, 3, 32, 32]
x = input_tensor.view(x_shape[0], x_shape[1] * x_shape[2] * x_shape[3]) # [128, 3*32*32]
shuffled_input = torch.empty(x_shape[0], x_shape[1], x_shape[2], x_shape[3]).cuda(0)
perm = torch.empty(0).float()
for i in range(x_shape[1]):
a = torch.randperm(x_shape[2] * x_shape[3]) + i * x_shape[2] * x_shape[3]
perm = torch.cat((perm, a.float()), 0)
shuffled_input[:, :, :, :] = x[:, perm.long()].view(x_shape[0], x_shape[1], x_shape[2], x_shape[3])
out += shuffled_input
out = self.relu(out)
return out
class Resnet50(nn.Module):
def __init__(self, dropout_rate, num_classes, include_top, layer=99, type='none'):
print('resnet50 is used')
super(Resnet50, self).__init__()
self.dropout_rate = dropout_rate
self.num_classes = num_classes
self.include_top = include_top
block_ex = 4
# Define the building blocks
if layer > 0:
self.conv_3x3 = conv_1_3x3()
else:
self.conv_3x3 = conv_1_3x3_dconv()
if layer > 10:
self.bottleneck_1 = bottleneck(16*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3, strides=(1, 1))
else:
self.bottleneck_1 = bottleneck_shuffle(16*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3, strides=(1, 1), type=type)
if layer > 11:
self.identity_block_1_1 = identity_block3(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3)
else:
self.identity_block_1_1 = identity_block3_shuffle(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3, type=type)
if layer > 12:
self.identity_block_1_2 = identity_block3(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3)
else:
self.identity_block_1_2 = identity_block3_shuffle(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3, type=type)
if layer > 20:
self.bottleneck_2 = bottleneck(64*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, strides=(2, 2))
else:
self.bottleneck_2 = bottleneck_shuffle(64*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, strides=(2, 2), type=type)
if layer > 21:
self.identity_block_2_1 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)
else:
self.identity_block_2_1 = identity_block3_shuffle(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, type=type)
if layer > 22:
self.identity_block_2_2 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)
else:
self.identity_block_2_2 = identity_block3_shuffle(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, type=type)
if layer > 23:
self.identity_block_2_3 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)
else:
self.identity_block_2_3 = identity_block3_shuffle(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, type=type)
if layer > 30:
self.bottleneck_3 = bottleneck(128*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, strides=(2, 2))
else:
self.bottleneck_3 = bottleneck_shuffle(128*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, strides=(2, 2), type=type)
if layer > 31:
self.identity_block_3_1 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
else:
self.identity_block_3_1 = identity_block3_shuffle(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, type=type)
if layer > 32:
self.identity_block_3_2 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
else:
self.identity_block_3_2 = identity_block3_shuffle(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, type=type)
if layer > 33:
self.identity_block_3_3 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
else:
self.identity_block_3_3 = identity_block3_shuffle(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, type=type)
if layer > 34:
self.identity_block_3_4 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
else:
self.identity_block_3_4 = identity_block3_shuffle(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, type=type)
if layer > 35:
self.identity_block_3_5 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
else:
self.identity_block_3_5 = identity_block3_shuffle(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, type=type)
if layer > 40:
self.bottleneck_4 = bottleneck(256*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3, strides=(2, 2))
else:
self.bottleneck_4 = bottleneck_shuffle(256*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3, strides=(2, 2), type=type)
if layer > 41:
self.identity_block_4_1 = identity_block3(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3)
else:
self.identity_block_4_1 = identity_block3_shuffle(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3, type=type)
if layer > 42:
self.identity_block_4_2 = identity_block3(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3)
else:
self.identity_block_4_2 = identity_block3_shuffle(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3, type=type)
self.avgpool = nn.AdaptiveAvgPool2d(1) # TODO: check the final size
self.fc = nn.Linear(512*block_ex, num_classes)
# Initialize the weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
# raise Exception('You are using a model without BN!!!')
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, input_x):
# print(input_x.size())
x = self.conv_3x3(input_x)
# np.save('/nethome/yuefan/fanyue/dconv/fm3x3.npy', x.detach().cpu().numpy())
# print(x.size())
x = self.bottleneck_1(x)
x = self.identity_block_1_1(x)
x = self.identity_block_1_2(x)
# print(x.size())
x = self.bottleneck_2(x)
x = self.identity_block_2_1(x)
x = self.identity_block_2_2(x)
x = self.identity_block_2_3(x)
# print(x.size())
x = self.bottleneck_3(x)
x = self.identity_block_3_1(x)
x = self.identity_block_3_2(x)
x = self.identity_block_3_3(x)
x = self.identity_block_3_4(x)
x = self.identity_block_3_5(x)
# print(x.size())
x = self.bottleneck_4(x)
x = self.identity_block_4_1(x)
x = self.identity_block_4_2(x)
# print("feature shape:", x.size())
if self.include_top:
x = self.avgpool(x)
x = x.view(x.size(0), -1)
# TODO: why there is no dropout
x = self.fc(x)
return x
class ResnetWHR(nn.Module):
def __init__(self, dropout_rate, num_classes, include_top):
"""
        This is ResNet50 for PCB version
"""
super(ResnetWHR, self).__init__()
self.dropout_rate = dropout_rate
self.num_classes = num_classes
self.include_top = include_top
self.num_features = 512
# Define the building blocks
self.conv_3x3 = conv_1_3x3()
self.bottleneck_1 = bottleneck(16, [16, 16, 64], kernel_size=3, strides=(1, 1))
self.identity_block_1_1 = identity_block3(64, [16, 16, 64], kernel_size=3)
self.identity_block_1_2 = identity_block3(64, [16, 16, 64], kernel_size=3)
self.bottleneck_2 = bottleneck(64, [32, 32, 128], kernel_size=3, strides=(2, 2))
self.identity_block_2_1 = identity_block3(128, [32, 32, 128], kernel_size=3)
self.identity_block_2_2 = identity_block3(128, [32, 32, 128], kernel_size=3)
self.identity_block_2_3 = identity_block3(128, [32, 32, 128], kernel_size=3)
self.bottleneck_3 = bottleneck(128, [64, 64, 256], kernel_size=3, strides=(2, 2))
self.identity_block_3_1 = identity_block3(256, [64, 64, 256], kernel_size=3)
self.identity_block_3_2 = identity_block3(256, [64, 64, 256], kernel_size=3)
self.identity_block_3_3 = identity_block3(256, [64, 64, 256], kernel_size=3)
self.identity_block_3_4 = identity_block3(256, [64, 64, 256], kernel_size=3)
self.identity_block_3_5 = identity_block3(256, [64, 64, 256], kernel_size=3)
self.bottleneck_4 = bottleneck(256, [128, 128, 512], kernel_size=3, strides=(2, 2))
self.identity_block_4_1 = identity_block3(512, [128, 128, 512], kernel_size=3)
self.identity_block_4_2 = identity_block3(512, [128, 128, 512], kernel_size=3)
# =======================================top=============================================
# self.se1 = SELayer(64)
# self.se2 = SELayer(128)
# self.se3 = SELayer(256)
# self.local_conv_layer1 = nn.Conv2d(64, self.num_features, kernel_size=1, padding=0, bias=False)
# self.local_conv_layer2 = nn.Conv2d(128, self.num_features, kernel_size=1, padding=0, bias=False)
# self.local_conv_layer3 = nn.Conv2d(256, self.num_features, kernel_size=1, padding=0, bias=False)
# self.instance_layer1 = nn.Linear(self.num_features, self.num_classes)
# self.instance_layer2 = nn.Linear(self.num_features, self.num_classes)
# self.instance_layer3 = nn.Linear(self.num_features, self.num_classes)
self.instance0 = nn.Linear(self.num_features, self.num_classes)
self.instance1 = nn.Linear(self.num_features, self.num_classes)
self.instance2 = nn.Linear(self.num_features, self.num_classes)
self.instance3 = nn.Linear(self.num_features, self.num_classes)
self.instance4 = nn.Linear(self.num_features, self.num_classes)
# self.linear_list = []
# for i in range(16):
# self.linear_list.append(nn.Linear(self.num_features, self.num_classes).cuda())
# self.local_conv = nn.Conv2d(self.num_features, self.num_features, kernel_size=1, padding=0, bias=False)
# self.local_bn = nn.BatchNorm2d(self.num_features)
# Initialize the weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, input_x):
x = self.conv_3x3(input_x)
x = self.bottleneck_1(x)
x = self.identity_block_1_1(x)
x_layer1 = self.identity_block_1_2(x)
x = self.bottleneck_2(x_layer1)
x = self.identity_block_2_1(x)
x = self.identity_block_2_2(x)
x_layer2 = self.identity_block_2_3(x)
x = self.bottleneck_3(x_layer2)
x = self.identity_block_3_1(x)
x = self.identity_block_3_2(x)
x = self.identity_block_3_3(x)
x = self.identity_block_3_4(x)
x_layer3 = self.identity_block_3_5(x)
x = self.bottleneck_4(x_layer3)
x = self.identity_block_4_1(x)
x = self.identity_block_4_2(x)
# x_layer1 = self.se1(x_layer1)
# x_layer1 = nn.functional.avg_pool2d(x_layer1, kernel_size=(32, 32), stride=(1, 1))
# x_layer1 = self.local_conv_layer1(x_layer1)
# x_layer1 = x_layer1.contiguous().view(x_layer1.size(0), -1)
# x_layer1 = self.instance_layer1(x_layer1)
#
# x_layer2 = self.se2(x_layer2)
# x_layer2 = nn.functional.avg_pool2d(x_layer2, kernel_size=(16, 16), stride=(1, 1))
# x_layer2 = self.local_conv_layer2(x_layer2)
# x_layer2 = x_layer2.contiguous().view(x_layer2.size(0), -1)
# x_layer2 = self.instance_layer2(x_layer2)
#
# x_layer3 = self.se3(x_layer3)
# x_layer3 = nn.functional.avg_pool2d(x_layer3, kernel_size=(8, 8), stride=(1, 1))
# x_layer3 = self.local_conv_layer3(x_layer3)
# x_layer3 = x_layer3.contiguous().view(x_layer3.size(0), -1)
# x_layer3 = self.instance_layer3(x_layer3)
        sx = x.size(2) // 4  # integer division: avg_pool2d needs an int kernel size
x = nn.functional.avg_pool2d(x, kernel_size=(sx, x.size(3)), stride=(sx, x.size(3))) # 4x1
# x = self.local_conv(x)
# x = self.local_bn(x)
# x = nn.functional.relu(x)
x4 = nn.functional.avg_pool2d(x, kernel_size=(4, 1), stride=(1, 1))
x4 = x4.contiguous().view(x4.size(0), -1)
c4 = self.instance4(x4)
# x = x.view(x.size(0), x.size(1), 16)
# c_list = []
# for i in range(16):
# x_offset = torch.empty(x.size(0), 512).cuda(0)
# # print(x_offset[:, :, :].size(), x[:, :, i].size())
# x_offset[:, :] = x[:, :, i]
# tmp = self.linear_list[i](x_offset)
# c_list.append(tmp)
x = x.chunk(4, dim=2)
x0 = x[0].contiguous().view(x[0].size(0), -1)
x1 = x[1].contiguous().view(x[1].size(0), -1)
x2 = x[2].contiguous().view(x[2].size(0), -1)
x3 = x[3].contiguous().view(x[3].size(0), -1)
c0 = self.instance0(x0)
c1 = self.instance1(x1)
c2 = self.instance2(x2)
c3 = self.instance3(x3)
return c0, c1, c2, c3, c4#c_list, c4##, x_layer1, x_layer2, x_layer3
def resnet50(**kwargs):
"""
Constructs a ResNet model.
"""
return Resnet50(**kwargs)
```

filtered: remove_function_no_docstring 20,490 · remove_class_no_docstring 4,004 · remove_delete_markers 678
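The distinctive piece in this row is `Dconv_shuffle`, which randomly permutes the spatial positions of each channel independently before applying a convolution. A minimal standalone sketch of that shuffle (plain PyTorch; no claim to match the training-loop behaviour above exactly):

```python
# Minimal sketch: permute the h*w positions of every channel independently,
# which is the core of what Dconv_shuffle does before its convolution.
import torch

def shuffle_within_channels(x: torch.Tensor) -> torch.Tensor:
    n, c, h, w = x.shape
    flat = x.reshape(n, c, h * w)
    # One independent permutation of the spatial positions per channel.
    perm = torch.stack([torch.randperm(h * w) for _ in range(c)])  # (c, h*w)
    index = perm.unsqueeze(0).expand(n, -1, -1)                    # (n, c, h*w)
    return torch.gather(flat, 2, index).reshape(n, c, h, w)

x = torch.arange(16.0).reshape(1, 2, 2, 4)
print(shuffle_within_channels(x))
```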
**Record 7: `src/python/misc/serifxml_round_trip.py` (`BBN-E/text-open` @ `c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9`)**
- size 824 · ext `py` · lang Python · licenses: Apache-2.0
- stars 2 (2022-03-24T14:37:51.000Z to 2022-03-24T19:56:45.000Z) · issues: null · forks: null

content:

```python
# Read in serifxml then save it and make sure the files are
# essentially identical
import sys, os
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir, ".."))
import serifxml3
if len(sys.argv) != 3:
print("Usage: " + sys.argv[0] + " input-serifxml-file output-serifxml-file")
sys.exit(1)
input_file, output_file = sys.argv[1:]
if os.path.exists(output_file):
os.remove(output_file)
doc = serifxml3.Document(input_file)
doc.save(output_file)
print("Reading input serifxml")
i = open(input_file)
print("Writing output serifxml")
o = open(output_file)
i_contents = i.read()
o_contents = o.read()
i.close()
o.close()
print("Checking")
if i_contents.strip() != o_contents.strip():
print("Serifxml files differ")
sys.exit(1)
print("Serifxml files match")
```

avg_line_length 21.684211 · max_line_length 80 · alphanum_fraction 0.713592

original_content (identical to content; nothing was filtered):

```python
# Read in serifxml then save it and make sure the files are
# essentially identical
import sys, os
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir, ".."))
import serifxml3
if len(sys.argv) != 3:
print("Usage: " + sys.argv[0] + " input-serifxml-file output-serifxml-file")
sys.exit(1)
input_file, output_file = sys.argv[1:]
if os.path.exists(output_file):
os.remove(output_file)
doc = serifxml3.Document(input_file)
doc.save(output_file)
print("Reading input serifxml")
i = open(input_file)
print("Writing output serifxml")
o = open(output_file)
i_contents = i.read()
o_contents = o.read()
i.close()
o.close()
print("Checking")
if i_contents.strip() != o_contents.strip():
print("Serifxml files differ")
sys.exit(1)
print("Serifxml files match")
```

filtered: remove_function_no_docstring 0 · remove_class_no_docstring 0 · remove_delete_markers 0
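For reference, the stdlib offers a one-call byte-level comparison; the script above is deliberately looser, stripping outer whitespace before comparing. File names below are placeholders:

```python
# Hedged alternative (stdlib): byte-for-byte comparison of the two files.
import filecmp

identical = filecmp.cmp("input.xml", "output.xml", shallow=False)
print("match" if identical else "differ")
```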
**Record 8: `src/kgmk/dsa/algebra/modular/matrix/dot/jit.py` (`kagemeka/python` @ `486ce39d97360b61029527bacf00a87fdbcf552c`)**
- size 331 · ext `py` · lang Python · licenses: MIT
- stars / issues / forks: null

content:

```python
import numpy as np
import numba as nb
@nb.njit
```

avg_line_length 18.388889 · max_line_length 51 · alphanum_fraction 0.567976

original_content:

```python
import numpy as np
import numba as nb
@nb.njit
def mod_matrix_dot(
a: np.ndarray,
b: np.ndarray,
mod: int,
) -> np.ndarray:
ha, wa = a.shape
hb, wb = b.shape
assert wa == hb
c = np.zeros((ha, wb), np.int64)
for i in range(ha):
for j in range(wb):
c[i, j] = np.sum(a[i] * b[:, j] % mod) % mod
return c
```

filtered: remove_function_no_docstring 261 · remove_class_no_docstring 0 · remove_delete_markers 22
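A quick usage sketch for the jitted kernel above (the first call pays the numba compilation cost; later calls run at native speed). It reuses `np` and `mod_matrix_dot` from the snippet:

```python
# Usage sketch: small int64 matrices with a modulus too large to reduce them,
# so the result equals the plain matrix product.
a = np.array([[1, 2], [3, 4]], dtype=np.int64)
b = np.array([[5, 6], [7, 8]], dtype=np.int64)
print(mod_matrix_dot(a, b, 1_000_000_007))  # [[19 22], [43 50]] == (a @ b) % mod
```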
**Record 9: `SeRe/tools/data_pre/filename2txt.py` (`Anny-Anny/SeRe` @ `2483269c5b5b36673b8893bb026689d731226cb8`)**
- size 707 · ext `py` · lang Python · licenses: MIT
- stars / issues / forks: null

content:

```python
import os
ROOT = '/home/xjw/Downloads/code/mmsegmentation-0.21.0/'
def txt2filename(txt_path):
"""
@param txt_path:
    @return: the names of all images contained in the dataset
"""
data = []
with open(txt_path, 'r') as f:
for ch in f.readlines():
data.append(ch.strip())
return data
if __name__ == '__main__':
filename2txt('/home/xjw/Downloads/code/mmsegmentation-0.21.0/data/xiangtan/images/validation',
ROOT + 'SeRe/tools/data_pre/val.txt')
```

avg_line_length 23.566667 · max_line_length 98 · alphanum_fraction 0.592645

original_content:

```python
import os
ROOT = '/home/xjw/Downloads/code/mmsegmentation-0.21.0/'
def filename2txt(filepath, txt_path):
with open(txt_path, 'w+') as f:
for root, dirs, files in os.walk(filepath):
for name in files:
f.write(name.split(".")[0] + '\n')
f.close()
def txt2filename(txt_path):
"""
@param txt_path:
    @return: the names of all images contained in the dataset
"""
data = []
with open(txt_path, 'r') as f:
for ch in f.readlines():
data.append(ch.strip())
return data
if __name__ == '__main__':
filename2txt('/home/xjw/Downloads/code/mmsegmentation-0.21.0/data/xiangtan/images/validation',
ROOT + 'SeRe/tools/data_pre/val.txt')
```

filtered: remove_function_no_docstring 200 · remove_class_no_docstring 0 · remove_delete_markers 23
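A hedged pathlib variant of the restored `filename2txt` (the name is mine; behaviour assumed: one file-name stem per line, recursing the way `os.walk` does):

```python
# Sketch: write one file-name stem per line, mirroring filename2txt above.
from pathlib import Path

def filename2txt_pathlib(src_dir: str, txt_path: str) -> None:
    stems = sorted(p.stem for p in Path(src_dir).rglob('*') if p.is_file())
    Path(txt_path).write_text('\n'.join(stems) + '\n')
```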
**Record 10: `Python/Multiflex_set_binary_mode.py` (`Terabee/sample_codes` @ `a58676f5e7fec1dfcacc5269515b3ef069b01ec7`)**
- size 1,013 · ext `py` · lang Python · licenses: MIT
- stars 13 (2019-06-24T05:34:08.000Z to 2022-01-16T08:37:37.000Z) · issues 8 (2019-08-28T17:30:23.000Z to 2021-10-01T17:07:23.000Z) · forks 10 (2019-04-30T20:19:41.000Z to 2021-02-10T19:32:39.000Z)

content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example TeraRanger MultiFlex configuration script.
For more information about how to use this script, please refer to this document:
https://www.terabee.com/wp-content/uploads/2017/09/TR-MF-Python-ReadMe.pdf
"""
import sys
import binascii
import serial
if __name__ == "__main__":
if len(sys.argv) < 2:
print '\n \n[ERROR] Correct usage $ python multiflex_binary.py port'
sys.exit(1)
port_name = sys.argv[1]
multiflex = serial.Serial(port_name, 115200, timeout=5, writeTimeout=5)
print 'Connected to TeraRanger MultiFlex'
multiflex.flushInput()
multiflex.flushOutput()
multiflex.write(bytearray([0x00, 0x11, 0x02, 0x4C]))
response = multiflex.read(16)
response = binascii.hexlify(response)
if response.find("52451100d4") != -1:
print 'ACK'
if response.find("524511ff27") != -1:
print 'NACK'
multiflex.close()
sys.exit(0)
```

avg_line_length 26.657895 · max_line_length 81 · alphanum_fraction 0.642646

original_content (identical to content; nothing was filtered):

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example TeraRanger MultiFlex configuration script.
For more information about how to use this script, please refer to this document:
https://www.terabee.com/wp-content/uploads/2017/09/TR-MF-Python-ReadMe.pdf
"""
import sys
import binascii
import serial
if __name__ == "__main__":
if len(sys.argv) < 2:
print '\n \n[ERROR] Correct usage $ python multiflex_binary.py port'
sys.exit(1)
port_name = sys.argv[1]
multiflex = serial.Serial(port_name, 115200, timeout=5, writeTimeout=5)
print 'Connected to TeraRanger MultiFlex'
multiflex.flushInput()
multiflex.flushOutput()
multiflex.write(bytearray([0x00, 0x11, 0x02, 0x4C]))
response = multiflex.read(16)
response = binascii.hexlify(response)
if response.find("52451100d4") != -1:
print 'ACK'
if response.find("524511ff27") != -1:
print 'NACK'
multiflex.close()
sys.exit(0)
```

filtered: remove_function_no_docstring 0 · remove_class_no_docstring 0 · remove_delete_markers 0
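This sample is Python 2 (`print` statements, the `writeTimeout` spelling). A hedged Python 3 port of the core exchange; the command bytes and reply prefixes are copied verbatim from the sample, not re-verified against the device documentation:

```python
# Hedged Python 3 port of the MultiFlex exchange above (pyserial 3.x names).
import binascii
import sys

import serial  # pyserial

if len(sys.argv) < 2:
    sys.exit("Usage: python multiflex_binary.py port")

with serial.Serial(sys.argv[1], 115200, timeout=5, write_timeout=5) as multiflex:
    multiflex.reset_input_buffer()
    multiflex.reset_output_buffer()
    multiflex.write(bytes([0x00, 0x11, 0x02, 0x4C]))
    reply = binascii.hexlify(multiflex.read(16)).decode()
    if "52451100d4" in reply:
        print("ACK")
    if "524511ff27" in reply:
        print("NACK")
```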
**Record 11: `homepage/migrations/0007_userprofile_follows.py` (`Andre-Azu/instagramclone` @ `94906c7d2f88c24c156b19a96ab0edf619c67277`)**
- size 1,236 · ext `py` · lang Python · licenses: Unlicense
- stars / issues / forks: null

content:

```python
# Generated by Django 4.0 on 2021-12-15 07:28
from django.db import migrations, models
import django.db.models.deletion
```

avg_line_length 38.625 · max_line_length 146 · alphanum_fraction 0.615696

original_content:

```python
# Generated by Django 4.0 on 2021-12-15 07:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('homepage', '0006_image_comments_image_likes'),
]
operations = [
migrations.CreateModel(
name='Userprofile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_picture', models.ImageField(default=None, upload_to='instagram')),
('bio', models.CharField(max_length=100)),
('username', models.CharField(max_length=15)),
],
),
migrations.CreateModel(
name='Follows',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('followed', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='followers', to='homepage.userprofile')),
('follower', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='following', to='homepage.userprofile')),
],
),
]
```

filtered: remove_function_no_docstring 0 · remove_class_no_docstring 1,091 · remove_delete_markers 23
**Record 12: `gateapi-python/gate_api/models/options_underlying_ticker.py` (`jarenmt/IEOPUMP` @ `220f7f612d299f7305e82fe6c33661e6871f2d86`)**
- size 5,347 · ext `py` · lang Python · licenses: MIT
- stars / issues / forks: null

content:

```python
# coding: utf-8
"""
Gate API v4
Welcome to Gate.io API APIv4 provides spot, margin and futures trading operations. There are public APIs to retrieve the real-time market statistics, and private APIs which needs authentication to trade on user's behalf. # noqa: E501
Contact: support@mail.gate.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from gate_api.configuration import Configuration
class OptionsUnderlyingTicker(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {'trade_put': 'int', 'trade_call': 'int', 'index_price': 'str'}
attribute_map = {'trade_put': 'trade_put', 'trade_call': 'trade_call', 'index_price': 'index_price'}
def __init__(self, trade_put=None, trade_call=None, index_price=None, local_vars_configuration=None): # noqa: E501
# type: (int, int, str, Configuration) -> None
"""OptionsUnderlyingTicker - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._trade_put = None
self._trade_call = None
self._index_price = None
self.discriminator = None
if trade_put is not None:
self.trade_put = trade_put
if trade_call is not None:
self.trade_call = trade_call
if index_price is not None:
self.index_price = index_price
@property
def trade_put(self):
"""Gets the trade_put of this OptionsUnderlyingTicker. # noqa: E501
Total put options trades amount in last 24h # noqa: E501
:return: The trade_put of this OptionsUnderlyingTicker. # noqa: E501
:rtype: int
"""
return self._trade_put
@trade_put.setter
def trade_put(self, trade_put):
"""Sets the trade_put of this OptionsUnderlyingTicker.
Total put options trades amount in last 24h # noqa: E501
:param trade_put: The trade_put of this OptionsUnderlyingTicker. # noqa: E501
:type: int
"""
self._trade_put = trade_put
@property
def trade_call(self):
"""Gets the trade_call of this OptionsUnderlyingTicker. # noqa: E501
Total call options trades amount in last 24h # noqa: E501
:return: The trade_call of this OptionsUnderlyingTicker. # noqa: E501
:rtype: int
"""
return self._trade_call
@trade_call.setter
def trade_call(self, trade_call):
"""Sets the trade_call of this OptionsUnderlyingTicker.
Total call options trades amount in last 24h # noqa: E501
:param trade_call: The trade_call of this OptionsUnderlyingTicker. # noqa: E501
:type: int
"""
self._trade_call = trade_call
@property
def index_price(self):
"""Gets the index_price of this OptionsUnderlyingTicker. # noqa: E501
Index price # noqa: E501
:return: The index_price of this OptionsUnderlyingTicker. # noqa: E501
:rtype: str
"""
return self._index_price
@index_price.setter
def index_price(self, index_price):
"""Sets the index_price of this OptionsUnderlyingTicker.
Index price # noqa: E501
:param index_price: The index_price of this OptionsUnderlyingTicker. # noqa: E501
:type: str
"""
self._index_price = index_price
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OptionsUnderlyingTicker):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OptionsUnderlyingTicker):
return True
return self.to_dict() != other.to_dict()
```

avg_line_length 31.452941 · max_line_length 239 · alphanum_fraction 0.616607

original_content (identical to content; nothing was filtered):

```python
# coding: utf-8
"""
Gate API v4
Welcome to Gate.io API APIv4 provides spot, margin and futures trading operations. There are public APIs to retrieve the real-time market statistics, and private APIs which needs authentication to trade on user's behalf. # noqa: E501
Contact: support@mail.gate.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from gate_api.configuration import Configuration
class OptionsUnderlyingTicker(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {'trade_put': 'int', 'trade_call': 'int', 'index_price': 'str'}
attribute_map = {'trade_put': 'trade_put', 'trade_call': 'trade_call', 'index_price': 'index_price'}
def __init__(self, trade_put=None, trade_call=None, index_price=None, local_vars_configuration=None): # noqa: E501
# type: (int, int, str, Configuration) -> None
"""OptionsUnderlyingTicker - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._trade_put = None
self._trade_call = None
self._index_price = None
self.discriminator = None
if trade_put is not None:
self.trade_put = trade_put
if trade_call is not None:
self.trade_call = trade_call
if index_price is not None:
self.index_price = index_price
@property
def trade_put(self):
"""Gets the trade_put of this OptionsUnderlyingTicker. # noqa: E501
Total put options trades amount in last 24h # noqa: E501
:return: The trade_put of this OptionsUnderlyingTicker. # noqa: E501
:rtype: int
"""
return self._trade_put
@trade_put.setter
def trade_put(self, trade_put):
"""Sets the trade_put of this OptionsUnderlyingTicker.
Total put options trades amount in last 24h # noqa: E501
:param trade_put: The trade_put of this OptionsUnderlyingTicker. # noqa: E501
:type: int
"""
self._trade_put = trade_put
@property
def trade_call(self):
"""Gets the trade_call of this OptionsUnderlyingTicker. # noqa: E501
Total call options trades amount in last 24h # noqa: E501
:return: The trade_call of this OptionsUnderlyingTicker. # noqa: E501
:rtype: int
"""
return self._trade_call
@trade_call.setter
def trade_call(self, trade_call):
"""Sets the trade_call of this OptionsUnderlyingTicker.
Total call options trades amount in last 24h # noqa: E501
:param trade_call: The trade_call of this OptionsUnderlyingTicker. # noqa: E501
:type: int
"""
self._trade_call = trade_call
@property
def index_price(self):
"""Gets the index_price of this OptionsUnderlyingTicker. # noqa: E501
Index price # noqa: E501
:return: The index_price of this OptionsUnderlyingTicker. # noqa: E501
:rtype: str
"""
return self._index_price
@index_price.setter
def index_price(self, index_price):
"""Sets the index_price of this OptionsUnderlyingTicker.
Index price # noqa: E501
:param index_price: The index_price of this OptionsUnderlyingTicker. # noqa: E501
:type: str
"""
self._index_price = index_price
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OptionsUnderlyingTicker):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OptionsUnderlyingTicker):
return True
return self.to_dict() != other.to_dict()
```

filtered: remove_function_no_docstring 0 · remove_class_no_docstring 0 · remove_delete_markers 0
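A short usage sketch for the generated model above; `to_dict` iterates `openapi_types`, so the printed dict carries exactly the three declared attributes:

```python
# Usage sketch: construct the model and round-trip it through to_dict().
t = OptionsUnderlyingTicker(trade_put=10, trade_call=12, index_price="30000.1")
print(t.to_dict())
# {'trade_put': 10, 'trade_call': 12, 'index_price': '30000.1'}
```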
**Record 13: `accounts/admin.py` (`GermanMtzmx/mailtest` @ `9ec3f51344e6968698f8313ef987fca4d2907cdf`)**
- size 577 · ext `py` · lang Python · licenses: Xnet, X11
- stars / issues / forks: null

content:

```python
from django.contrib import admin
class UserAdmin(admin.ModelAdmin):
"""User admin"""
exclude = ('renewed',)
list_display = ('username', 'email',
'isActive', 'created', 'modified')
list_filter = ('isActive',)
search_fields = ('firstName',
'lastName', 'username', 'email')
class SocialTokenAdmin(admin.ModelAdmin):
"""Social token admin"""
list_display = ('token',
'social', 'user', 'created')
search_fields = ('token',)
#admin.site.register(User, UserAdmin)
#admin.site.register(SocialToken, SocialTokenAdmin)
| 19.896552
| 51
| 0.637782
|
from django.contrib import admin
class UserAdmin(admin.ModelAdmin):
"""User admin"""
exclude = ('renewed',)
list_display = ('username', 'email',
'isActive', 'created', 'modified')
list_filter = ('isActive',)
search_fields = ('firstName',
'lastName', 'username', 'email')
class SocialTokenAdmin(admin.ModelAdmin):
"""Social token admin"""
list_display = ('token',
'social', 'user', 'created')
search_fields = ('token',)
#admin.site.register(User, UserAdmin)
#admin.site.register(SocialToken, SocialTokenAdmin)
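The register() calls are commented out above; enabling the admins is one line per model once the models are importable. A sketch, with the import path assumed (hypothetical):

# Hypothetical: assumes User and SocialToken live in this app's models module.
from .models import User, SocialToken

admin.site.register(User, UserAdmin)
admin.site.register(SocialToken, SocialTokenAdmin)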
| 0
| 0
| 0
|
b2aa72705f3ef23ab152b930ee038b7c4589011a
| 2,545
|
py
|
Python
|
J1939_PGN/J1939_PGN.py
|
CSS-Electronics/j1939_pgn
|
e268e1b2713c9f0d8634dda30e6493235eee5e56
|
[
"MIT"
] | 1
|
2021-06-22T07:24:08.000Z
|
2021-06-22T07:24:08.000Z
|
J1939_PGN/J1939_PGN.py
|
CSS-Electronics/j1939_pgn
|
e268e1b2713c9f0d8634dda30e6493235eee5e56
|
[
"MIT"
] | null | null | null |
J1939_PGN/J1939_PGN.py
|
CSS-Electronics/j1939_pgn
|
e268e1b2713c9f0d8634dda30e6493235eee5e56
|
[
"MIT"
] | null | null | null |
from enum import Enum, auto
class J1939_PDU(Enum):
"""
J1939 PDU type
"""
PDU1 = auto()
PDU2 = auto()
class J1939_PGN:
"""
J1939 PGN class.
"""
_id = None
def __init__(self, msg_id: int = None, msg_pgn: int = None):
"""
Takes either a message ID or PGN
:param msg_id: CAN-bus message ID
:param msg_pgn: CAN-bus message J1939 PGN
"""
if msg_id is not None:
self._id = msg_id
elif msg_pgn is not None:
self._id = msg_pgn << 8
@property
def p(self) -> int:
"""
Priority
:return: J1939 priority value
"""
return (self._id >> 26) & 0x7
@property
def r(self) -> int:
"""
Reserved bit
:return: Reserved bit value
"""
return (self._id >> 25) & 0x1
@property
def dp(self) -> int:
"""
Data Page
:return: Data Page value
"""
return (self._id >> 24) & 0x1
@property
def pf(self) -> int:
"""
PDU format
:return: PDU format value
"""
return (self._id >> 16) & 0xFF
@property
def ps(self) -> int:
"""
PDU Specific
:return: PDU specific value
"""
return (self._id >> 8) & 0xFF
@property
def sa(self) -> int:
"""
Source Address
:return: Source Address value
"""
return self._id & 0xFF
@property
def pdu(self) -> J1939_PDU:
"""
PDU type
:return: PDU type as J1939_PDU
"""
if self.pf < 240:
return J1939_PDU.PDU1
else:
return J1939_PDU.PDU2
@property
def id(self) -> int:
"""
Message ID
:return: Message ID value
"""
return self._id
@property
def pgn(self):
"""
Message PGN
:return: Message PGN value
"""
if self.pdu is J1939_PDU.PDU1:
# Clear target address
return (self._id >> 8) & 0x3FF00
else:
return (self._id >> 8) & 0x3FFFF
| 21.752137
| 64
| 0.46169
|
from enum import Enum, auto
class J1939_PDU(Enum):
"""
J1939 PDU type
"""
PDU1 = auto()
PDU2 = auto()
class J1939_PGN:
"""
J1939 PGN class.
"""
_id = None
def __init__(self, msg_id: int = None, msg_pgn: int = None):
"""
Takes either a message ID or PGN
:param msg_id: CAN-bus message ID
:param msg_pgn: CAN-bus message J1939 PGN
"""
if msg_id is not None:
self._id = msg_id
elif msg_pgn is not None:
self._id = msg_pgn << 8
@property
def p(self) -> int:
"""
Priority
:return: J1939 priority value
"""
return (self._id >> 26) & 0x7
@property
def r(self) -> int:
"""
Reserved bit
:return: Reserved bit value
"""
return (self._id >> 25) & 0x1
@property
def dp(self) -> int:
"""
Data Page
:return: Data Page value
"""
return (self._id >> 24) & 0x1
@property
def pf(self) -> int:
"""
PDU format
:return: PDU format value
"""
return (self._id >> 16) & 0xFF
@property
def ps(self) -> int:
"""
PDU Specific
:return: PDU specific value
"""
return (self._id >> 8) & 0xFF
@property
def sa(self) -> int:
"""
Source Address
:return: Source Address value
"""
return self._id & 0xFF
@property
def pdu(self) -> J1939_PDU:
"""
PDU type
:return: PDU type as J1939_PDU
"""
if self.pf < 240:
return J1939_PDU.PDU1
else:
return J1939_PDU.PDU2
@property
def id(self) -> int:
"""
Message ID
:return: Message ID value
"""
return self._id
@property
def pgn(self):
"""
Message PGN
:return: Message PGN value
"""
if self.pdu is J1939_PDU.PDU1:
# Clear target address
return (self._id >> 8) & 0x3FF00
else:
return (self._id >> 8) & 0x3FFFF
def __str__(self):
return f"ID: 0x{self.id:08X} ({self.id:>9}), " \
f"P: 0x{self.p:01X}, " \
f"DP: 0x{self.dp:01X}, " \
f"PF: 0x{self.pf:02X} ({self.pf:>3}), " \
f"PS: 0x{self.ps:02X} ({self.ps:>3}), " \
f"SA: 0x{self.sa:02X} ({self.sa:>3}), " \
f"PGN: 0x{self.pgn:05X} ({self.pgn:>6})"
| 363
| 0
| 27
|
13568121b4814c3d81c59418c50fdd6901e9fb77
| 379
|
py
|
Python
|
exercises/exercise26.py
|
djangojeng-e/TIL
|
bdbe1dfb6ebc48b89067fddda195227cca64b8dc
|
[
"MIT"
] | null | null | null |
exercises/exercise26.py
|
djangojeng-e/TIL
|
bdbe1dfb6ebc48b89067fddda195227cca64b8dc
|
[
"MIT"
] | null | null | null |
exercises/exercise26.py
|
djangojeng-e/TIL
|
bdbe1dfb6ebc48b89067fddda195227cca64b8dc
|
[
"MIT"
] | null | null | null |
birthday_dictionary = {
"Albert Einstein" : "1/12/1912",
"Jeong Eun Kim" : "21/12/1983",
"Djangojeng-e" : "18/12/1986",
"Django": "01/01/2005"
}
name = input("Who's Birthday do you want to look up?")
if name in birthday_dictionary:
print(f'{name}s birthday is {birthday_dictionary[name]}')
else:
print("We dont' have {}'s birthday".format(name))
| 21.055556
| 62
| 0.630607
|
birthday_dictionary = {
"Albert Einstein" : "1/12/1912",
"Jeong Eun Kim" : "21/12/1983",
"Djangojeng-e" : "18/12/1986",
"Django": "01/01/2005"
}
name = input("Who's Birthday do you want to look up?")
if name in birthday_dictionary:
print(f'{name}s birthday is {birthday_dictionary[name]}')
else:
print("We dont' have {}'s birthday".format(name))
| 0
| 0
| 0
|
49d3502c205ec9a70b70b5cd9b2d3a0fa593abcc
| 1,621
|
py
|
Python
|
Dice Rolling Simulator/dice_game.py
|
Hongyanlee0614/Cool-Python-Projects
|
4e35822b696674221636ede1b86acc0793305c18
|
[
"MIT"
] | null | null | null |
Dice Rolling Simulator/dice_game.py
|
Hongyanlee0614/Cool-Python-Projects
|
4e35822b696674221636ede1b86acc0793305c18
|
[
"MIT"
] | null | null | null |
Dice Rolling Simulator/dice_game.py
|
Hongyanlee0614/Cool-Python-Projects
|
4e35822b696674221636ede1b86acc0793305c18
|
[
"MIT"
] | null | null | null |
import tkinter # for GUI
from PIL import Image, ImageTk # operation regarding image
import random
# toplevel widget which represents the main window of an application
root = tkinter.Tk()
root.geometry('400x400')
root.title('Data Flair Roll the Dice')
# Adding label into the frame. Here we skip a line
l0 = tkinter.Label(root, text="")
l0.pack()
# adding label with different font and formatting
l1 = tkinter.Label(root, text="Hello from Data Flair!", fg="light green",
bg="dark green",
font="Helvetica 16 bold italic")
l1.pack()
# images
dice = ['die1.png', 'die2.png', 'die3.png', 'die4.png', 'die5.png', 'die6.png']
# simulate the die by picking one of the six face images at random
image1 = ImageTk.PhotoImage(Image.open(random.choice(dice)))
# construct a label widget for image
label1 = tkinter.Label(root, image=image1)
label1.image = image1
# packing a widget in the parent widget
# expand=True enables image to be centered no matter how we resize the window
label1.pack(expand=True)
# function activated by button
# adding button, and command will use rolling_dice function
button = tkinter.Button(root, text='Roll the Dice',
fg='blue', command=rolling_dice)
# pack a widget in the parent widget
button.pack(expand=True)
# call the mainloop of Tk
# keeps window open
root.mainloop()
| 30.018519
| 80
| 0.687847
|
import tkinter # for GUI
from PIL import Image, ImageTk # operation regarding image
import random
# toplevel widget which represents the main window of an application
root = tkinter.Tk()
root.geometry('400x400')
root.title('Data Flair Roll the Dice')
# Adding label into the frame. Here we skip a line
l0 = tkinter.Label(root, text="")
l0.pack()
# adding label with different font and formatting
l1 = tkinter.Label(root, text="Hello from Data Flair!", fg="light green",
bg="dark green",
font="Helvetica 16 bold italic")
l1.pack()
# images
dice = ['die1.png', 'die2.png', 'die3.png', 'die4.png', 'die5.png', 'die6.png']
# simulate the die by picking one of the six face images at random
image1 = ImageTk.PhotoImage(Image.open(random.choice(dice)))
# construct a label widget for image
label1 = tkinter.Label(root, image=image1)
label1.image = image1
# packing a widget in the parent widget
# expand=True enables image to be centered no matter how we resize the window
label1.pack(expand=True)
# function activated by button
def rolling_dice():
image1 = ImageTk.PhotoImage(Image.open(random.choice(dice)))
# update image
label1.configure(image=image1)
# keep a reference
label1.image = image1
# adding button, and command will use rolling_dice function
button = tkinter.Button(root, text='Roll the Dice',
fg='blue', command=rolling_dice)
# pack a widget in the parent widget
button.pack(expand=True)
# call the mainloop of Tk
# keeps window open
root.mainloop()
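The `label1.image = image1` assignments above are load-bearing: Tkinter does not keep a strong reference from a widget to its PhotoImage, so without an extra Python reference the image can be garbage-collected and the label goes blank. The pattern in isolation:

# Keep-a-reference pattern for Tkinter images (sketch).
photo = ImageTk.PhotoImage(Image.open(random.choice(dice)))
label = tkinter.Label(root, image=photo)
label.image = photo  # drop this line and the image may silently disappear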
| 171
| 0
| 25
|
e1bc973a11687fa3e830c8942a5ed3120eac2208
| 197
|
py
|
Python
|
fetch_gitignore/__init__.py
|
clickyotomy/git-fetch-gitignore
|
f534abf491f15ac85540fcb6ff5c6a9837c9b14e
|
[
"MIT"
] | 3
|
2016-09-16T12:19:36.000Z
|
2019-08-08T20:24:57.000Z
|
fetch_gitignore/__init__.py
|
clickyotomy/git-fetch-gitignore
|
f534abf491f15ac85540fcb6ff5c6a9837c9b14e
|
[
"MIT"
] | null | null | null |
fetch_gitignore/__init__.py
|
clickyotomy/git-fetch-gitignore
|
f534abf491f15ac85540fcb6ff5c6a9837c9b14e
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python2.7
'''
__init__ file for fetch_gitignore.
'''
__author__ = "Srinidhi Kaushik"
__license__ = "MIT"
__version__ = "0.0.1"
__email__ = "clickyotomy@users.noreply.github.com"
| 16.416667
| 50
| 0.715736
|
#! /usr/bin/env python2.7
'''
__init__ file for fetch_gitignore.
'''
__author__ = "Srinidhi Kaushik"
__license__ = "MIT"
__version__ = "0.0.1"
__email__ = "clickyotomy@users.noreply.github.com"
| 0
| 0
| 0
|
2a4e2a2f19d2573e61fe39bb7dc94a42dc9ed626
| 3,030
|
py
|
Python
|
lista07_listaEncadeada/pylista07_questao06.py
|
mayararysia/ESTD
|
65aa8816aa8773066201cb410b02c1cb72ad5611
|
[
"MIT"
] | null | null | null |
lista07_listaEncadeada/pylista07_questao06.py
|
mayararysia/ESTD
|
65aa8816aa8773066201cb410b02c1cb72ad5611
|
[
"MIT"
] | null | null | null |
lista07_listaEncadeada/pylista07_questao06.py
|
mayararysia/ESTD
|
65aa8816aa8773066201cb410b02c1cb72ad5611
|
[
"MIT"
] | null | null | null |
"""
06.
Escreva um método show() para escrever toda a lista. Exemplo: lista = ListaOrdenada();
lista.append(3); lista.append(2); lista.show() imprime: [2,3]
"""
#Implementação da Classe Noh
#Lista não ordenada:
if __name__ == '__main__':
    minha_lista = ListaOrdenada()  # example usage
print(minha_lista.isEmpty())
minha_lista.append(8)
minha_lista.append(2)
minha_lista.append(9)
print(minha_lista.isEmpty())
print("Size: ", minha_lista.size())
print(minha_lista.show())
| 23.129771
| 86
| 0.593729
|
"""
06.
Escreva um método show() para escrever toda a lista. Exemplo: lista = ListaOrdenada();
lista.append(3); lista.append(2); lista.show() imprime: [2,3]
"""
#Implementação da Classe Noh
class Noh:
def __init__(self,valor_inicial):
self._dados = valor_inicial
self._proximo = None
def getData(self):
return self._dados
def getNext(self):
return self._proximo
def setData(self, novo_valor):
self._dados = novo_valor
def setNext(self, novo_proximo):
self._proximo = novo_proximo
# Unordered list:
class ListaOrdenada:
def __init__(self): #construtor
self.head = None
self.final = None
def isEmpty(self): #<<<<
return self.head == None
def append(self, item):
temp = Noh(item)
if self.head == None:
temp.setNext(self.head)
self.head = temp
if self.final == None:
self.final = self.head
else:
self.final.setNext(temp)
self.final = temp
def add(self,item):
atual = self.head
anterior = None
parar = False
while atual != None and not parar:
if atual.getData() > item:
parar = True
else:
anterior = atual
atual = atual.getNext()
temp = Noh(item)
if anterior == None:
temp.setNext(self.head)
self.head = temp
else:
temp.setNext(atual)
anterior.setNext(temp)
def size(self):
atual = self.head
contador = 0
while atual != None:
contador = contador + 1
atual = atual.getNext()
return contador
def remove(self, item):
atual = self.head
anterior = None
encontrou = False
        while not encontrou:  # traverse the list
if atual.getData() == item:
encontrou = True
else:
anterior = atual
atual = atual.getNext()
if anterior == None:
self.head = atual.getNext()
else:
anterior.setNext(atual.getNext())
def search(self,item):
atual = self.head #atual == temp
encontrou = False
parar = False
while atual != None and not encontrou and not parar:
if atual.getData() == item:
encontrou = True
else:
if atual.getData() > item:
parar = True
else:
atual = atual.getNext()
return encontrou
def show(self): #show
if self.isEmpty():
print("Lista Vazia!")
else:
lista = []
aux = self.head
while aux.getData()!= None:
lista.append(aux.getData())
if aux.getNext() == None: break
aux = aux.getNext()
tam = len(lista)
j=1
for i in range(tam-1):
if j == (tam-1): break
if lista[i]>lista[j]:
aux = lista[i]
lista[i] = lista[j]
lista[j] = aux
j = j+1
return lista
if __name__ == '__main__':
    minha_lista = ListaOrdenada()  # example usage
print(minha_lista.isEmpty())
minha_lista.append(8)
minha_lista.append(2)
minha_lista.append(9)
print(minha_lista.isEmpty())
print("Size: ", minha_lista.size())
print(minha_lista.show())
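show() above copies the node values into a Python list and bubble-sorts them, because append() links nodes in arrival order while the exercise expects sorted output. When items are inserted with add(), which already keeps the chain ordered, a plain traversal is enough; a sketch:

def show_in_order(lista):
    # Assumes items were inserted with add(), so the chain is already sorted.
    out = []
    atual = lista.head
    while atual is not None:
        out.append(atual.getData())
        atual = atual.getNext()
    return out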
| 2,181
| -12
| 371
|
b2524167a78952f075171b4993c8cca048abf2dd
| 901
|
py
|
Python
|
experiment/hyper-params-search/hps/common/Common.py
|
GyunHyukLee/GoldMine
|
93d765934cd8c4e152f12dc1a9677bd77ea64f28
|
[
"Apache-2.0"
] | 13
|
2020-06-01T06:31:01.000Z
|
2021-03-08T04:57:01.000Z
|
experiment/hyper-params-search/hps/common/Common.py
|
GyunHyukLee/GoldMine
|
93d765934cd8c4e152f12dc1a9677bd77ea64f28
|
[
"Apache-2.0"
] | 1
|
2020-10-13T08:56:38.000Z
|
2020-10-13T08:56:38.000Z
|
experiment/hyper-params-search/hps/common/Common.py
|
GyunHyukLee/GoldMine
|
93d765934cd8c4e152f12dc1a9677bd77ea64f28
|
[
"Apache-2.0"
] | 7
|
2020-06-03T01:46:22.000Z
|
2021-03-08T04:57:04.000Z
|
# -*- coding: utf-8 -*-
# Author : Jin Kim
# e-mail : jinkim@seculayer.com
# Powered by Seculayer © 2020 Solution Development 2 Team, R&D Center.
import os
from hps.common.Constants import Constants
from hps.utils.Singleton import Singleton
from hps.utils.MPLogger import MPLogger
from hps.utils.CommonUtils import CommonUtils
# class : Common
| 32.178571
| 93
| 0.7303
|
# -*- coding: utf-8 -*-
# Author : Jin Kim
# e-mail : jinkim@seculayer.com
# Powered by Seculayer © 2020 Solution Development 2 Team, R&D Center.
import os
from hps.common.Constants import Constants
from hps.utils.Singleton import Singleton
from hps.utils.MPLogger import MPLogger
from hps.utils.CommonUtils import CommonUtils
# class : Common
class Common(metaclass=Singleton):
__FILE_REAL_PATH = os.path.dirname(os.path.realpath(__file__))
# make directories
CommonUtils.mkdir(Constants.DIR_DATA)
CommonUtils.mkdir(Constants.DIR_PARAMS)
# make multi-process logger
__DIR_LOG = __FILE_REAL_PATH + "/../../" + Constants.DEFAULT.get("LOG_CONFIG", "LOG_DIR")
CommonUtils.mkdir(__DIR_LOG)
LOGGER = MPLogger(
log_dir=__DIR_LOG, log_name=Constants.DEFAULT.get("LOG_CONFIG", "LOG_NAME"),
log_level=Constants.DEFAULT.get("LOG_CONFIG", "LOG_LEVEL")
)
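Since Common is built on the Singleton metaclass, every Common() call should hand back the same instance, and the class-body side effects (directory creation, logger construction) run once at import. A usage sketch, assuming the metaclass caches instances as its name suggests:

logger = Common().LOGGER
assert logger is Common().LOGGER  # same MPLogger object everywhere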
| 0
| 531
| 22
|
57d9b30c8c5f267a7385f0b7589bbb8355313743
| 2,306
|
py
|
Python
|
posto/webhook/auth.py
|
tefra/posto
|
95cc39348f0c59339d9b7038d8640510849a2805
|
[
"MIT"
] | null | null | null |
posto/webhook/auth.py
|
tefra/posto
|
95cc39348f0c59339d9b7038d8640510849a2805
|
[
"MIT"
] | 1
|
2022-01-27T09:37:28.000Z
|
2022-01-27T09:40:50.000Z
|
posto/webhook/auth.py
|
tefra/posto
|
95cc39348f0c59339d9b7038d8640510849a2805
|
[
"MIT"
] | null | null | null |
import functools
import hmac
from typing import Any
from typing import Callable
from typing import Optional
from flask import abort
from flask import current_app
from flask import request
from werkzeug.exceptions import ServiceUnavailable
def authorize_source() -> Callable:
"""Detect the source from the headers and authenticate by the config
secret."""
return decorator
def authorize_gitlab() -> Optional[str]:
"""
    Check that the GitLab header token is correct and return the source name.
    The token is compared raw because GitLab only allows SSL endpoints.
"""
source = "gitlab"
if get_secret(source) == request.headers["X-Gitlab-Token"]:
return source
return None
def authorize_github() -> Optional[str]:
"""
    Verify that the GitHub signature matches our secret for the payload and
    return the source name.
    GitHub uses HMAC signature verification: the payload is encoded with the
    secret.
"""
source = "github"
secret = get_secret(source)
signature = request.headers["X-Hub-Signature"]
signature_prefix = "sha1="
if not signature.startswith(signature_prefix):
return None
hmac_ = hmac.new(secret.encode("UTF-8"), msg=request.data, digestmod="sha1")
calculated_sig = signature_prefix + hmac_.hexdigest()
if not hmac.compare_digest(signature, calculated_sig):
return None
return source
def get_secret(source: str) -> str:
"""Get the secret key from config by the source or raise an exception."""
secret = current_app.config.get(f"{source.upper()}_SECRET", None)
if secret is None:
raise ServiceUnavailable(f"Missing {source} secret")
return secret
__PARSERS__ = {
"X-Gitlab-Token": authorize_gitlab,
"X-Hub-Signature": authorize_github,
}
| 25.622222
| 80
| 0.645707
|
import functools
import hmac
from typing import Any
from typing import Callable
from typing import Optional
from flask import abort
from flask import current_app
from flask import request
from werkzeug.exceptions import ServiceUnavailable
def authorize_source() -> Callable:
"""Detect the source from the headers and authenticate by the config
secret."""
def decorator(fn: Callable) -> Callable:
@functools.wraps(fn)
def wrapper(*args: Any, **kwargs: Any) -> Any:
source = next(
(
authorize_func()
for name, authorize_func in __PARSERS__.items()
if name in request.headers
),
None,
)
if source is None:
abort(401)
return fn(*args, source=source, **kwargs)
return wrapper
return decorator
def authorize_gitlab() -> Optional[str]:
"""
    Check that the GitLab header token is correct and return the source name.
    The token is compared raw because GitLab only allows SSL endpoints.
"""
source = "gitlab"
if get_secret(source) == request.headers["X-Gitlab-Token"]:
return source
return None
def authorize_github() -> Optional[str]:
"""
    Verify that the GitHub signature matches our secret for the payload and
    return the source name.
    GitHub uses HMAC signature verification: the payload is encoded with the
    secret.
"""
source = "github"
secret = get_secret(source)
signature = request.headers["X-Hub-Signature"]
signature_prefix = "sha1="
if not signature.startswith(signature_prefix):
return None
hmac_ = hmac.new(secret.encode("UTF-8"), msg=request.data, digestmod="sha1")
calculated_sig = signature_prefix + hmac_.hexdigest()
if not hmac.compare_digest(signature, calculated_sig):
return None
return source
def get_secret(source: str) -> str:
"""Get the secret key from config by the source or raise an exception."""
secret = current_app.config.get(f"{source.upper()}_SECRET", None)
if secret is None:
raise ServiceUnavailable(f"Missing {source} secret")
return secret
__PARSERS__ = {
"X-Gitlab-Token": authorize_gitlab,
"X-Hub-Signature": authorize_github,
}
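authorize_source() is built to wrap a Flask view: the wrapper scans the request headers against __PARSERS__, aborts with 401 when no parser authenticates, and otherwise injects the detected source as a keyword argument. A hypothetical route using it:

# Hypothetical route; `app` is assumed to be the Flask application object.
@app.route("/webhook", methods=["POST"])
@authorize_source()
def webhook(source: str):
    return {"received_from": source}, 200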
| 492
| 0
| 27
|
e509428d899feeca516dab5cb1e43ec7ec393808
| 15,280
|
py
|
Python
|
python/pysvso/optimizers/icp.py
|
mfkiwl/SEMANTIC_VISUAL_SUPPORTED_ODEMETRY
|
2249bf358f51b337eb52a347ea7d46bff0654576
|
[
"Apache-2.0"
] | 191
|
2020-07-01T11:57:17.000Z
|
2022-03-23T12:40:43.000Z
|
python/pysvso/optimizers/icp.py
|
mfkiwl/SEMANTIC_VISUAL_SUPPORTED_ODEMETRY
|
2249bf358f51b337eb52a347ea7d46bff0654576
|
[
"Apache-2.0"
] | 10
|
2020-07-06T12:41:51.000Z
|
2022-02-09T23:43:11.000Z
|
python/pysvso/optimizers/icp.py
|
mfkiwl/SEMANTIC_VISUAL_SUPPORTED_ODEMETRY
|
2249bf358f51b337eb52a347ea7d46bff0654576
|
[
"Apache-2.0"
] | 45
|
2020-07-01T13:31:20.000Z
|
2022-02-03T07:21:42.000Z
|
import numpy as np
from enum import Enum
from pysvso.lib.maths.rotation import Euler, Quaternion, rotation_matrix, dRx, dRy, dRz
from scipy.optimize import fmin_bfgs
from scipy.optimize import fmin
from scipy.optimize import minimize
from scipy.optimize import approx_fprime
# when the point cloud is sparse
from sklearn.neighbors import NearestNeighbors
# see FLANN manual https://www.cs.ubc.ca/research/flann/uploads/FLANN/flann_manual-1.8.4.pdf
# remember to run 2to3 at the root of the source after downloading the code!
from pyflann import *
import numpy as np
# used to build computation graph with
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_graphics as tfg
rotation_matrix_3d = tfg.geometry.transformation.rotation_matrix_3d
# Lie Algebra ICP solver
# The algorithm was first implemented by Lei (yiak.wy@gmail.com) in C++ in late 2019 and reimplemented in Python in 2020
# you should not use this algorithm without Lei's consent, in any form or for any purpose.
# ALL RIGHTS RESERVED
# Points are very sparse, we don't have to do random sampling
| 30.499002
| 142
| 0.532199
|
import numpy as np
from enum import Enum
from pysvso.lib.maths.rotation import Euler, Quaternion, rotation_matrix, dRx, dRy, dRz
from scipy.optimize import fmin_bfgs
from scipy.optimize import fmin
from scipy.optimize import minimize
from scipy.optimize import approx_fprime
# when the point cloud is sparse
from sklearn.neighbors import NearestNeighbors
# see FLANN manual https://www.cs.ubc.ca/research/flann/uploads/FLANN/flann_manual-1.8.4.pdf
# remember to run 2to3 at the root of the source after downloading the code!
from pyflann import *
import numpy as np
# used to build computation graph with
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_graphics as tfg
rotation_matrix_3d = tfg.geometry.transformation.rotation_matrix_3d
class ICP:
class Algorithm(Enum):
POINT_POINT = 1
POINT_PLANE = 2 # G-ICP
def __init__(self, algorithm=None):
self.algorithm = algorithm or ICP.Algorithm.POINT_POINT
#
self.euler = Euler(0, 0, 0)
#
self.R = self.euler.R
self.t = np.zeros((3, 1))
#
self.max_iterations = 3
#
self.USE_GRADIENT = False
#
self.HAND_MADE_OPT = True
def transform(self, pcloud_src, R, t):
N = pcloud_src.shape[0]
matl = pcloud_src.T
matl = R.dot(matl) + t
return matl.T
    # @todo : TODO estimate transformation [R|t] between two point clouds
def costImpl(self, x, pcloud_src, pcloud_dest):
# print("x.shape: ", x.shape)
N = pcloud_src.shape[0]
assert (pcloud_src.shape == pcloud_dest.shape)
R, t = self._get_pose(x)
matl = pcloud_src.T # 3 * N
matl = R.dot(matl) + t
dx = matl.T - pcloud_dest
# N = dx.shape[0]
# _sum = 0.
# for i in range(N):
# v = dx[i].reshape((3,1))
# _sum += v.T.dot(v)[0,0]
# pass
_sum = np.sum(np.diag(np.dot(dx, dx.T)))
_sum /= N
return _sum
# @todo : TODO
# @state : DEPREACTED see "jac_mat" and "_optimize"
# reference: g2o/types/icp/types/types_icp.cpp
# I have tried autograd by HIPS from Harvard University. However, it doesn't work.
    # Surprisingly, tensorflow computes a reasonable gradient.
def jac_flow(self, x, pcloud_src, pcloud_dest):
"""
Author : LEI WANG (yiakwy@gmail.com)
Date : Jun 2, 2020
p* = R*p + t = [R|t]*p
partial dp* / partial dt.T ~ I (partial d P* / dt1 = 0 + [1, 0, 0].T)
(parital d P* / dt2 = 0 + [0, 1, 0].T)
(partial d P* / dt3 = 0 + [0, 0, 1].T)
In matrix form:
J ~ 3*6 matrix
"""
# using computing graph
N = pcloud_src.shape[0]
assert (pcloud_src.shape == pcloud_dest.shape)
R0, t0 = self._get_pose(x)
        rotation_matrix_3d = tfg.geometry.transformation.rotation_matrix_3d
# tf variables
pcloud_src = tf.Variable(pcloud_src)
pcloud_dest = tf.Variable(pcloud_dest)
# R = tf.constant(R0, dtype=tf.float64)
angle = tf.constant(np.array([x[0], x[1], x[2]]))
t = tf.constant(t0, dtype=tf.float64)
with tf.GradientTape(persistent=True) as tape:
tape.watch(angle)
# tape.watch(R)
tape.watch(t)
_T = tf.transpose
R = rotation_matrix_3d.from_euler(angle)
matl = _T(pcloud_src)
matl = _T(tf.matmul(R, matl) + t)
dx = matl - pcloud_dest
_sum = tf.reduce_sum(tf.linalg.diag_part(tf.matmul(dx, _T(dx))))
_sum /= N
dTheta = tape.gradient(_sum, angle)
# dR = tape.gradient(_sum, R)
dt = tape.gradient(_sum, t)
# R1 = R0 + dR * step
# t1 = t0 + dt * step
# euler = Euler.fromMatrix(R1)
# grad = np.array([
# euler.roll,
# euler.yaw,
# euler.pitch,
# t1[0],
# t1[1],
# t1[2]
# ]) - estimated_pose
grad = np.array([
dTheta[0],
dTheta[1],
dTheta[2],
dt[0],
dt[1],
dt[2]
])
del tape
return grad
pass
# @todo : TODO
def hess(self, x, pcloud_src, pcloud_dest):
pass
# @todo : TODO
def check_gradient(self, x, pcloud_src, pcloud_dest):
pass
# @todo : TODO
def jac_se3(self, x, pcloud_src, pcloud_dest):
# using computing graph
N = pcloud_src.shape[0]
assert (pcloud_src.shape == pcloud_dest.shape)
R0, t0 = self._get_pose(x)
matl = pcloud_src.T # 3 * N
matl = R0.dot(matl) + t0
dx = matl.T - pcloud_dest
# ====
Rx = rotation_matrix(x[3], [1, 0, 0])[:3, :3]
Ry = rotation_matrix(x[4], [0, 1, 0])[:3, :3]
Rz = rotation_matrix(x[5], [0, 0, 1])[:3, :3]
dRx_ = dRx(Rx)
dRy_ = dRy(Ry)
dRz_ = dRz(Rz)
# hard coded
J = np.zeros((3, 6, N))
J[0:3, 0:3, :] = np.eye(3)[:, :, np.newaxis]
temp = dRx_.dot(Ry.dot(Rz.dot(pcloud_src.T)))
print("temp shape:", temp.shape)
J[0:3, 3, :] = temp
J[0:3, 4, :] = Rx.dot(dRy_.dot(Rz.dot(pcloud_src.T)))
J[0:3, 5, :] = Rx.dot(Ry.dot(dRz_.dot(pcloud_src.T)))
return dx, J
    # implemented and refactored based on the algorithm elaborated in the following talk
# ref : dis.uniroma1.it/~labrococo/tutorial_icra_2016/icra16_slam_tutorial_grisetti.pdf
# note : does not work as expected for the moment, Jun 16, 2020
def jac_se3_manifold(self, x, pcloud_src, pcloud_dest):
# using computing graph
N = pcloud_src.shape[0]
assert (pcloud_src.shape == pcloud_dest.shape)
R0, t0 = self._get_pose(x)
matl = pcloud_src.T # 3 * N
matl = R0.dot(matl) + t0
dx = matl.T - pcloud_dest
# ====
# hard coded
J = np.zeros((3, 6, N))
J[0:3, 0:3, :] = np.eye(3)[:, :, np.newaxis]
def _Skew(pred):
skew = np.zeros((3, 3, N))
for i in range(N):
v = pred[:, i].reshape((3,))
skew[:, :, i] = np.array([
[0., -v[2], v[1]],
[v[2], 0., -v[0]],
[-v[1], v[0], 0.]
])
return skew
J[0:3, 3:6, :] = _Skew(matl)
return dx, J
    # implemented and refactored based on the algorithm elaborated in the following talk
# ref : dis.uniroma1.it/~labrococo/tutorial_icra_2016/icra16_slam_tutorial_grisetti.pdf
# note : does not work as expected for the moment, Jun 16, 2020
def _minimize(self, pcloud_src, pcloud_dest, estimated_pose, max_iter=10):
# find optimal H such that incremental function 0 = H * dx holds
x = estimated_pose.copy()
chi_stats = np.zeros((max_iter,))
for k in range(max_iter):
H = np.zeros((6, 6))
b = np.zeros((6, 1))
chi = 0.
# dx, J = self.jac_se3(x, pcloud_src, pcloud_dest)
# manifold method
dx, J = self.jac_se3_manifold(x, pcloud_src, pcloud_dest)
N = pcloud_src.shape[0]
for i in range(N):
dxi = dx[i, :].reshape((3, 1))
err = dxi.T.dot(dxi)
print("err_i : %f" % err)
if err > 1.:
continue
# print("dxi shape:", dxi)
Ji = J[:, :, i]
# print("Ji shape:", Ji)
H += Ji.T.dot(Ji)
temp = Ji.T.dot(dxi)
# print("temp shape:", temp)
b += temp
chi += err
chi /= N
chi_stats[k] = chi
# might produce huge error here
delta = -np.linalg.solve(H, b).reshape((6,))
# x += delta
# manifold method
R1, t1 = self._get_pose(delta)
R0, t0 = self._get_pose(x)
R = R1.dot(R0)
t = R1.dot(t0) + t1
euler = Euler.fromMatrix(R)
x = np.array([
euler.roll,
euler.yaw,
euler.pitch,
t[0][0],
t[1][0],
t[2][0]])
print("Iter %d, chi %f, delta %s, x %s" % (k, chi, delta, x))
return x, chi_stats[-1]
pass
# see github.com/gramaziokohler/icp/blob/master/icp.py
# used for initial estimation of the R, t, this will make matches closer to the fact
def svd_solver(self, pcloud_src, pcloud_dest):
N = pcloud_src.shape[0]
M = pcloud_src.shape[1]
assert (pcloud_src.shape == pcloud_dest.shape)
# substract the clouds to origins
centroid_src = np.mean(pcloud_src, axis=0).reshape((3, 1))
centroid_dest = np.mean(pcloud_dest, axis=0).reshape((3, 1))
print("[ICP.svd_solver] centroid_src.shape", centroid_src.shape)
pcl_src = pcloud_src - centroid_src.T
pcl_dest = pcloud_dest - centroid_dest.T
# get rotation matrix using SVD
H = np.dot(pcl_src.T, pcl_dest)
U, S, VT = np.linalg.svd(H)
R = np.dot(VT.T, U.T)
if np.linalg.det(R) < 0:
VT[M - 1, :] *= -1
R = np.dot(VT.T, U.T)
# get translation
t = centroid_dest - R.dot(centroid_src)
print("[ICP.svd_solver] t.shape", t.shape)
return R, t
# @todo : TODO
# refine the cost by obj=([R|t]*x - x').T.dot([R|t]*x - x')
def computeError(self, pcloud_src, pcloud_dest, estimated_pose):
N = pcloud_src.shape[0]
assert (N == pcloud_dest.shape[0])
# searching direction
numOfvar = estimated_pose.shape[0]
assert (numOfvar == 6)
delta = np.zeros((numOfvar,))
# reformed input functions
def costImpl(x):
return self.costImpl(x, pcloud_src, pcloud_dest)
#
err0 = costImpl(estimated_pose)
print("[ICP] error %s for estimated_pose %s" % (
err0,
estimated_pose
))
# using Lie algebra to computed SO3 jacobian and hessian instead, see G2OICP
# def jac(x):
# return self.jac(x, pcloud_src, pcloud_dest)
# pass
# def hess(x):
# return self.hess(x, pcloud_src, pcloud_dest)
# pass
# res = minimize(costImpl, estimated_pose, method='Newton-CG', jac=jac, hess=hess)
if not self.USE_GRADIENT:
res = fmin(costImpl, estimated_pose, full_output=1)
else:
if self.HAND_MADE_OPT:
                # compute the se3 Jacobian internally
res = self._minimize(pcloud_src, pcloud_dest, estimated_pose.copy(), max_iter=10) # optimize!
pass
else:
raise Exception("Not Implemented yet!")
new_estimated_pose, err1 = res[0], res[1]
print("[ICP] error %s with new_estimated_pose %s" % (
err1,
new_estimated_pose
))
if np.abs(err1) < np.abs(err0):
delta = new_estimated_pose # - estimated_pose
cost = err1
else:
print("[ICP] reject the proposal, using previous estimated_pose %s with error %f instead" % (
estimated_pose,
err0
))
delta = estimated_pose
cost = err0
# reform the output
return delta, cost
def _get_pose(self, estimated_pose):
self.euler.update(*estimated_pose[0:3])
R = self.euler.R
t = estimated_pose[3:6]
t = t.reshape((3, 1))
return R[:3, :3], t
# credits to
# 1. (2D/python) https://github.com/agnivsen/icp as the starter of the implementation
# 2. (2D/python) https://gist.github.com/ecward/c373932638fd04a2243e
# 3. (3D/c++) Alex Segal original G-ICP implementation :
# 4. (2D/python) python implementation by Jacob Everist recommended by Alex Segal though it is very poor, http://jacobeverist.com/gen_icp
def _icp_point_point(self, pcloud_src, pcloud_dest, matches, estimated_pose, callback=None):
        # make an initial estimation
if estimated_pose is None:
R, t = self.svd_solver(pcloud_src, pcloud_dest)
print("estimated_pose, \nR:\n%s\nt:\n%s\n" % (R, t))
self.euler = Euler.fromMatrix(R)
estimated_pose = np.array([
self.euler.roll,
self.euler.yaw,
self.euler.pitch,
t[0][0],
t[1][0],
t[2][0]])
pcloud_src = self.transform(pcloud_src, R, t)
k = 0
numOfvars = estimated_pose.shape[0]
while k < self.max_iterations:
init_pose = np.zeros((numOfvars,))
# association
if matches is None:
nbrs = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(pcloud_dest)
dist, indices = nbrs.kneighbors(pcloud_src, return_distance=True)
print("indices: \n%s\n" % indices)
N = pcloud_src.shape[0]
matches = np.zeros((N, 2)).astype(np.integer)
for i in range(N):
matches[i, 0] = i
matches[i, 1] = indices[i][0]
pass
print("Association: \nmatches:\n%s\n" % matches)
# iteration
delta, cost = self.computeError(pcloud_src[matches[:, 0].ravel()], pcloud_dest[matches[:, 1].ravel()],
init_pose)
delta = delta.reshape((6,))
R, t = self._get_pose(delta)
# update source point cloud
pcloud_src = self.transform(pcloud_src, R, t)
# update estimated pose
estimated_pose += delta
# check errors
if callback is not None:
callback(pcloud_src)
k += 1
print("cost:", cost)
if np.abs(cost) < 1e-6:
break
else:
print("iterating ...")
#
return estimated_pose
    # I am concerned that we don't have enough points, so the assumption behind ICP point-plane no longer holds.
def _icp_point_plane(self):
raise Exception("Not Implemented Yet")
# Lie Algebra ICP solver
class G2OICP: pass
class SacModel(object):
def __init__(self):
pass
# The algorithm was first implemented by Lei (yiak.wy@gmail.com) in C++ in late 2019 and reimplemented in Python in 2020
# you should not use this algorithm without Lei's consent, in any form or for any purpose.
# ALL RIGHTS RESERVED
# Points are very sparse, we don't have to do random sampling
class SacVolume(SacModel):
def __init__(self):
pass
class Ransac:
def __init__(self, model):
self._sac_model = model
self.max_iterations = 10
def optimize(self):
pass
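A small end-to-end sketch of the point-to-point path above: build a cloud, move it by a known pose, and let the solver recover the motion. Passing matches=None triggers the nearest-neighbour association and estimated_pose=None seeds the estimate via svd_solver; the pose values are illustrative:

rng = np.random.default_rng(0)
src = rng.uniform(-1.0, 1.0, size=(50, 3))
R_true = rotation_matrix(0.1, [0, 0, 1])[:3, :3]  # small yaw
t_true = np.array([[0.2], [0.1], [0.0]])
dest = (R_true @ src.T + t_true).T

pose = ICP()._icp_point_point(src.copy(), dest, matches=None, estimated_pose=None)
print(pose)  # roll/yaw/pitch + translation, ideally close to the true motion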
| 9,835
| 4,125
| 221
|
7f93836e41dc7de37d20c9086ad3bdf24f6efdd0
| 938
|
py
|
Python
|
train.py
|
tfandkusu/food101-tflite
|
be09912a8b7c398a348eb86a1b216356e56cbe13
|
[
"Apache-2.0"
] | 7
|
2019-05-12T22:44:37.000Z
|
2021-06-07T08:29:58.000Z
|
train.py
|
tfkeras/food101-tflite
|
be09912a8b7c398a348eb86a1b216356e56cbe13
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
tfkeras/food101-tflite
|
be09912a8b7c398a348eb86a1b216356e56cbe13
|
[
"Apache-2.0"
] | 3
|
2019-06-04T12:08:43.000Z
|
2020-04-23T18:09:29.000Z
|
import tensorflow as tf
import model
import data
# Responsible for building the training data
g = data.Data()
# Option so TensorFlow does not grab all GPU memory up front
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
# Build the model
model = model.make(tflite=False)
# Define the optimizer
optimizer = tf.keras.optimizers.Adam(lr=0.001)
model.compile(optimizer=optimizer,loss="categorical_crossentropy",
metrics=["categorical_accuracy"])
# Callback
cb = Callback()
# To resume training from a checkpoint
initial_epoch = 0
if initial_epoch >= 1:
model.load_weights("weight.hdf5")
# Train
model.fit_generator(g.generator(),
validation_data=g.generator_test(),
validation_steps=g.test_steps(),
callbacks = [cb],
steps_per_epoch=data.TRAIN_SIZE/data.BATCH_SIZE,epochs=50,
initial_epoch=initial_epoch)
| 26.8
| 66
| 0.749467
|
import tensorflow as tf
import model
import data
# Responsible for building the training data
g = data.Data()
# Option so TensorFlow does not grab all GPU memory up front
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
# Build the model
model = model.make(tflite=False)
# Define the optimizer
optimizer = tf.keras.optimizers.Adam(lr=0.001)
model.compile(optimizer=optimizer,loss="categorical_crossentropy",
metrics=["categorical_accuracy"])
# Callback
class Callback(tf.keras.callbacks.Callback):
def on_epoch_end(self,epoch, logs=None):
"各エポック終了時に重みを保存する"
model.save("weight.hdf5")
cb = Callback()
# To resume training from a checkpoint
initial_epoch = 0
if initial_epoch >= 1:
model.load_weights("weight.hdf5")
# Train
model.fit_generator(g.generator(),
validation_data=g.generator_test(),
validation_steps=g.test_steps(),
callbacks = [cb],
steps_per_epoch=data.TRAIN_SIZE/data.BATCH_SIZE,epochs=50,
initial_epoch=initial_epoch)
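The ConfigProto/Session block above is the TF1-compat way of telling TensorFlow to allocate GPU memory on demand instead of all at once. For reference, the native TF2 equivalent of allow_growth is per-device memory growth (a sketch):

# TF2 counterpart of allow_growth (sketch).
for gpu in tf.config.list_physical_devices("GPU"):
    tf.config.experimental.set_memory_growth(gpu, True)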
| 0
| 161
| 22
|
96d3891dd1dd4c7b4e68f80119deca6d242b9c04
| 6,468
|
py
|
Python
|
fastapi/account/controllers/account.py
|
ilDug/docker-utils
|
6580e916a8c2c0d91f2e3da52a9d839507569bb7
|
[
"MIT"
] | null | null | null |
fastapi/account/controllers/account.py
|
ilDug/docker-utils
|
6580e916a8c2c0d91f2e3da52a9d839507569bb7
|
[
"MIT"
] | null | null | null |
fastapi/account/controllers/account.py
|
ilDug/docker-utils
|
6580e916a8c2c0d91f2e3da52a9d839507569bb7
|
[
"MIT"
] | null | null | null |
import uuid
import hashlib
import bcrypt
import json
from string import Template
from datetime import datetime
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic import IPvAnyAddress
from pymongo import MongoClient
from bson import json_util
from core.models import MongoModel
from account.models import JWTModel, UserModel
from core import DagMail, DagMailConfig
from account.jwt import JWT
from core.utils.string import random_string
from config.conf import (
MONGO_CS,
ACTIVATION_KEY_LENGTH,
EMAIL_TEMPLATE_ACTIVATION,
EMAIL_TEMPLATE_BASE,
MAIL_CONFIG,
)
| 36.96
| 110
| 0.548237
|
import uuid
import hashlib
import bcrypt
import json
from string import Template
from datetime import datetime
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic import IPvAnyAddress
from pymongo import MongoClient
from bson import json_util
from core.models import MongoModel
from account.models import JWTModel, UserModel
from core import DagMail, DagMailConfig
from account.jwt import JWT
from core.utils.string import random_string
from config.conf import (
MONGO_CS,
ACTIVATION_KEY_LENGTH,
EMAIL_TEMPLATE_ACTIVATION,
EMAIL_TEMPLATE_BASE,
MAIL_CONFIG,
)
class Account:
UUID_NAMESPACE = uuid.UUID("9711c6f0-b5a2-11eb-9e14-c82a1456945d")
ACTIVATION_SCOPE = "account_activation"
def __init__(self):
pass
    # logs in to the server and returns the JWT token
def login(self, email: str, password: str, ip: IPvAnyAddress = None):
if not email:
raise HTTPException(400, "il campo email non è specificato")
if not password:
raise HTTPException(400, "la richiesta non contiene la password")
email = email.lower().strip()
        # check that the user exists in the database
with MongoClient(MONGO_CS) as c:
user = c.shop.users.find_one({"email": email})
if user is None:
raise HTTPException(
404,
"Utente non registrato. Procedi prima con la registrazione del tuo account.",
)
user = UserModel(**user)
            # verify the password
if not bcrypt.checkpw(password.encode(), user.hashed_password.encode()):
raise HTTPException(500, "password non corretta per questo account.")
            # create the token, filtering out sensitive fields
token: str = JWT.create(
user.dict(exclude={"id", "hashed_password", "registration_date"})
)
jwt: JWTModel = JWT.verify(token)
            # store the token in the database together with the login data
access = c.shop.accesses.insert_one(
{
"uid": str(jwt.uid),
"jti": str(jwt.jti),
"ip": ip,
"date": datetime.utcnow(),
}
).inserted_id
if access is None:
raise HTTPException(
500,
"Errore, si sono verificati problemi con la connsessione al server, riprovare più tardi",
)
return str(token)
    # registers the user and returns the JWT token
def register(self, email: str, password: str, ip: IPvAnyAddress = None) -> str:
if not email:
raise HTTPException(400, "il campo email non è specificato")
if not password:
raise HTTPException(400, "la richiesta non contiene la password")
email = email.lower().strip()
        # check whether the user already exists
email_hash = hashlib.md5(email.encode()).hexdigest()
if self.exists(email_hash):
raise HTTPException(
400, "un utente con questa email esiste gia' nel database"
)
salt = bcrypt.gensalt()
hashed_pw = bcrypt.hashpw(password.encode(), salt).decode()
uid = uuid.uuid5(self.UUID_NAMESPACE, email)
with MongoClient(MONGO_CS) as c:
with c.start_session() as s:
with s.start_transaction() as t:
                    # check whether the activation key already exists
while True:
activation_key = random_string(ACTIVATION_KEY_LENGTH)
if c.shop.operations.find_one({"key": activation_key}) is None:
break
                    # insert the new user
id = c.shop.users.insert_one(
{
"uid": str(uid),
"email": email,
"active": False,
"authorizations": ["basic"],
"hashed_password": hashed_pw,
"registration_date": datetime.now(),
}
).inserted_id
if id is None:
raise HTTPException(500, str("errore inserimento nuovo utente"))
                    # generate an activation key and store it
id = c.shop.operations.insert_one(
{
"uid": str(uid),
"key": activation_key,
"created_at": datetime.utcnow(),
"used_at": None,
"scope": self.ACTIVATION_SCOPE,
}
).inserted_id
if id is None:
raise HTTPException(
500, "errore generazione chiave di attivazione"
)
        # send the activation email
if not self.send_activation_email(email, activation_key):
raise HTTPException(500, "errori di invio email di attivazione")
return self.login(email, password, ip)
def exists(self, email_hash: str) -> bool:
with MongoClient(MONGO_CS) as c:
emails = [
hashlib.md5(e["email"].encode()).hexdigest()
for e in c.shop.users.find({}, {"email": 1, "_id": 0})
]
return email_hash in emails
    # sends the email with the account activation code
    # @return bool: whether the email was sent
def send_activation_email(self, email: str, activation_key: str) -> bool:
content_template = EMAIL_TEMPLATE_ACTIVATION.read_text()
content = Template(content_template).substitute(ACTIVATION_KEY=activation_key)
body_template = EMAIL_TEMPLATE_BASE.read_text()
body = Template(body_template).substitute(CONTENT=content)
try:
config = DagMailConfig(**MAIL_CONFIG)
with DagMail(config) as ms:
ms.add_receiver(email)
ms.messageHTML(body, "Attivazione Account")
ms.send()
return True
except Exception as e:
print(str(e))
return False
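End to end, register() hashes the password with bcrypt, derives a stable UUID5 from the email, stores the user and an activation key inside a Mongo transaction, emails the key, and finally delegates to login() for the JWT. A hypothetical call site (needs a reachable MongoDB plus valid mail config):

account = Account()
token = account.register("user@example.com", "s3cret-password", ip="203.0.113.7")
# later, from another session:
token = account.login("user@example.com", "s3cret-password", ip="203.0.113.7")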
| 5,367
| 458
| 23
|
511eaa58ab745f5d34d4945e2abfe854d6b5c2c5
| 1,305
|
py
|
Python
|
tools/ui2py.py
|
BYUCamachoLab/autogator
|
464d43d797c58a03263527bb7fdc737adb045c75
|
[
"MIT"
] | null | null | null |
tools/ui2py.py
|
BYUCamachoLab/autogator
|
464d43d797c58a03263527bb7fdc737adb045c75
|
[
"MIT"
] | 2
|
2021-05-04T15:46:46.000Z
|
2022-02-14T19:18:38.000Z
|
tools/ui2py.py
|
BYUCamachoLab/autogator
|
464d43d797c58a03263527bb7fdc737adb045c75
|
[
"MIT"
] | 2
|
2019-09-09T20:33:02.000Z
|
2019-09-25T21:44:46.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © Autogator Project Contributors
# Licensed under the terms of the MIT License
# (see autogator/__init__.py for details)
"""
Tool that converts all .ui and .qrc files in the autogator.resources folder to
python files in autogator.compiled.
Usage:
$ python3 ui2py.py
"""
import sys
import os
import subprocess
os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
path = 'autogator'
res = os.path.join(path, 'resources')
dest = os.path.join(path, 'compiled')
for root, directories, filenames in os.walk(res):
for filename in filenames:
item = os.path.join(root, filename)
if item.endswith('.ui'):
name, _ = os.path.splitext(filename)
rename = name + '_ui' + '.py'
path2dest = os.path.join(dest, rename)
print(*['pyside2-uic', '--from-imports', item, '-o', path2dest])
subprocess.call(['pyside2-uic', '--from-imports', item, '-o', path2dest])
if item.endswith('.qrc'):
name, _ = os.path.splitext(filename)
rename = name + '_rc' + '.py'
path2dest = os.path.join(dest, rename)
args = ['pyside2-rcc', item, '-o', path2dest]
print(*args)
subprocess.call(args)
| 31.071429
| 85
| 0.616092
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © Autogator Project Contributors
# Licensed under the terms of the MIT License
# (see autogator/__init__.py for details)
"""
Tool that converts all .ui and .qrc files in the autogator.resources folder to
python files in autogator.compiled.
Usage:
$ python3 ui2py.py
"""
import sys
import os
import subprocess
os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
path = 'autogator'
res = os.path.join(path, 'resources')
dest = os.path.join(path, 'compiled')
for root, directories, filenames in os.walk(res):
for filename in filenames:
item = os.path.join(root, filename)
if item.endswith('.ui'):
name, _ = os.path.splitext(filename)
rename = name + '_ui' + '.py'
path2dest = os.path.join(dest, rename)
print(*['pyside2-uic', '--from-imports', item, '-o', path2dest])
subprocess.call(['pyside2-uic', '--from-imports', item, '-o', path2dest])
if item.endswith('.qrc'):
name, _ = os.path.splitext(filename)
rename = name + '_rc' + '.py'
path2dest = os.path.join(dest, rename)
args = ['pyside2-rcc', item, '-o', path2dest]
print(*args)
subprocess.call(args)
| 0
| 0
| 0
|
37f14711c2fe081a327c2678cd3e96f52faa5215
| 2,150
|
py
|
Python
|
annotate/tests/test_pages.py
|
iafisher/exegesis
|
25bb55ecf82e9ffbf4dbdb71e6e86000d37a9966
|
[
"MIT"
] | null | null | null |
annotate/tests/test_pages.py
|
iafisher/exegesis
|
25bb55ecf82e9ffbf4dbdb71e6e86000d37a9966
|
[
"MIT"
] | 7
|
2018-07-28T02:30:45.000Z
|
2018-08-11T21:05:55.000Z
|
annotate/tests/test_pages.py
|
iafisher/exegesis
|
25bb55ecf82e9ffbf4dbdb71e6e86000d37a9966
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.test import TestCase
from annotate import models
class TestPages(TestCase):
"""Test that the proper templates are used to render pages."""
| 38.392857
| 77
| 0.669767
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from annotate import models
class TestPages(TestCase):
"""Test that the proper templates are used to render pages."""
def setUp(self):
User = get_user_model()
User.objects.create_user('temporary', 'temporary@example.com', 'pwd')
def test_index_page_template(self):
self.client.login(username='temporary', password='pwd')
response = self.client.get('/')
self.assertTemplateUsed(response, 'annotate/index.html')
def test_login_page_template(self):
response = self.client.get('/login')
self.assertTemplateUsed(response, 'annotate/login.html')
def test_path_page_template(self):
self.client.login(username='temporary', password='pwd')
# Create a minimal project with a directory and a file.
project = models.Project.objects.create(name='temporary')
models.Directory.objects.create(fullpath='', dirpath='', name='',
project=project)
models.Snippet.objects.create(fullpath='README.txt', dirpath='',
name='README.txt', text='Lorem ipsum', project=project,
downloaded=True)
response = self.client.get('/project/temporary')
self.assertTemplateUsed(response, 'annotate/directory.html')
response = self.client.get('/project/temporary/README.txt')
self.assertTemplateUsed(response, 'annotate/snippet.html')
def test_login_page_redirects(self):
self.client.login(username='temporary', password='pwd')
response = self.client.get('/login')
self.assertRedirects(response, '/')
def test_index_page_redirects(self):
response = self.client.get('/')
self.assertRedirects(response, '/login/?next=/')
def test_logout_page_redirects(self):
self.client.login(username='temporary', password='pwd')
response = self.client.get('/logout')
self.assertRedirects(response, '/login')
# Can't access pages after logging out.
response = self.client.get('/')
self.assertRedirects(response, '/login/?next=/')
| 1,757
| 0
| 189
|
ae43fe8255d01b1bdd638bd84beb9976436c8118
| 1,117
|
py
|
Python
|
Resize.py
|
PENGsBIT/PIL-demo
|
36181d79ca6ab9fe177d734ab20ec093a9ba2246
|
[
"Apache-2.0"
] | null | null | null |
Resize.py
|
PENGsBIT/PIL-demo
|
36181d79ca6ab9fe177d734ab20ec093a9ba2246
|
[
"Apache-2.0"
] | null | null | null |
Resize.py
|
PENGsBIT/PIL-demo
|
36181d79ca6ab9fe177d734ab20ec093a9ba2246
|
[
"Apache-2.0"
] | null | null | null |
# -*-coding:utf-8-*-
import numpy as np
from PIL import Image
from scipy import misc
| 32.852941
| 97
| 0.617726
|
# -*-coding:utf-8-*-
import numpy as np
from PIL import Image
from scipy import misc
def resize(img):
# img = Image.open(cirFileName)
# w, h = img.size
    # # round to int to avoid type errors
# w, h = round(w * 0.2), round(h * 0.2)
img = img.resize((200, 200), Image.ANTIALIAS).convert("RGBA")
return img
def getResultArray():
fileName = "1.png"
imgBeforeExpand = misc.imread(fileName, flatten=False, mode='YCbCr')
imgBeforeExpand = imgBeforeExpand / 255.0
# imgBeforeExpand = np.uint8(imgBeforeExpand*255)
# h, w = imgBeforeExpand.shape[:2]
# print(imgBeforeExpand.shape)
h = 150
w = 160
data = list()
data.append(misc.imresize(imgBeforeExpand[:, :, 0], [h, w], 'bicubic', mode="F")[:, :, None])
data.append(misc.imresize(imgBeforeExpand[:, :, 1], [h, w], 'bicubic', mode="F")[:, :, None])
data.append(misc.imresize(imgBeforeExpand[:, :, 2], [h, w], 'bicubic', mode="F")[:, :, None])
data_out = np.concatenate(data, axis=2)
data_out[data_out > 1] = 1.0
data_out = np.uint8(data_out * 255)
img = misc.toimage(arr=data_out, mode="YCbCr")
return img
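Note that scipy.misc.imread/imresize/toimage were removed in SciPy 1.2, so getResultArray() only runs against old SciPy releases. A rough modern equivalent of the per-channel bicubic resize using Pillow (already imported above); the numeric output will differ slightly from the scipy path:

# Sketch: Pillow-based stand-in for the removed scipy.misc calls.
def resize_ycbcr(path, h=150, w=160):
    img = Image.open(path).convert("YCbCr")
    return img.resize((w, h), Image.BICUBIC)  # PIL expects (width, height)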
| 1,000
| 0
| 46
|
f179ba977ded22d79e6b5ab337596e7134fe8e7c
| 2,023
|
py
|
Python
|
.ipynb_checkpoints/settings-checkpoint.py
|
dbbabcock/data_690_ai_the_toy_show
|
97614cdaa1d7204abd1c30b5463976578be6a67d
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/settings-checkpoint.py
|
dbbabcock/data_690_ai_the_toy_show
|
97614cdaa1d7204abd1c30b5463976578be6a67d
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/settings-checkpoint.py
|
dbbabcock/data_690_ai_the_toy_show
|
97614cdaa1d7204abd1c30b5463976578be6a67d
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import torch
import torchvision
def load_stored_resnet_model(categories, file_path):
'''Adapted from NVIDIA DLI Course Code: Getting Started with AI on Jetson Nano'''
print("Loading stored classification model...")
# If we're on the Jetson Nano use cuda, otherwise cpu:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = torchvision.models.resnet18(pretrained=True)
model.fc = torch.nn.Linear(512, len(categories))
model = model.to(device)
# model.load_state_dict(torch.load(file_path))
model.load_state_dict(torch.load(file_path, map_location=torch.device('cpu')))
model.eval()
print("Model ready!")
return model
| 38.169811
| 92
| 0.638655
|
from datetime import datetime
import torch
import torchvision
def __init__():
global toy_list, model_file_path, model, response_dict, computer_name, categories
toy_list = ['Minnie Mouse', 'Tigger', 'Horse', 'White Bear', 'Flamingo', 'Tiger Puppet',
'Elephant Puppet', 'Oriole', 'Black Cat']
# Create the categories for the model to use, which requires a 'No Toy' option:
categories = toy_list.copy()
categories.append('No Toy')
model_file_path = './data/toy_classify_resnet18_model_051321.pth'
model = load_stored_resnet_model(categories, model_file_path)
response_dict = {
"morning_text_list": ["Good morning!", "It's a lovely morning!",
"What a great morning to visit the Toy Show!"],
"afternoon_text_list" : ["Good afternoon!", "It's a lovely afternoon!"],
"show_toy_text_list" : ["Would you like to show me a toy to talk to?",
"Do you have a friend to show me?",
"Who are you playing with today? Can you show me?"]
}
# My daughter named the computer 'Dorris Therese' so that's what we'll use:
computer_name = 'Dorris'
def load_stored_resnet_model(categories, file_path):
'''Adapted from NVIDIA DLI Course Code: Getting Started with AI on Jetson Nano'''
print("Loading stored classification model...")
# If we're on the Jetson Nano use cuda, otherwise cpu:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = torchvision.models.resnet18(pretrained=True)
model.fc = torch.nn.Linear(512, len(categories))
model = model.to(device)
# model.load_state_dict(torch.load(file_path))
model.load_state_dict(torch.load(file_path, map_location=torch.device('cpu')))
model.eval()
print("Model ready!")
return model
def check_time():
curr_hour = datetime.now().time().hour
return curr_hour
| 1,243
| 0
| 50
|
bfb54c2d2ea587f2eea3dd14e9aabadd7e86ef91
| 550
|
py
|
Python
|
utea_web/moduli/migrations/0009_auto_20181003_1248.py
|
arol-varesi/utea_web
|
36b04656b93b123d3805d25ee4e61c6ae68252ed
|
[
"MIT"
] | null | null | null |
utea_web/moduli/migrations/0009_auto_20181003_1248.py
|
arol-varesi/utea_web
|
36b04656b93b123d3805d25ee4e61c6ae68252ed
|
[
"MIT"
] | null | null | null |
utea_web/moduli/migrations/0009_auto_20181003_1248.py
|
arol-varesi/utea_web
|
36b04656b93b123d3805d25ee4e61c6ae68252ed
|
[
"MIT"
] | 1
|
2021-10-02T11:45:34.000Z
|
2021-10-02T11:45:34.000Z
|
# Generated by Django 2.1.1 on 2018-10-03 10:48
from django.db import migrations, models
| 22.916667
| 50
| 0.581818
|
# Generated by Django 2.1.1 on 2018-10-03 10:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('moduli', '0008_auto_20181002_2154'),
]
operations = [
migrations.AlterField(
model_name='sigla',
name='descrizione',
field=models.CharField(max_length=60),
),
migrations.AlterField(
model_name='traduzione',
name='traduzione',
field=models.CharField(max_length=60),
),
]
| 0
| 436
| 23
|
9f9d45af4cae398c88e5f196080679db57c797e3
| 540
|
py
|
Python
|
venv/lib/python3.8/site-packages/zif.py
|
Jn-mic/Projects-Portfolio
|
642b9e58b81e8d505cc7462370e80fbab2945fd9
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/zif.py
|
Jn-mic/Projects-Portfolio
|
642b9e58b81e8d505cc7462370e80fbab2945fd9
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/zif.py
|
Jn-mic/Projects-Portfolio
|
642b9e58b81e8d505cc7462370e80fbab2945fd9
|
[
"MIT"
] | null | null | null |
import zipfile
# -- Compression --
# file = zipfile.ZipFile('text.zip', 'w')  # 'text.zip' is the archive to create; 'w' opens it for writing
# file.write("class.py")  # file to add to the archive
# file.close()
# -- Decompression --
# file = zipfile.ZipFile('text.zip', 'r')  # open for reading
# file.extractall(path="../")  # destination path; defaults to the current directory
# Brute-force an encrypted zip archive
fileobj = open("pwd.txt", "r")  # open the password wordlist
for item in fileobj.readlines():
    print(item.strip())  # strip() removes surrounding whitespace
    newpwd = item.strip()
    try:
        file = zipfile.ZipFile("class2.zip", "r")  # open the encrypted archive
        file.extractall(pwd=newpwd.encode("utf-8"))
    except:
        print("error")
| 25.714286
| 59
| 0.631481
|
import zipfile
# -- Compression --
# file = zipfile.ZipFile('text.zip', 'w')  # 'text.zip' is the archive to create; 'w' opens it for writing
# file.write("class.py")  # file to add to the archive
# file.close()
# -- Decompression --
# file = zipfile.ZipFile('text.zip', 'r')  # open for reading
# file.extractall(path="../")  # destination path; defaults to the current directory
# Brute-force an encrypted zip archive
fileobj = open("pwd.txt", "r")  # open the password wordlist
for item in fileobj.readlines():
    print(item.strip())  # strip() removes surrounding whitespace
    newpwd = item.strip()
    try:
        file = zipfile.ZipFile("class2.zip", "r")  # open the encrypted archive
        file.extractall(pwd=newpwd.encode("utf-8"))
    except:
        print("error")
| 0
| 0
| 0
|
c8c11a61c01891c5110af5047097e53b2754d5ff
| 2,155
|
py
|
Python
|
feature_extraction/measurements/granularity.py
|
widoptimization-willett/feature-extraction
|
25e963e3383673aad6aedfd504e69a1df7f47f9a
|
[
"Apache-2.0"
] | null | null | null |
feature_extraction/measurements/granularity.py
|
widoptimization-willett/feature-extraction
|
25e963e3383673aad6aedfd504e69a1df7f47f9a
|
[
"Apache-2.0"
] | null | null | null |
feature_extraction/measurements/granularity.py
|
widoptimization-willett/feature-extraction
|
25e963e3383673aad6aedfd504e69a1df7f47f9a
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import numpy as np
import skimage
import skimage.morphology as morph
from . import Measurement
from ..util.cleanup import cell_aoi_and_clip
| 34.758065
| 117
| 0.706729
|
# coding=utf-8
import numpy as np
import skimage
import skimage.morphology as morph
from . import Measurement
from ..util.cleanup import cell_aoi_and_clip
class Granularity(Measurement):
default_options = {
'subsampling': 2, # 2x downsampling
'element_size': 20,
'spectrum_length': 16,
# standard stuff
'clip_cell_borders': True,
'cell_border_erosion': None,
}
def compute(self, image):
# -- get mask, potentially clip image
image, aoi_mask = cell_aoi_and_clip(image, clip=self.options.clip_cell_borders,
erosion=self.options.cell_border_erosion)
# -- subsample image/mask
image = skimage.measure.block_reduce(image,
(self.options.subsampling, self.options.subsampling))
aoi_mask = skimage.measure.block_reduce(aoi_mask,
(self.options.subsampling, self.options.subsampling)) > 0.9 # reconvert to bool after averaging during subsampling
# -- remove background pixels using a black tophat transform.
# that is, heavily open the (subsampled) input image with a disk,
# and then subtract that from the original image.
bg_image = morph.opening(image, morph.disk(self.options.element_size))
image -= bg_image
image[image < 0] = 0 # make sure we don't have negative intensities
# -- perform "accelerated granulometry"
# adapted from lines [271-334] of cellprofiler/src/modules/measuregranularity.py
# structuring/neighborhood element
elem = np.array([[False, True, False],
[True, True, True],
[False, True, False]])
# set up an array for recording means at each spectrum step and populate with
# the initial mean
means = [max(np.mean(image[aoi_mask]), np.finfo(float).eps)]
# set up an array for recording {G_k(I)}, k∈{1,spectrum_length}
G = []
im_ero = image.copy() # im_ero holds the result of the erosion, which is eroded at each step with ``elem``.
for i in range(1, self.options.spectrum_length):
im_ero = morph.erosion(im_ero, elem)
im_rec = morph.reconstruction(im_ero, image, selem=elem)
means.append(np.mean(im_rec[aoi_mask]))
G_k = (means[i-1] - means[i]) * 100 / means[0]
G.append(G_k)
return G
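The loop above is a granulometric spectrum: at each scale k it erodes once more, reconstructs under the original, and records G_k = (means[k-1] - means[k]) * 100 / means[0], the percentage of total intensity removed at that scale. The same computation outside the Measurement wrapper, on a toy image:

# Standalone granularity spectrum on random data (sketch).
img = np.random.rand(64, 64)
mask = np.ones_like(img, dtype=bool)
cross = morph.disk(1)  # same 3x3 cross as `elem` above
means = [max(np.mean(img[mask]), np.finfo(float).eps)]
im_ero, G = img.copy(), []
for k in range(1, 8):
    im_ero = morph.erosion(im_ero, cross)
    im_rec = morph.reconstruction(im_ero, img, selem=cross)  # `selem` matches the skimage version used here
    means.append(np.mean(im_rec[mask]))
    G.append((means[k - 1] - means[k]) * 100 / means[0])
print(G)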
| 1,753
| 224
| 23
|
4ecda2218db21b332e4657876d786a1dfb654049
| 8,817
|
py
|
Python
|
src/Linestring.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 41
|
2021-11-24T05:54:08.000Z
|
2022-03-26T10:19:30.000Z
|
src/Linestring.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 1
|
2022-02-28T04:34:51.000Z
|
2022-03-07T10:49:27.000Z
|
src/Linestring.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 10
|
2021-11-24T07:35:17.000Z
|
2022-03-25T18:42:14.000Z
|
from PyQt5 import QtCore, QtWidgets, QtGui
from Item import Item
from Tikzifyables.Arrowable import Arrowable
from Tikzifyables.DashPatternable import DashPatternable
from Tikzifyables.Colourable.LineColourable import LineColourable
from Tikzifyables.Decorationable import Decorationable
from Tikzifyables.CurveStrategyable import CurveStrategyable
import Constant as c
from GeometryMath import point_segment_dist_sqr
| 52.171598
| 135
| 0.653851
|
from PyQt5 import QtCore, QtWidgets, QtGui
from Item import Item
from Tikzifyables.Arrowable import Arrowable
from Tikzifyables.DashPatternable import DashPatternable
from Tikzifyables.Colourable.LineColourable import LineColourable
from Tikzifyables.Decorationable import Decorationable
from Tikzifyables.CurveStrategyable import CurveStrategyable
import Constant as c
from GeometryMath import point_segment_dist_sqr
class Linestring(Item, DashPatternable, LineColourable, Decorationable, Arrowable, CurveStrategyable):
def __init__(self, item):
"""Create Linestring."""
Item.__init__(self, item)
if item is None:
self.dictionary_builder(None, "")
DashPatternable.__init__(self, self.item)
LineColourable.__init__(self, self.item)
Decorationable.__init__(self, self.item)
Arrowable.__init__(self, self.item)
CurveStrategyable.__init__(self, self.item)
def tikzify(self):
strategy_options, strategy_coordinates = self.tikzify_strategy(True)
options = [
'' if self.item["line"]["line_width"] == c.Segment.Default.LINE_WIDTH else f'line width={self.item["line"]["line_width"]}',
self.tikzify_dash(),
'draw=' + self.tikzify_line_colour(),
self.tikzify_arrows(),
self.tikzify_decoration(),
strategy_options
]
options = filter(bool, options)
return "\\draw[%s] %s;" % (', '.join(options), strategy_coordinates)
def __str__(self): # TODO modify this too, this is now an error
return "Segment from (%s) to (%s)" % (self.item["definition"]["A"], self.item["definition"]["B"])
def draw_on_canvas(self, items, scene, colour=QtCore.Qt.darkMagenta):
thickness = 4
coord_list = [items[i].get_canvas_coordinates() for i in self.item["definition"]]
for i in range(len(coord_list) - 1):
graphics_line = QtWidgets.QGraphicsLineItem(*coord_list[i], *coord_list[i + 1])
pen = QtGui.QPen()
pen.setWidth(thickness)
pen.setDashPattern([4, 4])
pen.setColor(colour)
graphics_line.setPen(pen)
scene.addItem(graphics_line)
@staticmethod
def draw_on_canvas_static(x, y, id_history, scene, colour=QtCore.Qt.darkMagenta):
thickness = 4
coord_list = [scene.project_data.items[i].get_canvas_coordinates() for i in id_history] + [[x, y]]
for i in range(len(coord_list) - 1):
graphics_line = QtWidgets.QGraphicsLineItem(*coord_list[i], *coord_list[i + 1])
pen = QtGui.QPen()
pen.setWidth(thickness)
pen.setColor(colour)
pen.setDashPattern([4, 4])
graphics_line.setPen(pen)
scene.addItem(graphics_line)
def distance_sqr(self, x, y, items):
min_dist = 1e6
coord_list = [items[i].get_canvas_coordinates() for i in self.depends_on()]
for i in range(len(coord_list) - 1):
if (distance := point_segment_dist_sqr(coord_list[i], coord_list[i + 1], [x, y])) < min_dist:
min_dist = distance
return min_dist
def depends_on(self):
return self.item["definition"]
def definition_string(self):
def_str = [('{0:.6g}'.format(i) if isinstance(i, float) else i) for i in self.item["definition"]]
return '%s(%s)' % (type(self).__name__, ', '.join(def_str))
@staticmethod
def static_patterns():
return [i * 'p' for i in range(3, 40)]
def patterns(self):
return [i * 'p' for i in range(3, 40)]
def next_id_func(self, definition, iter_counter):
return 'Linestring_' + chr(ord('A') + iter_counter % 26) + (iter_counter // 26) * '\''
def definition_builder(self, data, items=None):
return data[:-1]
def parse_into_definition(self, arguments, items):
# arguments length condition
if len(arguments) <= 1:
return None
# all arguments are members of the regular expression for argument name
if not all(map(lambda x: self.name_pattern(x), arguments)):
return None
# all arguments are items that already exist
if not all(map(lambda x: x in items, arguments)):
return None
# the type of all arguments is of a certain type
if not all(map(lambda x: items[x].item["type"] == 'point', arguments)):
return None
# self-reference condition (self-reference is not permitted)
if self.get_id() in arguments:
return None
return self.definition_builder(arguments+['mock item'])
def dictionary_builder(self, definition, id_, sub_type=None): # TODO create Linestring class in Constant.py and modify entries here
dictionary = {}
dictionary["id"] = id_
dictionary["type"] = 'linestring'
dictionary["sub_type"] = None
dictionary["show"] = True
dictionary["definition"] = definition
dictionary["line"] = {}
dictionary["line"]["line_width"] = c.Linestring.Default.LINE_WIDTH
dictionary["line"]["colour"] = {}
dictionary["line"]["colour"]["name"] = c.Linestring.Default.Line_Colour.NAME
dictionary["line"]["colour"]["mix_with"] = c.Linestring.Default.Line_Colour.MIX_WITH
dictionary["line"]["colour"]["mix_percent"] = c.Linestring.Default.Line_Colour.MIX_RATIO
dictionary["line"]["colour"]["strength"] = c.Linestring.Default.Line_Colour.STRENGTH
dictionary["line"]["dash"] = {}
dictionary["line"]["dash"]["stroke"] = c.Linestring.Default.LINE_DASH_STROKE
dictionary["line"]["dash"]["custom_pattern"] = c.Linestring.Default.LINE_DASH_CUSTOM
dictionary["line"]["double"] = {}
dictionary["line"]["double"]["distance"] = c.Linestring.Default.Double_Line.DISTANCE
dictionary["line"]["double"]["colour"] = {}
dictionary["line"]["double"]["colour"]["name"] = c.Linestring.Default.Double_Line.Colour.NAME
dictionary["line"]["double"]["colour"]["mix_with"] = c.Linestring.Default.Double_Line.Colour.MIX_WITH
dictionary["line"]["double"]["colour"]["mix_percent"] = c.Linestring.Default.Double_Line.Colour.MIX_RATIO
dictionary["line"]["double"]["colour"]["strength"] = c.Linestring.Default.Double_Line.Colour.STRENGTH
dictionary["line"]["decoration"] = {}
dictionary["line"]["decoration"]["type"] = c.Linestring.Default.Decoration.TYPE
dictionary["line"]["decoration"]["amplitude"] = c.Linestring.Default.Decoration.AMPLITUDE
dictionary["line"]["decoration"]["wavelength"] = c.Linestring.Default.Decoration.WAVELENGTH
dictionary["line"]["decoration"]["text"] = c.Linestring.Default.Decoration.TEXT
dictionary["line"]["strategy"] = {}
dictionary["line"]["strategy"]["type"] = c.Linestring.Default.Strategy.TYPE
dictionary["line"]["strategy"]["rounded_corners"] = c.Linestring.Default.Strategy.ROUNDED_CORNERS
dictionary["line"]["strategy"]["bend_angle"] = c.Linestring.Default.Strategy.BEND_ANGLE
dictionary["line"]["strategy"]["in_angle"] = c.Linestring.Default.Strategy.IN_ANGLE
dictionary["line"]["strategy"]["out_angle"] = c.Linestring.Default.Strategy.OUT_ANGLE
dictionary["line"]["strategy"]["smooth_tension"] = c.Linestring.Default.Strategy.SMOOTH_TENSION
dictionary["line"]["strategy"]["loop"] = False
dictionary["line"]["strategy"]["loop_size"] = c.Linestring.Default.Strategy.LOOP_SIZE
dictionary["line"]["connect_to"] = c.Linestring.Default.LINE_CONNECT_TO
dictionary["o_arrow"] = {}
dictionary["o_arrow"]["width"] = c.Linestring.Default.O_Arrow.WIDTH
dictionary["o_arrow"]["length"] = c.Linestring.Default.O_Arrow.LENGTH
dictionary["o_arrow"]["tip"] = c.Linestring.Default.O_Arrow.TIP
dictionary["o_arrow"]["bending"] = c.Linestring.Default.O_Arrow.BENDING
dictionary["o_arrow"]["side"] = c.Linestring.Default.O_Arrow.SIDE
dictionary["o_arrow"]["reversed"] = c.Linestring.Default.O_Arrow.REVERSED
dictionary["o_arrow"]["bending"] = c.Linestring.Default.O_ARROW_BENDING
dictionary["d_arrow"] = {}
dictionary["d_arrow"]["width"] = c.Linestring.Default.D_Arrow.WIDTH
dictionary["d_arrow"]["length"] = c.Linestring.Default.D_Arrow.LENGTH
dictionary["d_arrow"]["tip"] = c.Linestring.Default.D_Arrow.TIP
dictionary["d_arrow"]["bending"] = c.Linestring.Default.D_Arrow.BENDING
dictionary["d_arrow"]["side"] = c.Linestring.Default.D_Arrow.SIDE
dictionary["d_arrow"]["reversed"] = c.Linestring.Default.D_Arrow.REVERSED
dictionary["d_arrow"]["bending"] = c.Linestring.Default.D_ARROW_BENDING
self.item = dictionary
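The point_segment_dist_sqr helper imported from GeometryMath is not shown in this file. A minimal sketch consistent with how distance_sqr calls it (two segment endpoints and a query point, each an [x, y] pair, returning the squared distance) might look like the following; the body is an assumption, since only the call signature is visible above.
def point_segment_dist_sqr(a, b, p):
    # Hypothetical sketch; only the (a, b, p) call shape appears in this file.
    ax, ay = a
    bx, by = b
    px, py = p
    dx, dy = bx - ax, by - ay
    seg_len_sqr = dx * dx + dy * dy
    if seg_len_sqr == 0: # degenerate segment: a == b
        return (px - ax) ** 2 + (py - ay) ** 2
    # Project p onto the segment and clamp the projection to the endpoints.
    t = max(0.0, min(1.0, ((px - ax) * dx + (py - ay) * dy) / seg_len_sqr))
    cx, cy = ax + t * dx, ay + t * dy
    return (px - cx) ** 2 + (py - cy) ** 2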
| 7,494
| 880
| 23
|
67995b2f86c3ae2b6a09815a02f81e478aa1eb27
| 12,847
|
py
|
Python
|
virtnet_conf.py
|
gizmoguy/faucet-gui
|
0055b29be284dfcc68e36f3f0f864c99c062376c
|
[
"Apache-2.0"
] | 16
|
2018-08-31T02:17:55.000Z
|
2021-12-27T06:43:51.000Z
|
virtnet_conf.py
|
urantialife/faucet-gui
|
f9741a8c47da6fc6fb1a508c794e04bcb69cfef6
|
[
"Apache-2.0"
] | 2
|
2018-08-31T04:02:09.000Z
|
2021-06-04T13:27:48.000Z
|
virtnet_conf.py
|
urantialife/faucet-gui
|
f9741a8c47da6fc6fb1a508c794e04bcb69cfef6
|
[
"Apache-2.0"
] | 9
|
2019-01-06T17:58:48.000Z
|
2021-12-28T17:59:55.000Z
|
"""
PROGRAM_NAME: virtnet_creator
FILE_NAME: conf.py
AUTHOR: Brendan Geoghegan
PROGRAM_DESCRIPTION: This program is a GUI application for users to build or load network topologies that
have an SDN controller at their center. The original code tied into a Xen loadout used to clone, start up, and
operate VMs, but this simplified version is only meant to visually generate network topologies and then generate
the requisite YAML files for a Faucet SDN controller.
FILE_DESCRIPTION: This file contains functions to read and write a custom config file for saving and loading
a specific network configuration. In theory this would allow users to quickly spin up the same network environment
over and over again. Right now the file is saved as .virtnet; you can find this being called from main.py.
"""
class confIO:
'''Initiate a series of read functions going through the .virtnet file and creating instances of devices'''
'''Function to read controller information from .virtnet file and to instantiate new controller objects'''
'''Function to read switch information from .virtnet file and to instantiate new switch objects'''
'''Function to read host information from .virtnet file and to instantiate new host objects'''
'''read in all the in/out of band connections, controller relations, and vlans to create links'''
'''Initiate a series of write functions going through the list of devices and pulling out info'''
'''Function to write controller information to a .virtnet file'''
'''Function to write switch information and any attached hosts information to a .virtnet file'''
'''Function to capture all the links (In/Out band) from each switch to write to .virtnet file'''
'''Function to capture the VLAN data from a controller and add to the end of the file'''
| 49.794574
| 120
| 0.592745
|
"""
PROGRAM_NAME: virtnet_creator
FILE_NAME: conf.py
AUTHOR: Brendan Geoghegan
PROGRAM_DESCRIPTION: This program is a GUI application for users to build or load network topologies that
have an SDN controller at their center. The original code tied into a Xen loadout used to clone, start up, and
operate VMs, but this simplified version is only meant to visually generate network topologies and then generate
the requisite YAML files for a Faucet SDN controller.
FILE_DESCRIPTION: This file contains functions to read and write a custom config file for saving and loading
a specific network configuration. In theory this would allow users to quickly spin up the same network environment
over and over again. Right now the file is saved as .virtnet; you can find this being called from main.py.
"""
class confIO:
def __init__(self):
self.id = "YAML_IO"
self.file = None
self.device_dictionary = {}
self.parent = None
self.host = None
'''Initiate a series of read functions going through the .virtnet file and creating instances of devices'''
def read(self, filename, sdn_widget):
# print ("entering config read function")
self.file = open(filename, "r")
'''check the first and last line of the file for good formatting'''
if self.file.readline() != "Row 0 : Controller\n":
print "First line of config file is not correct"
return
for line in self.file:
pass
if line != "END_CONNECTION_LIST\n":
print "Last line of config file is not correct"
return
self.file.seek(0,0) #reset file reader
line = self.file.readline()
self.device_dictionary = {}
'''first we read in all the equipment and populate our program'''
while line != "END_ASSET_LIST\n":
if line.split(':').__len__() != 2:
line = self.file.readline()
continue
else:
device_type = line.split(':')[1].strip()
if device_type == "Controller":
self.__read_controller(sdn_widget)
elif device_type == "Switch" or device_type == "Host":
device_row = int(line.split(":")[0].strip("Row ")) #rows help determine parents
while device_row <= self.parent.row: # move back up the tree until you find the next parent
self.parent = self.parent.parent
if device_type == "Switch":
self.__read_switch(sdn_widget)
elif device_type == "Host":
self.__read_host(sdn_widget)
line = self.file.readline()
'''read in all the in/out of band connections, assigned controllers, and vlans'''
self.__read_connections()
self.device_dictionary.clear() # done using this, time to clean
self.file.close() # and close file
'''Function to read controller information from .virtnet file and to instantiate new controller objects'''
def __read_controller(self, sdn_widget):
self.parent = sdn_widget.add_controller()
line = self.file.readline()
while line.split(':')[0].strip() != "END":
line_in = line.split(':')
if line_in[0].strip() == 'ID': self.parent.id = line_in[1].strip()
if line_in[0].strip() == 'DISPLAY_ID': self.parent.display_id = line_in[1].strip()
if line_in[0].strip() == 'IP_ADDR': self.parent.ip_address = line_in[1].strip()
if line_in[0].strip() == 'NETMASK': self.parent.netmask = line_in[1].strip()
if line_in[0].strip() == 'MAC_ADDR':
mac = line_in[1].strip()
self.parent.mac_address = ':'.join([mac[i:i + 2] for i in range(0, len(mac), 2)]) # puts :'s in mac_adr
if line_in[0].strip() == 'Next_Switch_Num': self.parent.next_switch_number = int(line_in[1].strip())
line = self.file.readline()
self.device_dictionary[self.parent.mac_address] = self.parent # this is used to quickly check mac addrs later
'''Function to read switch information from .virtnet file and to instantiate new switch objects'''
def __read_switch(self, sdn_widget):
self.parent.next_switch_number -= 1 # due to how the system adds switches in the first place
self.parent = sdn_widget.add_switch(self.parent)
line = self.file.readline()
while line.split(':')[0].strip() != "END":
line_in = line.split(':')
if line_in[0].strip() == 'ID': self.parent.id = line_in[1].strip()
if line_in[0].strip() == 'DISPLAY_ID': self.parent.display_id = line_in[1].strip()
if line_in[0].strip() == 'IP_ADDR': self.parent.ip_address = line_in[1].strip()
if line_in[0].strip() == 'NETMASK': self.parent.netmask = line_in[1].strip()
if line_in[0].strip() == 'MAC_ADDR':
mac = line_in[1].strip()
self.parent.mac_address = ':'.join([mac[i:i + 2] for i in range(0, len(mac), 2)]) # puts :'s in mac_adr
if line_in[0].strip() == 'DP_ID': self.parent.dp_id = line_in[1].strip()
if line_in[0].strip() == 'Next_Switch_Num': self.parent.next_switch_number = int(line_in[1].strip())
if line_in[0].strip() == 'Next_Host_Num': self.parent.next_host_number = int(line_in[1].strip())
line = self.file.readline()
self.device_dictionary[self.parent.mac_address] = self.parent # this is used to quickly check mac_addrs later
'''Function to read host information from .virtnet file and to instantiate new host objects'''
def __read_host(self, sdn_widget):
self.parent.next_host_number -= 1 # due to how the system adds switches in the first place
host = sdn_widget.add_host(self.parent)
line = self.file.readline()
while line.split(':')[0].strip() != "END":
line_in = line.split(':')
if line_in[0].strip() == 'ID': host.id = line_in[1].strip()
if line_in[0].strip() == 'DISPLAY_ID': host.display_id = line_in[1].strip()
if line_in[0].strip() == 'IP_ADDR': host.ip_address = line_in[1].strip()
if line_in[0].strip() == 'NETMASK': host.netmask = line_in[1].strip()
if line_in[0].strip() == 'MAC_ADDR':
mac = line_in[1].strip()
host.mac_address = ':'.join([mac[i:i + 2] for i in range(0, len(mac), 2)]) # puts :'s in mac_adr
if line_in[0].strip() == 'VLAN' : host.vlan = int(line_in[1].strip())
line = self.file.readline()
self.device_dictionary[host.mac_address] = host # this is used to quickly check mac addrs later
'''read in all the in/out of band connections, controller relations, and vlans to create links'''
def __read_connections(self):
# clear any connections that would have been automatically added in equipment population
for i in self.device_dictionary:
if self.device_dictionary[i].type == "Switch":
tmp_switch = self.device_dictionary[i]
tmp_switch.interface_list[:] = []
tmp_switch.controller_list[:] = []
tmp_switch.vlans.clear()
tmp_switch.next_eth_num = 1 # counter for naming the next ethX added
tmp_switch.next_in_band_num = 1 # counter for naming the next in_band connection
tmp_switch.next_out_band_num = 1 # counter for naming the next out_band connection
elif self.device_dictionary[i].type == "Controller":
self.device_dictionary[i].vlans.clear()
line = self.file.readline()
while line != "END_CONNECTION_LIST\n":
connection = line.split(":")[0].strip()
mac_a = line.split(":")[1].strip()
mac_a = ':'.join([mac_a[i:i + 2] for i in range(0, len(mac_a), 2)]) # puts :'s in mac addr
mac_b = line.split(":")[2].strip()
mac_b = ':'.join([mac_b[i:i + 2] for i in range(0, len(mac_b), 2)]) # puts :'s in mac addr
if connection == "In_Band":
self.device_dictionary[mac_a].add_interface(self.device_dictionary[mac_b], True)
elif connection == "Out_Band":
self.device_dictionary[mac_a].add_interface(self.device_dictionary[mac_b], False)
elif connection == "Assigned_Controller":
self.device_dictionary[mac_a].controller_list.append(self.device_dictionary[mac_b])
elif connection == "Vlans":
vlan_list = mac_b.split()
for i in vlan_list:
self.device_dictionary[mac_a].vlans.add(i.strip())
line = self.file.readline()
'''Initiate a series of write functions going through the list of devices and pulling out info'''
def write(self, filename, controller_list, switch_list):
# print ("entering config write function")
if controller_list is None:
return
self.file = open(filename,"w")
for c in controller_list:
self.file.write("Row 0 : Controller\n")
self.__write_controller(c)
self.file.write("END_ASSET_LIST\n")
for s in switch_list:
self.__write_switch_connections(s)
for c in controller_list:
self.__write_controller_vlans(c)
self.file.write("END_CONNECTION_LIST\n")
self.file.close()
'''Function to write controller information to a .virtnet file'''
def __write_controller(self, c):
self.file.write("ID : "+ c.id+"\n")
self.file.write("DISPLAY_ID :"+ c.display_id+"\n")
self.file.write("IP_ADDR : "+ c.ip_address+"\n")
self.file.write("NETMASK : "+ c.netmask+"\n")
self.file.write("MAC_ADDR : " + str(c.mac_address).replace(':','') + "\n")
self.file.write("Next_Switch_Num : "+ str(c.next_switch_number)+"\n")
v_lan_string = " "
for i in c.vlans:
v_lan_string += str(i)+" "
self.file.write("V_LAN : " + v_lan_string + "\n")
self.file.write("END : Controller\n")
for s in c.children:
self.__write_switch(s)
'''Function to write switch information and any attached hosts information to a .virtnet file'''
def __write_switch(self, s):
self.file.write("Row "+ str(s.row) + " : Switch\n")
self.file.write("ID : "+ s.id+"\n")
self.file.write("DISPLAY_ID :" + s.display_id+"\n")
self.file.write("IP_ADDR : "+ s.ip_address+"\n")
self.file.write("NETMASK : " + s.netmask + "\n")
self.file.write("MAC_ADDR : "+ str(s.mac_address).replace(":",'')+"\n")
self.file.write("DP_ID : " + s.dp_id + "\n")
self.file.write("Next_Switch_Num : "+ str(s.next_switch_number)+"\n")
self.file.write("Next_Host_Num : "+ str(s.next_host_number)+"\n")
self.file.write("END : Switch\n")
for child in s.children:
if child.type == "Host":
self.file.write("Row " + str(child.row) + " : Host\n")
self.file.write("ID : "+ child.id + "\n")
self.file.write("DISPLAY_ID :" + child.display_id+"\n")
self.file.write("IP_ADDR : "+ child.ip_address + "\n")
self.file.write("NETMASK : " + child.netmask + "\n")
self.file.write("MAC_ADDR : " + str(child.mac_address).replace(':','') + "\n")
self.file.write("VLAN : " + str(child.vlan) + "\n")
self.file.write("END : Host\n")
else:
self.__write_switch(child)
'''Function to capture all the links (In/Out band) from each switch to write to .virtnet file'''
def __write_switch_connections(self, s):
s_mac = str(s.mac_address).replace(':','')
for i in s.interface_list:
if i.in_band:
self.file.write("In_Band: " + s_mac + " : " + str(i.device.mac_address).replace(':', '') + "\n")
else:
self.file.write("Out_Band: " + s_mac + " : " + str(i.device.mac_address).replace(':', '') + "\n")
for i in s.controller_list:
self.file.write("Assigned_Controller: " + s_mac + " : " + str(i.mac_address).replace(':','') + "\n")
vlan_string = "Vlans : " + s_mac + " : "
for i in s.vlans:
vlan_string += str(i) + ' '
self.file.write(vlan_string + '\n')
'''Function to capture the VLAN data from a controller and add to the end of the file'''
def __write_controller_vlans(self, c):
c_mac= str(c.mac_address).replace(':','')
vlan_string = "Vlans : " + c_mac + " : "
for i in c.vlans:
vlan_string += str(i) + ' '
self.file.write(vlan_string + '\n')
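Taken together, the write functions above emit a .virtnet file shaped like the following (all names, addresses, and VLAN numbers are illustrative):
Row 0 : Controller
ID : controller_0
DISPLAY_ID :C0
IP_ADDR : 10.0.0.1
NETMASK : 255.255.255.0
MAC_ADDR : 0a0b0c0d0e01
Next_Switch_Num : 2
V_LAN :  100
END : Controller
Row 1 : Switch
ID : switch_1
DISPLAY_ID :S1
IP_ADDR : 10.0.0.2
NETMASK : 255.255.255.0
MAC_ADDR : 0a0b0c0d0e02
DP_ID : 1
Next_Switch_Num : 1
Next_Host_Num : 1
END : Switch
END_ASSET_LIST
In_Band: 0a0b0c0d0e02 : 0a0b0c0d0e01
Assigned_Controller: 0a0b0c0d0e02 : 0a0b0c0d0e01
Vlans : 0a0b0c0d0e02 : 100
END_CONNECTION_LIST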
| 10,725
| 0
| 286
|
9630077f65b7d28eba7099895195d4d403635301
| 339
|
py
|
Python
|
StoryMap/apps/users/admin.py
|
LittleBai0606/StoryMap
|
1169e763031f92ee6da65829fa9cbce1ebd991ad
|
[
"MIT"
] | 1
|
2021-11-01T05:58:25.000Z
|
2021-11-01T05:58:25.000Z
|
StoryMap/apps/users/admin.py
|
LittleBai0606/StoryMap
|
1169e763031f92ee6da65829fa9cbce1ebd991ad
|
[
"MIT"
] | null | null | null |
StoryMap/apps/users/admin.py
|
LittleBai0606/StoryMap
|
1169e763031f92ee6da65829fa9cbce1ebd991ad
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import UserProfile
# Register your models here.
admin.site.register(UserProfile, UserProfileAdmin)
| 24.214286
| 92
| 0.722714
|
from django.contrib import admin
from .models import UserProfile
# Register your models here.
class UserProfileAdmin(admin.ModelAdmin):
fields = ['username', 'password', 'email', 'nickname', 'gender', 'birthday', 'avatar', ]
list_display = ['username', 'email', 'nickname']
admin.site.register(UserProfile, UserProfileAdmin)
| 0
| 168
| 23
|
df7a5da0ce6a0c9361b27bfdc4e070c0fb9fba67
| 2,013
|
py
|
Python
|
turn_classification/turn_classification.py
|
team8/outdoor-blind-navigation
|
78b99fe1a193161c4b809990c71940fbed719785
|
[
"MIT"
] | 6
|
2021-05-18T16:46:04.000Z
|
2022-01-08T02:49:27.000Z
|
turn_classification/turn_classification.py
|
aoberai/outdoor-blind-navigation
|
78b99fe1a193161c4b809990c71940fbed719785
|
[
"MIT"
] | 15
|
2021-05-19T23:40:50.000Z
|
2021-07-06T08:24:40.000Z
|
turn_classification/turn_classification.py
|
aoberai/outdoor-blind-navigation
|
78b99fe1a193161c4b809990c71940fbed719785
|
[
"MIT"
] | 4
|
2021-05-26T02:10:43.000Z
|
2021-09-04T17:59:51.000Z
|
import cv2
import threading
import tensorflow as tf
import numpy as np
import time
import capturer
from utils.circularBuffer import CircularBuffer
labels = ['Left Turn', 'No Turn', 'Right Turn']
model_path = "./turn_classification/turn_classification_model_final_v1.h5"
readings_buffer_size = 20
image_preprocessing_dimens = (100, 100)
detection_threshold = 0.5
| 38.711538
| 141
| 0.694983
|
import cv2
import threading
import tensorflow as tf
import numpy as np
import time
import capturer
from utils.circularBuffer import CircularBuffer
labels = ['Left Turn', 'No Turn', 'Right Turn']
model_path = "./turn_classification/turn_classification_model_final_v1.h5"
readings_buffer_size = 20
image_preprocessing_dimens = (100, 100)
detection_threshold = 0.5
class TurnClassification:
def __init__(self):
self.model = tf.keras.models.load_model(model_path)
self.readings_buffer = CircularBuffer(readings_buffer_size, noneOverridePercent=0.5)
self.images_queue = CircularBuffer(1)
self.classifier_queue = CircularBuffer(1)
threading.Thread(target=self.classification_starter).start()
def capture_processing(self):
while True:
try:
frame = capturer.get_images().get_last()
if frame is not None:
preprocessed_frame = cv2.resize(frame, image_preprocessing_dimens, interpolation=cv2.INTER_LINEAR)
self.images_queue.add(np.expand_dims(preprocessed_frame, 0))
except Exception as e:
print("Capturing Not Working", e)
def classification_starter(self):
threading.Thread(target=self.capture_processing).start()
while True:
try:
self.perform_inference(self.images_queue.get_last())
except Exception as e:
print("Classification Not Working", e)
def perform_inference(self, image):
feedforward_result = self.model.predict(image).tolist()[0]
self.readings_buffer.add(None if feedforward_result is None or max(feedforward_result) < detection_threshold else feedforward_result)
averaged_result = self.readings_buffer.mean()
# print(averaged_result)
self.classifier_queue.add("No Turn" if averaged_result is None else labels[np.argmax(averaged_result)])
def get_inference(self):
return self.classifier_queue.get_last()
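The CircularBuffer utility imported above is not part of this file. The sketch below is a hypothetical implementation inferred from the calls made here (add, get_last, mean); in particular, the noneOverridePercent behaviour (mean() returns None when too large a share of buffered readings are None) is an assumption.
from collections import deque
import numpy as np
class CircularBuffer:
    # Hypothetical sketch of utils.circularBuffer.CircularBuffer; only the
    # methods used above are implemented, and their semantics are assumed.
    def __init__(self, size, noneOverridePercent=1.0):
        self.buffer = deque(maxlen=size)
        self.none_override_percent = noneOverridePercent
    def add(self, item):
        self.buffer.append(item)
    def get_last(self):
        return self.buffer[-1] if self.buffer else None
    def mean(self):
        values = [v for v in self.buffer if v is not None]
        if not values or len(values) < len(self.buffer) * self.none_override_percent:
            return None # too many missing (None) readings to trust an average
        return np.mean(values, axis=0) # element-wise mean of the score vectors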
| 1,487
| 4
| 158
|
8a3a89d4511059d142d7dd9786e728c20d3522fe
| 8,071
|
py
|
Python
|
ardana_service/admin.py
|
ArdanaCLM/ardana-service
|
680099101de36fce893ef6d138b75d778f92f182
|
[
"Apache-2.0"
] | 1
|
2017-07-20T01:04:21.000Z
|
2017-07-20T01:04:21.000Z
|
ardana_service/admin.py
|
GarySmith/ardana-service
|
680099101de36fce893ef6d138b75d778f92f182
|
[
"Apache-2.0"
] | null | null | null |
ardana_service/admin.py
|
GarySmith/ardana-service
|
680099101de36fce893ef6d138b75d778f92f182
|
[
"Apache-2.0"
] | 2
|
2017-08-02T17:18:47.000Z
|
2019-10-11T23:48:58.000Z
|
# (c) Copyright 2017-2019 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import abort
from flask import Blueprint
from flask import jsonify
from flask import request
from keystoneauth1 import exceptions as exc
from keystoneauth1 import session as ks_session
from keystoneclient.auth.identity import v3
from keystoneclient.v3 import client as ks_client
import logging
import os
from oslo_config import cfg
import pbr.version
import pwd
import threading
import time
from .util import ping
from . import config
from . import policy
bp = Blueprint('admin', __name__)
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
USER_AGENT = 'Installer UI'
@bp.route("/api/v2/version")
def version():
"""Returns the version of the service
.. :quickref: Admin; Returns the version of the service
**Example valid response**:
.. sourcecode:: http
HTTP/1.1 200 OK
0.0.1.dev16
"""
version_info = pbr.version.VersionInfo('ardana-service')
return version_info.version_string_with_vcs()
@bp.route("/api/v2/heartbeat")
def heartbeat():
"""Returns the epoch time
Simple API to verify that the service is up and responding. Returns
the number of seconds since 1970-01-01 00:00:00 GMT.
.. :quickref: Admin; Returns the epoch time
**Example valid response**:
.. sourcecode:: http
HTTP/1.1 200 OK
1502745650
"""
return jsonify(int(time.time()))
@bp.route("/api/v2/user")
@policy.enforce('lifecycle:get_user')
def user():
"""Returns the username the service is running under
.. :quickref: Admin; Returns the username the service is running under
**Example valid response**:
.. sourcecode:: http
HTTP/1.1 200 OK
{"username": "myusername"}
"""
user_dict = {'username': pwd.getpwuid(os.getuid()).pw_name}
return jsonify(user_dict)
@bp.route("/api/v2/restart", methods=['POST'])
@policy.enforce('lifecycle:restart')
def restart():
"""Requests the service to restart after a specified delay, in seconds
.. :quickref: Admin; Requests a service restart after a delay
**Example Request**:
.. sourcecode:: http
POST /api/v2/restart HTTP/1.1
Content-Type: application/json
{
"delay": 60
}
"""
info = request.get_json() or {}
delay_secs = int(info.get('delay', 0))
t = threading.Timer(delay_secs, update_trigger_file)
t.start()
return jsonify('Success')
@bp.route("/api/v2/login", methods=['POST'])
def login():
"""Authenticates with keystone and returns a token
.. :quickref: Admin; Authenticates with keystone
**Example Request**:
.. sourcecode:: http
POST /api/v2/login HTTP/1.1
Content-Type: application/json
{
"username": "admin",
"password": "secret"
}
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"token": "gAAAAABbEaruZDQGIH5KmKWHlDZIw7CLq",
"expires": "2018-06-01T21:22:06+00:00"
}
:status 200: successful authentication
:status 401: invalid credentials
:status 403: authentication not permitted, or user not authorized for any
projects
"""
if not config.requires_auth():
abort(403,
"authentication not permitted since service is in insecure mode")
info = request.get_json() or {}
username = info.get('username')
password = info.get('password')
user_domain_name = info.get('user_domain_name', 'Default')
token = _authenticate(CONF.keystone_authtoken.auth_url,
username,
password,
user_domain_name)
return jsonify(token)
def _authenticate(auth_url, username=None, password=None,
user_domain_name='Default'):
"""Authenticate with keystone
Creates an unscoped token using the given credentials (which validates
them), and then uses that token to get a project-scoped token.
"""
unscoped_auth = v3.Password(auth_url,
username=username,
password=password,
user_domain_name=user_domain_name,
unscoped=True)
session = ks_session.Session(user_agent=USER_AGENT,
verify=not CONF.keystone_authtoken.insecure)
try:
# Trigger keystone to verify the credentials
unscoped_auth_ref = unscoped_auth.get_access(session)
except exc.connection.ConnectFailure as e:
abort(503, str(e))
except exc.http.HttpError as e:
abort(e.http_status, e.message)
except exc.ClientException as e:
abort(401, str(e))
except Exception as e:
LOG.exception(e)
abort(500, "Unable to authenticate")
client = ks_client.Client(session=session,
auth=unscoped_auth,
user_agent=USER_AGENT)
auth_url = unscoped_auth.auth_url
projects = client.projects.list(user=unscoped_auth_ref.user_id)
# Filter out disabled projects
projects = [project for project in projects if project.enabled]
# Prioritize the admin project by putting it at the beginning of the list
for pos, project in enumerate(projects):
if project.name == 'admin':
projects.pop(pos)
projects.insert(0, project)
break
# Return the first project token that we have the admin role on, otherwise
# return the first project token we have any role on.
fallback_auth_ref = None
for project in projects:
auth = v3.Token(auth_url=auth_url,
token=unscoped_auth_ref.auth_token,
project_id=project.id,
reauthenticate=False)
try:
auth_ref = auth.get_access(session)
if 'admin' in auth_ref.role_names:
return {'token': auth_ref.auth_token,
'expires': auth_ref.expires.isoformat()}
elif not fallback_auth_ref:
fallback_auth_ref = auth_ref
except Exception as e:
pass
if fallback_auth_ref:
return {'token': fallback_auth_ref.auth_token,
'expires': fallback_auth_ref.expires.isoformat()}
# TODO(gary): Consider as a secondary fallback to return a domain-scoped
# token
abort(403, "Not authorized for any project")
@bp.route("/api/v2/is_secured")
def get_secured():
"""Returns whether authentication is required
Returns a json object indicating whether the service is configured to
enforce authentication
.. :quickref: Model; Returns whether authentication is required
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"isSecured": false
}
:status 200: success
"""
return jsonify({'isSecured': config.requires_auth()})
@bp.route("/api/v2/connection_test", methods=['POST'])
| 26.813953
| 79
| 0.638087
|
# (c) Copyright 2017-2019 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import abort
from flask import Blueprint
from flask import jsonify
from flask import request
from keystoneauth1 import exceptions as exc
from keystoneauth1 import session as ks_session
from keystoneclient.auth.identity import v3
from keystoneclient.v3 import client as ks_client
import logging
import os
from oslo_config import cfg
import pbr.version
import pwd
import threading
import time
from .util import ping
from . import config
from . import policy
bp = Blueprint('admin', __name__)
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
USER_AGENT = 'Installer UI'
@bp.route("/api/v2/version")
def version():
"""Returns the version of the service
.. :quickref: Admin; Returns the version of the service
**Example valid response**:
.. sourcecode:: http
HTTP/1.1 200 OK
0.0.1.dev16
"""
version_info = pbr.version.VersionInfo('ardana-service')
return version_info.version_string_with_vcs()
@bp.route("/api/v2/heartbeat")
def heartbeat():
"""Returns the epoch time
Simple API to verify that the service is up and responding. Returns
the number of seconds since 1970-01-01 00:00:00 GMT.
.. :quickref: Admin; Returns the epoch time
**Example valid response**:
.. sourcecode:: http
HTTP/1.1 200 OK
1502745650
"""
return jsonify(int(time.time()))
@bp.route("/api/v2/user")
@policy.enforce('lifecycle:get_user')
def user():
"""Returns the username the service is running under
.. :quickref: Admin; Returns the username the service is running under
**Example valid response**:
.. sourcecode:: http
HTTP/1.1 200 OK
{"username": "myusername"}
"""
user_dict = {'username': pwd.getpwuid(os.getuid()).pw_name}
return jsonify(user_dict)
def update_trigger_file():
trigger_file = os.path.join(CONF.paths.log_dir, 'trigger.txt')
with open(trigger_file, 'w') as f:
f.write("Triggered restart at %s\n" % time.asctime())
@bp.route("/api/v2/restart", methods=['POST'])
@policy.enforce('lifecycle:restart')
def restart():
"""Requests the service to restart after a specified delay, in seconds
.. :quickref: Admin; Requests a service restart after a delay
**Example Request**:
.. sourcecode:: http
POST /api/v2/restart HTTP/1.1
Content-Type: application/json
{
"delay": 60
}
"""
info = request.get_json() or {}
delay_secs = int(info.get('delay', 0))
t = threading.Timer(delay_secs, update_trigger_file)
t.start()
return jsonify('Success')
@bp.route("/api/v2/login", methods=['POST'])
def login():
"""Authenticates with keystone and returns a token
.. :quickref: Admin; Authenticates with keystone
**Example Request**:
.. sourcecode:: http
POST /api/v2/login HTTP/1.1
Content-Type: application/json
{
"username": "admin",
"password": "secret"
}
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"token": "gAAAAABbEaruZDQGIH5KmKWHlDZIw7CLq",
"expires": "2018-06-01T21:22:06+00:00"
}
:status 200: successful authentication
:status 401: invalid credentials
:status 403: authentication not permitted, or user not authorized for any
projects
"""
if not config.requires_auth():
abort(403,
"authentication not permitted since service is in insecure mode")
info = request.get_json() or {}
username = info.get('username')
password = info.get('password')
user_domain_name = info.get('user_domain_name', 'Default')
token = _authenticate(CONF.keystone_authtoken.auth_url,
username,
password,
user_domain_name)
return jsonify(token)
def _authenticate(auth_url, username=None, password=None,
user_domain_name='Default'):
"""Authenticate with keystone
Creates an unscoped token using the given credentials (which validates
them), and then uses that token to get a project-scoped token.
"""
unscoped_auth = v3.Password(auth_url,
username=username,
password=password,
user_domain_name=user_domain_name,
unscoped=True)
session = ks_session.Session(user_agent=USER_AGENT,
verify=not CONF.keystone_authtoken.insecure)
try:
# Trigger keystone to verify the credentials
unscoped_auth_ref = unscoped_auth.get_access(session)
except exc.connection.ConnectFailure as e:
abort(503, str(e))
except exc.http.HttpError as e:
abort(e.http_status, e.message)
except exc.ClientException as e:
abort(401, str(e))
except Exception as e:
LOG.exception(e)
abort(500, "Unable to authenticate")
client = ks_client.Client(session=session,
auth=unscoped_auth,
user_agent=USER_AGENT)
auth_url = unscoped_auth.auth_url
projects = client.projects.list(user=unscoped_auth_ref.user_id)
# Filter out disabled projects
projects = [project for project in projects if project.enabled]
# Prioritize the admin project by putting it at the beginning of the list
for pos, project in enumerate(projects):
if project.name == 'admin':
projects.pop(pos)
projects.insert(0, project)
break
# Return the first project token that we have the admin role on, otherwise
# return the first project token we have any role on.
fallback_auth_ref = None
for project in projects:
auth = v3.Token(auth_url=auth_url,
token=unscoped_auth_ref.auth_token,
project_id=project.id,
reauthenticate=False)
try:
auth_ref = auth.get_access(session)
if 'admin' in auth_ref.role_names:
return {'token': auth_ref.auth_token,
'expires': auth_ref.expires.isoformat()}
elif not fallback_auth_ref:
fallback_auth_ref = auth_ref
except Exception as e:
pass
if fallback_auth_ref:
return {'token': fallback_auth_ref.auth_token,
'expires': fallback_auth_ref.expires.isoformat()}
# TODO(gary): Consider as a secondary fallback to return a domain-scoped
# token
abort(403, "Not authorized for any project")
@bp.route("/api/v2/is_secured")
def get_secured():
"""Returns whether authentication is required
Returns a json object indicating whether the service is configured to
enforce authentication
.. :quickref: Model; Returns whether authentication is required
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"isSecured": false
}
:status 200: success
"""
return jsonify({'isSecured': config.requires_auth()})
@bp.route("/api/v2/connection_test", methods=['POST'])
def connection_test():
body = request.get_json() or {}
host = body['host']
try:
ping(host, 22)
return jsonify('Success')
except Exception as e:
return jsonify(error=str(e)), 404
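A quick client-side smoke test of the endpoints above might look like this; the base URL and the credentials are illustrative assumptions, echoing the example request in the login docstring.
import requests
base = "http://localhost:9085" # assumed host/port for the service
print(requests.get(base + "/api/v2/heartbeat").json()) # seconds since epoch
resp = requests.post(base + "/api/v2/login",
                     json={"username": "admin", "password": "secret"})
if resp.ok:
    print("token expires:", resp.json()["expires"]) # project-scoped token payload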
| 369
| 0
| 45
|
3a3491b43a5a2b9285bd18407b12c00d9f4f6e46
| 8,549
|
py
|
Python
|
src/generate.py
|
josephnavarro/ascii-art-generator
|
9317aaff92b58ab68a9598fdb89c95699bea4927
|
[
"MIT"
] | null | null | null |
src/generate.py
|
josephnavarro/ascii-art-generator
|
9317aaff92b58ab68a9598fdb89c95699bea4927
|
[
"MIT"
] | null | null | null |
src/generate.py
|
josephnavarro/ascii-art-generator
|
9317aaff92b58ab68a9598fdb89c95699bea4927
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
:
: Utility for generating ASCII art given an input image.
:
:
"""
from src.ascii import *
from src.luminosity import *
UPPER = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
LOWER = "abcdefghijklmnopqrstuvwxyz"
DIGIT = "0123456789"
SYMBOL = "~!@#$%^&*()_+`-=[]{}|\\:;\"'<>?,./"
#CHARACTERS = UPPER + LOWER + DIGIT + SYMBOL
CHARACTERS = SYMBOL
ITER_WIDTH = 4
ITER_HEIGHT = 2
FONT_SIZE = 99
INVERT = True
def fn_GetNumValues(d_Dict: dict) -> int:
"""
:
: Gets the total number of unique values from the given mapping.
:
:
: Args:
: dict d_Dict : Input mapping whose unique values are counted
:
: Returns:
: Number of unique values
:
:
"""
return len(list(set(d_Dict.values())))
def fn_MapRange(l_List: iter, i_Range: int) -> dict:
"""
:
: Evenly assigns a numerical value within a given range to each element of a list.
:
:
: Args:
: iter l_List : List of items that will be mapped as values in the output dictionary
: int i_Range : Maximum value for the range of numbers that will be mapped as keys in the output
:
: Returns:
: Dictionary containing the above mapping
:
:
"""
# Set up local containers
# ...
i_LenList = len(l_List) # type: int
i_M = 0 # type: int
f_Step = i_LenList / i_Range # type: float
f_N = 0 # type: float
d_Output = {} # type: dict
# Step through list and populate output dictionary evenly
# ...
while f_N < i_LenList:
try:
i_Index = int(round(f_N))
o_Item = l_List[i_Index]
d_Output[i_M] = o_Item
f_N += f_Step
i_M += 1
except IndexError:
break
return d_Output
def fn_CrossChain(d_Dict1: dict, d_Dict2: dict, b_Default: bool = False) -> dict:
"""
:
: Maps keys in the first dictionary to values in the second dictionary. (In other words, the first dictionary's
: keys will be mapped to the second dictionary's values, for each value in the first that's a key in the second).
:
: In case of hash misses in the second dictionary, supplying a "default" value may be enabled. If enabled, the
: default value will be taken as the "last" element of the second dictionary (that is, as though its entries
: were sorted in order of its keys).
:
:
: Args:
: dict d_Dict1 : Dictionary whose keys are to become keys in the output dictionary
: dict d_Dict2 : Dictionary whose values are to become values in the output dictionary
: bool b_Default : Whether to use a fallback value in place of "missing" values (default False)
:
: Returns:
: Dictionary containing keys from the first dictionary mapped to values from the second dictionary
:
:
"""
d_Output = {}
# Supply default value if needed
# ...
if b_Default:
v_Default = fn_SortByKey(d_Dict2)[-1]
else:
v_Default = None
# Remap keys(1) to values(2)
# ...
for k1, v1 in d_Dict1.items():
if v1 in d_Dict2:
d_Output[k1] = d_Dict2[v1]
elif b_Default:
d_Output[k1] = v_Default
return d_Output
def fn_ParallelChain(d_Dict1: dict, d_Dict2: dict, b_Default: bool = False) -> dict:
"""
:
: Maps values in the first dictionary to values in the second dictionary. (In other words, the first dictionary's
: values will be mapped to the second dictionary's values, for each key in the first that's also a key in the
: second).
:
: In case of hash misses in the second dictionary, supplying a "default" value may be enabled. If enabled, the
: default value will be taken as the "last" element of the second dictionary (that is, as though its entries were
: sorted in order of its keys).
:
:
: Args:
: dict d_Dict1 : Dictionary whose values are to become keys in the output dictionary
: dict d_Dict2 : Dictionary whose values are to become values in the output dictionary
: bool b_Default : Whether to use a fallback value in place of "missing" values (default False)
:
: Returns:
: Dictionary containing values from the first dictionary mapped to values from the second dictionary
:
:
"""
d_Output = {}
# Supply default value if needed
# ...
if b_Default:
v_Default = fn_SortByKey(d_Dict2)[-1]
else:
v_Default = None
# Remap values(1) to values(2)
# ...
for k1, v1 in d_Dict1.items():
if k1 in d_Dict2:
d_Output[v1] = d_Dict2[k1]
elif b_Default:
d_Output[v1] = v_Default
return d_Output
def fn_SortByKey(d_Dict: dict) -> list:
"""
:
: Returns a list of values in a dictionary in order of their keys.
:
:
: Args:
: dict d_Dict : Dictionary whose values are to be returned in key order
:
: Returns:
: List of values sorted by key
:
:
"""
return [y[1] for y in sorted([(k, v) for k, v in d_Dict.items()], key=lambda x: x[0])]
def fn_GenerateAscii(d_CoordToChar: dict) -> str:
"""
:
: Generates an ASCII image string given characters mapped to (relative) rendering coordinates.
:
:
: Args:
: dict d_CoordToChar : Mapping from 2-tuple coordinates to string characters
:
: Returns:
: ASCII image (or any other encoding really), separated by newlines
:
:
"""
d_Ascii = {} # type: dict
s_Output = '' # type: str
# Map images to x-y coordinates, splitting x- and y- into their own sub-dictionaries
# ...
for i2_Coord, s_Char in d_CoordToChar.items():
i_X, i_Y = i2_Coord
if i_X not in d_Ascii:
d_Ascii[i_X] = {}
if i_Y not in d_Ascii[i_X]:
d_Ascii[i_X][i_Y] = s_Char
# Sort the entries for each "row" of ASCII characters in the dictionary, then join them and concatenate the
# resulting string
# ...
for i_X, d_X in d_Ascii.items():
s_Output += ''.join(fn_SortByKey(d_X))
s_Output += '\n'
return s_Output
def fn_ProcessImage(
s_FontFilename: str,
s_ImageFilename: str,
s_Output: str = '',
s_CharacterSet: str = CHARACTERS,
b_Invert: bool = INVERT,
i_Size: int = FONT_SIZE,
i_IterWidth: int = ITER_WIDTH,
i_IterHeight: int = ITER_HEIGHT,
):
"""
:
: Loads an image and converts it to ASCII art, then prints it out.
:
:
"""
# Load font and sort glyphs in order of luminosity
# ...
o_FreetypeFace = fn_LoadFont(s_FontFilename, i_Size) # type: freetype.Face
s_Characters = fn_SortGlyphs(o_FreetypeFace, s_CharacterSet, b_Invert) # type: str
# Load image and profile it for luminosity
# ...
o_Image = fn_LoadImage(s_ImageFilename) # type: Image
d_CoordToImage = fn_Iterate2D(o_Image, i_IterWidth, i_IterHeight) # type: dict
d_CoordToLum = fn_MapLuminosity2D(d_CoordToImage) # type: dict
i_NumLuminosity = fn_GetNumValues(d_CoordToLum) # type: int
l_Luminosity = list(set(d_CoordToLum.values())) # type: list
# Relay a series of associative mappings, ending with characters mapped under relative coordinates
# ...
d_CharRange = fn_MapRange(s_Characters, i_NumLuminosity) # type: dict
d_LumRange = fn_MapRange(l_Luminosity, i_NumLuminosity) # type: dict
d_LumToChar = fn_ParallelChain(d_LumRange, d_CharRange) # type: dict
d_ImageToLum = fn_ParallelChain(d_CoordToImage, d_CoordToLum) # type: dict
d_ImageToChar = fn_CrossChain(d_ImageToLum, d_LumToChar, True) # type: dict
d_CoordToChar = fn_CrossChain(d_CoordToImage, d_ImageToChar) # type: dict
# Generate ASCII image using coordinates -> characters
# ...
s_Out = fn_GenerateAscii(d_CoordToChar)
# If output filename specified, save file there; otherwise, just print it out.
# ...
if s_Output:
with open(s_Output, 'w') as f:
f.write(s_Out)
print("Wrote to output file: {0}".format(s_Output))
else:
print()
print(s_Out)
if __name__ == "__main__":
fn_ProcessImage("../res/arial.ttf", "../res/nagatoro.png")
| 30.208481
| 118
| 0.601474
|
#!/usr/bin/env python3
"""
:
: Utility for generating ASCII art given an input image.
:
:
"""
from src.ascii import *
from src.luminosity import *
UPPER = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
LOWER = "abcdefghijklmnopqrstuvwxyz"
DIGIT = "0123456789"
SYMBOL = "~!@#$%^&*()_+`-=[]{}|\\:;\"'<>?,./"
#CHARACTERS = UPPER + LOWER + DIGIT + SYMBOL
CHARACTERS = SYMBOL
ITER_WIDTH = 4
ITER_HEIGHT = 2
FONT_SIZE = 99
INVERT = True
def fn_GetNumValues(d_Dict: dict) -> int:
"""
:
: Gets the total number of unique values from the given mapping.
:
:
: Args:
: dict d_Dict : Input mapping whose unique values are counted
:
: Returns:
: Number of unique values
:
:
"""
return len(list(set(d_Dict.values())))
def fn_MapRange(l_List: iter, i_Range: int) -> dict:
"""
:
: Evenly assigns a numerical value within a given range to each element of a list.
:
:
: Args:
: iter l_List : List of items that will be mapped as values in the output dictionary
: int i_Range : Maximum value for the range of numbers that will be mapped as keys in the output
:
: Returns:
: Dictionary containing the above mapping
:
:
"""
# Set up local containers
# ...
i_LenList = len(l_List) # type: int
i_M = 0 # type: int
f_Step = i_LenList / i_Range # type: float
f_N = 0 # type: float
d_Output = {} # type: dict
# Step through list and populate output dictionary evenly
# ...
while f_N < i_LenList:
try:
i_Index = int(round(f_N))
o_Item = l_List[i_Index]
d_Output[i_M] = o_Item
f_N += f_Step
i_M += 1
except IndexError:
break
return d_Output
def fn_CrossChain(d_Dict1: dict, d_Dict2: dict, b_Default: bool = False) -> dict:
"""
:
: Maps keys in the first dictionary to values in the second dictionary. (In other words, the first dictionary's
: keys will be mapped to the second dictionary's values, for each value in the first that's a key in the second).
:
: In case of hash misses in the second dictionary, supplying a "default" value may be enabled. If enabled, the
: default value will be taken as the "last" element of the second dictionary (that is, as though its entries
: were sorted in order of its keys).
:
:
: Args:
: dict d_Dict1 : Dictionary whose keys are to become keys in the output dictionary
: dict d_Dict2 : Dictionary whose values are to become values in the output dictionary
: bool b_Default : Whether to use a fallback value in place of "missing" values (default False)
:
: Returns:
: Dictionary containing keys from the first dictionary mapped to values from the second dictionary
:
:
"""
d_Output = {}
# Supply default value if needed
# ...
if b_Default:
v_Default = fn_SortByKey(d_Dict2)[-1]
else:
v_Default = None
# Remap keys(1) to values(2)
# ...
for k1, v1 in d_Dict1.items():
if v1 in d_Dict2:
d_Output[k1] = d_Dict2[v1]
elif b_Default:
d_Output[k1] = v_Default
return d_Output
def fn_ParallelChain(d_Dict1: dict, d_Dict2: dict, b_Default: bool = False) -> dict:
"""
:
: Maps values in the first dictionary to values in the second dictionary. (In other words, the first dictionary's
: values will be mapped to the second dictionary's values, for each key in the first that's also a key in the
: second).
:
: In case of hash misses in the second dictionary, supplying a "default" value may be enabled. If enabled, the
: default value will be taken as the "last" element of the second dictionary (that is, as though its entries were
: sorted in order of its keys).
:
:
: Args:
: dict d_Dict1 : Dictionary whose values are to become keys in the output dictionary
: dict d_Dict2 : Dictionary whose values are to become values in the output dictionary
: bool b_Default : Whether to use a fallback value in place of "missing" values (default False)
:
: Returns:
: Dictionary containing values from the first dictionary mapped to values from the second dictionary
:
:
"""
d_Output = {}
# Supply default value if needed
# ...
if b_Default:
v_Default = fn_SortByKey(d_Dict2)[-1]
else:
v_Default = None
# Remap values(1) to values(2)
# ...
for k1, v1 in d_Dict1.items():
if k1 in d_Dict2:
d_Output[v1] = d_Dict2[k1]
elif b_Default:
d_Output[v1] = v_Default
return d_Output
def fn_SortByKey(d_Dict: dict) -> list:
"""
:
: Returns a list of values in a dictionary in order of their keys.
:
:
: Args:
: dict d_Dict : Dictionary whose values are to be returned in key order
:
: Returns:
: List of values sorted by key
:
:
"""
return [y[1] for y in sorted([(k, v) for k, v in d_Dict.items()], key=lambda x: x[0])]
def fn_GenerateAscii(d_CoordToChar: dict) -> str:
"""
:
: Generates an ASCII image string given characters mapped to (relative) rendering coordinates.
:
:
: Args:
: dict d_CoordToChar : Mapping from 2-tuple coordinates to string characters
:
: Returns:
: ASCII image (or any other encoding really), separated by newlines
:
:
"""
d_Ascii = {} # type: dict
s_Output = '' # type: str
# Map images to x-y coordinates, splitting x- and y- into their own sub-dictionaries
# ...
for i2_Coord, s_Char in d_CoordToChar.items():
i_X, i_Y = i2_Coord
if i_X not in d_Ascii:
d_Ascii[i_X] = {}
if i_Y not in d_Ascii[i_X]:
d_Ascii[i_X][i_Y] = s_Char
# Sort the entries for each "row" of ASCII characters in the dictionary, then join them and concatenate the
# resulting string
# ...
for i_X, d_X in d_Ascii.items():
s_Output += ''.join(fn_SortByKey(d_X))
s_Output += '\n'
return s_Output
def fn_ProcessImage(
s_FontFilename: str,
s_ImageFilename: str,
s_Output: str = '',
s_CharacterSet: str = CHARACTERS,
b_Invert: bool = INVERT,
i_Size: int = FONT_SIZE,
i_IterWidth: int = ITER_WIDTH,
i_IterHeight: int = ITER_HEIGHT,
):
"""
:
: Loads an image and converts it to ASCII art, then prints it out.
:
:
"""
# Load font and sort glyphs in order of luminosity
# ...
o_FreetypeFace = fn_LoadFont(s_FontFilename, i_Size) # type: freetype.Face
s_Characters = fn_SortGlyphs(o_FreetypeFace, s_CharacterSet, b_Invert) # type: str
# Load image and profile it for luminosity
# ...
o_Image = fn_LoadImage(s_ImageFilename) # type: Image
d_CoordToImage = fn_Iterate2D(o_Image, i_IterWidth, i_IterHeight) # type: dict
d_CoordToLum = fn_MapLuminosity2D(d_CoordToImage) # type: dict
i_NumLuminosity = fn_GetNumValues(d_CoordToLum) # type: int
l_Luminosity = list(set(d_CoordToLum.values())) # type: list
# Relay a series of associative mappings, ending with characters mapped under relative coordinates
# ...
d_CharRange = fn_MapRange(s_Characters, i_NumLuminosity) # type: dict
d_LumRange = fn_MapRange(l_Luminosity, i_NumLuminosity) # type: dict
d_LumToChar = fn_ParallelChain(d_LumRange, d_CharRange) # type: dict
d_ImageToLum = fn_ParallelChain(d_CoordToImage, d_CoordToLum) # type: dict
d_ImageToChar = fn_CrossChain(d_ImageToLum, d_LumToChar, True) # type: dict
d_CoordToChar = fn_CrossChain(d_CoordToImage, d_ImageToChar) # type: dict
# Generate ASCII image using coordinates -> characters
# ...
s_Out = fn_GenerateAscii(d_CoordToChar)
# If output filename specified, save file there; otherwise, just print it out.
# ...
if s_Output:
with open(s_Output, 'w') as f:
f.write(s_Out)
print("Wrote to output file: {0}".format(s_Output))
else:
print()
print(s_Out)
if __name__ == "__main__":
fn_ProcessImage("../res/arial.ttf", "../res/nagatoro.png")
| 0
| 0
| 0
|
8feb772e81d2e7de2f57a541d565f8376b286c08
| 11,234
|
py
|
Python
|
app/admin.py
|
sedrof/not-phs
|
8f4031e220a7690c5304cebe310e3ef14d8e7424
|
[
"MIT"
] | null | null | null |
app/admin.py
|
sedrof/not-phs
|
8f4031e220a7690c5304cebe310e3ef14d8e7424
|
[
"MIT"
] | null | null | null |
app/admin.py
|
sedrof/not-phs
|
8f4031e220a7690c5304cebe310e3ef14d8e7424
|
[
"MIT"
] | null | null | null |
from import_export.admin import ImportExportModelAdmin, ExportMixin
from django.contrib import admin
from master_data.models import *
from centrelink.models import *
from calculator.models import *
from imports.models import *
from django.utils.safestring import mark_safe
from json2html import *
from django.contrib.auth.models import Group, User
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from import_export import resources
import nested_admin
admin.site.unregister(Group)
# Register the new Group ModelAdmin.
admin.site.register(Group, GroupAdmin)
admin.site.site_header = 'CRA Calculator'
admin.site.index_title = 'CRA Calculator'
admin.site.site_title = 'CRA Calculator'
admin.site.register(Transaction, TransactionAdmin)
@ admin.register(
FamilySituation,
MaintenanceType,
Relationship,
)
@ admin.register(RentAssessmentRate)
@ admin.register(CraRate)
@ admin.register(FtbRate)
@ admin.register(MaintenanceIncomeTestRate)
@ admin.register(
FamilySituationRate,
FtbAMaximumPayment,
MaintenanceTypeRate,
)
@ admin.register(Batch)
# from django_otp import OTP_HOTP
# admin.site.unregister(OTP_HOTP)
| 32.847953
| 129
| 0.60673
|
from import_export.admin import ImportExportModelAdmin, ExportMixin
from django.contrib import admin
from master_data.models import *
from centrelink.models import *
from calculator.models import *
from imports.models import *
from django.utils.safestring import mark_safe
from json2html import *
from django.contrib.auth.models import Group, User
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from import_export import resources
import nested_admin
class GroupAdminForm(forms.ModelForm):
class Meta:
model = Group
exclude = []
# Add the users field.
users = forms.ModelMultipleChoiceField(
queryset=User.objects.all(),
required=False,
widget=FilteredSelectMultiple('users', False)
)
def __init__(self, *args, **kwargs):
# Do the normal form initialisation.
super(GroupAdminForm, self).__init__(*args, **kwargs)
# If it is an existing group (saved objects have a pk).
if self.instance.pk:
# Populate the users field with the current Group users.
self.fields['users'].initial = self.instance.user_set.all()
def save_m2m(self):
# Add the users to the Group.
self.instance.user_set.set(self.cleaned_data['users'])
def save(self, *args, **kwargs):
# Default save
instance = super(GroupAdminForm, self).save()
# Save many-to-many data
self.save_m2m()
return instance
admin.site.unregister(Group)
class GroupAdmin(admin.ModelAdmin):
# Use our custom form.
form = GroupAdminForm
# Filter permissions horizontal as well.
filter_horizontal = ['permissions']
# Register the new Group ModelAdmin.
admin.site.register(Group, GroupAdmin)
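For context, the user sync that GroupAdminForm.save_m2m performs when the admin form is saved is equivalent to the following ORM calls; the group name and the user filter are illustrative, and this assumes a configured Django project.
from django.contrib.auth.models import Group, User
group, _ = Group.objects.get_or_create(name="assessors") # illustrative name
group.user_set.set(User.objects.filter(is_staff=True)) # mirrors save_m2m above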
admin.site.site_header = 'CRA Calculator'
admin.site.index_title = 'CRA Calculator'
admin.site.site_title = 'CRA Calculator'
class FamilyMemberInline(nested_admin.NestedTabularInline):
model = FamilyMember
extra = 0
max_num = 5
def sufficient_info_provided(self, obj):
return obj.sufficient_information_provided
sufficient_info_provided.boolean = True
def age(self, obj):
return obj.age
fieldsets = (
('FamilyMember Details', {
'fields': ('name', 'date_of_birth', 'age', 'income',
'relationship', 'rent_percentage', 'care_percentage', 'sufficient_info_provided'),
}),)
readonly_fields = ['sufficient_info_provided', 'age' ]
class FamilyGroupInline(nested_admin.NestedTabularInline):
model = FamilyGroup
inlines = [FamilyMemberInline,]
extra = 0
def sufficient_info_provided(self, obj):
return obj.sufficient_information_provided
sufficient_info_provided.boolean = True
fieldsets = (
('FamilyGroup Details', {
'fields': ('name', 'family_type', 'last_rent', 'any_income_support_payment',
'cra_eligibilty', 'cra_amount', 'ftb_a', 'ftb_b', 'maintenance_amount',
'maintenance_type', 'number_of_additional_children', 'sufficient_info_provided'),
}),)
readonly_fields = ['sufficient_info_provided' ]
class TransactionAdmin(nested_admin.SortableHiddenMixin, nested_admin.NestedModelAdmin):
inlines = [FamilyGroupInline,]
def save_model(self, request, obj, form, change):
obj.user = request.user
super().save_model(request, obj, form, change)
def get_queryset(self, request):
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
# return qs.filter(user__groups__user=request.user)
return qs.filter(user__groups=request.user.groups.first())
def complete(self, obj):
return obj.complete
complete.boolean = True
def last_rent(self, obj):
if obj.complete:
return obj.last_rent
return None
def household_rent(self, obj):
if obj.complete:
return "%.2f" % float(obj.household_rent)
return None
def report(self, obj):
if obj.complete:
return mark_safe(json2html.convert(json=obj.report, table_attributes="class=\"results\" style=\"overflow-x:auto;\""))
# return 0
def export_csv(modeladmin, request, queryset):
import csv
from django.utils.encoding import smart_str
from django.http import HttpResponse
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=Report.csv'
writer = csv.writer(response, csv.excel)
response.write(u'\ufeff'.encode('utf8'))
writer.writerow([
smart_str(u"chp_referene"),
smart_str(u"Name"),
smart_str(u"contact_id"),
smart_str(u"date_of_birth"),
smart_str(u"age"),
smart_str(u"Relationship"),
smart_str(u"Income"),
smart_str(u"property_market_rent"),
smart_str(u"FG_Name"),
smart_str(u"last_rent"),
smart_str(u"Family_type"),
smart_str(u"any_income_support_payment"),
smart_str(u"cra_eligibilty"),
smart_str(u"cra_amount"),
smart_str(u"ftb_a"),
smart_str(u"ftb_b"),
smart_str(u"maintenance_amount"),
smart_str(u"maintenance_type"),
smart_str(u"maintenance_amount"),
smart_str(u"number_of_additional_children"),
smart_str(u"rent_effective_date"),
smart_str(u"income_period"),
smart_str(u"number_of_family_group"),
smart_str(u"cruser"),
smart_str(u"prop_id"),
smart_str(u"state"),
smart_str(u"CRA"),
smart_str(u"household_rent"),
])
for obj in queryset:
for fg in obj.family_groups.all():
for fm in fg.family_members.all():
if obj.complete:
writer.writerow([
smart_str(obj.chp_reference),
smart_str(fm.name),
smart_str(fm.contact_id),
smart_str(fm.date_of_birth),
smart_str(fm.age),
smart_str(fm.relationship),
smart_str(fm.income),
smart_str(obj.property_market_rent),
smart_str(fm.family_group),
smart_str(fm.family_group.last_rent),
smart_str(fm.family_group.family_type),
smart_str(fm.family_group.any_income_support_payment),
smart_str(fm.family_group.cra_eligibilty),
smart_str(fm.family_group.cra_amount),
smart_str(fm.family_group.ftb_a),
smart_str(fm.family_group.ftb_b),
smart_str(fm.family_group.maintenance_amount),
smart_str(fm.family_group.maintenance_type),
smart_str(fm.family_group.number_of_additional_children),
smart_str(obj.rent_effective_date),
smart_str(obj.income_period),
smart_str(obj.number_of_family_group),
smart_str(obj.cruser),
smart_str(obj.prop_id),
smart_str(obj.state),
smart_str(obj.cra_compon),
smart_str(obj.household_rent),
])
elif not obj.complete:
writer.writerow([
smart_str(obj.chp_reference),
smart_str('ERROR' ),
smart_str('IN THE'),
smart_str('INPUTS'),
smart_str('PLEASE GO'),
smart_str('BACK TO THE'),
smart_str('APP AND '),
smart_str('CORRECT IT'),
])
writer.writerows([])
return response
export_csv.short_description = u"Export CSV"
fieldsets = (
(None, {
'fields': ('complete',),
}),
('Transaction Details', {
'fields': ('chp_reference', 'income_period', 'property_market_rent', 'rent_effective_date',
'number_of_family_group','state',),
}),
('Result', {
'classes': ('collapse',),
'fields': ('report', 'print_report'),
}),
)
readonly_fields = ['report', 'complete', 'last_rent', 'print_report']
list_display = ['chp_reference', 'complete', 'income_period', 'last_rent',
'household_rent', 'property_market_rent', 'rent_effective_date', 'print_report',]
display_text = ['Result']
search_fields = ['chp_reference', 'user__username']
actions = [export_csv,]
max_num = 5
admin.site.register(Transaction, TransactionAdmin)
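# Usage note (a sketch, not from the original): as an admin action, export_csv
# is invoked by Django with (modeladmin, request, queryset); selecting rows on
# the Transaction changelist and choosing "Export CSV" is roughly equivalent to
#
#     export_csv(transaction_admin, request, Transaction.objects.filter(pk__in=ids))
#
# where transaction_admin and ids are hypothetical names for this example.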
class FamilySituationRateInline(admin.TabularInline):
model = FamilySituationRate
extra = 0
class FtbAMaximumPaymentInline(admin.TabularInline):
model = FtbAMaximumPayment
extra = 0
class MaintenanceTypeRateInline(admin.TabularInline):
model = MaintenanceTypeRate
extra = 0
class TransactionResource(resources.ModelResource):
class Meta:
model = Transaction
@admin.register(
FamilySituation,
MaintenanceType,
Relationship,
)
class CraAdmin(ImportExportModelAdmin, admin.ModelAdmin):
pass
@admin.register(RentAssessmentRate)
class RentAssessmentRateAdmin(ImportExportModelAdmin, admin.ModelAdmin):
list_editable = ['cra', 'ftb', 'maintenance', 'active']
list_display = ['name', 'cra', 'ftb', 'maintenance', 'active']
@admin.register(CraRate)
class CraRateAdmin(ImportExportModelAdmin, admin.ModelAdmin):
inlines = [FamilySituationRateInline, ]
@admin.register(FtbRate)
class FtbRateAdmin(ImportExportModelAdmin, admin.ModelAdmin):
inlines = [FtbAMaximumPaymentInline, ]
@admin.register(MaintenanceIncomeTestRate)
class MaintenanceIncomeTestRateAdmin(ImportExportModelAdmin, admin.ModelAdmin):
inlines = [MaintenanceTypeRateInline, ]
@admin.register(
FamilySituationRate,
FtbAMaximumPayment,
MaintenanceTypeRate,
)
class HiddenRateAdmin(ImportExportModelAdmin, admin.ModelAdmin):
    # Hide these rate models from the admin index; they are edited via inlines.
    def get_model_perms(self, request):
        return {}
@admin.register(Batch)
class BatchAdmin(ImportExportModelAdmin):
    def has_change_permission(self, request, obj=None):
        return False
    def has_add_permission(self, request, obj=None):
        return False
# list_display = ['batch', 'transaction_chp_reference',
# 'family_group_name', 'family_member_name', 'message']
list_display = ['batch', 'transaction_chp_reference', 'message',]
# from django_otp import OTP_HOTP
# admin.site.unregister(OTP_HOTP)
| 5,757
| 3,877
| 387
|
2ed8a4a9bb1c53941ec398bf527dee4dcaca6772
| 686
|
py
|
Python
|
skyportal/handlers/api/internal/instrument_observation_params.py
|
jialin-wu-02/skyportal
|
29d606ad8567b2230fb0553b18dd3cb9d3ab2d84
|
[
"BSD-3-Clause"
] | null | null | null |
skyportal/handlers/api/internal/instrument_observation_params.py
|
jialin-wu-02/skyportal
|
29d606ad8567b2230fb0553b18dd3cb9d3ab2d84
|
[
"BSD-3-Clause"
] | 156
|
2019-10-17T19:35:22.000Z
|
2021-08-01T13:23:47.000Z
|
skyportal/handlers/api/internal/instrument_observation_params.py
|
jialin-wu-02/skyportal
|
29d606ad8567b2230fb0553b18dd3cb9d3ab2d84
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from baselayer.app.access import auth_or_token
from ...base import BaseHandler
| 38.111111
| 89
| 0.677843
|
import json
from baselayer.app.access import auth_or_token
from ...base import BaseHandler
class InstrumentObservationParamsHandler(BaseHandler):
@auth_or_token
def get(self):
try:
with open("instrument_observation_params.json") as f:
instrument_data = json.load(f)
except FileNotFoundError:
return self.error("Instrument observation parameters file does not exist.")
except json.JSONDecodeError:
return self.error("JSON parse error: instrument observation parameters file "
"does not contain properly formatted JSON.")
return self.success(data=instrument_data)
| 493
| 78
| 23
|
951b82e5411a16957eaeb5663362a43cd0c01672
| 1,555
|
py
|
Python
|
src/data/parse.py
|
samatix/ml-asset-managers
|
27c9c0b3f67fd0350e80c5fb2729e64a13dccbb8
|
[
"Apache-2.0"
] | 2
|
2022-01-01T11:06:22.000Z
|
2022-02-19T03:19:18.000Z
|
src/data/parse.py
|
samatix/ml-asset-managers
|
27c9c0b3f67fd0350e80c5fb2729e64a13dccbb8
|
[
"Apache-2.0"
] | null | null | null |
src/data/parse.py
|
samatix/ml-asset-managers
|
27c9c0b3f67fd0350e80c5fb2729e64a13dccbb8
|
[
"Apache-2.0"
] | 2
|
2020-08-15T05:38:49.000Z
|
2022-03-05T07:31:11.000Z
|
# Copyright 2020 Ayoub ENNASSIRI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as dt
import csv
import logging
from src.data.models import Tick
from src.runner import pipeline
class ParseTickDataFn(pipeline.DoFn):
"""
Parse the raw tick data events into a tick object
"""
| 31.1
| 74
| 0.579421
|
# Copyright 2020 Ayoub ENNASSIRI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as dt
import csv
import logging
from src.data.models import Tick
from src.runner import pipeline
class ParseTickDataFn(pipeline.DoFn):
"""
Parse the raw tick data events into a tick object
"""
def __init__(self):
self.ticks_counter = 0
self.errors_parse_num = 0
def process(self, elements):
for e in elements:
try:
row = list(csv.reader([e]))[0]
self.ticks_counter += 1
yield Tick(
time=dt.datetime.strptime(
f"{row[0]},{row[1]}",
'%m/%d/%Y,%H:%M:%S'
),
price=float(row[2]),
bid=float(row[3]),
ask=float(row[4]),
quantity=float(row[5])
)
            except Exception:
self.errors_parse_num += 1
logging.error(f"Parsing error of element = {e}")
| 697
| 0
| 54
|
059cf7be37e442771a2a7e9542192e55afa38fe8
| 675
|
py
|
Python
|
pytup2019/bowler_queries/remove_debugger.py
|
lensvol/pytup2019
|
9fe2413a7fcb5e4a4631c42400daeebc2176fce1
|
[
"MIT"
] | 1
|
2020-03-14T15:16:55.000Z
|
2020-03-14T15:16:55.000Z
|
pytup2019/bowler_queries/remove_debugger.py
|
lensvol/pytup2019
|
9fe2413a7fcb5e4a4631c42400daeebc2176fce1
|
[
"MIT"
] | null | null | null |
pytup2019/bowler_queries/remove_debugger.py
|
lensvol/pytup2019
|
9fe2413a7fcb5e4a4631c42400daeebc2176fce1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
from bowler import Query, Filename, Capture, LN
PATTERN = """
power < "ipdb" trailer < '.' 'set_trace' > trailer < '(' ')' > > |
power < "breakpoint" trailer < "(" ")" > >
"""
| 22.5
| 87
| 0.62963
|
#!/usr/bin/env python
import sys
from bowler import Query, Filename, Capture, LN
PATTERN = """
power < "ipdb" trailer < '.' 'set_trace' > trailer < '(' ')' > > |
power < "breakpoint" trailer < "(" ")" > >
"""
def remove_debugger_statements(node: LN, capture: Capture, filename: Filename):
parent_index = node.parent.children.index(node)
node.parent.children = (
node.parent.children[0:parent_index] + node.parent.children[parent_index + 2 :]
)
return None
def get_query(path):
return (
Query(path)
.select(PATTERN)
.modify(remove_debugger_statements)
.execute(interactive=False, write=True)
)
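if __name__ == "__main__":
    # Hedged entry point sketch (not in the original file; the argument
    # handling is an assumption):
    get_query(sys.argv[1])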
| 405
| 0
| 46
|
d036b24263c3c6126474bf42a609edea5fe3d39d
| 1,679
|
py
|
Python
|
Web/testyummly.py
|
magmastonealex/InTheFridge
|
95b2e7127f259bd57afceb5759914621c6ab3e5b
|
[
"Apache-2.0"
] | 1
|
2015-04-20T01:25:25.000Z
|
2015-04-20T01:25:25.000Z
|
Web/testyummly.py
|
magmapus/InTheFridge
|
95b2e7127f259bd57afceb5759914621c6ab3e5b
|
[
"Apache-2.0"
] | null | null | null |
Web/testyummly.py
|
magmapus/InTheFridge
|
95b2e7127f259bd57afceb5759914621c6ab3e5b
|
[
"Apache-2.0"
] | null | null | null |
import requests
import urllib
import json
if __name__ == "__main__":
y=Yummly()
y.setup('d579507d','736c339e12b7a287c72c2de82313657e')
ing=["fruit","milk"]
print y.getAll(ing,"course^course-Beverages")
y.getCourses()
#todo
| 30.527273
| 152
| 0.645622
|
import requests
import urllib
import json
class Yummly:
s=requests.Session()
base="http://api.yummly.com/v1/api/recipes"
def setup(self,appid,appkey):
self.s.headers.update({'X-Yummly-App-ID':appid,'X-Yummly-App-Key':appkey})
def qsearch(self,ing,course):
pms=urllib.quote("allowedCourse[]")+"="+urllib.quote(course)
pms=pms+"&"+urllib.quote("allowedIngredient[]")+"="+urllib.quote(ing)
q=self.s.get(self.base+"?"+pms)
return json.loads(q.text)["matches"]
def getRecipie(self,rid):
pms=urllib.quote(rid)
q=self.s.get("http://api.yummly.com/v1/api/recipe/"+pms)
rec=json.loads(q.text)
return {'url':rec["attribution"]["url"], 'image':rec["images"][0]["hostedLargeUrl"],'name':rec["name"],'time':rec["totalTime"],'rating':rec["rating"]}
def getAll(self,ing,course):
total=[]
for item in ing:
t=self.qsearch(item,course)
for k in t:
k["rank"]=1
k["needmore"]=len(k["ingredients"])
for i in k["ingredients"]:
for x in ing:
if i.find(x) != -1:
k["rank"]=k["rank"]+1
k["needmore"]=k["needmore"]-1
k["rank"]=k["rank"]-k["needmore"]
total.append(k)
final=[]
ids=[]
for it in total:
if it["id"] not in ids:
ids.append(it["id"])
final.append(it)
newlist = sorted(final, key=lambda k: k['rank'])
newlist.reverse()
last=[]
for r in newlist[:10]:
last.append(y.getRecipie(r["id"]))
return last
def getCourses(self):
q=self.s.get("http://api.yummly.com/v1/api/metadata/course")
print q.text
if __name__ == "__main__":
y=Yummly()
y.setup('d579507d','736c339e12b7a287c72c2de82313657e')
ing=["fruit","milk"]
print y.getAll(ing,"course^course-Beverages")
y.getCourses()
#todo
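# Worked example of the ranking in getAll (a sketch, not from the original):
# a recipe listing 4 ingredients of which 2 match the query ["fruit", "milk"]
# starts at rank 1, gains +1 per match and is penalised by needmore = 4 - 2,
# so rank = (1 + 2) - 2 = 1; recipes missing fewer ingredients sort higher.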
| 1,255
| 174
| 22
|
b9544fa75fdfdd8216e4bbd3c9533a4f56740534
| 1,182
|
py
|
Python
|
data_storing/assets/tables/moving_average.py
|
marcellogoccia/deep-value-investing
|
4d45cc92c157246485b638d2052596a76975ec8a
|
[
"MIT"
] | null | null | null |
data_storing/assets/tables/moving_average.py
|
marcellogoccia/deep-value-investing
|
4d45cc92c157246485b638d2052596a76975ec8a
|
[
"MIT"
] | null | null | null |
data_storing/assets/tables/moving_average.py
|
marcellogoccia/deep-value-investing
|
4d45cc92c157246485b638d2052596a76975ec8a
|
[
"MIT"
] | null | null | null |
from utilities.config import database
from utilities.common_methods import getDebugInfo
from utilities import log
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import Integer, Float, Date, String
from sqlalchemy.orm import relationship
from data_storing.assets.base import Base
table_name = database.name_table_moving_average
| 35.818182
| 103
| 0.725888
|
from utilities.config import database
from utilities.common_methods import getDebugInfo
from utilities import log
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import Integer, Float, Date, String
from sqlalchemy.orm import relationship
from data_storing.assets.base import Base
table_name = database.name_table_moving_average
class MovingAverage(Base):
__tablename__ = table_name
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
equity_id = Column(Integer, ForeignKey(f'{database.name_table_equity}.id'))
technical_analysis_id = Column(Integer, ForeignKey(f'{database.name_table_technical_analysis}.id'))
technical_analysis = relationship(u'TechnicalAnalysis', back_populates=f'{table_name}')
day = Column(Date, default=None) # the day it was measured
type = Column(String(15), default=None) # simple or exponential
period = Column(Float, default=None) # 5 - 10 - 20 - 200 etc.
value = Column(Float, default=None) # the value of the moving average
def __repr__(self):
return f"<{table_name}," \
f"day: {self.day}," \
f"type: {self.type}>"
| 101
| 710
| 23
|
d913e76a3b390e2b515230b0eab52acb3113fad6
| 387
|
py
|
Python
|
tests/unittests/test_scaled_f_score.py
|
sharockys/listentweet
|
afd376f527488fa0809a80173dcedd9847f85e19
|
[
"MIT"
] | null | null | null |
tests/unittests/test_scaled_f_score.py
|
sharockys/listentweet
|
afd376f527488fa0809a80173dcedd9847f85e19
|
[
"MIT"
] | 16
|
2021-04-28T07:54:55.000Z
|
2022-03-29T11:15:05.000Z
|
tests/unittests/test_scaled_f_score.py
|
sharockys/listentweet
|
afd376f527488fa0809a80173dcedd9847f85e19
|
[
"MIT"
] | null | null | null |
import nltk
| 29.769231
| 74
| 0.749354
|
import nltk
def test_ScaledFScore():
# primitive test only.
from listentweet.keyword_extraction.scaled_f_score import ScaledFScore
nltk.download("gutenberg")
emma = nltk.corpus.gutenberg.open("austen-emma.txt").readlines()
sfs = ScaledFScore(emma)
characteristic_words = sfs.get_f1_score(keyword="emma", max_terms=30)
assert "emma" in characteristic_words
| 351
| 0
| 23
|
a96eca71d5759d02d2e44335fcb14f0c2dff7682
| 478
|
py
|
Python
|
src/exercise_one/scripts/data_processing.py
|
Ali-Elganzory/ARL-ROS
|
403c05d1332c3814ec7039b5ef733f708d845f8b
|
[
"MIT"
] | null | null | null |
src/exercise_one/scripts/data_processing.py
|
Ali-Elganzory/ARL-ROS
|
403c05d1332c3814ec7039b5ef733f708d845f8b
|
[
"MIT"
] | null | null | null |
src/exercise_one/scripts/data_processing.py
|
Ali-Elganzory/ARL-ROS
|
403c05d1332c3814ec7039b5ef733f708d845f8b
|
[
"MIT"
] | null | null | null |
import rospy
from std_msgs.msg import String
if __name__ == "__main__":
data_processing()
| 22.761905
| 82
| 0.669456
|
import rospy
from std_msgs.msg import String
def process_clbk(raw_data: String):
name, age, height = map(lambda x: x.split(": ")[1], raw_data.data.split(", "))
age = int(age)
height = int(height)
rospy.loginfo(f"\nname: {name}\nage: {age}\nheight: {height}")
def data_processing():
rospy.init_node("data_processing")
subscriber = rospy.Subscriber("raw_data", String, process_clbk)
rospy.spin()
if __name__ == "__main__":
data_processing()
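# Example message this node expects on the "raw_data" topic (format inferred
# from process_clbk above; the values are hypothetical):
#
#     rostopic pub /raw_data std_msgs/String "data: 'name: Ali, age: 23, height: 175'"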
| 333
| 0
| 46
|
2d0e162b87c63c828d5b570a15927ce895086357
| 14,969
|
py
|
Python
|
src/3_model/minlp/data_plots.py
|
akxen/rep-gep
|
ed470cd60ecba110e228ce54b8c471cb9cb0256d
|
[
"MIT"
] | null | null | null |
src/3_model/minlp/data_plots.py
|
akxen/rep-gep
|
ed470cd60ecba110e228ce54b8c471cb9cb0256d
|
[
"MIT"
] | null | null | null |
src/3_model/minlp/data_plots.py
|
akxen/rep-gep
|
ed470cd60ecba110e228ce54b8c471cb9cb0256d
|
[
"MIT"
] | null | null | null |
"""Plot input traces to visualise data"""
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))
from utils.data import ModelData
def plot_coal_fuel_costs():
"""Coal fuel costs"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.existing_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['COAL'])
# Initialise figures
fig, ax = plt.subplots()
# Generators in order of increasing fuel cost (based on cost in last year)
generator_order = data.existing_units.loc[mask, 'FUEL_COST'].T.iloc[-1].sort_values().index
# Plot fuel costs for existing units
data.existing_units.loc[mask, 'FUEL_COST'].T.loc[:, generator_order].plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=7, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Fuel cost (\$/GJ)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
# Adjust figure placement and size
fig.subplots_adjust(top=0.8, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.8)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', 'fuel_cost_coal.pdf'))
plt.show()
def plot_gas_fuel_costs():
"""Plot fuel costs for candidate gas generators"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.existing_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['GAS'])
# Initialise figures
fig, ax = plt.subplots()
# Generators in order of increasing fuel cost (based on cost in last year)
generator_order = data.existing_units.loc[mask, 'FUEL_COST'].T.iloc[-1].sort_values().index
# Plot fuel costs for existing units
data.existing_units.loc[mask, 'FUEL_COST'].T.loc[:, generator_order].plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=7, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Fuel cost (\$/GJ)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
# Adjust figure placement and size
fig.subplots_adjust(top=0.7, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.8)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', 'fuel_cost_gas.pdf'))
plt.show()
def plot_coal_build_costs():
"""Plot build costs for candidate coal generators"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.candidate_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['COAL'])
# Initialise figures
fig, ax = plt.subplots()
# Plot build costs for candidate units
data.candidate_units.loc[mask, 'BUILD_COST'].T.plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Build cost (\$/kW)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1000))
ax.yaxis.set_minor_locator(MultipleLocator(200))
# Adjust figure placement and size
fig.subplots_adjust(top=0.85, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.6)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'build_costs_coal.pdf'))
plt.show()
def plot_gas_build_costs():
"""Plotting candidate gas generator build costs"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.candidate_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['GAS'])
# Initialise figures
fig, ax = plt.subplots()
# Plot build costs for candidate units
data.candidate_units.loc[mask, 'BUILD_COST'].T.plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Build cost (\$/kW)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1000))
ax.yaxis.set_minor_locator(MultipleLocator(200))
# Adjust figure placement and size
fig.subplots_adjust(top=0.73, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.6)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'build_costs_gas.pdf'))
plt.show()
def plot_solar_build_costs():
"""Plot solar build costs"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.candidate_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['SOLAR'])
# Initialise figures
fig, ax = plt.subplots()
# Plot build costs for candidate units
data.candidate_units.loc[mask, 'BUILD_COST'].T.plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Build cost (\$/kW)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1000))
ax.yaxis.set_minor_locator(MultipleLocator(200))
# Adjust figure placement and size
fig.subplots_adjust(top=0.75, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.6)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'build_costs_solar.pdf'))
plt.show()
def plot_wind_build_cost():
"""Plot build costs for candidate wind generators"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.candidate_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['WIND'])
# Initialise figures
fig, ax = plt.subplots()
# Plot build costs for candidate units
data.candidate_units.loc[mask, 'BUILD_COST'].T.plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Build cost (\$/kW)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(100))
ax.yaxis.set_minor_locator(MultipleLocator(50))
# Adjust figure placement and size
fig.subplots_adjust(top=0.85, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.6)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'build_costs_wind.pdf'))
plt.show()
def plot_demand_profiles(nem_zone='ADE'):
"""Plot demand profiles"""
# Class containing model data
data = ModelData()
df = pd.read_hdf(os.path.join(os.path.dirname(__file__), os.path.pardir, '2_input_traces', 'output', 'dataset.h5'))
    df_d = df.loc[:, [('DEMAND', nem_zone)]]
    # Data for 2020
df_d = df_d.sort_index()
df_d = df_d.loc[df_d.index.year == 2020, :]
# Day of year
df_d['day_of_year'] = df_d.index.dayofyear.values
# Hour in a given day
df_d['hour'] = df_d.index.hour.values
# Adjust last hour in each day - set as 24th hour
df_d['hour'] = df_d['hour'].replace(0, 24)
plt.clf()
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
df_d.pivot(index='day_of_year', columns='hour', values=('DEMAND', nem_zone)).T.plot(ax=ax1, legend=False, alpha=0.4,
cmap='viridis')
df_d.pivot(index='day_of_year', columns='hour', values=('DEMAND', nem_zone)).T.plot(ax=ax2, legend=False, alpha=0.1,
cmap='viridis')
# Plot traces obtained from k-means clustering
data.input_traces.loc[2020, ('DEMAND', nem_zone)].T.plot(ax=ax2, color='r', legend=False, alpha=0.8)
    ax1.set_ylabel(f'{nem_zone} Demand (MW)')
    ax2.set_ylabel(f'{nem_zone} Demand (MW)')
ax2.set_xlabel('Hour')
# Format axes ticks
ax2.xaxis.set_major_locator(MultipleLocator(5))
ax2.xaxis.set_minor_locator(MultipleLocator(1))
ax1.yaxis.set_major_locator(MultipleLocator(500))
ax1.yaxis.set_minor_locator(MultipleLocator(100))
ax2.yaxis.set_major_locator(MultipleLocator(500))
ax2.yaxis.set_minor_locator(MultipleLocator(100))
# Adjust figure placement and size
fig.subplots_adjust(top=0.95, bottom=0.08, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'demand_profiles_{nem_zone}.pdf'))
plt.show()
def plot_wind_capacity_factors(wind_bubble='YPS'):
"""Plotting wind capacity factors for a given wind bubble"""
# Class containing model data
data = ModelData()
df = pd.read_hdf(os.path.join(os.path.dirname(__file__), os.path.pardir, '2_input_traces', 'output', 'dataset.h5'))
df_d = df.loc[:, [('WIND', wind_bubble)]]
    # Data for 2020
df_d = df_d.sort_index()
df_d = df_d.loc[df_d.index.year == 2020, :]
# Day of year
df_d['day_of_year'] = df_d.index.dayofyear.values
# Hour in a given day
df_d['hour'] = df_d.index.hour.values
# Adjust last hour in each day - set as 24th hour
df_d['hour'] = df_d['hour'].replace(0, 24)
plt.clf()
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
df_d.pivot(index='day_of_year', columns='hour', values=('WIND', wind_bubble)).T.plot(ax=ax1, legend=False,
alpha=0.4, cmap='viridis')
df_d.pivot(index='day_of_year', columns='hour', values=('WIND', wind_bubble)).T.plot(ax=ax2, legend=False,
alpha=0.1, cmap='viridis')
# Plot traces obtained from k-means clustering
data.input_traces.loc[2020, ('WIND', wind_bubble)].T.plot(ax=ax2, color='r', legend=False, alpha=0.8)
ax1.set_ylabel(f'{wind_bubble} capacity factor [-]')
ax2.set_ylabel(f'{wind_bubble} capacity factor [-]')
ax2.set_xlabel('Hour')
# Format axes ticks
ax2.xaxis.set_major_locator(MultipleLocator(5))
ax2.xaxis.set_minor_locator(MultipleLocator(1))
ax1.yaxis.set_major_locator(MultipleLocator(0.2))
ax1.yaxis.set_minor_locator(MultipleLocator(0.05))
ax2.yaxis.set_major_locator(MultipleLocator(0.2))
ax2.yaxis.set_minor_locator(MultipleLocator(0.05))
# Adjust figure placement and size
fig.subplots_adjust(top=0.95, bottom=0.08, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'wind_profiles_{wind_bubble}.pdf'))
plt.show()
def plot_solar_capacity_factors(nem_zone='ADE', technology='DAT'):
"""Plot solar capacity factors"""
# Construct solar technology ID based on NEM zone and technology type
tech_id = f'{nem_zone}|{technology}'
# Class containing model data
data = ModelData()
df = pd.read_hdf(os.path.join(os.path.dirname(__file__), os.path.pardir, '2_input_traces', 'output', 'dataset.h5'))
df_d = df.loc[:, [('SOLAR', tech_id)]]
# Data for 2020
df_d = df_d.sort_index()
df_d = df_d.loc[df_d.index.year == 2020, :]
# Day of year
df_d['day_of_year'] = df_d.index.dayofyear.values
# Hour in a given day
df_d['hour'] = df_d.index.hour.values
# Adjust last hour in each day - set as 24th hour
df_d['hour'] = df_d['hour'].replace(0, 24)
plt.clf()
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
df_d.pivot(index='day_of_year', columns='hour', values=('SOLAR', tech_id)).T.plot(ax=ax1, legend=False,
alpha=0.4, cmap='viridis')
df_d.pivot(index='day_of_year', columns='hour', values=('SOLAR', tech_id)).T.plot(ax=ax2, legend=False,
alpha=0.1, cmap='viridis')
# Plot traces obtained from k-means clustering
data.input_traces.loc[2020, ('SOLAR', tech_id)].T.plot(ax=ax2, color='r', legend=False, alpha=0.8)
ax1.set_ylabel(f'{tech_id} capacity factor [-]')
ax2.set_ylabel(f'{tech_id} capacity factor [-]')
ax2.set_xlabel('Hour')
# Format axes ticks
ax2.xaxis.set_major_locator(MultipleLocator(5))
ax2.xaxis.set_minor_locator(MultipleLocator(1))
ax1.yaxis.set_major_locator(MultipleLocator(0.2))
ax1.yaxis.set_minor_locator(MultipleLocator(0.05))
ax2.yaxis.set_major_locator(MultipleLocator(0.2))
ax2.yaxis.set_minor_locator(MultipleLocator(0.05))
# Adjust figure placement and size
fig.subplots_adjust(top=0.95, bottom=0.08, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures',
f"solar_profiles_{tech_id.replace('|', '-')}.pdf"))
plt.show()
if __name__ == '__main__':
# Create plots
plot_coal_build_costs()
plot_gas_build_costs()
plot_solar_build_costs()
plot_wind_build_cost()
plot_coal_fuel_costs()
plot_gas_fuel_costs()
plot_demand_profiles(nem_zone='ADE')
plot_wind_capacity_factors(wind_bubble='YPS')
plot_solar_capacity_factors(nem_zone='ADE', technology='DAT')
| 34.175799
| 121
| 0.636582
|
"""Plot input traces to visualise data"""
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))
from utils.data import ModelData
def plot_coal_fuel_costs():
"""Coal fuel costs"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.existing_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['COAL'])
# Initialise figures
fig, ax = plt.subplots()
# Generators in order of increasing fuel cost (based on cost in last year)
generator_order = data.existing_units.loc[mask, 'FUEL_COST'].T.iloc[-1].sort_values().index
# Plot fuel costs for existing units
data.existing_units.loc[mask, 'FUEL_COST'].T.loc[:, generator_order].plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=7, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Fuel cost (\$/GJ)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
# Adjust figure placement and size
fig.subplots_adjust(top=0.8, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.8)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', 'fuel_cost_coal.pdf'))
plt.show()
def plot_gas_fuel_costs():
"""Plot fuel costs for candidate gas generators"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.existing_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['GAS'])
# Initialise figures
fig, ax = plt.subplots()
# Generators in order of increasing fuel cost (based on cost in last year)
generator_order = data.existing_units.loc[mask, 'FUEL_COST'].T.iloc[-1].sort_values().index
# Plot fuel costs for existing units
data.existing_units.loc[mask, 'FUEL_COST'].T.loc[:, generator_order].plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=7, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Fuel cost (\$/GJ)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
# Adjust figure placement and size
fig.subplots_adjust(top=0.7, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.8)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', 'fuel_cost_gas.pdf'))
plt.show()
def plot_coal_build_costs():
"""Plot build costs for candidate coal generators"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.candidate_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['COAL'])
# Initialise figures
fig, ax = plt.subplots()
# Plot build costs for candidate units
data.candidate_units.loc[mask, 'BUILD_COST'].T.plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Build cost (\$/kW)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1000))
ax.yaxis.set_minor_locator(MultipleLocator(200))
# Adjust figure placement and size
fig.subplots_adjust(top=0.85, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.6)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'build_costs_coal.pdf'))
plt.show()
def plot_gas_build_costs():
"""Plotting candidate gas generator build costs"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.candidate_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['GAS'])
# Initialise figures
fig, ax = plt.subplots()
# Plot build costs for candidate units
data.candidate_units.loc[mask, 'BUILD_COST'].T.plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Build cost (\$/kW)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1000))
ax.yaxis.set_minor_locator(MultipleLocator(200))
# Adjust figure placement and size
fig.subplots_adjust(top=0.73, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.6)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'build_costs_gas.pdf'))
plt.show()
def plot_solar_build_costs():
"""Plot solar build costs"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.candidate_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['SOLAR'])
# Initialise figures
fig, ax = plt.subplots()
# Plot build costs for candidate units
data.candidate_units.loc[mask, 'BUILD_COST'].T.plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Build cost (\$/kW)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1000))
ax.yaxis.set_minor_locator(MultipleLocator(200))
# Adjust figure placement and size
fig.subplots_adjust(top=0.75, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.6)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'build_costs_solar.pdf'))
plt.show()
def plot_wind_build_cost():
"""Plot build costs for candidate wind generators"""
# Class containing model data
data = ModelData()
# Get coal DUIDs
mask = data.candidate_units['PARAMETERS']['FUEL_TYPE_PRIMARY'].isin(['WIND'])
# Initialise figures
fig, ax = plt.subplots()
# Plot build costs for candidate units
data.candidate_units.loc[mask, 'BUILD_COST'].T.plot(ax=ax, cmap='tab20c', alpha=0.9)
# Add legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5, mode="expand", borderaxespad=0., prop={'size': 6})
# Add axes labels
ax.set_ylabel('Build cost (\$/kW)')
ax.set_xlabel('Year')
# Format axes ticks
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(100))
ax.yaxis.set_minor_locator(MultipleLocator(50))
# Adjust figure placement and size
fig.subplots_adjust(top=0.85, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291 * 0.6)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'build_costs_wind.pdf'))
plt.show()
def plot_demand_profiles(nem_zone='ADE'):
"""Plot demand profiles"""
# Class containing model data
data = ModelData()
df = pd.read_hdf(os.path.join(os.path.dirname(__file__), os.path.pardir, '2_input_traces', 'output', 'dataset.h5'))
    df_d = df.loc[:, [('DEMAND', nem_zone)]]
    # Data for 2020
df_d = df_d.sort_index()
df_d = df_d.loc[df_d.index.year == 2020, :]
# Day of year
df_d['day_of_year'] = df_d.index.dayofyear.values
# Hour in a given day
df_d['hour'] = df_d.index.hour.values
# Adjust last hour in each day - set as 24th hour
df_d['hour'] = df_d['hour'].replace(0, 24)
plt.clf()
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
df_d.pivot(index='day_of_year', columns='hour', values=('DEMAND', nem_zone)).T.plot(ax=ax1, legend=False, alpha=0.4,
cmap='viridis')
df_d.pivot(index='day_of_year', columns='hour', values=('DEMAND', nem_zone)).T.plot(ax=ax2, legend=False, alpha=0.1,
cmap='viridis')
# Plot traces obtained from k-means clustering
data.input_traces.loc[2020, ('DEMAND', nem_zone)].T.plot(ax=ax2, color='r', legend=False, alpha=0.8)
    ax1.set_ylabel(f'{nem_zone} Demand (MW)')
    ax2.set_ylabel(f'{nem_zone} Demand (MW)')
ax2.set_xlabel('Hour')
# Format axes ticks
ax2.xaxis.set_major_locator(MultipleLocator(5))
ax2.xaxis.set_minor_locator(MultipleLocator(1))
ax1.yaxis.set_major_locator(MultipleLocator(500))
ax1.yaxis.set_minor_locator(MultipleLocator(100))
ax2.yaxis.set_major_locator(MultipleLocator(500))
ax2.yaxis.set_minor_locator(MultipleLocator(100))
# Adjust figure placement and size
fig.subplots_adjust(top=0.95, bottom=0.08, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'demand_profiles_{nem_zone}.pdf'))
plt.show()
def plot_wind_capacity_factors(wind_bubble='YPS'):
"""Plotting wind capacity factors for a given wind bubble"""
# Class containing model data
data = ModelData()
df = pd.read_hdf(os.path.join(os.path.dirname(__file__), os.path.pardir, '2_input_traces', 'output', 'dataset.h5'))
df_d = df.loc[:, [('WIND', wind_bubble)]]
    # Data for 2020
df_d = df_d.sort_index()
df_d = df_d.loc[df_d.index.year == 2020, :]
# Day of year
df_d['day_of_year'] = df_d.index.dayofyear.values
# Hour in a given day
df_d['hour'] = df_d.index.hour.values
# Adjust last hour in each day - set as 24th hour
df_d['hour'] = df_d['hour'].replace(0, 24)
plt.clf()
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
df_d.pivot(index='day_of_year', columns='hour', values=('WIND', wind_bubble)).T.plot(ax=ax1, legend=False,
alpha=0.4, cmap='viridis')
df_d.pivot(index='day_of_year', columns='hour', values=('WIND', wind_bubble)).T.plot(ax=ax2, legend=False,
alpha=0.1, cmap='viridis')
# Plot traces obtained from k-means clustering
data.input_traces.loc[2020, ('WIND', wind_bubble)].T.plot(ax=ax2, color='r', legend=False, alpha=0.8)
ax1.set_ylabel(f'{wind_bubble} capacity factor [-]')
ax2.set_ylabel(f'{wind_bubble} capacity factor [-]')
ax2.set_xlabel('Hour')
# Format axes ticks
ax2.xaxis.set_major_locator(MultipleLocator(5))
ax2.xaxis.set_minor_locator(MultipleLocator(1))
ax1.yaxis.set_major_locator(MultipleLocator(0.2))
ax1.yaxis.set_minor_locator(MultipleLocator(0.05))
ax2.yaxis.set_major_locator(MultipleLocator(0.2))
ax2.yaxis.set_minor_locator(MultipleLocator(0.05))
# Adjust figure placement and size
fig.subplots_adjust(top=0.95, bottom=0.08, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures', f'wind_profiles_{wind_bubble}.pdf'))
plt.show()
def plot_solar_capacity_factors(nem_zone='ADE', technology='DAT'):
"""Plot solar capacity factors"""
# Construct solar technology ID based on NEM zone and technology type
tech_id = f'{nem_zone}|{technology}'
# Class containing model data
data = ModelData()
df = pd.read_hdf(os.path.join(os.path.dirname(__file__), os.path.pardir, '2_input_traces', 'output', 'dataset.h5'))
df_d = df.loc[:, [('SOLAR', tech_id)]]
# Data for 2020
df_d = df_d.sort_index()
df_d = df_d.loc[df_d.index.year == 2020, :]
# Day of year
df_d['day_of_year'] = df_d.index.dayofyear.values
# Hour in a given day
df_d['hour'] = df_d.index.hour.values
# Adjust last hour in each day - set as 24th hour
df_d['hour'] = df_d['hour'].replace(0, 24)
plt.clf()
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
df_d.pivot(index='day_of_year', columns='hour', values=('SOLAR', tech_id)).T.plot(ax=ax1, legend=False,
alpha=0.4, cmap='viridis')
df_d.pivot(index='day_of_year', columns='hour', values=('SOLAR', tech_id)).T.plot(ax=ax2, legend=False,
alpha=0.1, cmap='viridis')
# Plot traces obtained from k-means clustering
data.input_traces.loc[2020, ('SOLAR', tech_id)].T.plot(ax=ax2, color='r', legend=False, alpha=0.8)
ax1.set_ylabel(f'{tech_id} capacity factor [-]')
ax2.set_ylabel(f'{tech_id} capacity factor [-]')
ax2.set_xlabel('Hour')
# Format axes ticks
ax2.xaxis.set_major_locator(MultipleLocator(5))
ax2.xaxis.set_minor_locator(MultipleLocator(1))
ax1.yaxis.set_major_locator(MultipleLocator(0.2))
ax1.yaxis.set_minor_locator(MultipleLocator(0.05))
ax2.yaxis.set_major_locator(MultipleLocator(0.2))
ax2.yaxis.set_minor_locator(MultipleLocator(0.05))
# Adjust figure placement and size
fig.subplots_adjust(top=0.95, bottom=0.08, left=0.1, right=0.95)
fig.set_size_inches(6.69291, 6.69291)
# Save figure
fig.savefig(os.path.join(os.path.dirname(__file__), 'output', 'figures',
f"solar_profiles_{tech_id.replace('|', '-')}.pdf"))
plt.show()
if __name__ == '__main__':
# Create plots
plot_coal_build_costs()
plot_gas_build_costs()
plot_solar_build_costs()
plot_wind_build_cost()
plot_coal_fuel_costs()
plot_gas_fuel_costs()
plot_demand_profiles(nem_zone='ADE')
plot_wind_capacity_factors(wind_bubble='YPS')
plot_solar_capacity_factors(nem_zone='ADE', technology='DAT')
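# Note on the pivot used throughout (a sketch): pivoting on day_of_year/hour and
# transposing turns one year of hourly data into a frame with one column per day,
# e.g. for the demand plots above:
#
#     profiles = df_d.pivot(index='day_of_year', columns='hour',
#                           values=('DEMAND', nem_zone)).T  # 24 rows x 365 columns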
| 0
| 0
| 0
|
98e8ef272f6ae099e91304a8f9b8a8b7b3ca7691
| 572
|
py
|
Python
|
app/mod_whatis/views.py
|
crtarsorg/glasomer.rs-v2
|
ac43e1ba30b80a4ba9ca67768680b6e79f1512f3
|
[
"CC0-1.0"
] | null | null | null |
app/mod_whatis/views.py
|
crtarsorg/glasomer.rs-v2
|
ac43e1ba30b80a4ba9ca67768680b6e79f1512f3
|
[
"CC0-1.0"
] | 54
|
2017-03-06T12:48:40.000Z
|
2017-04-20T08:33:09.000Z
|
app/mod_whatis/views.py
|
crtarsorg/glasomer.rs-v2
|
ac43e1ba30b80a4ba9ca67768680b6e79f1512f3
|
[
"CC0-1.0"
] | null | null | null |
from flask import Blueprint, render_template
from app import mongo_utils
from bson import json_util
import json
mod_whatis = Blueprint('whatis', __name__, url_prefix='/sta-je-glasomer')
@mod_whatis.route('/', methods=['GET'])
| 38.133333
| 108
| 0.758741
|
from flask import Blueprint, render_template
from app import mongo_utils
from bson import json_util
import json
mod_whatis = Blueprint('whatis', __name__, url_prefix='/sta-je-glasomer')
@mod_whatis.route('/', methods=['GET'])
def index():
year = ""
project_enabled = mongo_utils.get_enabled_project()
for project in json.loads(json_util.dumps(project_enabled)):
year = project['year']
glasomer_text = mongo_utils.find_glasomer_text(year)
    return render_template('mod_whatis/index.html', glasomer_text=json.loads(json_util.dumps(glasomer_text)))
| 323
| 0
| 22
|
5a7cf5792682d6ad3c192a5fde31f431b97168ff
| 313
|
py
|
Python
|
course/migrations/0047_remove_page_data_ordinal_uniqueness.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | 284
|
2015-01-09T12:02:28.000Z
|
2022-03-27T14:30:46.000Z
|
course/migrations/0047_remove_page_data_ordinal_uniqueness.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | 799
|
2015-02-26T08:49:46.000Z
|
2022-03-31T16:09:26.000Z
|
course/migrations/0047_remove_page_data_ordinal_uniqueness.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | 120
|
2015-01-30T18:00:56.000Z
|
2022-03-28T06:24:43.000Z
|
from django.db import models, migrations
| 19.5625
| 57
| 0.613419
|
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('course', '0046_allow_null_page_data_ordinals'),
]
operations = [
migrations.AlterUniqueTogether(
name='flowpagedata',
unique_together=set(),
),
]
| 0
| 248
| 23
|
629635c2446dffba0f7e84880658f4cf63855f95
| 1,388
|
py
|
Python
|
main-step-demo.py
|
cfalguiere/datalab-utils
|
cfa36030ce12efc8e4b5b7ad065ab59e763e0b78
|
[
"MIT"
] | null | null | null |
main-step-demo.py
|
cfalguiere/datalab-utils
|
cfa36030ce12efc8e4b5b7ad065ab59e763e0b78
|
[
"MIT"
] | 6
|
2018-03-13T10:52:45.000Z
|
2018-03-14T21:37:17.000Z
|
main-step-demo.py
|
cfalguiere/datalab-utils
|
cfa36030ce12efc8e4b5b7ad065ab59e763e0b78
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#-*- coding: utf-8 -*-
"""Run Score Credit Habitat"""
from __future__ import print_function
import sys
import logging
from easydatalab.common.app import AppContext
from easydatalab.common.exceptions import ExecutionError
def main():
"""Main entry point for the script."""
cfgFile = 'easydatalab/tests/resources/config/config_for_unittests.cfg'
logCfgFile = 'easydatalab/resources/log_config.yml'
with AppContext(log_config_file=logCfgFile) as appContext:
appContext.logger.info("default logger for %s" % str( appContext) )
appContext.skip_steps( [ 'skipped step' ] )
with appContext.new_configuration(cfgFile) as appConfiguration:
appConfiguration.show()
with appContext.new_step ('something') as step:
if step.enabled():
print("does something")
with appContext.new_step ('skipped step') as step:
if step.enabled():
print("does skipped")
with appContext.new_step ('failed step') as step:
if step.enabled():
raise ExecutionError('step 3', 'failed to complete task')
with appContext.new_step ('something else') as step:
if step.enabled():
print("does something else")
if __name__ == '__main__':
sys.exit(main())
| 31.545455
| 77
| 0.627522
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
"""Run Score Credit Habitat"""
from __future__ import print_function
import sys
import logging
from easydatalab.common.app import AppContext
from easydatalab.common.exceptions import ExecutionError
def main():
"""Main entry point for the script."""
cfgFile = 'easydatalab/tests/resources/config/config_for_unittests.cfg'
logCfgFile = 'easydatalab/resources/log_config.yml'
with AppContext(log_config_file=logCfgFile) as appContext:
appContext.logger.info("default logger for %s" % str( appContext) )
appContext.skip_steps( [ 'skipped step' ] )
with appContext.new_configuration(cfgFile) as appConfiguration:
appConfiguration.show()
with appContext.new_step ('something') as step:
if step.enabled():
print("does something")
with appContext.new_step ('skipped step') as step:
if step.enabled():
print("does skipped")
with appContext.new_step ('failed step') as step:
if step.enabled():
raise ExecutionError('step 3', 'failed to complete task')
with appContext.new_step ('something else') as step:
if step.enabled():
print("does something else")
if __name__ == '__main__':
sys.exit(main())
| 0
| 0
| 0
|
2d0633efdab3a26a7e6897b19ca7b6bd561f2da7
| 697
|
py
|
Python
|
src/daemons/videod.py
|
schnema123/pydrive
|
287a9f83604470e6bd01d2a3d4f355559f8253d8
|
[
"MIT"
] | null | null | null |
src/daemons/videod.py
|
schnema123/pydrive
|
287a9f83604470e6bd01d2a3d4f355559f8253d8
|
[
"MIT"
] | null | null | null |
src/daemons/videod.py
|
schnema123/pydrive
|
287a9f83604470e6bd01d2a3d4f355559f8253d8
|
[
"MIT"
] | null | null | null |
import socket
import zmq
from shared.daemon_ports import get_port
from shared.messaging.sock import create_sub_sock
# Send from replayd -> videod
if __name__ == "__main__":
"""
context = zmq.Context()
port = get_port("replayd")
socket = create_sub_sock(context, port)
"""
videosocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting...")
connected = False
while not connected:
try:
videosocket.connect(("localhost", 4444))
connected = True
        except ConnectionRefusedError:
pass
print("Connected...")
while True:
data = videosocket.recv(1024)
print(data)
| 21.78125
| 67
| 0.637016
|
import socket
import zmq
from shared.daemon_ports import get_port
from shared.messaging.sock import create_sub_sock
# Send from replayd -> videod
if __name__ == "__main__":
"""
context = zmq.Context()
port = get_port("replayd")
socket = create_sub_sock(context, port)
"""
videosocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting...")
connected = False
while not connected:
try:
videosocket.connect(("localhost", 4444))
connected = True
        except ConnectionRefusedError:
pass
print("Connected...")
while True:
data = videosocket.recv(1024)
print(data)
| 0
| 0
| 0
|
1dc5885e8b632bcad5a0b70c1b8d8c5702ffa110
| 1,820
|
py
|
Python
|
setup.py
|
bocadilloproject/bocadillo-cli
|
f11ec438504eb2edd3c4e8f5d2992e804b3da6b0
|
[
"MIT"
] | 6
|
2019-04-17T17:07:46.000Z
|
2020-08-09T07:37:34.000Z
|
setup.py
|
bocadilloproject/bocadillo-cli
|
f11ec438504eb2edd3c4e8f5d2992e804b3da6b0
|
[
"MIT"
] | 10
|
2019-04-17T21:27:46.000Z
|
2019-06-17T05:45:51.000Z
|
setup.py
|
bocadilloproject/bocadillo-cli
|
f11ec438504eb2edd3c4e8f5d2992e804b3da6b0
|
[
"MIT"
] | 1
|
2019-05-12T17:32:45.000Z
|
2019-05-12T17:32:45.000Z
|
import os
import re
import setuptools
description = "Standard development tooling for Bocadillo"
with open("README.md", "r") as readme:
long_description = readme.read()
NAME = "bocadillo-cli"
PACKAGE = "bocadillo_cli"
GITHUB = "https://github.com/bocadilloproject/bocadillo-cli"
CHANGELOG = f"{GITHUB}/blob/master/CHANGELOG.md"
HERE = os.path.abspath(os.path.dirname(__file__))
setuptools.setup(
name=NAME,
version=find_version(PACKAGE, "version.py"),
author="Florimond Manca",
author_email="florimond.manca@gmail.com",
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(exclude=["bocadillo_cli.templates"]),
include_package_data=True, # see MANIFEST.in
entry_points={"console_scripts": ["bocadillo=bocadillo_cli.main:cli"]},
install_requires=["click>=7.0, <8.0", "jinja2>=2.10.1"],
python_requires=">=3.6",
url=GITHUB,
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Utilities",
"Topic :: Software Development :: Code Generators",
],
)
| 33.090909
| 76
| 0.659341
|
import os
import re
import setuptools
description = "Standard development tooling for Bocadillo"
with open("README.md", "r") as readme:
long_description = readme.read()
NAME = "bocadillo-cli"
PACKAGE = "bocadillo_cli"
GITHUB = "https://github.com/bocadilloproject/bocadillo-cli"
CHANGELOG = f"{GITHUB}/blob/master/CHANGELOG.md"
HERE = os.path.abspath(os.path.dirname(__file__))
def find_version(*parts):
with open(os.path.join(HERE, *parts), "r") as fp:
content = fp.read()
version_match = re.match(r"^__version__ = ['\"]([^'\"]*)['\"]", content)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
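# Worked example (a sketch): if bocadillo_cli/version.py starts with
# __version__ = "0.1.0", then find_version(PACKAGE, "version.py")
# returns "0.1.0".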
setuptools.setup(
name=NAME,
version=find_version(PACKAGE, "version.py"),
author="Florimond Manca",
author_email="florimond.manca@gmail.com",
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(exclude=["bocadillo_cli.templates"]),
include_package_data=True, # see MANIFEST.in
entry_points={"console_scripts": ["bocadillo=bocadillo_cli.main:cli"]},
install_requires=["click>=7.0, <8.0", "jinja2>=2.10.1"],
python_requires=">=3.6",
url=GITHUB,
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Utilities",
"Topic :: Software Development :: Code Generators",
],
)
| 280
| 0
| 23
|
dc155e623350a743adcb1fb509940559b7b8f2f2
| 1,396
|
py
|
Python
|
7_dynamic_programming/easy/knapsack_DP_easy.py
|
itsEmShoji/TCScurriculum
|
bd27c8c2b26032e9e6f92a50b40ec19d45301988
|
[
"MIT"
] | null | null | null |
7_dynamic_programming/easy/knapsack_DP_easy.py
|
itsEmShoji/TCScurriculum
|
bd27c8c2b26032e9e6f92a50b40ec19d45301988
|
[
"MIT"
] | null | null | null |
7_dynamic_programming/easy/knapsack_DP_easy.py
|
itsEmShoji/TCScurriculum
|
bd27c8c2b26032e9e6f92a50b40ec19d45301988
|
[
"MIT"
] | null | null | null |
# TODO: imports
# REQUIRES: num_items >= 0, capacity >= 0,
# size of item_values >= num_items,
# size of item_weights >= num_items,
# item_values are all >= 0, item_weights are all >= 0
# EFFECTS: Computes the max value that can be obtained by picking
# from a set of num_items items without exceeding the given
# capacity. Choosing item i produces the value item_values[i]
# but uses weight item_weights[i] out of the available
# capacity.
# Must use dynamic programming!
# Build table K[][] in bottom up manner
# TODO: loop through items
# TODO: second loop through weights upto capacity
# TODO: if statement
# TODO: set item and weight to 0
# TODO: if item weights[item-1] is less than weight
K[cap][weight] = max(item_values[cap-1] +
K[cap-1][weight-item_weights[cap-1]], K[cap-1][weight])
# TODO: else
# TODO: set k[item][weight] to previously found answer
# TODO: return K at num_items and capacity
if __name__ == '__main__':
unittest.main(argv=['first-arg-is-ignored'], exit=False)
| 34.04878
| 81
| 0.657593
|
import unittest
# REQUIRES: num_items >= 0, capacity >= 0,
# size of item_values >= num_items,
# size of item_weights >= num_items,
# item_values are all >= 0, item_weights are all >= 0
# EFFECTS: Computes the max value that can be obtained by picking
# from a set of num_items items without exceeding the given
# capacity. Choosing item i produces the value item_values[i]
# but uses weight item_weights[i] out of the available
# capacity.
# Must use dynamic programming!
def knapsack_dp(capacity, item_weights, item_values, num_items):
    # Rows index items 0..num_items, columns index capacities 0..capacity
    rows = num_items + 1
    cols = capacity + 1
    K = [[0 for x in range(cols)] for y in range(rows)]
    # Build table K[][] in bottom up manner
    for item in range(rows):
        # Loop through weights up to capacity
        for weight in range(cols):
            if item == 0 or weight == 0:
                # No items considered or no capacity left: value is 0
                K[item][weight] = 0
            elif item_weights[item-1] <= weight:
                # Item fits: take the better of including or excluding it
                K[item][weight] = max(item_values[item-1] +
                  K[item-1][weight-item_weights[item-1]], K[item-1][weight])
            else:
                # Item does not fit: carry over the previously found answer
                K[item][weight] = K[item-1][weight]
    # Max value using the first num_items items within the given capacity
    return K[num_items][capacity]
class MyTest(unittest.TestCase):
def test(self):
self.assertEqual(knapsack_dp(0, [], [], 0), 0)
# TODO: Add more tests
if __name__ == '__main__':
unittest.main(argv=['first-arg-is-ignored'], exit=False)
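# Worked check (a sketch): with capacity 5, item_weights [2, 3] and
# item_values [3, 4], taking both items uses weight 5 for value 7, so
# knapsack_dp(5, [2, 3], [3, 4], 2) == 7.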
| 217
| 11
| 72
|
a0127677f968c02630c0e8876bdfc54172dc9051
| 4,858
|
py
|
Python
|
EDMScripts/OldScripts/SimpleEDMLoop.py
|
gautampk/EDMSuite
|
e7b67336b45e679d7903d527f6d81080c6846166
|
[
"MIT"
] | 6
|
2017-02-02T17:54:23.000Z
|
2021-07-03T12:41:36.000Z
|
EDMScripts/OldScripts/SimpleEDMLoop.py
|
gautampk/EDMSuite
|
e7b67336b45e679d7903d527f6d81080c6846166
|
[
"MIT"
] | null | null | null |
EDMScripts/OldScripts/SimpleEDMLoop.py
|
gautampk/EDMSuite
|
e7b67336b45e679d7903d527f6d81080c6846166
|
[
"MIT"
] | 11
|
2015-03-19T18:23:38.000Z
|
2021-02-18T11:05:51.000Z
|
# Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from DAQ.Environment import *
from EDMConfig import *
| 33.503448
| 109
| 0.735282
|
# Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from DAQ.Environment import *
from EDMConfig import *
import sys
def saveBlockConfig(path, config):
fs = FileStream(path, FileMode.Create)
s = XmlSerializer(BlockConfig)
s.Serialize(fs,config)
fs.Close()
def loadBlockConfig(path):
fs = FileStream(path, FileMode.Open)
s = XmlSerializer(BlockConfig)
bc = s.Deserialize(fs)
fs.Close()
return bc
def writeLatestBlockNotificationFile(cluster, blockIndex):
fs = FileStream(Environs.FileSystem.Paths["settingsPath"] + "\\BlockHead\\latestBlock.txt", FileMode.Create)
sw = StreamWriter(fs)
sw.WriteLine(cluster + "\t" + str(blockIndex))
sw.Close()
fs.Close()
def checkYAGAndFix():
interlockFailed = hc.YAGInterlockFailed
if interlockFailed:
    bh.StopPattern()
    bh.StartPattern()
def prompt(text):
sys.stdout.write(text)
return sys.stdin.readline().strip()
def EDMGoReal(nullRun):
# Setup
f = None
fileSystem = Environs.FileSystem
dataPath = fileSystem.GetDataDirectory(fileSystem.Paths["edmDataPath"])
print("Data directory is : " + dataPath)
print("")
suggestedClusterName = fileSystem.GenerateNextDataFileName()
# User inputs data
cluster = prompt("Cluster name [" + suggestedClusterName +"]: ")
if cluster == "":
cluster = suggestedClusterName
print("Using cluster " + suggestedClusterName)
eState = Boolean.Parse(prompt("E-state: "))
bState = Boolean.Parse(prompt("B-state: "))
print("Measuring parameters ...")
hc.UpdateBCurrentMonitor()
hc.UpdateBCurrentMonitor()
hc.UpdateVMonitor()
# load a default BlockConfig and customise it appropriately
settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\"
bc = loadBlockConfig(settingsPath + "default.xml")
bc.Settings["cluster"] = cluster
bc.Settings["eState"] = eState
bc.Settings["bState"] = bState
bc.Settings["ePlus"] = hc.CPlusMonitorVoltage * hc.CPlusMonitorScale
bc.Settings["eMinus"] = hc.CMinusMonitorVoltage * hc.CMinusMonitorScale
bc.GetModulationByName("B").Centre = (hc.BiasCurrent)/1000
bc.GetModulationByName("B").Step = abs(hc.FlipStepCurrent)/1000
bc.GetModulationByName("DB").Step = abs(hc.CalStepCurrent)/1000
print("V plus: " + str(hc.CPlusMonitorVoltage * hc.CPlusMonitorScale))
print("V minus: " + str(hc.CMinusMonitorVoltage * hc.CMinusMonitorScale))
print("Bias: " + str(hc.BiasCurrent))
print("B step: " + str(abs(hc.FlipStepCurrent)))
print("DB step: " + str(abs(hc.CalStepCurrent)))
print("Setting rf parameters ...")
bc.GetModulationByName("RF1A").Centre = hc.RF1AttCentre
bc.GetModulationByName("RF1A").Step = hc.RF1AttStep
bc.GetModulationByName("RF2A").Centre = hc.RF2AttCentre
bc.GetModulationByName("RF2A").Step = hc.RF2AttStep
bc.GetModulationByName("RF1F").Centre = hc.RF1FMCentre
bc.GetModulationByName("RF1F").Step = hc.RF1FMStep
bc.GetModulationByName("RF2F").Centre = hc.RF2FMCentre
bc.GetModulationByName("RF2F").Step = hc.RF2FMStep
print("Storing E switch parameters ...")
bc.Settings["eRampDownTime"] = hc.ERampDownTime
bc.Settings["eRampDownDelay"] = hc.ERampDownDelay
bc.Settings["eBleedTime"] = hc.EBleedTime
bc.Settings["eSwitchTime"] = hc.ESwitchTime
bc.Settings["eRampUpTime"] = hc.ERampUpTime
bc.Settings["eRampUpDelay"] = hc.ERampUpDelay
# this is for legacy analysis compatibility
bc.Settings["eDischargeTime"] = hc.ERampDownTime + hc.ERampDownDelay
bc.Settings["eChargeTime"] = hc.ERampUpTime + hc.ERampUpDelay
# loop and take data
bh.StartPattern()
blockIndex = 0
if nullRun:
maxBlockIndex = 10000
else:
maxBlockIndex = 60
while blockIndex < maxBlockIndex:
print("Acquiring block " + str(blockIndex) + " ...")
# save the block config and load into blockhead
print("Saving temp config.")
bc.Settings["clusterIndex"] = blockIndex
tempConfigFile ='%(p)stemp%(c)s_%(i)s.xml' % {'p': settingsPath, 'c': cluster, 'i': blockIndex}
saveBlockConfig(tempConfigFile, bc)
System.Threading.Thread.Sleep(500)
print("Loading temp config.")
bh.LoadConfig(tempConfigFile)
# take the block and save it
print("Running ...")
bh.AcquireAndWait()
print("Done.")
blockPath = '%(p)s%(c)s_%(i)s.zip' % {'p': dataPath, 'c': cluster, 'i': blockIndex}
bh.SaveBlock(blockPath)
print("Saved block "+ str(blockIndex) + ".")
# give mma a chance to analyse the block
print("Notifying Mathematica and waiting ...")
writeLatestBlockNotificationFile(cluster, blockIndex)
System.Threading.Thread.Sleep(5000)
print("Done.")
# increment and loop
File.Delete(tempConfigFile)
# if not nullRun:
checkYAGAndFix()
blockIndex = blockIndex + 1
bh.StopPattern()
def EDMGoNull():
EDMGoReal(True)
def EDMGo():
EDMGoReal(False)
def run_script():
EDMGo()
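# Hedged usage note (not part of the original script): inside the EDM
# control environment this file is loaded into, a run is started by
# calling run_script() (or EDMGoNull() for a null run); `hc` and `bh`
# are the hardware and BlockHead controllers that environment provides.
# run_script()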
| 4,345
| 0
| 208
|
667fb47b49bdff6addfc9ed051899adc1ecd6337
| 1,558
|
py
|
Python
|
parent/venv/message_box.py
|
lugidm/FusariumUNet
|
853dc39848b2570a73504e1db57e3ccd26764573
|
[
"Unlicense"
] | null | null | null |
parent/venv/message_box.py
|
lugidm/FusariumUNet
|
853dc39848b2570a73504e1db57e3ccd26764573
|
[
"Unlicense"
] | null | null | null |
parent/venv/message_box.py
|
lugidm/FusariumUNet
|
853dc39848b2570a73504e1db57e3ccd26764573
|
[
"Unlicense"
] | null | null | null |
import tkinter
| 30.54902
| 109
| 0.610398
|
import tkinter
class Mbox(object):
def __init__(self, root, msg):
tki = tkinter
self.top = root
self.top.attributes("-topmost", True)
self.return_value = None
frm = tki.Frame(self.top, borderwidth=4, relief='ridge')
frm.pack(fill='both', expand=True)
label = tki.Label(frm, text=msg)
label.pack(padx=4, pady=4)
fuse_bt = tki.Button(frm, text='fuse both annotations', command=self.fuse, fg='green')
fuse_bt.pack()
new_file_bt = tki.Button(frm, text='create a new file', command=self.new_file, fg='green')
new_file_bt.pack()
overwrite_bt = tki.Button(frm, text='overwrite old annotations', command=self.overwrite, fg='orange')
overwrite_bt.pack()
discard_bt = tki.Button(frm, text='discard all new changes', command=self.discard, fg='red')
discard_bt.pack()
b_cancel = tki.Button(frm, text='Cancel', command=self.cancel)
b_cancel.pack(padx=4, pady=4)
def fuse(self):
self.top.destroy()
self.return_value = 'fuse'
def new_file(self):
self.top.destroy()
self.return_value = 'new_file'
def overwrite(self):
self.top.destroy()
self.return_value = 'overwrite'
def discard(self):
self.top.destroy()
self.return_value = 'discard'
def cancel(self):
self.top.destroy()
self.return_value = 'cancel'
def show(self):
self.top.deiconify()
self.top.wait_window()
return self.return_value
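# Hedged usage sketch (requires a Tk display; names are illustrative):
# import tkinter
# root = tkinter.Tk()
# root.withdraw()
# choice = Mbox(tkinter.Toplevel(root), 'Annotations already exist.').show()
# `choice` is one of 'fuse', 'new_file', 'overwrite', 'discard', 'cancel'.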
| 1,333
| -2
| 211
|
6a698ccac6fd0c7f76e5a8a4bb85aeca85cff63e
| 22,145
|
py
|
Python
|
mycroft/stt/__init__.py
|
NeonDaniel/HolmesV
|
ecf839156758d98d020f6272e4727aad60278004
|
[
"Apache-2.0"
] | 9
|
2021-05-06T18:04:18.000Z
|
2022-02-23T21:59:49.000Z
|
mycroft/stt/__init__.py
|
NeonDaniel/HolmesV
|
ecf839156758d98d020f6272e4727aad60278004
|
[
"Apache-2.0"
] | 5
|
2021-04-13T22:54:47.000Z
|
2021-04-18T14:24:25.000Z
|
mycroft/stt/__init__.py
|
HelloChatterbox/mycroft-lib
|
c417807d32eff629354ffe740da14caed2a1bee5
|
[
"Apache-2.0"
] | 4
|
2021-09-11T04:02:10.000Z
|
2021-12-18T23:57:19.000Z
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import json
from abc import ABCMeta, abstractmethod
from requests import post, put, exceptions
from speech_recognition import Recognizer
from queue import Queue
from threading import Thread
from mycroft.api import STTApi, HTTPError
from mycroft.configuration import Configuration
from mycroft.util.log import LOG
from mycroft.util.plugins import load_plugin
class STT(metaclass=ABCMeta):
"""STT Base class, all STT backends derive from this one. """
@staticmethod
def init_language(config_core):
"""Helper method to get language code from Mycroft config."""
lang = config_core.get("lang", "en-US")
langs = lang.split("-")
if len(langs) == 2:
return langs[0].lower() + "-" + langs[1].upper()
return lang
@abstractmethod
def execute(self, audio, language=None):
"""Implementation of STT functionallity.
This method needs to be implemented by the derived class to implement
the specific STT engine connection.
The method gets passed audio and optionally a language code and is
expected to return a text string.
Args:
audio (AudioData): audio recorded by mycroft.
language (str): optional language code
Returns:
str: parsed text
"""
class IBMSTT(TokenSTT):
"""
IBM Speech to Text
Enables IBM Speech to Text access using API key. To use IBM as a
service provider, it must be configured locally in your config file. An
IBM Cloud account with Speech to Text enabled is required (limited free
tier may be available). STT config should match the following format:
"stt": {
"module": "ibm",
"ibm": {
"credential": {
"token": "YOUR_API_KEY"
},
"url": "URL_FROM_SERVICE"
}
}
"""
class YandexSTT(STT):
"""
Yandex SpeechKit STT
To use, create a service account with role 'editor' in your cloud folder,
create API key for account and add it to local mycroft.conf file.
The STT config will look like this:
"stt": {
"module": "yandex",
"yandex": {
"lang": "en-US",
"credential": {
"api_key": "YOUR_API_KEY"
}
}
}
"""
def requires_pairing(func):
    """Decorator kicking off the pairing sequence if client is not allowed access.
    Checks the http status of the response if an HTTP error is received. If
    a 401 status is detected, returns "pair my device" to trigger the pairing
    skill.
    """
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as e:
            if e.response.status_code == 401:
                LOG.warning('Access Denied at mycroft.ai')
                # phrase to start the pairing process
                return 'pair my device'
            else:
                raise
    return wrapper
class MycroftSTT(STT):
"""Default mycroft STT."""
@requires_pairing
class MycroftDeepSpeechSTT(STT):
"""Mycroft Hosted DeepSpeech"""
@requires_pairing
class DeepSpeechServerSTT(STT):
"""
STT interface for the deepspeech-server:
https://github.com/MainRo/deepspeech-server
use this if you want to host DeepSpeech yourself
"""
class StreamThread(Thread, metaclass=ABCMeta):
"""ABC class to be used with StreamingSTT class implementations.
This class reads audio chunks from a queue and sends it to a parsing
STT engine.
Args:
queue (Queue): Input Queue
language (str): language code for the current language.
"""
def _get_data(self):
"""Generator reading audio data from queue."""
while True:
d = self.queue.get()
if d is None:
break
yield d
self.queue.task_done()
def run(self):
"""Thread entry point."""
return self.handle_audio_stream(self._get_data(), self.language)
@abstractmethod
def handle_audio_stream(self, audio, language):
"""Handling of audio stream.
Needs to be implemented by derived class to process audio data and
optionally update `self.text` with the current hypothesis.
Args:
audio (bytes): raw audio data.
language (str): language code for the current session.
"""
class StreamingSTT(STT, metaclass=ABCMeta):
"""ABC class for threaded streaming STT implemenations."""
def stream_start(self, language=None):
"""Indicate start of new audio stream.
This creates a new thread for handling the incoming audio stream as
it's collected by Mycroft.
Args:
language (str): optional language code for the new stream.
"""
self.stream_stop()
language = language or self.lang
self.queue = Queue()
self.stream = self.create_streaming_thread()
self.stream.start()
def stream_data(self, data):
"""Receiver of audio data.
Args:
data (bytes): raw audio data.
"""
self.queue.put(data)
def stream_stop(self):
"""Indicate that the audio stream has ended.
This will tear down the processing thread and collect the result
Returns:
str: parsed text
"""
if self.stream is not None:
self.queue.put(None)
self.stream.join()
text = self.stream.text
self.stream = None
self.queue = None
return text
return None
def execute(self, audio, language=None):
"""End the parsing thread and collect data."""
return self.stream_stop()
@abstractmethod
def create_streaming_thread(self):
"""Create thread for parsing audio chunks.
This method should be implemented by the derived class to return an
instance derived from StreamThread to handle the audio stream and
send it to the STT engine.
Returns:
StreamThread: Thread to handle audio data.
"""
class DeepSpeechStreamServerSTT(StreamingSTT):
"""
Streaming STT interface for the deepspeech-server:
https://github.com/JPEWdev/deep-dregs
use this if you want to host DeepSpeech yourself
STT config will look like this:
"stt": {
"module": "deepspeech_stream_server",
"deepspeech_stream_server": {
"stream_uri": "http://localhost:8080/stt?format=16K_PCM16"
...
"""
class GoogleCloudStreamingSTT(StreamingSTT):
"""
Streaming STT interface for Google Cloud Speech-To-Text
To use, pip install google-cloud-speech and add the
Google API key to local mycroft.conf file. The STT config
will look like this:
"stt": {
"module": "google_cloud_streaming",
"google_cloud_streaming": {
"credential": {
"json": {
# Paste Google API JSON here
...
"""
def load_stt_plugin(module_name):
"""Wrapper function for loading stt plugin.
Args:
module_name (str): Mycroft stt module name from config
Returns:
class: STT plugin class
"""
return load_plugin('mycroft.plugin.stt', module_name)
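# Hedged sketch (not part of the original module): a minimal concrete
# backend implementing the abstract interface above. "EchoSTT" is a
# hypothetical name used only for illustration; a real backend would
# send `audio` to a recognition engine and return its transcription.
class EchoSTT(STT):
    def execute(self, audio, language=None):
        # Assumption: a fixed string stands in for real recognition.
        return "hello world"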
| 33.757622
| 79
| 0.603116
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import json
from abc import ABCMeta, abstractmethod
from requests import post, put, exceptions
from speech_recognition import Recognizer
from queue import Queue
from threading import Thread
from mycroft.api import STTApi, HTTPError
from mycroft.configuration import Configuration
from mycroft.util.log import LOG
from mycroft.util.plugins import load_plugin
class STT(metaclass=ABCMeta):
"""STT Base class, all STT backends derive from this one. """
def __init__(self):
config_core = Configuration.get()
self.lang = str(self.init_language(config_core))
config_stt = config_core.get("stt", {})
self.config = config_stt.get(config_stt.get("module"), {})
self.credential = self.config.get("credential", {})
self.recognizer = Recognizer()
self.can_stream = False
@staticmethod
def init_language(config_core):
"""Helper method to get language code from Mycroft config."""
lang = config_core.get("lang", "en-US")
langs = lang.split("-")
if len(langs) == 2:
return langs[0].lower() + "-" + langs[1].upper()
return lang
@abstractmethod
def execute(self, audio, language=None):
"""Implementation of STT functionallity.
This method needs to be implemented by the derived class to implement
the specific STT engine connection.
The method gets passed audio and optionally a language code and is
expected to return a text string.
Args:
audio (AudioData): audio recorded by mycroft.
language (str): optional language code
Returns:
str: parsed text
"""
class TokenSTT(STT, metaclass=ABCMeta):
def __init__(self):
super(TokenSTT, self).__init__()
self.token = self.credential.get("token")
class GoogleJsonSTT(STT, metaclass=ABCMeta):
def __init__(self):
super(GoogleJsonSTT, self).__init__()
self.json_credentials = json.dumps(self.credential.get("json"))
class BasicSTT(STT, metaclass=ABCMeta):
def __init__(self):
super(BasicSTT, self).__init__()
self.username = self.credential.get("username")
self.password = self.credential.get("password")
class KeySTT(STT, metaclass=ABCMeta):
def __init__(self):
super(KeySTT, self).__init__()
self.id = self.credential.get("client_id")
self.key = self.credential.get("client_key")
class GoogleSTT(TokenSTT):
def __init__(self):
super(GoogleSTT, self).__init__()
def execute(self, audio, language=None):
self.lang = language or self.lang
return self.recognizer.recognize_google(audio, self.token, self.lang)
class GoogleCloudSTT(GoogleJsonSTT):
def __init__(self):
super(GoogleCloudSTT, self).__init__()
# override language with module specific language selection
self.lang = self.config.get('lang') or self.lang
def execute(self, audio, language=None):
self.lang = language or self.lang
return self.recognizer.recognize_google_cloud(audio,
self.json_credentials,
self.lang)
class WITSTT(TokenSTT):
def __init__(self):
super(WITSTT, self).__init__()
def execute(self, audio, language=None):
LOG.warning("WITSTT language should be configured at wit.ai settings.")
return self.recognizer.recognize_wit(audio, self.token)
class IBMSTT(TokenSTT):
"""
IBM Speech to Text
Enables IBM Speech to Text access using API key. To use IBM as a
service provider, it must be configured locally in your config file. An
IBM Cloud account with Speech to Text enabled is required (limited free
tier may be available). STT config should match the following format:
"stt": {
"module": "ibm",
"ibm": {
"credential": {
"token": "YOUR_API_KEY"
},
"url": "URL_FROM_SERVICE"
}
}
"""
def __init__(self):
super(IBMSTT, self).__init__()
def execute(self, audio, language=None):
if not self.token:
raise ValueError('API key (token) for IBM Cloud is not defined.')
url_base = self.config.get('url', '')
if not url_base:
raise ValueError('URL for IBM Cloud is not defined.')
url = url_base + '/v1/recognize'
self.lang = language or self.lang
supported_languages = [
'ar-AR', 'pt-BR', 'zh-CN', 'nl-NL', 'en-GB', 'en-US', 'fr-FR',
'de-DE', 'it-IT', 'ja-JP', 'ko-KR', 'es-AR', 'es-ES', 'es-CL',
'es-CO', 'es-MX', 'es-PE'
]
if self.lang not in supported_languages:
raise ValueError(
'Unsupported language "{}" for IBM STT.'.format(self.lang))
audio_model = 'BroadbandModel'
if audio.sample_rate < 16000 and not self.lang == 'ar-AR':
audio_model = 'NarrowbandModel'
params = {
'model': '{}_{}'.format(self.lang, audio_model),
'profanity_filter': 'false'
}
headers = {
'Content-Type': 'audio/x-flac',
'X-Watson-Learning-Opt-Out': 'true'
}
response = post(url, auth=('apikey', self.token), headers=headers,
data=audio.get_flac_data(), params=params)
if response.status_code == 200:
result = json.loads(response.text)
if result.get('error_code') is None:
if ('results' not in result or len(result['results']) < 1 or
'alternatives' not in result['results'][0]):
raise Exception(
'Transcription failed. Invalid or empty results.')
transcription = []
for utterance in result['results']:
if 'alternatives' not in utterance:
raise Exception(
'Transcription failed. Invalid or empty results.')
for hypothesis in utterance['alternatives']:
if 'transcript' in hypothesis:
transcription.append(hypothesis['transcript'])
return '\n'.join(transcription)
elif response.status_code == 401: # Unauthorized
raise Exception('Invalid API key for IBM Cloud.')
else:
raise Exception(
'Request to IBM Cloud failed. Code: {} Body: {}'.format(
response.status_code, response.text))
class YandexSTT(STT):
"""
Yandex SpeechKit STT
To use, create a service account with role 'editor' in your cloud folder,
create API key for account and add it to local mycroft.conf file.
The STT config will look like this:
"stt": {
"module": "yandex",
"yandex": {
"lang": "en-US",
"credential": {
"api_key": "YOUR_API_KEY"
}
}
}
"""
def __init__(self):
super(YandexSTT, self).__init__()
self.lang = self.config.get('lang') or self.lang
self.api_key = self.credential.get("api_key")
if self.api_key is None:
raise ValueError("API key for Yandex STT is not defined")
def execute(self, audio, language=None):
self.lang = language or self.lang
if self.lang not in ["en-US", "ru-RU", "tr-TR"]:
raise ValueError(
"Unsupported language '{}' for Yandex STT".format(self.lang))
# Select sample rate based on source sample rate
# and supported sample rate list
supported_sample_rates = [8000, 16000, 48000]
sample_rate = audio.sample_rate
if sample_rate not in supported_sample_rates:
for supported_sample_rate in supported_sample_rates:
if audio.sample_rate < supported_sample_rate:
sample_rate = supported_sample_rate
break
if sample_rate not in supported_sample_rates:
sample_rate = supported_sample_rates[-1]
raw_data = audio.get_raw_data(convert_rate=sample_rate,
convert_width=2)
# Based on https://cloud.yandex.com/docs/speechkit/stt#request
url = "https://stt.api.cloud.yandex.net/speech/v1/stt:recognize"
headers = {"Authorization": "Api-Key {}".format(self.api_key)}
params = "&".join([
"lang={}".format(self.lang),
"format=lpcm",
"sampleRateHertz={}".format(sample_rate)
])
response = post(url + "?" + params, headers=headers, data=raw_data)
if response.status_code == 200:
result = json.loads(response.text)
if result.get("error_code") is None:
return result.get("result")
elif response.status_code == 401: # Unauthorized
raise Exception("Invalid API key for Yandex STT")
else:
raise Exception(
"Request to Yandex STT failed: code: {}, body: {}".format(
response.status_code, response.text))
def requires_pairing(func):
"""Decorator kicking of pairing sequence if client is not allowed access.
Checks the http status of the response if an HTTP error is recieved. If
a 401 status is detected returns "pair my device" to trigger the pairing
skill.
"""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as e:
if e.response.status_code == 401:
LOG.warning('Access Denied at mycroft.ai')
# phrase to start the pairing process
return 'pair my device'
else:
raise
return wrapper
class MycroftSTT(STT):
"""Default mycroft STT."""
def __init__(self):
super(MycroftSTT, self).__init__()
self.api = STTApi("stt")
@requires_pairing
def execute(self, audio, language=None):
self.lang = language or self.lang
try:
return self.api.stt(audio.get_flac_data(convert_rate=16000),
self.lang, 1)[0]
except Exception:
return self.api.stt(audio.get_flac_data(), self.lang, 1)[0]
class MycroftDeepSpeechSTT(STT):
"""Mycroft Hosted DeepSpeech"""
def __init__(self):
super(MycroftDeepSpeechSTT, self).__init__()
self.api = STTApi("deepspeech")
@requires_pairing
def execute(self, audio, language=None):
language = language or self.lang
if not language.startswith("en"):
raise ValueError("Deepspeech is currently english only")
return self.api.stt(audio.get_wav_data(), self.lang, 1)
class DeepSpeechServerSTT(STT):
"""
STT interface for the deepspeech-server:
https://github.com/MainRo/deepspeech-server
use this if you want to host DeepSpeech yourself
"""
def __init__(self):
super(DeepSpeechServerSTT, self).__init__()
def execute(self, audio, language=None):
language = language or self.lang
response = post(self.config.get("uri"), data=audio.get_wav_data())
return response.text
class StreamThread(Thread, metaclass=ABCMeta):
"""ABC class to be used with StreamingSTT class implementations.
This class reads audio chunks from a queue and sends it to a parsing
STT engine.
Args:
queue (Queue): Input Queue
language (str): language code for the current language.
"""
def __init__(self, queue, language):
super().__init__()
self.language = language
self.queue = queue
self.text = None
def _get_data(self):
"""Generator reading audio data from queue."""
while True:
d = self.queue.get()
if d is None:
break
yield d
self.queue.task_done()
def run(self):
"""Thread entry point."""
return self.handle_audio_stream(self._get_data(), self.language)
@abstractmethod
def handle_audio_stream(self, audio, language):
"""Handling of audio stream.
Needs to be implemented by derived class to process audio data and
optionally update `self.text` with the current hypothesis.
Args:
audio (bytes): raw audio data.
language (str): language code for the current session.
"""
class StreamingSTT(STT, metaclass=ABCMeta):
"""ABC class for threaded streaming STT implemenations."""
def __init__(self):
super().__init__()
self.stream = None
self.can_stream = True
def stream_start(self, language=None):
"""Indicate start of new audio stream.
This creates a new thread for handling the incoming audio stream as
it's collected by Mycroft.
Args:
language (str): optional language code for the new stream.
"""
self.stream_stop()
language = language or self.lang
self.queue = Queue()
self.stream = self.create_streaming_thread()
self.stream.start()
def stream_data(self, data):
"""Receiver of audio data.
Args:
data (bytes): raw audio data.
"""
self.queue.put(data)
def stream_stop(self):
"""Indicate that the audio stream has ended.
This will tear down the processing thread and collect the result
Returns:
str: parsed text
"""
if self.stream is not None:
self.queue.put(None)
self.stream.join()
text = self.stream.text
self.stream = None
self.queue = None
return text
return None
def execute(self, audio, language=None):
"""End the parsing thread and collect data."""
return self.stream_stop()
@abstractmethod
def create_streaming_thread(self):
"""Create thread for parsing audio chunks.
This method should be implemented by the derived class to return an
instance derived from StreamThread to handle the audio stream and
send it to the STT engine.
Returns:
StreamThread: Thread to handle audio data.
"""
class DeepSpeechStreamThread(StreamThread):
def __init__(self, queue, language, url):
if not language.startswith("en"):
raise ValueError("Deepspeech is currently english only")
super().__init__(queue, language)
self.url = url
def handle_audio_stream(self, audio, language):
self.response = post(self.url, data=audio, stream=True)
self.text = self.response.text if self.response else None
return self.text
class DeepSpeechStreamServerSTT(StreamingSTT):
"""
Streaming STT interface for the deepspeech-server:
https://github.com/JPEWdev/deep-dregs
use this if you want to host DeepSpeech yourself
STT config will look like this:
"stt": {
"module": "deepspeech_stream_server",
"deepspeech_stream_server": {
"stream_uri": "http://localhost:8080/stt?format=16K_PCM16"
...
"""
def create_streaming_thread(self):
self.queue = Queue()
return DeepSpeechStreamThread(
self.queue,
self.lang,
self.config.get('stream_uri')
)
class GoogleStreamThread(StreamThread):
def __init__(self, queue, lang, client, streaming_config):
super().__init__(queue, lang)
self.client = client
self.streaming_config = streaming_config
def handle_audio_stream(self, audio, language):
req = (types.StreamingRecognizeRequest(audio_content=x) for x in audio)
responses = self.client.streaming_recognize(self.streaming_config, req)
for res in responses:
if res.results and res.results[0].is_final:
self.text = res.results[0].alternatives[0].transcript
return self.text
class GoogleCloudStreamingSTT(StreamingSTT):
"""
Streaming STT interface for Google Cloud Speech-To-Text
To use, pip install google-cloud-speech and add the
Google API key to local mycroft.conf file. The STT config
will look like this:
"stt": {
"module": "google_cloud_streaming",
"google_cloud_streaming": {
"credential": {
"json": {
# Paste Google API JSON here
...
"""
def __init__(self):
global SpeechClient, types, enums, Credentials
from google.cloud.speech import SpeechClient, types, enums
from google.oauth2.service_account import Credentials
super(GoogleCloudStreamingSTT, self).__init__()
# override language with module specific language selection
self.language = self.config.get('lang') or self.lang
credentials = Credentials.from_service_account_info(
self.credential.get('json')
)
self.client = SpeechClient(credentials=credentials)
recognition_config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=16000,
language_code=self.language,
model='command_and_search',
max_alternatives=1,
)
self.streaming_config = types.StreamingRecognitionConfig(
config=recognition_config,
interim_results=True,
single_utterance=True,
)
def create_streaming_thread(self):
self.queue = Queue()
return GoogleStreamThread(
self.queue,
self.language,
self.client,
self.streaming_config
)
class KaldiSTT(STT):
def __init__(self):
super(KaldiSTT, self).__init__()
def execute(self, audio, language=None):
language = language or self.lang
response = post(self.config.get("uri"), data=audio.get_wav_data())
return self.get_response(response)
def get_response(self, response):
try:
hypotheses = response.json()["hypotheses"]
return re.sub(r'\s*\[noise\]\s*', '', hypotheses[0]["utterance"])
except Exception:
return None
class BingSTT(TokenSTT):
def __init__(self):
super(BingSTT, self).__init__()
def execute(self, audio, language=None):
self.lang = language or self.lang
return self.recognizer.recognize_bing(audio, self.token,
self.lang)
class HoundifySTT(KeySTT):
def __init__(self):
super(HoundifySTT, self).__init__()
def execute(self, audio, language=None):
self.lang = language or self.lang
return self.recognizer.recognize_houndify(audio, self.id, self.key)
class GoVivaceSTT(TokenSTT):
def __init__(self):
super(GoVivaceSTT, self).__init__()
self.default_uri = "https://services.govivace.com:49149/telephony"
if not self.lang.startswith("en") and not self.lang.startswith("es"):
LOG.error("GoVivace STT only supports english and spanish")
raise NotImplementedError
def execute(self, audio, language=None):
url = self.config.get("uri", self.default_uri) + "?key=" + \
self.token + "&action=find&format=8K_PCM16&validation_string="
response = put(url,
data=audio.get_wav_data(convert_rate=8000))
return self.get_response(response)
def get_response(self, response):
return response.json()["result"]["hypotheses"][0]["transcript"]
def load_stt_plugin(module_name):
"""Wrapper function for loading stt plugin.
Args:
module_name (str): Mycroft stt module name from config
Returns:
class: STT plugin class
"""
return load_plugin('mycroft.plugin.stt', module_name)
class STTFactory:
CLASSES = {
"mycroft": MycroftSTT,
"google": GoogleSTT,
"google_cloud": GoogleCloudSTT,
"google_cloud_streaming": GoogleCloudStreamingSTT,
"wit": WITSTT,
"ibm": IBMSTT,
"kaldi": KaldiSTT,
"bing": BingSTT,
"govivace": GoVivaceSTT,
"houndify": HoundifySTT,
"deepspeech_server": DeepSpeechServerSTT,
"deepspeech_stream_server": DeepSpeechStreamServerSTT,
"mycroft_deepspeech": MycroftDeepSpeechSTT,
"yandex": YandexSTT
}
@staticmethod
def create():
try:
config = Configuration.get().get("stt", {})
module = config.get("module", "mycroft")
if module in STTFactory.CLASSES:
clazz = STTFactory.CLASSES[module]
else:
clazz = load_stt_plugin(module)
LOG.info('Loaded the STT plugin {}'.format(module))
return clazz()
except Exception:
# The STT backend failed to start. Report it and fall back to
# default.
LOG.exception('The selected STT backend could not be loaded, '
'falling back to default...')
if module != 'mycroft':
return MycroftSTT()
else:
raise
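# Hedged usage sketch (not part of the original module): the factory
# reads the "stt" section of the Mycroft configuration, instantiates
# the matching backend class, and falls back to MycroftSTT on failure.
# `audio` below is assumed to be a speech_recognition.AudioData object.
#
# stt = STTFactory.create()
# text = stt.execute(audio)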
| 12,249
| 730
| 1,407
|
6af4692a61fa738f40f505d58ac7cde5a59bfad9
| 1,698
|
py
|
Python
|
sdk/ingestion/microsoft/bing/commerce/ingestion/models/transformation_tryout_response_py3.py
|
microsoft/bing-commerce-sdk-for-python
|
cf555ea0bb14792708617d2435dd5aab1c4cbe90
|
[
"MIT"
] | 1
|
2020-05-04T09:58:07.000Z
|
2020-05-04T09:58:07.000Z
|
sdk/ingestion/microsoft/bing/commerce/ingestion/models/transformation_tryout_response_py3.py
|
microsoft/bing-commerce-sdk-for-python
|
cf555ea0bb14792708617d2435dd5aab1c4cbe90
|
[
"MIT"
] | 2
|
2020-04-21T02:32:32.000Z
|
2020-04-21T19:37:54.000Z
|
sdk/ingestion/microsoft/bing/commerce/ingestion/models/transformation_tryout_response_py3.py
|
microsoft/bing-commerce-sdk-for-python
|
cf555ea0bb14792708617d2435dd5aab1c4cbe90
|
[
"MIT"
] | 4
|
2020-07-31T10:39:22.000Z
|
2021-11-10T08:14:07.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TransformationTryoutResponse(Model):
"""A response representing the status of a transformation config tryout
request.
:param status: The status of the transformation config tryout.
:type status: str
:param error_message: Any error messages that happened while transforming,
if any.
:type error_message: str
:param results: A list of records that the transformation config produced.
:type results: list[str]
:param console_output: The console output of the transformation config, if
any. Can be useful for debugging.
:type console_output: list[str]
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'results': {'key': 'results', 'type': '[str]'},
'console_output': {'key': 'consoleOutput', 'type': '[str]'},
}
| 39.488372
| 123
| 0.604829
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TransformationTryoutResponse(Model):
"""A response representing the status of a transformation config tryout
request.
:param status: The status of the transformation config tryout.
:type status: str
:param error_message: Any error messages that happened while transforming,
if any.
:type error_message: str
:param results: A list of records that the transformation config produced.
:type results: list[str]
:param console_output: The console output of the transformation config, if
any. Can be useful for debugging.
:type console_output: list[str]
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'results': {'key': 'results', 'type': '[str]'},
'console_output': {'key': 'consoleOutput', 'type': '[str]'},
}
def __init__(self, *, status: str=None, error_message: str=None, results=None, console_output=None, **kwargs) -> None:
super(TransformationTryoutResponse, self).__init__(**kwargs)
self.status = status
self.error_message = error_message
self.results = results
self.console_output = console_output
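# Hedged example (illustrative values only): the generated model is a
# plain data holder, so it can be constructed directly with keyword
# arguments matching the documented fields.
example = TransformationTryoutResponse(
    status='Succeeded',
    results=['{"id": 1}'],
    console_output=['transform ok'])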
| 319
| 0
| 29
|
c94779c915148b6ad1bdd49ed67c0ea57c1425eb
| 789
|
py
|
Python
|
rucken_todo/serializers/TodoProjectSerializer.py
|
site15/rucken-todo-django
|
bf30d4ad43be22bd8383447e07b151d6dc99da72
|
[
"MIT"
] | 3
|
2018-06-04T07:36:59.000Z
|
2019-10-07T05:33:56.000Z
|
rucken_todo/serializers/TodoProjectSerializer.py
|
site15/rucken-todo-django-example
|
bf30d4ad43be22bd8383447e07b151d6dc99da72
|
[
"MIT"
] | 428
|
2017-11-24T20:19:39.000Z
|
2022-03-26T04:13:25.000Z
|
rucken_todo/serializers/TodoProjectSerializer.py
|
rucken/todo-django
|
bf30d4ad43be22bd8383447e07b151d6dc99da72
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from dynamic_rest.serializers import (
DynamicModelSerializer,
DynamicRelationField
)
from ..models import TodoProject
from .ShortUserSerializer import ShortUserSerializer
from .ShortTodoStatusSerializer import ShortTodoStatusSerializer
| 35.863636
| 109
| 0.735108
|
from __future__ import unicode_literals
from dynamic_rest.serializers import (
DynamicModelSerializer,
DynamicRelationField
)
from ..models import TodoProject
from .ShortUserSerializer import ShortUserSerializer
from .ShortTodoStatusSerializer import ShortTodoStatusSerializer
class TodoProjectSerializer(DynamicModelSerializer):
users = DynamicRelationField(ShortUserSerializer, many=True, embed=True)
statuses = DynamicRelationField(ShortTodoStatusSerializer, source='todostatus_set', many=True,
embed=True)
class Meta:
model = TodoProject
fields = ('id', 'title', 'description', 'is_public', 'users', 'statuses', 'created_at', 'updated_at')
read_only_fields = ('created_at', 'updated_at', 'statuses')
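# Hedged usage sketch (assumes a saved TodoProject instance named
# `project`): like any DRF-style serializer, rendering yields primitives.
# serializer = TodoProjectSerializer(instance=project)
# serializer.data  # dict with id, title, users, statuses, ...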
| 0
| 478
| 23
|
fb3c596fe6612b15cdd72e0d834f7c922c8070d3
| 2,102
|
py
|
Python
|
src/detection/visualisation.py
|
wdoppenberg/crater-detection
|
471d1bc508dee873cc5d05329147dfc5314bc15d
|
[
"MIT"
] | 8
|
2021-06-09T15:07:16.000Z
|
2021-12-22T09:39:29.000Z
|
src/detection/visualisation.py
|
wdoppenberg/crater-detection
|
471d1bc508dee873cc5d05329147dfc5314bc15d
|
[
"MIT"
] | 3
|
2021-04-23T12:29:40.000Z
|
2021-06-10T11:01:05.000Z
|
src/detection/visualisation.py
|
wdoppenberg/crater-detection
|
471d1bc508dee873cc5d05329147dfc5314bc15d
|
[
"MIT"
] | null | null | null |
from typing import Union
import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt, patches
| 37.535714
| 111
| 0.572312
|
from typing import Union
import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt, patches
def draw_patches(
img: Union[np.ndarray, torch.Tensor],
bboxes: Union[np.ndarray, torch.Tensor],
labels: Union[np.ndarray, torch.Tensor],
scores: Union[np.ndarray, torch.Tensor],
masks: Union[np.ndarray, torch.Tensor] = None,
min_score: float = 0.,
ax=None,
return_fig: bool = False,
figsize=(10, 10)
):
img, bboxes, labels, scores, masks = map(lambda arr: arr.numpy() if isinstance(arr, torch.Tensor) else arr,
(img, bboxes, labels, scores, masks))
return_fig_check = False
if ax is None:
return_fig_check = True
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.imshow(img[0], cmap='gray')
for (xmin, ymin, xmax, ymax), s in zip(bboxes, scores):
if s < min_score: continue
cx, cy, w, h = (xmin + xmax) / 2, (ymin + ymax) / 2, xmax - xmin, ymax - ymin
patch = patches.Rectangle((cx - 0.5 * w, cy - 0.5 * h),
w, h, fill=False, color="r")
ax.add_patch(patch)
bbox_props = dict(boxstyle="round", fc="cyan", ec="0.5", alpha=0.5)
if w < 15:
ax.text(cx - 1.2 * w, cy, f"{s:.0%}", ha="center", va="center", size=8, bbox=bbox_props)
else:
ax.text(cx, cy, f"{s:.0%}", ha="center", va="center", size=8, bbox=bbox_props)
if return_fig and return_fig_check:
return fig
def draw_detections(df, shape=(256, 256)):
img_ellipses = np.zeros(shape)
for i, r in df.iterrows():
center_coordinates = (round(r['x_pix']), round(r['y_pix']))
axes_length = (round(r['a_pix']), round(r['b_pix']))
angle = round(r['angle_pix'])
img_ellipses = cv2.ellipse(img_ellipses, center_coordinates, axes_length,
angle, 0, 360, (255, 255, 255), 1)
img_ellipses = cv2.circle(img_ellipses, center_coordinates, 0, (255, 255, 255), 1)
return img_ellipses
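# Hedged usage sketch: draw_detections expects rows exposing x_pix,
# y_pix, a_pix, b_pix and angle_pix; a pandas DataFrame is assumed
# here purely for illustration.
# import pandas as pd
# df = pd.DataFrame([{'x_pix': 128, 'y_pix': 128,
#                     'a_pix': 20, 'b_pix': 12, 'angle_pix': 30}])
# mask = draw_detections(df, shape=(256, 256))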
| 1,940
| 0
| 46
|
e1e37ca4bd3f37516c74ca148f8f13300997a396
| 1,502
|
py
|
Python
|
quorapy/scraper.py
|
djunehor/quorapy
|
0496a362d7ecd0b2422288a26290a8503d562128
|
[
"MIT"
] | null | null | null |
quorapy/scraper.py
|
djunehor/quorapy
|
0496a362d7ecd0b2422288a26290a8503d562128
|
[
"MIT"
] | null | null | null |
quorapy/scraper.py
|
djunehor/quorapy
|
0496a362d7ecd0b2422288a26290a8503d562128
|
[
"MIT"
] | null | null | null |
"""Module for scraping"""
from bs4 import BeautifulSoup
import dateparser
import datetime
class Scraper:
"""Scraper class"""
def get_urls(self):
"""Scrapes posts on a page"""
all_urls = []
# infinite scroll
urls = self.soup.find_all("a", class_="SQnoC3ObvgnGjWt90zD9Z _2INHSNB8V5eaWp4P0rY_mE")
for url in urls:
all_urls.append(url['href'])
return all_urls
| 28.339623
| 94
| 0.561252
|
"""Module for scraping"""
from bs4 import BeautifulSoup
import dateparser
import datetime
class Scraper:
"""Scraper class"""
def __init__(self, html):
self.html = html
self.soup = BeautifulSoup(html, "lxml")
print(f"[Scraper] Retrieved page")
def get_urls(self):
"""Scrapes posts on a page"""
all_urls = []
# infinite scroll
urls = self.soup.find_all("a", class_="SQnoC3ObvgnGjWt90zD9Z _2INHSNB8V5eaWp4P0rY_mE")
for url in urls:
all_urls.append(url['href'])
return all_urls
def get_details(self):
data = {}
thread = self.soup.find_all('div', 'pagedlist_item')
user_div = thread.pop()
a = user_div.find('a', class_="user")
data['text'] = self.soup.find('span', class_='rendered_qtext').get_text()
data['user'] = {}
try:
data['user']['url'] = "https://quora.com"+a['href']
except Exception:
pass
data['user']['name'] = a.get_text()
try:
text = self.soup.find('p', class_="log_action_bar").get_text()
split = text.split(' · ')
date_time = split.pop().rstrip()
data['datetime'] = str(dateparser.parse(date_time))
except Exception:
data['datetime'] = str(datetime.datetime.now())
return data
def get_followers(self):
links = self.soup.find_all("span", class_='list_count')
span = links.pop()
return span.text
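# Hedged usage sketch (`html` is any fetched page source string):
# scraper = Scraper(html)
# urls = scraper.get_urls()
# details = scraper.get_details()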
| 994
| 0
| 81
|
0d64ae13b623685600630d0fe9894fd2c441936e
| 890
|
py
|
Python
|
test/test_key.py
|
emartech/python-easy-crypto
|
ef09b42e43fb6649498bfb7b5ffbbf490a94d85d
|
[
"MIT"
] | 3
|
2019-11-03T18:26:35.000Z
|
2021-03-07T02:37:52.000Z
|
test/test_key.py
|
emartech/python-easy-crypto
|
ef09b42e43fb6649498bfb7b5ffbbf490a94d85d
|
[
"MIT"
] | 4
|
2019-06-05T01:48:19.000Z
|
2019-07-19T11:53:51.000Z
|
test/test_key.py
|
emartech/python-easy-crypto
|
ef09b42e43fb6649498bfb7b5ffbbf490a94d85d
|
[
"MIT"
] | 2
|
2019-07-11T08:59:03.000Z
|
2022-02-17T19:41:21.000Z
|
import unittest
import os
from easycrypto.key import Key
| 29.666667
| 59
| 0.652809
|
import unittest
import os
from easycrypto.key import Key
class KeyTest(unittest.TestCase):
def test_key_length_is_as_specified(self):
key, _ = Key.generate('pwd', 12)
self.assertEqual(len(key), 32)
def test_generate_uses_different_salt_every_time(self):
self.assertNotEqual(
Key.generate('weakpassword', 12),
Key.generate('weakpassword', 12)
)
def test_generate_with_salt_is_deterministic(self):
salt = os.urandom(12)
self.assertEqual(
Key.generate_with_salt('weakpassword', salt),
Key.generate_with_salt('weakpassword', salt)
)
def test_generate_with_salt_uses_password(self):
salt = os.urandom(12)
self.assertNotEqual(
Key.generate_with_salt('weakpassword', salt),
Key.generate_with_salt('anotherpassword', salt)
)
| 690
| 12
| 130
|
0320bc714098189f04667b3582a960eb0486b15d
| 5,128
|
py
|
Python
|
readinglistreader.py
|
treese/ReadingListReader
|
d9e56ec2225400994706ad9c2e9d4815fe4f509d
|
[
"MIT"
] | 62
|
2015-01-08T03:22:03.000Z
|
2021-12-22T00:46:30.000Z
|
readinglistreader.py
|
treese/ReadingListReader
|
d9e56ec2225400994706ad9c2e9d4815fe4f509d
|
[
"MIT"
] | 7
|
2015-02-16T14:12:02.000Z
|
2021-07-22T06:03:47.000Z
|
readinglistreader.py
|
treese/ReadingListReader
|
d9e56ec2225400994706ad9c2e9d4815fe4f509d
|
[
"MIT"
] | 16
|
2015-04-19T22:15:24.000Z
|
2021-09-14T13:08:57.000Z
|
#!/usr/bin/env python
import os
import argparse
import datetime
from readinglistlib import ReadingListReader
# Configure CLI
fields = ['title', 'url', 'preview', 'date', 'added', 'viewed', 'uuid', 'synckey', 'syncserverid']
ap = argparse.ArgumentParser(description='This script outputs the contents of your Safari Reading List, a queue of temporary bookmarks representing articles you intend to read. By default, it prints the title and url of unread articles in chronological order, beginning with the oldest bookmark. Default output is compliant with CSV conventions.')
ap.add_argument('--separator', action='store', default=',', metavar='SEP', help='Separates field values. Specify \'tab\' to use an actual tab character. Defaults to \',\'.')
ap.add_argument('--quote', action='store', default='"', help='Specify \'\' to suppress quoting. Defaults to \'"\'.')
ap.add_argument('--forcequotes', action='store_true', default=False, help="Quote all field values. By default, only quote empty fields or values containing SEP, QUOTE, or newlines.")
ap.add_argument('--fields', action='store', nargs='+', default=['title', 'url'], choices=fields, metavar='FIELD', help='Controls format of output record. Acceptable fields are title, url, preview, date, added, viewed, uuid, synckey, and syncserverid. Defaults to title and url. (Date is date article was originally bookmarked. If defined, added is date bookmark was synced via iCloud. If defined, viewed is date article was read.)')
ap.add_argument('--header', action='store_true', default=False, help='Output a header record containing field labels.')
ap.add_argument('--timestamp', action='store', default='%a %b %d %H:%M:%S %Y', metavar='FORMAT', help='Controls format of date, added, and viewed fields. Understands strftime directives. Defaults to \'%%a %%b %%d %%H:%%M:%%S %%Y\' (eg, \'' + datetime.datetime.now().strftime('%a %b %d %H:%M:%S %Y') + '\').')
ap.add_argument('--bookmarks', action='store_true', default=False, help='Output items in Netscape bookmarks file format. Overrides preceding tabular output options.')
ap.add_argument('--show', action='store', default='unread', choices=['unread', 'read', 'all'], metavar='FILTER', help='Control which items to output. Acceptable FILTER values are unread, read, or all. Defaults to unread.')
ap.add_argument('--sortfield', action='store', default='date', choices=fields, metavar='FIELD', help="Controls how output is sorted. Defaults to date.")
ap.add_argument('--sortorder', action='store', default='ascending', choices=['ascending', 'descending'], metavar='ORDER', help='May be ascending or descending. Defaults to ascending.')
ap.add_argument('--output', action='store', type=argparse.FileType('w'), default='-', help='Output file path. Defaults to stdout.')
ap.add_argument('--input', action='store', default=os.path.expanduser('~/Library/Safari/Bookmarks.plist'), help='Input file path. Assumed to be a Safari bookmarks file formatted as a binary property list. Defaults to ~/Library/Safari/Bookmarks.plist')
args = ap.parse_args()
# Reinterpretation of fiddly options
if 'tab' == args.separator:
args.separator = '\t'
# Input
if not os.path.exists(args.input):
raise SystemExit, "The input file does not exist: %s" % args.input
rlr = ReadingListReader(args.input)
bookmarks = rlr.read(
show = None if 'all' == args.show else args.show,
sortfield = args.sortfield,
ascending = True if 'ascending' == args.sortorder else False,
dateformat = args.timestamp)
if args.bookmarks:
# Netscape Bookmarks File formatted output
# eg http://msdn.microsoft.com/en-us/library/ie/aa753582(v=vs.85).aspx
print >> args.output, '<!DOCTYPE NETSCAPE-Bookmark-file-1>\n<HTML>\n<META HTTP-EQUIV="CONTENT-TYPE" CONTENT="text/html; charset=UTF-8">\n<Title>Bookmarks</Title>\n<H1>Bookmarks</H1>\n<DT><H3 FOLDED>Reading List Bookmarks</H3>\n<DL>'
for bookmark in bookmarks:
print >> args.output, ' <DT><A HREF="%s">%s</A>' % (bookmark['url'].encode('utf-8'), bookmark['title'].encode('utf-8'))
print >> args.output, '</DL>\n</HTML>'
else:
# CSV or custom tabular formatted output
# Accepts a value. Tests if it should be quoted and, if so, returns quoted
# value with any quote characters escaped via duplication.
# Quoting rules derived from:
# https://tools.ietf.org/html/rfc4180
# http://www.creativyst.com/Doc/Articles/CSV/CSV01.htm
def quotify(value):
    if (args.forcequotes or '' == value or -1 != value.find(args.separator) or -1 != value.find(args.quote) or -1 != value.find('\n')) and '' != args.quote:
        return '%s%s%s' % (args.quote, value.replace(args.quote, '%s%s' % (args.quote, args.quote)), args.quote)
    else:
        return value
# Accepts a list of values. Prints record with separators and, if required, quotes.
def output_record(values):
    print >> args.output, args.separator.join(map(quotify, values))
# Header record
if True == args.header:
output_record(args.fields)
for bookmark in bookmarks:
field_values = []
for field in args.fields:
field_value = bookmark[field]
field_values.append(field_value.encode('utf-8'))
output_record(field_values)
| 61.783133
| 433
| 0.719189
|
#!/usr/bin/env python
import os
import argparse
import datetime
from readinglistlib import ReadingListReader
# Configure CLI
fields = ['title', 'url', 'preview', 'date', 'added', 'viewed', 'uuid', 'synckey', 'syncserverid']
ap = argparse.ArgumentParser(description='This script outputs the contents of your Safari Reading List, a queue of temporary bookmarks representing articles you intend to read. By default, it prints the title and url of unread articles in chronological order, beginning with the oldest bookmark. Default output is compliant with CSV conventions.')
ap.add_argument('--separator', action='store', default=',', metavar='SEP', help='Separates field values. Specify \'tab\' to use an actual tab character. Defaults to \',\'.')
ap.add_argument('--quote', action='store', default='"', help='Specify \'\' to suppress quoting. Defaults to \'"\'.')
ap.add_argument('--forcequotes', action='store_true', default=False, help="Quote all field values. By default, only quote empty fields or values containing SEP, QUOTE, or newlines.")
ap.add_argument('--fields', action='store', nargs='+', default=['title', 'url'], choices=fields, metavar='FIELD', help='Controls format of output record. Acceptable fields are title, url, preview, date, added, viewed, uuid, synckey, and syncserverid. Defaults to title and url. (Date is date article was originally bookmarked. If defined, added is date bookmark was synced via iCloud. If defined, viewed is date article was read.)')
ap.add_argument('--header', action='store_true', default=False, help='Output a header record containing field labels.')
ap.add_argument('--timestamp', action='store', default='%a %b %d %H:%M:%S %Y', metavar='FORMAT', help='Controls format of date, added, and viewed fields. Understands strftime directives. Defaults to \'%%a %%b %%d %%H:%%M:%%S %%Y\' (eg, \'' + datetime.datetime.now().strftime('%a %b %d %H:%M:%S %Y') + '\').')
ap.add_argument('--bookmarks', action='store_true', default=False, help='Output items in Netscape bookmarks file format. Overrides preceding tabular output options.')
ap.add_argument('--show', action='store', default='unread', choices=['unread', 'read', 'all'], metavar='FILTER', help='Control which items to output. Acceptable FILTER values are unread, read, or all. Defaults to unread.')
ap.add_argument('--sortfield', action='store', default='date', choices=fields, metavar='FIELD', help="Controls how output is sorted. Defaults to date.")
ap.add_argument('--sortorder', action='store', default='ascending', choices=['ascending', 'descending'], metavar='ORDER', help='May be ascending or descending. Defaults to ascending.')
ap.add_argument('--output', action='store', type=argparse.FileType('w'), default='-', help='Output file path. Defaults to stdout.')
ap.add_argument('--input', action='store', default=os.path.expanduser('~/Library/Safari/Bookmarks.plist'), help='Input file path. Assumed to be a Safari bookmarks file formatted as a binary property list. Defaults to ~/Library/Safari/Bookmarks.plist')
args = ap.parse_args()
# Reinterpretation of fiddly options
if 'tab' == args.separator:
args.separator = '\t'
# Input
if not os.path.exists(args.input):
raise SystemExit, "The input file does not exist: %s" % args.input
rlr = ReadingListReader(args.input)
bookmarks = rlr.read(
show = None if 'all' == args.show else args.show,
sortfield = args.sortfield,
ascending = True if 'ascending' == args.sortorder else False,
dateformat = args.timestamp)
if args.bookmarks:
# Netscape Bookmarks File formatted output
# eg http://msdn.microsoft.com/en-us/library/ie/aa753582(v=vs.85).aspx
print >> args.output, '<!DOCTYPE NETSCAPE-Bookmark-file-1>\n<HTML>\n<META HTTP-EQUIV="CONTENT-TYPE" CONTENT="text/html; charset=UTF-8">\n<Title>Bookmarks</Title>\n<H1>Bookmarks</H1>\n<DT><H3 FOLDED>Reading List Bookmarks</H3>\n<DL>'
for bookmark in bookmarks:
print >> args.output, ' <DT><A HREF="%s">%s</A>' % (bookmark['url'].encode('utf-8'), bookmark['title'].encode('utf-8'))
print >> args.output, '</DL>\n</HTML>'
else:
# CSV or custom tabular formatted output
# Accepts a value. Tests if it should be quoted and, if so, returns quoted
# value with any quote characters escaped via duplication.
# Quoting rules derived from:
# https://tools.ietf.org/html/rfc4180
# http://www.creativyst.com/Doc/Articles/CSV/CSV01.htm
def quotify(value):
if (args.forcequotes or '' == value or -1 != value.find(args.separator) or -1 != value.find(args.quote) or -1 != value.find('\n')) and '' != args.quote:
return '%s%s%s' % (args.quote, value.replace(args.quote, '%s%s' % (args.quote, args.quote)), args.quote)
else:
return value
# Accepts a list of values. Prints record with separators and, if required, quotes.
def output_record(values):
print >> args.output, args.separator.join(map(quotify, values))
# Header record
if True == args.header:
output_record(args.fields)
for bookmark in bookmarks:
field_values = []
for field in args.fields:
field_value = bookmark[field]
field_values.append(field_value.encode('utf-8'))
output_record(field_values)
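# Hedged illustration of the quoting rules above, assuming the default
# separator ',' and quote '"':
# quotify('plain')    -> plain
# quotify('a,b')      -> "a,b"
# quotify('say "hi"') -> "say ""hi"""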
| 356
| 0
| 46
|
85890f25969655f8b43b5f8b0ea57d7e21d01c40
| 745
|
py
|
Python
|
exercicio_py/ex0004_conta_quantidade_caractere/main_v0.py
|
danielle8farias/Exercicios-Python-3
|
f2fe9b6ca63536df1d83fd10162cfc04de36b830
|
[
"MIT"
] | null | null | null |
exercicio_py/ex0004_conta_quantidade_caractere/main_v0.py
|
danielle8farias/Exercicios-Python-3
|
f2fe9b6ca63536df1d83fd10162cfc04de36b830
|
[
"MIT"
] | null | null | null |
exercicio_py/ex0004_conta_quantidade_caractere/main_v0.py
|
danielle8farias/Exercicios-Python-3
|
f2fe9b6ca63536df1d83fd10162cfc04de36b830
|
[
"MIT"
] | null | null | null |
########
# author: danielle8farias@gmail.com
# repository: https://github.com/danielle8farias
# Description: The user enters their first and last name.
# The program returns how many letters the full name has (excluding spaces)
# and how many letters the first name has.
########
nome_completo = input('Digite seu nome completo: ')
# count(' ') counts the blank spaces
# len() returns the length of the string
# removing the spaces between the names
tamanho_completo = len(nome_completo) - nome_completo.count(' ')
print(f'Seu nome completo possui: {tamanho_completo} letras.')
# find() returns the position of a character
# here we want to find the first space
num = nome_completo.find(' ')
print(f'Seu primeiro nome possui: {num} letras.\n')
| 37.25
| 79
| 0.739597
|
########
# author: danielle8farias@gmail.com
# repository: https://github.com/danielle8farias
# Description: The user enters their first and last name.
# The program returns how many letters the full name has (excluding spaces)
# and how many letters the first name has.
########
nome_completo = input('Digite seu nome completo: ')
# count(' ') counts the blank spaces
# len() returns the length of the string
# removing the spaces between the names
tamanho_completo = len(nome_completo) - nome_completo.count(' ')
print(f'Seu nome completo possui: {tamanho_completo} letras.')
# find() returns the position of a character
# here we want to find the first space
num = nome_completo.find(' ')
print(f'Seu primeiro nome possui: {num} letras.\n')
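# Worked example (illustrative): for the input 'Ana Maria', len() is 9
# and count(' ') is 1, so the full name has 8 letters; find(' ') is 3,
# so the first name has 3 letters.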
| 0
| 0
| 0
|
67ce0d92034f71c96a8e965173c68e6445d1be29
| 31
|
py
|
Python
|
techrachit.py
|
eastmest/Termux-Megapackage
|
08f610d25539cfdc5992893996aa4c89630c9a07
|
[
"MIT"
] | null | null | null |
techrachit.py
|
eastmest/Termux-Megapackage
|
08f610d25539cfdc5992893996aa4c89630c9a07
|
[
"MIT"
] | null | null | null |
techrachit.py
|
eastmest/Termux-Megapackage
|
08f610d25539cfdc5992893996aa4c89630c9a07
|
[
"MIT"
] | null | null | null |
print ("tech rachit are here")
| 15.5
| 30
| 0.709677
|
print ("tech rachit are here")
| 0
| 0
| 0
|
3fa62522c102c532d31c74e82903732fc572abac
| 390
|
py
|
Python
|
feedback/urls.py
|
asb29/Redundant
|
ee816fd41f9217610bd11f757cf9175288723c70
|
[
"MIT"
] | null | null | null |
feedback/urls.py
|
asb29/Redundant
|
ee816fd41f9217610bd11f757cf9175288723c70
|
[
"MIT"
] | null | null | null |
feedback/urls.py
|
asb29/Redundant
|
ee816fd41f9217610bd11f757cf9175288723c70
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
app_name = 'feedback'
urlpatterns = [
url(r'^article/(?P<slug>[-\w]+)/comment$',
views.comment,
name='comment'),
url(r'^reviewcomment/(?P<comment_id>[0-9]+)/$',
views.reviewcomment,
name='review-comment'),
url(r'^article/(?P<slug>[-\w]+)/like$',
views.like,
name='like'),
]
| 21.666667
| 51
| 0.561538
|
from django.conf.urls import url
from . import views
app_name = 'feedback'
urlpatterns = [
url(r'^article/(?P<slug>[-\w]+)/comment$',
views.comment,
name='comment'),
url(r'^reviewcomment/(?P<comment_id>[0-9]+)/$',
views.reviewcomment,
name='review-comment'),
url(r'^article/(?P<slug>[-\w]+)/like$',
views.like,
name='like'),
]
| 0
| 0
| 0
|
2fc23c16f3cf180ba3fd5c3369c888fc32045d14
| 27,933
|
py
|
Python
|
pref/webapp/views/property.py
|
ahampt/Pref
|
6a6b44c751da4358d97c7f170237b8fc0a4bc3d0
|
[
"MIT"
] | null | null | null |
pref/webapp/views/property.py
|
ahampt/Pref
|
6a6b44c751da4358d97c7f170237b8fc0a4bc3d0
|
[
"MIT"
] | 7
|
2015-08-02T20:58:23.000Z
|
2016-05-02T03:25:21.000Z
|
pref/webapp/views/property.py
|
ahampt/Pref
|
6a6b44c751da4358d97c7f170237b8fc0a4bc3d0
|
[
"MIT"
] | null | null | null |
import logging, sys
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.mail import send_mail
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from webapp.tools.misc_tools import create_movie_property, person_is_relevant, genre_is_relevant, generate_header_dict, set_msg, check_and_get_session_info, get_type_dict
from webapp.models import Profiles, People, Genres, Movies, Properties, Associations
property_logger = logging.getLogger('log.property')
associate_logger = logging.getLogger('log.associate')
# Display people list
# Person tools including view, delete, edit, suggestion, and movie association tools (add, remove)
# Display genre list
# Genre tools including view, delete, edit, suggestion, and movie association tools (add, remove)
| 67.798544
| 315
| 0.666667
|
import logging, sys
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.mail import send_mail
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from webapp.tools.misc_tools import create_movie_property, person_is_relevant, genre_is_relevant, generate_header_dict, set_msg, check_and_get_session_info, get_type_dict
from webapp.models import Profiles, People, Genres, Movies, Properties, Associations
property_logger = logging.getLogger('log.property')
associate_logger = logging.getLogger('log.associate')
# Display people list
def people(request):
try:
logged_in_profile_info = { }
permission_response = check_and_get_session_info(request, logged_in_profile_info)
if permission_response != True:
return permission_response
'''*****************************************************************************
Display people list page
        PATH: webapp.views.property.people; METHOD: none; PARAMS: none; MISC: none;
*****************************************************************************'''
people = None
people_list = People.objects.all().order_by('Name')
length = int(request.GET.get('length')) if request.GET.get('length') and request.GET.get('length').isdigit() else 25
length = length if length <= 100 else 100
paginator = Paginator(people_list, length)
page = request.GET.get('page')
try:
people = paginator.page(page)
except PageNotAnInteger:
people = paginator.page(1)
except EmptyPage:
people = paginator.page(paginator.num_pages)
return render_to_response('property/view_people_list.html', {'header' : generate_header_dict(request, 'People List'), 'people' : people}, RequestContext(request))
except Exception:
property_logger.error('Unexpected error: ' + str(sys.exc_info()[0]))
return render_to_response('500.html', {'header' : generate_header_dict(request, 'Error')}, RequestContext(request))
# Person tools including view, delete, edit, suggestion, and movie association tools (add, remove)
def person(request, urlname):
try:
logged_in_profile_info = { }
permission_response = check_and_get_session_info(request, logged_in_profile_info)
if permission_response != True:
return permission_response
# Get all movie associations with person and get all profile associations with said movies
type_dict = get_type_dict()
person = People.objects.get(UrlName=urlname)
directed_properties = Properties.objects.select_related().filter(ConsumeableTypeId=type_dict['CONSUMEABLE_MOVIE'], PropertyTypeId=type_dict['PROPERTY_DIRECTOR'], PropertyId=person.id).order_by('-ConsumeableId__Year', 'ConsumeableId__Title')
written_properties = Properties.objects.select_related().filter(ConsumeableTypeId=type_dict['CONSUMEABLE_MOVIE'], PropertyTypeId=type_dict['PROPERTY_WRITER'], PropertyId=person.id).order_by('-ConsumeableId__Year', 'ConsumeableId__Title')
acted_properties = Properties.objects.select_related().filter(ConsumeableTypeId=type_dict['CONSUMEABLE_MOVIE'], PropertyTypeId=type_dict['PROPERTY_ACTOR'], PropertyId=person.id).order_by('-ConsumeableId__Year', 'ConsumeableId__Title')
directed_movies, written_movies, acted_movies = [], [], []
directed_movies_tuples, written_movies_tuples, acted_movies_tuples = [], [], []
for prop in directed_properties:
movie = prop.ConsumeableId
directed_movies.append(movie)
try:
association = Associations.objects.get(ProfileId = logged_in_profile_info['id'], ConsumeableId = movie, ConsumeableTypeId = type_dict['CONSUMEABLE_MOVIE'])
directed_movies_tuples.append((movie, True))
except Exception:
directed_movies_tuples.append((movie, False))
for prop in written_properties:
movie = prop.ConsumeableId
written_movies.append(movie)
try:
association = Associations.objects.get(ProfileId = logged_in_profile_info['id'], ConsumeableId = movie, ConsumeableTypeId = type_dict['CONSUMEABLE_MOVIE'])
written_movies_tuples.append((movie, True))
except Exception:
written_movies_tuples.append((movie, False))
for prop in acted_properties:
movie = prop.ConsumeableId
acted_movies.append(movie)
try:
association = Associations.objects.get(ProfileId = logged_in_profile_info['id'], ConsumeableId = movie, ConsumeableTypeId = type_dict['CONSUMEABLE_MOVIE'])
acted_movies_tuples.append((movie, True))
except Exception:
acted_movies_tuples.append((movie, False))
if request.GET.get('suggestion'):
if request.method == 'POST':
'''*****************************************************************************
Send suggestion/comment/correction email and redirect to person page
PATH: webapp.views.property.person urlname; METHOD: post; PARAMS: get - suggestion; MISC: none;
*****************************************************************************'''
profile = Profiles.objects.get(id=logged_in_profile_info['id'])
email_from = settings.DEFAULT_FROM_EMAIL
email_subject = 'Profile: ' + str(profile.Username) + ' Id: ' + str(profile.id) + ' PersonId: ' + str(person.id)
email_message = request.POST.get('message') if request.POST.get('message') else None
                set_msg(request, 'Thank you for your feedback!', 'We have received your suggestion/comment/correction and will react to it appropriately.', 'success')
if email_message:
# send email
send_mail(email_subject, email_message, email_from, [settings.DEFAULT_TO_EMAIL], fail_silently=False)
else:
pass
return redirect('webapp.views.property.person', urlname=person.UrlName)
else:
'''*****************************************************************************
Display suggestion/comment/correction page
PATH: webapp.views.property.person urlname; METHOD: not post; PARAMS: get - suggestion; MISC: none;
*****************************************************************************'''
return render_to_response('site/suggestion_form.html', {'header' : generate_header_dict(request, 'Suggestion/Comment/Correction'), 'person' : person}, RequestContext(request))
elif logged_in_profile_info['admin'] and request.GET.get('edit'):
if request.method == 'POST':
'''*****************************************************************************
Save changes made to person and redirect to person page
PATH: webapp.views.property.person urlname; METHOD: post; PARAMS: get - edit; MISC: logged_in_profile.IsAdmin;
*****************************************************************************'''
person.Name = request.POST.get('name')
person.RottenTomatoesId = None if (not request.POST.get('rottentomatoes') or (request.POST.get('rottentomatoes') and request.POST.get('rottentomatoes') == 'None')) else request.POST.get('rottentomatoes')
try:
person.full_clean()
person.save()
property_logger.info(person.UrlName + ' Update Success by ' + logged_in_profile_info['username'])
set_msg(request, 'Person Updated!', person.Name + ' has successfully been updated.', 'success')
return redirect('webapp.views.property.person', urlname=person.UrlName)
except ValidationError as e:
property_logger.info(person.UrlName + ' Update Failure by ' + logged_in_profile_info['username'])
error_msg = e.message_dict
for key in error_msg:
error_msg[key] = str(error_msg[key][0])
return render_to_response('property/edit_person.html', {'header' : generate_header_dict(request, 'Update'), 'person' : person, 'directed_movies' : directed_movies, 'written_movies' : written_movies, 'acted_movies' : acted_movies, 'error_msg' : error_msg}, RequestContext(request))
else:
'''*****************************************************************************
Display edit page
PATH: webapp.views.property.person urlname; METHOD: not post; PARAMS: get - edit; MISC: logged_in_profile.IsAdmin;
*****************************************************************************'''
return render_to_response('property/edit_person.html', {'header' : generate_header_dict(request, 'Update'), 'person' : person, 'directed_movies' : directed_movies, 'written_movies' : written_movies, 'acted_movies' : acted_movies}, RequestContext(request))
elif logged_in_profile_info['admin'] and request.GET.get('delete'):
'''*****************************************************************************
Delete person and redirect to home
PATH: webapp.views.property.person urlname; METHOD: none; PARAMS: get - delete; MISC: logged_in_profile.IsAdmin;
*****************************************************************************'''
# Delete all movie associations with person
for prop in directed_properties:
prop.delete()
associate_logger.info(prop.ConsumeableId.UrlTitle + ' Disassociated ' + person.UrlName + ' Success by ' + logged_in_profile_info['username'])
for prop in written_properties:
prop.delete()
associate_logger.info(prop.ConsumeableId.UrlTitle + ' Disassociated ' + person.UrlName + ' Success by ' + logged_in_profile_info['username'])
for prop in acted_properties:
prop.delete()
associate_logger.info(prop.ConsumeableId.UrlTitle + ' Disassociated ' + person.UrlName + ' Success by ' + logged_in_profile_info['username'])
# Delete person
person.delete()
property_logger.info(person.UrlName + ' Delete Success by ' + logged_in_profile_info['username'])
set_msg(request, 'Person Deleted!', person.Name + ' has successfully been deleted.', 'danger')
return redirect('webapp.views.site.home')
elif logged_in_profile_info['admin'] and request.GET.get('add') and request.method == 'POST':
'''*****************************************************************************
Create movie association with person and redirect to edit page
PATH: webapp.views.property.person urlname; METHOD: post; PARAMS: get - add; MISC: logged_in_profile.IsAdmin;
*****************************************************************************'''
try:
value = request.POST.get('add')
title = value[:len(value) - 7] if value and len(value) > 7 else None
year = int(value[len(value) - 5:len(value)-1]) if value and len(value) > 7 and value[len(value) - 5:len(value)-1].isdigit() else None
type = str(request.GET.get('t')) if request.GET.get('t') else None
movie = Movies.objects.get(Title=title, Year=year)
create_movie_property(movie, person.id, person.UrlName, type, logged_in_profile_info['username'])
directed_properties = Properties.objects.select_related().filter(ConsumeableTypeId=type_dict['CONSUMEABLE_MOVIE'], PropertyTypeId=type_dict['PROPERTY_DIRECTOR'], PropertyId=person.id)
written_properties = Properties.objects.select_related().filter(ConsumeableTypeId=type_dict['CONSUMEABLE_MOVIE'], PropertyTypeId=type_dict['PROPERTY_WRITER'], PropertyId=person.id)
acted_properties = Properties.objects.select_related().filter(ConsumeableTypeId=type_dict['CONSUMEABLE_MOVIE'], PropertyTypeId=type_dict['PROPERTY_ACTOR'], PropertyId=person.id)
directed_movies, written_movies, acted_movies = [], [], []
for prop in directed_properties:
directed_movies.append(prop.ConsumeableId)
for prop in written_properties:
written_movies.append(prop.ConsumeableId)
for prop in acted_properties:
acted_movies.append(prop.ConsumeableId)
set_msg(request, 'Movie Added!', movie.Title + ' has successfully been added to ' + person.Name + '\'s career.', 'success')
return render_to_response('property/edit_person.html', {'header' : generate_header_dict(request, 'Update'), 'person' : person, 'directed_movies' : directed_movies, 'written_movies' : written_movies, 'acted_movies' : acted_movies}, RequestContext(request))
except ObjectDoesNotExist:
property_logger.info(value + ' Added to ' + person.UrlName + ' Failure by ' + logged_in_profile_info['username'])
return render_to_response('property/edit_person.html', {'header' : generate_header_dict(request, 'Update'), 'person' : person, 'directed_movies' : directed_movies, 'written_movies' : written_movies, 'acted_movies' : acted_movies, 'error_msg' : {'MovieTitle' : 'Movie does not exist.'}}, RequestContext(request))
except Exception:
property_logger.info(value + ' Added to ' + person.UrlName + ' Failure by ' + logged_in_profile_info['username'])
return render_to_response('property/edit_person.html', {'header' : generate_header_dict(request, 'Update'), 'person' : person, 'directed_movies' : directed_movies, 'written_movies' : written_movies, 'acted_movies' : acted_movies, 'error_msg' : {'Movie' : 'Movie not found.'}}, RequestContext(request))
elif logged_in_profile_info['admin'] and request.GET.get('remove'):
'''*****************************************************************************
Remove property association with movie and redirect to home or edit page appropriately
PATH: webapp.views.property.person urlname; METHOD: none; PARAMS: get - remove; MISC: logged_in_profile.IsAdmin;
*****************************************************************************'''
re = True if request.GET.get('movie') else False
id = request.GET.get('i')
type = str(request.GET.get('t')) if request.GET.get('t') else None
movie = Movies.objects.get(UrlTitle=id)
prop = Properties.objects.get(ConsumeableTypeId=type_dict['CONSUMEABLE_MOVIE'], ConsumeableId=movie, PropertyId=person.id, PropertyTypeId=type_dict['PROPERTY_' + type])
prop.delete()
associate_logger.info(movie.UrlTitle + ' Disassociated ' + person.UrlName + ' Success by ' + logged_in_profile_info['username'])
if person_is_relevant(person):
if re:
set_msg(request, 'Person Removed!', person.Name + ' has successfully been removed from ' + movie.Title + '.', 'warning')
response = redirect('webapp.views.movie.view', urltitle=movie.UrlTitle)
response['Location'] += '?edit=1'
return response
else:
                    set_msg(request, 'Movie Removed!', movie.Title + ' has successfully been removed from ' + person.Name + '\'s career.', 'warning')
response = redirect('webapp.views.property.person', urlname=person.UrlName)
response['Location'] += '?edit=1'
return response
else:
if re:
person.delete()
                    property_logger.info(person.Name + ' Delete Success by ' + logged_in_profile_info['username'])
set_msg(request, 'Person Deleted!', person.Name + ' has successfully been deleted due to the removal of them from ' + movie.Title + '.', 'danger')
response = redirect('webapp.views.movie.view', urltitle=movie.UrlTitle)
response['Location'] += '?edit=1'
return response
else:
person.delete()
                    property_logger.info(person.Name + ' Delete Success by ' + logged_in_profile_info['username'])
set_msg(request, 'Person Deleted!', person.Name + ' has successfully been deleted due to the removal of ' + movie.Title + ' from their career.', 'danger')
return redirect('webapp.views.site.home')
else:
'''*****************************************************************************
Display person page
            PATH: webapp.views.property.person urlname; METHOD: none; PARAMS: none; MISC: none;
*****************************************************************************'''
return render_to_response('property/view_person.html', {'header' : generate_header_dict(request, person.Name), 'person' : person, 'directed_movies' : directed_movies_tuples, 'written_movies' : written_movies_tuples, 'acted_movies' : acted_movies_tuples}, RequestContext(request))
except ObjectDoesNotExist:
raise Http404
except Exception:
property_logger.error('Unexpected error: ' + str(sys.exc_info()[0]))
return render_to_response('500.html', {'header' : generate_header_dict(request, 'Error')}, RequestContext(request))
# Display genre list
def genres(request):
try:
logged_in_profile_info = { }
permission_response = check_and_get_session_info(request, logged_in_profile_info)
if permission_response != True:
return permission_response
'''*****************************************************************************
Display genre list page
PATH: webapp.views.property.genres; METHOD: none; PARAMS: none; MISC: none;
*****************************************************************************'''
genres = None
genre_list = Genres.objects.all().order_by('Description')
length = int(request.GET.get('length')) if request.GET.get('length') and request.GET.get('length').isdigit() else 25
length = length if length <= 100 else 100
paginator = Paginator(genre_list, length)
page = request.GET.get('page')
try:
genres = paginator.page(page)
except PageNotAnInteger:
genres = paginator.page(1)
except EmptyPage:
genres = paginator.page(paginator.num_pages)
return render_to_response('property/view_genre_list.html', {'header' : generate_header_dict(request, 'Genre List'), 'genres' : genres}, RequestContext(request))
except Exception:
property_logger.error('Unexpected error: ' + str(sys.exc_info()[0]))
return render_to_response('500.html', {'header' : generate_header_dict(request, 'Error')}, RequestContext(request))
# Genre tools including view, delete, edit, suggestion, and movie association tools (add, remove)
def genre(request, description):
try:
logged_in_profile_info = { }
permission_response = check_and_get_session_info(request, logged_in_profile_info)
if permission_response != True:
return permission_response
type_dict = get_type_dict()
genre = Genres.objects.get(Description=description)
properties = Properties.objects.select_related().filter(ConsumeableTypeId=type_dict['CONSUMEABLE_MOVIE'], PropertyTypeId=type_dict['PROPERTY_GENRE'], PropertyId=genre.id).order_by('-ConsumeableId__Year', 'ConsumeableId__Title')
length = int(request.GET.get('length')) if request.GET.get('length') and request.GET.get('length').isdigit() else 25
length = length if length <= 100 else 100
paginator = Paginator(properties, length)
page = request.GET.get('page')
try:
genre_movies = paginator.page(page)
except PageNotAnInteger:
genre_movies = paginator.page(1)
except EmptyPage:
genre_movies = paginator.page(paginator.num_pages)
movies, movies_tuples = [], []
for prop in genre_movies:
movie = prop.ConsumeableId
movies.append(movie)
try:
association = Associations.objects.get(ProfileId = logged_in_profile_info['id'], ConsumeableId = movie, ConsumeableTypeId = type_dict['CONSUMEABLE_MOVIE'])
movies_tuples.append((movie, True))
except Exception:
movies_tuples.append((movie, False))
if request.GET.get('suggestion'):
if request.method == 'POST':
'''*****************************************************************************
Send suggestion/comment/correction email and redirect to genre page
PATH: webapp.views.property.genre description; METHOD: post; PARAMS: get - suggestion; MISC: none;
*****************************************************************************'''
profile = Profiles.objects.get(id=logged_in_profile_info['id'])
email_from = settings.DEFAULT_FROM_EMAIL
email_subject = 'Profile: ' + str(profile.Username) + ' Id: ' + str(profile.id) + ' GenreId: ' + str(genre.id)
email_message = request.POST.get('message') if request.POST.get('message') else None
                set_msg(request, 'Thank you for your feedback!', 'We have received your suggestion/comment/correction and will react to it appropriately.', 'success')
if email_message:
# send email
send_mail(email_subject, email_message, email_from, [settings.DEFAULT_TO_EMAIL], fail_silently=False)
else:
pass
return redirect('webapp.views.property.genre', description=genre.Description)
else:
'''*****************************************************************************
Display suggestion/comment/correction page
PATH: webapp.views.property.genre description; METHOD: not post; PARAMS: get - suggestion; MISC: none;
*****************************************************************************'''
return render_to_response('site/suggestion_form.html', {'header' : generate_header_dict(request, 'Suggestion/Comment/Correction'), 'genre' : genre}, RequestContext(request))
elif logged_in_profile_info['admin'] and request.GET.get('edit'):
if request.method == 'POST':
'''*****************************************************************************
Save changes made to genre and redirect to genre page
PATH: webapp.views.property.genre description; METHOD: post; PARAMS: get - edit; MISC: logged_in_profile.IsAdmin;
*****************************************************************************'''
genre.Description = request.POST.get('description')
try:
genre.full_clean()
genre.save()
property_logger.info(genre.Description + ' Update Success by ' + logged_in_profile_info['username'])
set_msg(request, 'Genre Updated!', genre.Description + ' has successfully been updated.', 'success')
return redirect('webapp.views.property.genre', description=genre.Description)
except ValidationError as e:
property_logger.info(genre.Description + ' Update Failure by ' + logged_in_profile_info['username'])
error_msg = e.message_dict
for key in error_msg:
error_msg[key] = str(error_msg[key][0])
return render_to_response('property/edit_genre.html', {'header' : generate_header_dict(request, 'Update'), 'genre' : genre, 'movies' : movies, 'error_msg' : error_msg}, RequestContext(request))
else:
'''*****************************************************************************
Display edit page
PATH: webapp.views.property.genre description; METHOD: not post; PARAMS: get - edit; MISC: logged_in_profile.IsAdmin;
*****************************************************************************'''
return render_to_response('property/edit_genre.html', {'header' : generate_header_dict(request, 'Update'), 'genre' : genre, 'movies' : movies}, RequestContext(request))
elif logged_in_profile_info['admin'] and request.GET.get('delete'):
'''*****************************************************************************
Delete genre and redirect to home
PATH: webapp.views.property.genre description; METHOD: none; PARAMS: get - delete; MISC: logged_in_profile.IsAdmin;
*****************************************************************************'''
for prop in properties:
prop.delete()
associate_logger.info(prop.ConsumeableId.UrlTitle + ' Disassociated ' + genre.Description + ' Success by ' + logged_in_profile_info['username'])
genre.delete()
            property_logger.info(genre.Description + ' Delete Success by ' + logged_in_profile_info['username'])
set_msg(request, 'Genre Deleted!', genre.Description + ' has successfully been deleted.', 'danger')
return redirect('webapp.views.site.home')
elif logged_in_profile_info['admin'] and request.GET.get('add') and request.method == 'POST':
try:
'''*****************************************************************************
Create movie association with genre and redirect to edit page
PATH: webapp.views.property.genre description; METHOD: post; PARAMS: get - add; MISC: logged_in_profile.IsAdmin;
*****************************************************************************'''
value = request.POST.get('add')
title = value[:len(value) - 7] if value and len(value) > 7 else None
year = int(value[len(value) - 5:len(value)-1]) if value and len(value) > 7 and value[len(value) - 5:len(value)-1].isdigit() else None
movie = Movies.objects.get(Title=title, Year=year)
create_movie_property(movie, genre.id, genre.Description, 3, logged_in_profile_info['username'])
properties = Properties.objects.select_related().filter(ConsumeableTypeId=type_dict['CONSUMEABLE_MOVIE'], PropertyTypeId=type_dict['PROPERTY_GENRE'], PropertyId=genre.id)
movies = []
for prop in properties:
movies.append(prop.ConsumeableId)
                set_msg(request, 'Movie Added!', movie.Title + ' has successfully been added to ' + genre.Description + ' movies.', 'warning')
return render_to_response('property/edit_genre.html', {'header' : generate_header_dict(request, 'Update'), 'genre' : genre, 'movies' : movies}, RequestContext(request))
except ObjectDoesNotExist:
property_logger.info(value + ' Added to ' + genre.Description + ' Failure by ' + logged_in_profile_info['username'])
return render_to_response('property/edit_genre.html', {'header' : generate_header_dict(request, 'Update'), 'genre' : genre, 'movies' : movies, 'error_msg' : {'MovieTitle' : 'Movie does not exist.'}}, RequestContext(request))
except Exception:
property_logger.info(value + ' Added to ' + genre.Description + ' Failure by ' + logged_in_profile_info['username'])
return render_to_response('property/edit_genre.html', {'header' : generate_header_dict(request, 'Update'), 'genre' : genre, 'movies' : movies, 'error_msg' : {'MovieTitle' : 'Year must be between 1901 and 2155 (inclusive).'}}, RequestContext(request))
elif logged_in_profile_info['admin'] and request.GET.get('remove'):
'''*****************************************************************************
Remove property association with movie and redirect to home or edit page appropriately
            PATH: webapp.views.property.genre description; METHOD: none; PARAMS: get - remove; MISC: logged_in_profile.IsAdmin;
*****************************************************************************'''
re = True if request.GET.get('movie') else False
id = request.GET.get('i')
movie = Movies.objects.get(UrlTitle=id)
prop = Properties.objects.get(ConsumeableId=movie, ConsumeableTypeId=type_dict['CONSUMEABLE_MOVIE'], PropertyId=genre.id, PropertyTypeId=type_dict['PROPERTY_GENRE'])
prop.delete()
associate_logger.info(movie.UrlTitle + ' Disassociated ' + genre.Description + ' Success by ' + logged_in_profile_info['username'])
if genre_is_relevant(genre):
if re:
set_msg(request, 'Genre Removed!', genre.Description + ' has successfully been removed from ' + movie.Title + '.', 'warning')
response = redirect('webapp.views.movie.view', urltitle=movie.UrlTitle)
response['Location'] += '?edit=1'
return response
else:
                    set_msg(request, 'Movie Removed!', movie.Title + ' has successfully been removed from ' + genre.Description + ' movies.', 'warning')
response = redirect('webapp.views.property.genre', description=genre.Description)
response['Location'] += '?edit=1'
                    return response
else:
if re:
genre.delete()
                    property_logger.info(genre.Description + ' Delete Success by ' + logged_in_profile_info['username'])
set_msg(request, 'Genre Deleted!', genre.Description + ' has successfully been deleted due to the removal of it from ' + movie.Title + '.', 'danger')
response = redirect('webapp.views.movie.view', urltitle=movie.UrlTitle)
response['Location'] += '?edit=1'
return response
else:
genre.delete()
                    property_logger.info(genre.Description + ' Delete Success by ' + logged_in_profile_info['username'])
set_msg(request, 'Genre Deleted!', genre.Description + ' has successfully been deleted due to the removal of ' + movie.Title + ' from this genre.', 'danger')
return redirect('webapp.views.site.home')
else:
return render_to_response('property/view_genre.html', {'header' : generate_header_dict(request, genre.Description), 'genre' : genre, 'movies' : movies_tuples, 'page' : genre_movies}, RequestContext(request))
except ObjectDoesNotExist:
raise Http404
except Exception:
property_logger.error('Unexpected error: ' + str(sys.exc_info()[0]))
return render_to_response('500.html', {'header' : generate_header_dict(request, 'Error')}, RequestContext(request))
| 26,854
| 0
| 88
|
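The views above repeat one pagination idiom four times: cap the page length, then fall back to the first or last page on bad input. A minimal sketch of that idiom as a reusable helper (the function name is invented for illustration):

from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def paginate(queryset, page, length, max_length=100):
    paginator = Paginator(queryset, min(length, max_length))
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)                    # non-numeric page -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)  # out of range -> last page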
a77e4ba1d9535c20aa67f0efe32a4d0cd4d16c2b
| 6,253
|
py
|
Python
|
src/motor/3rdparty/compute/CUDA/mak/build.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
src/motor/3rdparty/compute/CUDA/mak/build.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
src/motor/3rdparty/compute/CUDA/mak/build.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
from waflib import Task
from waflib.TaskGen import feature, before_method, taskgen_method, extension
from waflib.Tools import c_preproc
try:
import cPickle as pickle
except ImportError:
import pickle
template_kernel = """
_MOTOR_PLUGIN_EXPORT void _%(kernel)s(const u32 index, const u32 total,
const minitl::array< minitl::weak<const Motor::KernelScheduler::IMemoryBuffer> >& /*argv*/)
{
motor_forceuse(index);
motor_forceuse(total);
}
_MOTOR_REGISTER_METHOD_NAMED(MOTOR_KERNEL_ID, _%(kernel)s, _%(kernel)s);
"""
template_cpp = """
%(pch)s
#include <motor/config/config.hh>
#include <motor/kernel/simd.hh>
#include <motor/kernel/input/input.hh>
#include <motor/plugin/dynobjectlist.hh>
#include <motor/minitl/array.hh>
#include <motor/plugin.compute.cuda/memorybuffer.hh>
#include <motor/scheduler/kernel/parameters/parameters.hh>
using namespace Kernel;
_MOTOR_REGISTER_PLUGIN(MOTOR_KERNEL_ID, MOTOR_KERNEL_NAME);
%(kernels)s
"""
class nvcc(Task.Task):
"nvcc"
run_str = '${NVCC_CXX} ${NVCC_CXXFLAGS} --fatbin ${NVCC_FRAMEWORKPATH_ST:FRAMEWORKPATH} ${NVCC_CPPPATH_ST:INCPATHS} -DMOTOR_COMPUTE=1 ${NVCC_DEFINES_ST:DEFINES} -D_NVCC=1 ${NVCC_CXX_SRC_F}${SRC[0].abspath()} ${NVCC_CXX_TGT_F} ${TGT}'
ext_out = ['.fatbin']
color = 'GREEN'
class cudac(Task.Task):
"Generates a CUDA binder to call the C++ kernel"
color = 'CYAN'
@feature('motor:cuda:kernel_create')
@before_method('process_source')
@feature('motor:preprocess')
| 39.828025
| 237
| 0.611866
|
from waflib import Errors, Task
from waflib.TaskGen import feature, before_method, taskgen_method, extension
from waflib.Tools import c_preproc
try:
import cPickle as pickle
except ImportError:
import pickle
template_kernel = """
_MOTOR_PLUGIN_EXPORT void _%(kernel)s(const u32 index, const u32 total,
const minitl::array< minitl::weak<const Motor::KernelScheduler::IMemoryBuffer> >& /*argv*/)
{
motor_forceuse(index);
motor_forceuse(total);
}
_MOTOR_REGISTER_METHOD_NAMED(MOTOR_KERNEL_ID, _%(kernel)s, _%(kernel)s);
"""
template_cpp = """
%(pch)s
#include <motor/config/config.hh>
#include <motor/kernel/simd.hh>
#include <motor/kernel/input/input.hh>
#include <motor/plugin/dynobjectlist.hh>
#include <motor/minitl/array.hh>
#include <motor/plugin.compute.cuda/memorybuffer.hh>
#include <motor/scheduler/kernel/parameters/parameters.hh>
using namespace Kernel;
_MOTOR_REGISTER_PLUGIN(MOTOR_KERNEL_ID, MOTOR_KERNEL_NAME);
%(kernels)s
"""
class nvcc(Task.Task):
"nvcc"
run_str = '${NVCC_CXX} ${NVCC_CXXFLAGS} --fatbin ${NVCC_FRAMEWORKPATH_ST:FRAMEWORKPATH} ${NVCC_CPPPATH_ST:INCPATHS} -DMOTOR_COMPUTE=1 ${NVCC_DEFINES_ST:DEFINES} -D_NVCC=1 ${NVCC_CXX_SRC_F}${SRC[0].abspath()} ${NVCC_CXX_TGT_F} ${TGT}'
ext_out = ['.fatbin']
def scan(self):
try:
incn = self.generator.includes_nodes
except AttributeError:
raise Errors.WafError('%r is missing a feature such as "c", "cxx" or "includes": ' % self.generator)
nodepaths = [x for x in incn if x.is_child_of(x.ctx.srcnode) or x.is_child_of(x.ctx.bldnode)]
nodepaths.append(self.generator.bld.motornode.make_node('src/plugin/compute/cuda/api.cuda'))
tmp = c_preproc.c_parser(nodepaths)
tmp.start(self.inputs[0], self.env)
return (tmp.nodes, tmp.names)
color = 'GREEN'
class cudac(Task.Task):
"Generates a CUDA binder to call the C++ kernel"
color = 'CYAN'
def sig_vars(self):
Task.Task.sig_vars(self)
self.m.update(template_kernel.encode('utf-8'))
self.m.update(template_cpp.encode('utf-8'))
self.m.update((self.generator.pchstop if self.generator.pchstop else '').encode('utf-8'))
def scan(self):
return ([], [])
def run(self):
with open(self.inputs[0].abspath(), 'rb') as input_file:
kernel_name, includes, source, kernel_methods = pickle.load(input_file)
kernels = []
for method, namespace, args in kernel_methods:
args = []
for arg in method.parameters[2:]:
args.append((arg.name, arg.type))
kernel_params = {
'args': ',\n '.join('%s(0, 0, 0)' % arg[1] for i, arg in enumerate(args)),
'kernel': method.name,
'kernelnamespace': '::'.join(namespace)
}
kernels.append(template_kernel % kernel_params)
params = {
'pch': '#include <%s>\n' % self.generator.pchstop if self.generator.pchstop else '',
'kernels': '\n\n'.join(kernels)
}
with open(self.outputs[0].abspath(), 'w') as out:
out.write(template_cpp % params)
@feature('motor:cuda:kernel_create')
@before_method('process_source')
def build_cuda_kernels(task_gen):
for f in getattr(task_gen, 'features', []):
task_gen.env.append_value('NVCC_CXXFLAGS', task_gen.env['NVCC_CXXFLAGS_%s' % f])
ast = task_gen.kernel_ast
cuda_source = task_gen.kernel_source
out_cc = ast.change_ext('.cudacall.cc')
task_gen.create_task('cudac', [ast], [out_cc])
task_gen.source.append(out_cc)
cuda_bin = task_gen.make_bld_node('obj', cuda_source.parent, cuda_source.name[:-2] + 'fatbin')
cuda_cc = task_gen.make_bld_node('src', cuda_source.parent, cuda_source.name[:-2] + 'cc')
task_gen.create_task('nvcc', [cuda_source], [cuda_bin])
task_gen.create_task('bin2c', [cuda_bin], [cuda_cc], var='%s_cudaKernel' % '_'.join(task_gen.kernel))
task_gen.source.append(cuda_cc)
@feature('motor:preprocess')
def create_cuda_kernels(task_gen):
for kernel, kernel_source, kernel_path, kernel_ast in task_gen.kernels:
kernel_target = '.'.join([task_gen.parent, '.'.join(kernel), 'cuda'])
kernel_gens = []
for env in task_gen.bld.multiarch_envs:
for kernel_type, toolchain in env.KERNEL_TOOLCHAINS:
if kernel_type != 'cuda':
continue
kernel_env = task_gen.bld.all_envs[toolchain]
tgen = task_gen.bld.get_tgen_by_name(env.ENV_PREFIX % task_gen.parent)
kernel_task_gen = task_gen.bld(
env=kernel_env.derive(),
bld_env=env,
target=env.ENV_PREFIX % kernel_target,
target_name=env.ENV_PREFIX % task_gen.parent,
safe_target_name=kernel_target.replace('.', '_').replace('-', '_'),
kernel=kernel,
features=[
'cxx', task_gen.bld.env.STATIC and 'cxxobjects' or 'cxxshlib', 'motor:cxx', 'motor:kernel',
'motor:cuda:kernel_create'
],
pchstop=tgen.preprocess.pchstop,
defines=tgen.defines + [
'MOTOR_KERNEL_ID=%s_%s' % (task_gen.parent.replace('.', '_'), kernel_target.replace('.', '_')),
'MOTOR_KERNEL_NAME=%s' % (kernel_target),
'MOTOR_KERNEL_TARGET=%s' % kernel_type,
],
includes=tgen.includes,
kernel_source=kernel_source,
kernel_ast=kernel_ast,
use=tgen.use + [env.ENV_PREFIX % 'plugin.compute.cuda'],
uselib=tgen.uselib,
source_nodes=tgen.source_nodes,
)
kernel_task_gen.env.PLUGIN = kernel_task_gen.env.plugin_name
kernel_gens.append(kernel_task_gen)
task_gen.bld.multiarch(kernel_target, kernel_gens)
def build(bld):
cuda = bld.thirdparty('motor.3rdparty.compute.CUDA')
if cuda:
cuda.export_lib += ['cudart_static']
| 4,551
| 0
| 175
|
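The cudac task builds the binder source purely by %-formatting the module-level templates with a dict. A standalone sketch of that substitution step (the kernel name is made up):

# %-formatting with a mapping, as cudac.run() does; 'blur' is a made-up kernel.
template = '_MOTOR_PLUGIN_EXPORT void _%(kernel)s(...);'
print(template % {'kernel': 'blur'})   # -> _MOTOR_PLUGIN_EXPORT void _blur(...);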
773ebb715bc24daa595c7968a1ae63723535d201
| 1,023
|
py
|
Python
|
sols/1022.py
|
Paul11100/LeetCode
|
9896c579dff1812c0c76964db8d60603ee715e35
|
[
"MIT"
] | null | null | null |
sols/1022.py
|
Paul11100/LeetCode
|
9896c579dff1812c0c76964db8d60603ee715e35
|
[
"MIT"
] | null | null | null |
sols/1022.py
|
Paul11100/LeetCode
|
9896c579dff1812c0c76964db8d60603ee715e35
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# DFS and convert binary (Accepted), O(n) time and space
# Recursion (Top Voted), O(n) time and space
| 31.96875
| 87
| 0.540567
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from typing import Optional

class Solution:
# DFS and convert binary (Accepted), O(n) time and space
def sumRootToLeaf(self, root: Optional[TreeNode]) -> int:
self.res = 0
def dfs(node, cur):
cur *= 2
cur += node.val
if node.left:
dfs(node.left, cur)
if node.right:
dfs(node.right, cur)
if not node.left and not node.right:
self.res += cur
dfs(root, 0)
return self.res
# Recursion (Top Voted), O(n) time and space
def sumRootToLeaf(self, root: Optional[TreeNode], val=0) -> int:
if not root:
return 0
val = val * 2 + root.val
if root.left == root.right:
return val
return self.sumRootToLeaf(root.left, val) + self.sumRootToLeaf(root.right, val)
| 652
| -6
| 74
|
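A worked check of the first solution, assuming the TreeNode class from the header comment is actually defined: the tree [1,0,1] yields the root-to-leaf binary numbers 0b10 and 0b11, so the sum is 2 + 3 = 5.

root = TreeNode(1, TreeNode(0), TreeNode(1))
print(Solution().sumRootToLeaf(root))  # 0b10 + 0b11 = 2 + 3 = 5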
82d289627c3ee9209ff2dfa6c1d190469fb034cb
| 286
|
py
|
Python
|
tests/__init__.py
|
sturmianseq/zenmake
|
44f1131c1ab677d8c3c930150c63a7dde4ef7de0
|
[
"BSD-3-Clause"
] | 2
|
2019-10-14T05:05:34.000Z
|
2022-03-28T04:55:00.000Z
|
tests/__init__.py
|
sturmianseq/zenmake
|
44f1131c1ab677d8c3c930150c63a7dde4ef7de0
|
[
"BSD-3-Clause"
] | 42
|
2020-08-25T07:59:32.000Z
|
2021-11-15T03:12:29.000Z
|
tests/__init__.py
|
sturmianseq/zenmake
|
44f1131c1ab677d8c3c930150c63a7dde4ef7de0
|
[
"BSD-3-Clause"
] | 1
|
2021-08-13T13:59:51.000Z
|
2021-08-13T13:59:51.000Z
|
import sys
from os import path
ZENMAKE_DIR = path.dirname(path.abspath(__file__))
ZENMAKE_DIR = path.normpath(path.join(ZENMAKE_DIR, path.pardir, 'src', 'zenmake'))
if ZENMAKE_DIR not in sys.path:
sys.path.insert(1, ZENMAKE_DIR)
# for test 'testLoadPyModule()'
something = 'qaz'
| 23.833333
| 82
| 0.744755
|
import sys
from os import path
ZENMAKE_DIR = path.dirname(path.abspath(__file__))
ZENMAKE_DIR = path.normpath(path.join(ZENMAKE_DIR, path.pardir, 'src', 'zenmake'))
if ZENMAKE_DIR not in sys.path:
sys.path.insert(1, ZENMAKE_DIR)
# for test 'testLoadPyModule()'
something = 'qaz'
| 0
| 0
| 0
|
0a2c7252d45b9a2d213668ef67304ff04ab1a631
| 5,680
|
py
|
Python
|
Drivers/THINGSPEAK.py
|
bbaumg/Sensor
|
95eae629a7d36099cf0786e4d3a46c826ed93099
|
[
"MIT"
] | null | null | null |
Drivers/THINGSPEAK.py
|
bbaumg/Sensor
|
95eae629a7d36099cf0786e4d3a46c826ed93099
|
[
"MIT"
] | null | null | null |
Drivers/THINGSPEAK.py
|
bbaumg/Sensor
|
95eae629a7d36099cf0786e4d3a46c826ed93099
|
[
"MIT"
] | null | null | null |
# Author: Barrett Baumgartner
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import requests
import logging
THINGSPEAK_URL = 'https://api.thingspeak.com/'
| 33.609467
| 85
| 0.687324
|
# Author: Barrett Baumgartner
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import requests
import logging
THINGSPEAK_URL = 'https://api.thingspeak.com/'
class thingspeak(object):
def __init__(self, channel, apiKey, tsURL=THINGSPEAK_URL):
"""
Initalization "constructor" for the object.
Initially populates the "fields" list.
Defines the variables inside the object for holding most recent
values for the update call
"""
logging.info("Instantiating a thingspeak object")
self.tsRUL = tsURL # URL for the thingspeak server
logging.info("Thingspeak URL = " + str(self.tsRUL))
self.apiKey = apiKey # API Key for the Channel
logging.info("API Key = " + str(self.apiKey))
self.channel = channel # Channel Number
logging.info("Channel = " + str(self.channel))
self.fields = dict() # List of fields and their description
logging.debug("List of fields as 'fields[]' = " + str(self.fields))
self.field = dict() # Fields and their most recent saved value
logging.debug("List of field values as 'field[]' = " + str(self.field))
#~ self.last = dict()
#~ logging.debug("List of last written (non-null) values written" + str(self.last))
try:
results = requests.get(self.tsRUL)
if results.ok != True:
logging.error("The URL didn't return a 200")
exit(1)
except:
logging.error("Error reaching the thingspeak URL = " + str(self.tsRUL))
exit(1)
self.fields = self.get_fields()
logging.debug(self.fields)
self.clear_field_values()
logging.debug(self.field)
def get_fields(self):
"""Get the list of fields and their description and return them"""
logging.debug("Beginning")
options=dict(api_key = self.apiKey, results = 0)
url = '{ts}channels/{id}/feeds.json'.format(
ts=self.tsRUL,
id=self.channel
)
try:
results = requests.get(url, params=options)
if results.ok != True:
logging.error("The URL didn't return a 200")
return
except:
logging.error("Error calling the thingspeak URL")
return
resultsJson = results.json()
channelsJson = resultsJson['channel']
fields = dict()
for i in range(1,8):
if 'field'+str(i) in channelsJson:
fields['field'+str(i)] = channelsJson['field'+str(i)]
return fields
def get_last_channel(self):
# ---------------------
# WARNING: Not working
# ---------------------
logging.debug("Beginning")
options=dict(api_key = self.apiKey, results = 100)
url = '{ts}channels/{id}/feeds.json'.format(
ts=self.tsRUL,
id=self.channel
)
results = requests.get(url, params=options)
print(results.json())
return
def get_last_field(self, field):
# ---------------------
# WARNING: Not working
# ---------------------
logging.debug("Getting last value for field = " + str(field))
print(field[-1:])
options=dict(api_key = self.apiKey, results = 100)
url = '{ts}channels/{id}/fields/{fld}.json'.format(
ts=self.tsRUL,
id=self.channel,
fld=field[-1:]
)
results = requests.get(url, params=options)
resultsJson = results.json()
channel = resultsJson['channel']
feeds = resultsJson['feeds']
lastEntry = channel['last_entry_id']
print(lastEntry)
return
def post_update(self):
"""POST an update to the channel for all fields with values"""
logging.info("Beginning")
options=dict(
api_key = self.apiKey
)
counter = 0
for key, value in self.field.items():
if value != None:
counter += 1
options[key] = value
if counter == 0:
logging.error("There was nothing to update. Check the field values")
return
url = '{ts}update'.format(
ts=self.tsRUL,
)
logging.debug("Options = " + str(options))
try:
results = requests.post(url, params=options)
if results.ok != True:
logging.error("The update failed")
return False
except:
logging.error("There was an error trying to update the values")
return False
self.clear_field_values()
return True
def clear_field_values(self):
"""Clear out any saved values in the fields within the object"""
logging.info("Clearing values in the field[] dictionary of the object")
logging.debug("Before = " + str(self.field))
for key, value in self.fields.items():
self.field[str(key)] = None
logging.debug("After = " + str(self.field))
return
def field_name(self, name):
"""returns the name of field when description is passed in"""
logging.info("Getting the field name " + str(name))
try:
            fieldName = list(self.fields.keys())[list(self.fields.values()).index(name)]
logging.info("The field name for " + str(name) + " is " + str(fieldName))
return fieldName
except:
logging.error(str(name)+ " Field Name was not found")
return False
| 883
| 3,601
| 23
|
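A hedged usage sketch of the class above; the channel number, API key, and staged values are placeholders. post_update() POSTs every non-None value in self.field and then clears them via clear_field_values().

ts = thingspeak(channel=123456, apiKey='XXXXXXXXXXXXXXXX')  # placeholder credentials
ts.field['field1'] = 21.5    # stage a value for field1
ts.field['field2'] = 48      # stage a value for field2
ts.post_update()             # POSTs staged values, then clears them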
c8aa1d30b2fb5fede83f69d67c6dcdcea0d94f4b
| 83
|
py
|
Python
|
build/lib/psearcher/__init__.py
|
iridesc/psearcher
|
ee64a6c2cf9d326d562ccb06ec0c9e08b5862b2e
|
[
"MIT"
] | 1
|
2019-12-04T15:54:50.000Z
|
2019-12-04T15:54:50.000Z
|
psearcher/__init__.py
|
iridesc/psearcher
|
ee64a6c2cf9d326d562ccb06ec0c9e08b5862b2e
|
[
"MIT"
] | null | null | null |
psearcher/__init__.py
|
iridesc/psearcher
|
ee64a6c2cf9d326d562ccb06ec0c9e08b5862b2e
|
[
"MIT"
] | null | null | null |
from .BaseEngine import BaseEngine
from .Bing import Bing
from .Baidu import Baidu
| 20.75
| 34
| 0.819277
|
from .BaseEngine import BaseEngine
from .Bing import Bing
from .Baidu import Baidu
| 0
| 0
| 0
|
623fad3932fdae3b4cc05b052d24851f20dc14b2
| 157
|
py
|
Python
|
server/app/services/device_data/views/_reports_type/__init__.py
|
goodfree/ActorCloud
|
e8db470830ea6f6f208ad43c2e56a2e8976bc468
|
[
"Apache-2.0"
] | 173
|
2019-06-10T07:14:49.000Z
|
2022-03-31T08:42:36.000Z
|
server/app/services/device_data/views/_reports_type/__init__.py
|
zlyz12345/ActorCloud
|
9c34b371c23464981323ef9865d9913bde1fe09c
|
[
"Apache-2.0"
] | 27
|
2019-06-12T08:25:29.000Z
|
2022-02-26T11:37:15.000Z
|
server/app/services/device_data/views/_reports_type/__init__.py
|
zlyz12345/ActorCloud
|
9c34b371c23464981323ef9865d9913bde1fe09c
|
[
"Apache-2.0"
] | 67
|
2019-06-10T08:40:05.000Z
|
2022-03-09T03:43:56.000Z
|
from .aggr_events import devices_event_aggr_data
__all__ = ['REPORTS_TYPE_FUNC']
REPORTS_TYPE_FUNC = {
'devicesEventAggr': devices_event_aggr_data
}
| 15.7
| 48
| 0.789809
|
from .aggr_events import devices_event_aggr_data
__all__ = ['REPORTS_TYPE_FUNC']
REPORTS_TYPE_FUNC = {
'devicesEventAggr': devices_event_aggr_data
}
| 0
| 0
| 0
|
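REPORTS_TYPE_FUNC is a plain dispatch table: callers look the report type string up and invoke the bound function. A sketch of that lookup (the call arguments are assumptions; real callers likely pass request or query parameters):

func = REPORTS_TYPE_FUNC.get('devicesEventAggr')
if func is not None:
    result = func()  # hypothetical no-arg call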
ba536605d96d30f13a42d4df687616345dedf4d2
| 6,657
|
py
|
Python
|
school/school/migrations/0001_initial.py
|
firemark/sample-school-django
|
3b9758ba62e4a37915e69e607d2a2948f1dc0204
|
[
"MIT"
] | null | null | null |
school/school/migrations/0001_initial.py
|
firemark/sample-school-django
|
3b9758ba62e4a37915e69e607d2a2948f1dc0204
|
[
"MIT"
] | null | null | null |
school/school/migrations/0001_initial.py
|
firemark/sample-school-django
|
3b9758ba62e4a37915e69e607d2a2948f1dc0204
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-18 23:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 138.6875
| 4,712
| 0.700916
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-18 23:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('coutry_code', models.IntegerField(db_index=True)),
('district_code', models.IntegerField(db_index=True)),
('comm_code', models.IntegerField(db_index=True)),
],
),
migrations.CreateModel(
name='School',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('school_code', models.IntegerField(choices=[(1, b'Przedszkole'), (3, b'Szko\xc5\x82a podstawowa'), (4, b'Gimnazjum'), (13, b'Zasadnicza szko\xc5\x82a zawodowa'), (14, b'Liceum og\xc3\xb3lnokszta\xc5\x82c\xc4\x85ce'), (15, b'Liceum profilowane'), (16, b'Technikum'), (17, b'Liceum og\xc3\xb3lnokszta\xc5\x82c\xc4\x85ce uzupe\xc5\x82niaj\xc4\x85ce dla absolwent\xc3\xb3w zasadniczych szk\xc3\xb3\xc5\x82 zawodowych'), (18, b'Technikum uzupe\xc5\x82niaj\xc4\x85ce dla absolwent\xc3\xb3w zasadniczych szk\xc3\xb3\xc5\x82 zawodowych'), (19, b'Szko\xc5\x82a policealna (ponadgimnazjalna)'), (20, b'Szko\xc5\x82a specjalna'), (21, b'Sze\xc5\x9bcioletnia og\xc3\xb3lnokszta\xc5\x82c\xc4\x85ca szko\xc5\x82a muzyczna I stopnia'), (22, b'Sze\xc5\x9bcioletnia szko\xc5\x82a muzyczna I stopnia'), (23, b'Czteroletnia szko\xc5\x82a muzyczna I stopnia'), (24, b'Sze\xc5\x9bcioletnia og\xc3\xb3lnokszta\xc5\x82c\xc4\x85ca szko\xc5\x82a muzyczna II stopnia'), (25, b'Sze\xc5\x9bcioletnia szko\xc5\x82a muzyczna II stopnia'), (26, b'Sze\xc5\x9bcioletnia og\xc3\xb3lnokszta\xc5\x82c\xc4\x85ca szko\xc5\x82a sztuk pi\xc4\x99knych'), (27, b'Czteroletnie liceum plastyczne'), (29, b'Dziewi\xc4\x99cioletnia og\xc3\xb3lnokszta\xc5\x82c\xc4\x85ca szko\xc5\x82a baletowa'), (30, b'Sze\xc5\x9bcioletnia szko\xc5\x82a sztuki ta\xc5\x84ca'), (31, b'Czteroletnia szko\xc5\x82a sztuki cyrkowej'), (32, b'Policealna szko\xc5\x82a artystyczna'), (33, b'Szko\xc5\x82a pomaturalna bibliotekarska'), (34, b'Kolegium nauczycielskie'), (35, b'Nauczycielskie Kolegium J\xc4\x99zyk\xc3\xb3w Obcych'), (36, b'O\xc5\x9brodek politechniczny'), (37, b'Pa\xc5\x82ac m\xc5\x82odzie\xc5\xbcy'), (38, b'M\xc5\x82odzie\xc5\xbcowy dom kultury'), (39, b'Ognisko pracy pozaszkolnej'), (40, b'Mi\xc4\x99dzyszkolny o\xc5\x9brodek sportowy'), (41, b'Ogr\xc3\xb3d jordanowski'), (42, b'Pozaszkolna plac\xc3\xb3wka specjalistyczna'), (43, b'Szkolne schronisko m\xc5\x82odzie\xc5\xbcowe'), (44, b'Plac\xc3\xb3wki artystyczne (ognisko artystyczne)'), (45, b'Centrum Kszta\xc5\x82cenia Praktycznego'), (46, b'Centrum Kszta\xc5\x82cenia Ustawicznego ze szko\xc5\x82ami'), (47, b'O\xc5\x9brodek dokszta\xc5\x82cania i doskonalenia zawodowego'), (48, b'Poradnia psychologiczno-pedagogiczna'), (49, b'Poradnia specjalistyczna'), (50, b'Specjalny O\xc5\x9brodek Wychowawczy'), (51, b'Specjalny O\xc5\x9brodek Szkolno-Wychowawczy'), (52, b'O\xc5\x9brodek Rewalidacyjno-Wychowawczy dla upo\xc5\x9bledzonych umys\xc5\x82owo'), (53, b'M\xc5\x82odzie\xc5\xbcowy O\xc5\x9brodek Wychowawczy'), (54, b'M\xc5\x82odzie\xc5\xbcowy O\xc5\x9brodek Socjoterapii ze szko\xc5\x82ami'), (55, b'Bursa'), (56, b'Dom wczas\xc3\xb3w dzieci\xc4\x99cych'), (57, b'Plac\xc3\xb3wka doskonalenia nauczycieli'), (58, b'Biblioteki pedagogiczne'), (59, b'Publiczna plac\xc3\xb3wka opieku\xc5\x84czo-wychowawcza w systemie pomocy spo\xc5\x82ecznej'), (60, b'Zak\xc5\x82ad poprawczy'), (61, b'Schronisko dla nieletnich'), (62, b'Rodzinny o\xc5\x9brodek diagnostyczno-konsultacyjny'), (63, b'Publiczny o\xc5\x9brodek adopcyjno-opieku\xc5\x84czy'), (64, b'Niepubliczna plac\xc3\xb3wka o\xc5\x9bwiatowo-wychowawcza w systemie o\xc5\x9bwiaty'), (65, b'Kolegium Pracownik\xc3\xb3w S\xc5\x82u\xc5\xbcb Spo\xc5\x82ecznych'), (66, b'Szko\xc5\x82a pomaturalna animator\xc3\xb3w kultury'), (67, b'Delegatura'), (68, b'Internat'), (69, b'Czteroletnia szko\xc5\x82a muzyczna II stopnia'), (70, b'Dziewi\xc4\x99cioletnia szko\xc5\x82a sztuki ta\xc5\x84ca'), (73, b'Szko\xc5\x82a specjalna przysposabiaj\xc4\x85ca do pracy na podbudowie 8-letniej szko\xc5\x82y podstawowej'), (74, b'Centrum Kszta\xc5\x82cenia Ustawicznego - bez szk\xc3\xb3\xc5\x82'), (75, b'Niepubliczna plac\xc3\xb3wka kszta\xc5\x82cenia ustawicznego i praktycznego'), (76, b'M\xc5\x82odzie\xc5\xbcowy O\xc5\x9brodek Socjoterapii bez szk\xc3\xb3\xc5\x82'), (80, b'Zesp\xc3\xb3\xc5\x82 wychowania przedszkolnego'), (81, b'Punkt przedszkolny'), (82, b'Pozna\xc5\x84ska szko\xc5\x82a ch\xc3\xb3ralna '), (83, b'Niepubliczna plac\xc3\xb3wka kszta\xc5\x82cenia ustawicznego i praktycznego ze szko\xc5\x82ami'), (90, b'Bednarska Szko\xc5\x82a Realna'), (100, b'Zesp\xc3\xb3\xc5\x82 szk\xc3\xb3\xc5\x82 i plac\xc3\xb3wek o\xc5\x9bwiatowych'), (101, b'Minister'), (102, b'Kuratorium o\xc5\x9bwiaty (wraz z delegaturami)'), (103, b'Jednostka Samorz\xc4\x85du Terytorialnego'), (104, b'ZEAS (zesp\xc3\xb3\xc5\x82 obs\xc5\x82ugi ekonomiczno-administracyjnej szk\xc3\xb3\xc5\x82)'), (105, b'Specjalistyczna jednostka nadzoru'), (106, b'Organ sprawuj\xc4\x85cy nadz\xc3\xb3r pedagogiczny nad zak\xc5\x82adami poprawczymi(\xe2\x80\xa6)'), (107, b'Centralna Komisja Egzaminacyjna'), (108, b'Okr\xc4\x99gowa Komisja Egzaminacyjna')], db_index=True)),
('name', models.CharField(max_length=100)),
('street', models.CharField(max_length=100)),
('street_nr', models.CharField(max_length=10)),
('zip_code', models.CharField(max_length=10)),
('patron', models.CharField(blank=True, max_length=150, null=True)),
('site', models.CharField(blank=True, max_length=200, null=True)),
('telephone', models.CharField(blank=True, max_length=30, null=True)),
('boys', models.IntegerField(default=0)),
('girls', models.IntegerField(default=0)),
('agencies', models.IntegerField(default=0)),
('fulltime_teachers', models.IntegerField(default=0)),
('nonfulltime_teachers', models.IntegerField(default=0)),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='school.City')),
],
),
]
| 0
| 6,447
| 23
|
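Once applied, the choices on school_code give School the generated get_school_code_display() helper, and the FK supports the usual double-underscore lookups. A hedged query sketch, assuming the models live in school.models and the data values shown exist:

from school.models import School

technikum = School.objects.filter(school_code=16)       # 16 = 'Technikum'
in_city = School.objects.filter(city__name='Warszawa')  # follow the City FK
label = technikum.first().get_school_code_display()     # human-readable choice label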
a3beca4cb5005e736958d53aabeebe0736bd64f0
| 108
|
py
|
Python
|
run.py
|
TinLe/fantasticsearch
|
dfd43f56d70c33738f69f08fd9d613401dbc5634
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
TinLe/fantasticsearch
|
dfd43f56d70c33738f69f08fd9d613401dbc5634
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
TinLe/fantasticsearch
|
dfd43f56d70c33738f69f08fd9d613401dbc5634
|
[
"Apache-2.0"
] | null | null | null |
#!flask/bin/python
from fantasticsearch import fantasticsearch
fantasticsearch.run(debug=True, port=5001)
| 18
| 43
| 0.824074
|
#!flask/bin/python
from fantasticsearch import fantasticsearch
fantasticsearch.run(debug=True, port=5001)
| 0
| 0
| 0
|
2b0a3505c24baa750d6115f2ad0f6544ab52f9c6
| 201
|
py
|
Python
|
ci.py
|
arelemegha/python-programs
|
c9af116c0db45dcd13d97e80a32733df372fe2d4
|
[
"CC0-1.0"
] | null | null | null |
ci.py
|
arelemegha/python-programs
|
c9af116c0db45dcd13d97e80a32733df372fe2d4
|
[
"CC0-1.0"
] | null | null | null |
ci.py
|
arelemegha/python-programs
|
c9af116c0db45dcd13d97e80a32733df372fe2d4
|
[
"CC0-1.0"
] | null | null | null |
p = float(input("Enter the principle amount : "))
r = float(input("Enter the rate : "))
t = int(input("Enter the time: "))
a = p * (pow((1 + r / 100), t))
ci = a-p
print("compound interest is : ", ci)
| 28.714286
| 49
| 0.587065
|
p = float(input("Enter the principle amount : "))
r = float(input("Enter the rate : "))
t = int(input("Enter the time: "))
a = p * (pow((1 + r / 100), t))
ci = a-p
print("compound interest is : ", ci)
| 0
| 0
| 0
|
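A worked instance of the formula the script computes, a = p * (1 + r/100)**t: with p = 1000, r = 5 and t = 2, a = 1000 * 1.05**2 = 1102.5, so the compound interest is 102.5.

p, r, t = 1000.0, 5.0, 2
a = p * (1 + r / 100) ** t   # 1102.5
print(a - p)                 # compound interest: 102.5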
ec82a1fc36f35ef6fe3a1fa212ae968fe8f68e2b
| 386
|
py
|
Python
|
src/browserist/browser/get/element.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | 2
|
2022-02-20T10:03:19.000Z
|
2022-03-22T11:17:10.000Z
|
src/browserist/browser/get/element.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | null | null | null |
src/browserist/browser/get/element.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | null | null | null |
from selenium.webdriver.common.by import By
from ...constant import timeout
from ...model.type.xpath import XPath
from ..wait.for_element import wait_for_element
| 32.166667
| 86
| 0.753886
|
from selenium.webdriver.common.by import By
from ...constant import timeout
from ...model.type.xpath import XPath
from ..wait.for_element import wait_for_element
def get_element(driver: object, xpath: str, timeout: int = timeout.DEFAULT) -> object:
xpath = XPath(xpath)
wait_for_element(driver, xpath, timeout)
return driver.find_element(By.XPATH, xpath) # type: ignore
| 199
| 0
| 23
|
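A hedged usage sketch of get_element; the Chrome driver, URL, and XPath are placeholders. The call blocks (up to the default timeout) until the element appears, then returns it.

from selenium import webdriver

driver = webdriver.Chrome()
driver.get('https://example.com')
heading = get_element(driver, '//h1')  # waits, then finds the element by XPath
print(heading.text)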
034de1341e667b7696b2129e6182000c170e9094
| 73
|
py
|
Python
|
AnalyzeSystem/f0_test.py
|
keel-210/AutoSinger
|
c9a855570b9222befca11072f47632b4ddebe294
|
[
"MIT"
] | null | null | null |
AnalyzeSystem/f0_test.py
|
keel-210/AutoSinger
|
c9a855570b9222befca11072f47632b4ddebe294
|
[
"MIT"
] | null | null | null |
AnalyzeSystem/f0_test.py
|
keel-210/AutoSinger
|
c9a855570b9222befca11072f47632b4ddebe294
|
[
"MIT"
] | null | null | null |
import pyworld as pyworld
import pysptk as sptk
import pyreaper as reaper
| 24.333333
| 25
| 0.849315
|
import pyworld as pyworld
import pysptk as sptk
import pyreaper as reaper
| 0
| 0
| 0
|
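The three imports point at f0 (pitch) analysis; a minimal sketch with pyworld's dio estimator, assuming a mono float64 signal x at sample rate fs (both placeholders here):

import numpy as np
import pyworld

fs = 16000
x = np.zeros(fs)                          # stand-in for 1 s of mono audio
f0, times = pyworld.dio(x, fs)            # coarse per-frame f0 estimate
f0 = pyworld.stonemask(x, f0, times, fs)  # refined estimate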
f9ed84d75a8201857334c9dfb6e24601b014935d
| 1,622
|
py
|
Python
|
pages/themes/DBMS-Lecture2/examples/mongo_CRUD.py
|
WWWCourses/PythonCourseNetIT-Slides
|
78dbb5eb7695cc64042b71a1911d4ef3feddb074
|
[
"MIT"
] | null | null | null |
pages/themes/DBMS-Lecture2/examples/mongo_CRUD.py
|
WWWCourses/PythonCourseNetIT-Slides
|
78dbb5eb7695cc64042b71a1911d4ef3feddb074
|
[
"MIT"
] | null | null | null |
pages/themes/DBMS-Lecture2/examples/mongo_CRUD.py
|
WWWCourses/PythonCourseNetIT-Slides
|
78dbb5eb7695cc64042b71a1911d4ef3feddb074
|
[
"MIT"
] | null | null | null |
import pymongo
# ------------------------- Connect to MongoDB Server ------------------------ #
# connect to MongoDB server:
client = pymongo.MongoClient("mongodb://localhost:27017")
# ----------------------- Switch context to a database ----------------------- #
# get "python_course" database:
db = client.python_course
# ------------------- Show all Collections in the database: ------------------ #
# get all collections in the database:
collections = db.list_collection_names()
# print(collections)
# ---------------------------------- Create ---------------------------------- #
# insert a new document into "todos" collection:
res = db.todos.insert_one({"title": "Learn MongoDB", "done": False})
# get the id of the inserted document:
# print(res.inserted_id)
# insert multiple documents into "todos" collection:
res = db.todos.insert_many([
{"title": "Learn Python", "done": True},
{"title": "Learn Flask", "done": False},
{"title": "Learn Flask-MongoDB", "done": False}
])
# print(res.inserted_ids)
# insert multiple documents with different shape into "todos" collection:
res = db.todos.insert_many([
{"title": "Learn Python", "done": True},
{"title": "Learn Flask", "description":"Learn Flask to develop quick and easy web applications with the ability to scale up."},
{"title": "Learn MongoDB", "due": "2021-12-31"}
])
# print(list(db.todos.find())[-3:-1])
# ----------------------------------- Read ----------------------------------- #
# find first document in "todos" collection:
print(db.todos.find_one())
# find all documents in "todos" collection:
for todo in db.todos.find():
print(todo)
| 36.044444
| 128
| 0.586313
|
import pymongo
# ------------------------- Connect to MongoDB Server ------------------------ #
# connect to MongoDB server:
client = pymongo.MongoClient("mongodb://localhost:27017")
# ----------------------- Switch context to a database ----------------------- #
# get "python_course" database:
db = client.python_course
# ------------------- Show all Collections in the database: ------------------ #
# get all collections in the database:
collections = db.list_collection_names()
# print(collections)
# ---------------------------------- Create ---------------------------------- #
# insert a new document into "todos" collection:
res = db.todos.insert_one({"title": "Learn MongoDB", "done": False})
# get the id of the inserted document:
# print(res.inserted_id)
# insert multiple documents into "todos" collection:
res = db.todos.insert_many([
{"title": "Learn Python", "done": True},
{"title": "Learn Flask", "done": False},
{"title": "Learn Flask-MongoDB", "done": False}
])
# print(res.inserted_ids)
# insert multiple documents with different shape into "todos" collection:
res = db.todos.insert_many([
{"title": "Learn Python", "done": True},
{"title": "Learn Flask", "description":"Learn Flask to develop quick and easy web applications with the ability to scale up."},
{"title": "Learn MongoDB", "due": "2021-12-31"}
])
# print(list(db.todos.find())[-3:-1])
# ----------------------------------- Read ----------------------------------- #
# find first document in "todos" collection:
print(db.todos.find_one())
# find all documents in "todos" collection:
for todo in db.todos.find():
print(todo)
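The snippet covers Create and Read; for completeness, a sketch of the Update and Delete halves of CRUD against the same collection (the filter values are assumptions):

# ---------------------------------- Update ---------------------------------- #
# set "done" on the first matching document:
db.todos.update_one({"title": "Learn MongoDB"}, {"$set": {"done": True}})
# set "done" on every matching document:
db.todos.update_many({"done": False}, {"$set": {"done": True}})
# ---------------------------------- Delete ---------------------------------- #
# remove one document, then everything left in the collection:
db.todos.delete_one({"title": "Learn Flask"})
db.todos.delete_many({})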
| 0
| 0
| 0
|
9b780f6fac390d0cc6693e2c3cd1ca78f31f2eb6
| 348
|
py
|
Python
|
opensenate/tests/parliamentarians_test.py
|
g0ulartleo/opendata-senado
|
091d060d55d49f844d192baa1c0aef1aa039f1c0
|
[
"MIT"
] | null | null | null |
opensenate/tests/parliamentarians_test.py
|
g0ulartleo/opendata-senado
|
091d060d55d49f844d192baa1c0aef1aa039f1c0
|
[
"MIT"
] | null | null | null |
opensenate/tests/parliamentarians_test.py
|
g0ulartleo/opendata-senado
|
091d060d55d49f844d192baa1c0aef1aa039f1c0
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from ..parliamentarians import SenatorClient
| 29
| 45
| 0.695402
|
from unittest import TestCase
from ..parliamentarians import SenatorClient
class TestParliamentarians(TestCase):
def test_get_senator(self):
senator_client = SenatorClient()
senators = senator_client.get()
self.assertIsInstance(senators, list)
        self.assertGreater(len(senators), 0, "Empty list!")
| 207
| 16
| 49
|
d06ea409af79d80d23b209f9aa5dc10f5c6cf8f0
| 733
|
py
|
Python
|
molecule/blockbook/tests/test_chain.py
|
trustwallet/ansible-collection-blockchain
|
4ea4ae041dc4625ba914925cccafebefd99c672f
|
[
"MIT"
] | 1
|
2022-03-24T21:27:27.000Z
|
2022-03-24T21:27:27.000Z
|
molecule/blockbook/tests/test_chain.py
|
trustwallet/ansible-collection-blockchain
|
4ea4ae041dc4625ba914925cccafebefd99c672f
|
[
"MIT"
] | null | null | null |
molecule/blockbook/tests/test_chain.py
|
trustwallet/ansible-collection-blockchain
|
4ea4ae041dc4625ba914925cccafebefd99c672f
|
[
"MIT"
] | 2
|
2022-03-19T17:34:56.000Z
|
2022-03-23T19:08:55.000Z
|
import os
import pytest
import testinfra.utils.ansible_runner
from ansible.template import Templar
from ansible.parsing.dataloader import DataLoader
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.chain("firo")
@pytest.mark.chain("firo")
@pytest.mark.chain("firo")
| 27.148148
| 63
| 0.768076
|
import os
import pytest
import testinfra.utils.ansible_runner
from ansible.template import Templar
from ansible.parsing.dataloader import DataLoader
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.chain("firo")
def test_blockbook_firo_running_and_enabled(host):
s = host.service('blockbook-firo')
assert s.is_enabled
assert s.is_running
@pytest.mark.chain("firo")
def test_backend_firo_running_and_enabled(host):
s = host.service('backend-firo')
assert s.is_enabled
assert s.is_running
@pytest.mark.chain("firo")
def test_http_is_listening(host):
s = host.socket("tcp://0.0.0.0:9150")
assert s.is_listening
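The first two tests differ only in the service name; a sketch of how they could be collapsed with pytest.mark.parametrize, reusing the same host fixture and chain marker as above:

@pytest.mark.chain("firo")
@pytest.mark.parametrize("service", ["blockbook-firo", "backend-firo"])
def test_service_running_and_enabled(host, service):
    s = host.service(service)
    assert s.is_enabled
    assert s.is_running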
| 308
| 0
| 66
|
a131c678638de05ea4d2a12998c4a9866393a59c
| 1,008
|
py
|
Python
|
networks/spm.py
|
kamiLight/CADepth-master
|
8251f12f21393aae3261c3765218063cea1cae30
|
[
"MIT"
] | 11
|
2022-01-03T14:56:40.000Z
|
2022-03-01T06:37:05.000Z
|
networks/spm.py
|
kamiLight/CADepth-master
|
8251f12f21393aae3261c3765218063cea1cae30
|
[
"MIT"
] | 1
|
2022-03-23T01:28:56.000Z
|
2022-03-24T00:46:44.000Z
|
networks/spm.py
|
kamiLight/CADepth-master
|
8251f12f21393aae3261c3765218063cea1cae30
|
[
"MIT"
] | 1
|
2022-01-14T08:25:50.000Z
|
2022-01-14T08:25:50.000Z
|
import torch
import torch.nn as nn
class SPM(nn.Module):
""" Structure Perception Module """
def forward(self,x):
"""
inputs :
x : input feature maps(B X C X H X W)
returns :
out : attention value + input feature
attention: B X C X C
"""
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
attention = self.softmax(energy_new)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = out + x
return out
| 32.516129
| 84
| 0.5625
|
import torch
import torch.nn as nn
class SPM(nn.Module):
""" Structure Perception Module """
def __init__(self, in_dim):
super(SPM, self).__init__()
self.chanel_in = in_dim
self.softmax = nn.Softmax(dim=-1)
def forward(self,x):
"""
inputs :
x : input feature maps(B X C X H X W)
returns :
out : attention value + input feature
attention: B X C X C
"""
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
attention = self.softmax(energy_new)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = out + x
return out
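A minimal smoke test of SPM (the tensor sizes are arbitrary assumptions): the attention matrix is B x C x C, and the residual addition preserves the input shape.

spm = SPM(in_dim=64)
x = torch.randn(2, 64, 16, 16)   # B x C x H x W
out = spm(x)
assert out.shape == x.shape      # residual connection keeps the shape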
| 116
| 0
| 26
|
d5ee6b368b0d4584ed1bd6908ed0752e007fd580
| 558
|
py
|
Python
|
examples/serialized_declare.py
|
jmptbl/puka
|
bc6e845c8a5f4319ffc54cc71959c56a8893edaf
|
[
"MIT"
] | 81
|
2015-01-22T10:09:37.000Z
|
2022-01-25T04:38:29.000Z
|
examples/serialized_declare.py
|
ov7a/puka
|
d2a1a8747b417021d6f18df7ee73d336f670f5a9
|
[
"MIT"
] | 17
|
2015-05-13T01:51:53.000Z
|
2021-03-05T04:01:39.000Z
|
examples/serialized_declare.py
|
ov7a/puka
|
d2a1a8747b417021d6f18df7ee73d336f670f5a9
|
[
"MIT"
] | 25
|
2015-02-12T14:02:41.000Z
|
2020-05-15T14:08:43.000Z
|
#!/usr/bin/env python
import sys
sys.path.append("..")
import logging
FORMAT_CONS = '%(asctime)s %(name)-12s %(levelname)8s\t%(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT_CONS)
import puka
client = puka.Client("amqp://localhost/")
promise = client.connect()
client.wait(promise)
for i in range(1000):
promise = client.queue_declare(queue='a%04i' % i)
client.wait(promise)
for i in range(1000):
promise = client.queue_delete(queue='a%04i' % i)
client.wait(promise)
promise = client.close()
client.wait(promise)
| 19.241379
| 67
| 0.706093
|
#!/usr/bin/env python
import sys
sys.path.append("..")
import logging
FORMAT_CONS = '%(asctime)s %(name)-12s %(levelname)8s\t%(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT_CONS)
import puka
client = puka.Client("amqp://localhost/")
promise = client.connect()
client.wait(promise)
for i in range(1000):
promise = client.queue_declare(queue='a%04i' % i)
client.wait(promise)
for i in range(1000):
promise = client.queue_delete(queue='a%04i' % i)
client.wait(promise)
promise = client.close()
client.wait(promise)
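The script above waits after every call; since puka returns promises, the declares can also be issued first and awaited afterwards. A sketch of that pipelined variant, using only the calls already shown:

promises = [client.queue_declare(queue='a%04i' % i) for i in range(1000)]
for promise in promises:
    client.wait(promise)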
| 0
| 0
| 0
|
21beef38496aa0556afe362af0fdf823475e15cb
| 3,065
|
py
|
Python
|
tssimul.py
|
jli05/CS229-TimeSeries-LSTM
|
366b87d81bbdfecd74c97ffb5387622c0d60d43d
|
[
"Apache-2.0"
] | null | null | null |
tssimul.py
|
jli05/CS229-TimeSeries-LSTM
|
366b87d81bbdfecd74c97ffb5387622c0d60d43d
|
[
"Apache-2.0"
] | null | null | null |
tssimul.py
|
jli05/CS229-TimeSeries-LSTM
|
366b87d81bbdfecd74c97ffb5387622c0d60d43d
|
[
"Apache-2.0"
] | null | null | null |
''' Simulate ARIMA(p, 0, q) model '''
import argparse
import numpy as np
from scipy import stats
def simulate_eps(sigma, size, dist='normal', df=None):
''' Simulate innovation '''
n_samples, length_sample = size
if dist.startswith('n'):
eps = np.random.standard_normal(size) * sigma
elif dist.startswith('t'):
eps = np.random.standard_t(df, size) / np.sqrt(df / (df - 2)) * sigma
elif dist.startswith('exp'):
eps = np.random.exponential(sigma, size) - sigma
else:
raise ValueError(f'Unrecognised distribution "{dist}"')
return eps
def simulate_arima_given_innov(ar, ma, eps):
''' Simulate ARIMA '''
n_samples, length_sample = eps.shape
order_p = len(ar)
order_q = len(ma)
assert order_p >= order_q
samples = np.zeros_like(eps)
samples_pre_innov = np.zeros_like(eps)
for i in range(order_p, length_sample):
samples[:, i] = (samples[:, (i - order_p):i].dot(ar[::-1]) + eps[:, i]
+ eps[:, (i - order_q):i].dot(ma[::-1]))
samples_pre_innov[:, order_p:] = samples[:, order_p:] - eps[:, order_p:]
return np.concatenate([samples[:, None, :], samples_pre_innov[:, None, :],
eps[:, None, :]], axis=1)
def simulate_arima(ar, ma, sigma, size, dist='normal', df=None):
''' Simulate ARIMA '''
eps = simulate_eps(sigma, size, dist, df)
ts = simulate_arima_given_innov(ar, ma, eps)
return ts
def simulate_sv(beta, sigma, intercept, size):
''' Simulate log-variance with AR1 '''
n_samples, length_sample = size
# Generate first the variational part with zero intercept
# Finally add intercept to the entire array
logvar = np.zeros(size)
eps = np.random.standard_normal(size) * sigma
logvar[:, 0] = 3 * eps[:, 0]
for i in range(1, length_sample):
logvar[:, i] = beta * logvar[:, i - 1] + eps[:, i]
var = np.exp(intercept + logvar)
sv = np.random.normal(0, np.sqrt(var / 255), size)
return sv, np.sqrt(var)
def simulate_rs(p0, p00, p10, mu, sigma, size):
''' Simulate regime-switching model '''
n_samples, length_sample = size
eps = [np.random.normal(m, s / np.sqrt(255), size)
for m, s in zip(mu, sigma)]
eps = np.stack(eps, axis=2)
# Simulate regimes
prob0 = np.zeros(size)
regime = np.zeros(size)
prob0[:, 0] = p0
regime[:, 0] = 1 - np.random.binomial(1, p0, n_samples)
for i in range(1, length_sample):
prob0[:, i] = np.where(regime[:, i - 1] == 0, p00, p10)
regime[:, i] = 1 - np.random.binomial(1, prob0[:, i], n_samples)
ts = np.where(regime == 0, eps[:, :, 0], eps[:, :, 1])
return ts, prob0, regime
if __name__ == '__main__':
ar = [0.0868, 0.3667]
ma = [-0.1150, -0.4068]
sigma = .0112
    simulation = simulate_arima(ar, ma, sigma, (5000, 1000))
    simulation_test = simulate_arima(ar, ma, sigma, (500, 1000))
np.savez_compressed('simulation', data=simulation)
np.savez_compressed('simulation_test', data=simulation_test)
| 32.606383
| 78
| 0.604894
|
''' Simulate ARIMA(p, 0, q) model '''
import argparse
import numpy as np
from scipy import stats
def simulate_eps(sigma, size, dist='normal', df=None):
''' Simulate innovation '''
n_samples, length_sample = size
if dist.startswith('n'):
eps = np.random.standard_normal(size) * sigma
elif dist.startswith('t'):
eps = np.random.standard_t(df, size) / np.sqrt(df / (df - 2)) * sigma
elif dist.startswith('exp'):
eps = np.random.exponential(sigma, size) - sigma
else:
raise ValueError(f'Unrecognised distribution "{dist}"')
return eps
def simulate_arima_given_innov(ar, ma, eps):
''' Simulate ARIMA '''
n_samples, length_sample = eps.shape
order_p = len(ar)
order_q = len(ma)
assert order_p >= order_q
samples = np.zeros_like(eps)
samples_pre_innov = np.zeros_like(eps)
for i in range(order_p, length_sample):
samples[:, i] = (samples[:, (i - order_p):i].dot(ar[::-1]) + eps[:, i]
+ eps[:, (i - order_q):i].dot(ma[::-1]))
samples_pre_innov[:, order_p:] = samples[:, order_p:] - eps[:, order_p:]
return np.concatenate([samples[:, None, :], samples_pre_innov[:, None, :],
eps[:, None, :]], axis=1)
def simulate_arima(ar, ma, sigma, size, dist='normal', df=None):
''' Simulate ARIMA '''
eps = simulate_eps(sigma, size, dist, df)
ts = simulate_arima_given_innov(ar, ma, eps)
return ts
def simulate_sv(beta, sigma, intercept, size):
''' Simulate log-variance with AR1 '''
n_samples, length_sample = size
# Generate first the variational part with zero intercept
# Finally add intercept to the entire array
logvar = np.zeros(size)
eps = np.random.standard_normal(size) * sigma
logvar[:, 0] = 3 * eps[:, 0]
for i in range(1, length_sample):
logvar[:, i] = beta * logvar[:, i - 1] + eps[:, i]
var = np.exp(intercept + logvar)
sv = np.random.normal(0, np.sqrt(var / 255), size)
return sv, np.sqrt(var)
def simulate_rs(p0, p00, p10, mu, sigma, size):
''' Simulate regime-switching model '''
n_samples, length_sample = size
eps = [np.random.normal(m, s / np.sqrt(255), size)
for m, s in zip(mu, sigma)]
eps = np.stack(eps, axis=2)
# Simulate regimes
prob0 = np.zeros(size)
regime = np.zeros(size)
prob0[:, 0] = p0
regime[:, 0] = 1 - np.random.binomial(1, p0, n_samples)
for i in range(1, length_sample):
prob0[:, i] = np.where(regime[:, i - 1] == 0, p00, p10)
regime[:, i] = 1 - np.random.binomial(1, prob0[:, i], n_samples)
ts = np.where(regime == 0, eps[:, :, 0], eps[:, :, 1])
return ts, prob0, regime
if __name__ == '__main__':
ar = [0.0868, 0.3667]
ma = [-0.1150, -0.4068]
sigma = .0112
    simulation = simulate_arima(ar, ma, sigma, (5000, 1000))
    simulation_test = simulate_arima(ar, ma, sigma, (500, 1000))
np.savez_compressed('simulation', data=simulation)
np.savez_compressed('simulation_test', data=simulation_test)
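The other simulators in the module share the (n_samples, length_sample) size convention; a sketch with assumed parameter values:

sv, vol = simulate_sv(beta=0.95, sigma=0.2, intercept=-2.0, size=(100, 500))
ts, prob0, regime = simulate_rs(p0=0.9, p00=0.95, p10=0.1,
                                mu=[0.05, -0.1], sigma=[0.1, 0.3],
                                size=(100, 500))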
| 0
| 0
| 0
|
c150f8cf6799b8c70596657c3001737e829dd448
| 24,931
|
py
|
Python
|
dm_alchemy/types/unity_python_conversion.py
|
locross93/dm_alchemy
|
35449de51d56c427959ae6a3be13d6c6ab738be5
|
[
"Apache-2.0"
] | 182
|
2021-02-08T15:25:06.000Z
|
2022-03-31T00:46:23.000Z
|
dm_alchemy/types/unity_python_conversion.py
|
locross93/dm_alchemy
|
35449de51d56c427959ae6a3be13d6c6ab738be5
|
[
"Apache-2.0"
] | 6
|
2021-02-12T10:42:51.000Z
|
2022-03-14T23:59:45.000Z
|
dm_alchemy/types/unity_python_conversion.py
|
locross93/dm_alchemy
|
35449de51d56c427959ae6a3be13d6c6ab738be5
|
[
"Apache-2.0"
] | 18
|
2021-02-08T20:37:22.000Z
|
2022-03-15T20:54:14.000Z
|
# Lint as python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Code to convert between unity types and python types."""
import copy
import itertools
from typing import Any, Dict, Sequence, Tuple
from dm_alchemy.protos import alchemy_pb2
from dm_alchemy.protos import hypercube_pb2
from dm_alchemy.types import graphs
from dm_alchemy.types import stones_and_potions
from dm_alchemy.types import utils
import frozendict
import numpy as np
from dm_alchemy.protos import color_info_pb2
from dm_alchemy.protos import unity_types_pb2
PotionMap = stones_and_potions.PotionMap
StoneMap = stones_and_potions.StoneMap
AlignedStone = stones_and_potions.AlignedStone
PerceivedStone = stones_and_potions.PerceivedStone
PerceivedPotion = stones_and_potions.PerceivedPotion
LatentStone = stones_and_potions.LatentStone
LatentPotion = stones_and_potions.LatentPotion
MapsAndGraph = Tuple[PotionMap, StoneMap, graphs.Graph]
COLOR_TYPE = alchemy_pb2.PerceptualMappingApplicator.Type.COLOR
SIZE_TYPE = alchemy_pb2.PerceptualMappingApplicator.Type.SIZE
ROUNDNESS_TYPE = alchemy_pb2.PerceptualMappingApplicator.Type.ROUNDNESS
# Colours are defined in AlchemyColors.asset
_STONE_COLOURS = frozendict.frozendict({
'purple': unity_types_pb2.Color(
r=0.52156866, g=0.22745098, b=0.6313726, a=1.0),
'blurple': unity_types_pb2.Color(
r=0.2608, g=0.2667, b=0.5941, a=1.0),
'blue': unity_types_pb2.Color(
r=0.0, g=0.30588236, b=0.5568628, a=1.0)
})
_POTION_COLOURS = frozendict.frozendict({
'green': unity_types_pb2.Color(
r=0.24394463, g=0.6911765, b=0.35806578, a=1.0),
'red': unity_types_pb2.Color(
r=0.9647059, g=0.015686275, b=0.06666667, a=1.0),
'yellow': unity_types_pb2.Color(
r=0.9411765, g=0.84705883, b=0.078431375, a=1.0),
'orange': unity_types_pb2.Color(
r=0.9764706, g=0.4, b=0.10980392, a=1.0),
'turquoise': unity_types_pb2.Color(
r=0.21176471, g=0.72156864, b=0.7411765, a=1.0),
'pink': unity_types_pb2.Color(
r=0.9843137, g=0.43529412, b=0.43529412, a=1.0)
})
# This is the order of perceived axes in unity.
PERCEIVED_AXIS = (COLOR_TYPE, SIZE_TYPE, ROUNDNESS_TYPE)
AXIS_NUMBER = frozendict.frozendict({
a: i for i, a in enumerate(PERCEIVED_AXIS)})
SIZE_NAME_AT_COORD = frozendict.frozendict(
{-1: 'small', 0: 'medium', 1: 'large'})
_STONE_SIZES = frozendict.frozendict(
{'small': 1.0, 'medium': 1.4, 'large': 1.8})
SIZE_AT_COORD = frozendict.frozendict(
{coord: _STONE_SIZES[name] for coord, name in SIZE_NAME_AT_COORD.items()})
_COORD_AT_SIZE = frozendict.frozendict({v: k for k, v in SIZE_AT_COORD.items()})
ROUNDNESS_NAME_AT_COORD = frozendict.frozendict(
{-1: 'pointy', 0: 'somewhat pointy', 1: 'round'})
_STONE_ROUNDNESSES = frozendict.frozendict(
{'pointy': 0.0, 'somewhat pointy': 0.5, 'round': 1.0})
ROUNDNESS_AT_COORD = frozendict.frozendict(
{coord: _STONE_ROUNDNESSES[name]
for coord, name in ROUNDNESS_NAME_AT_COORD.items()})
_COORD_AT_ROUNDNESS = frozendict.frozendict({
v: k for k, v in ROUNDNESS_AT_COORD.items()})
# The colour proto is not hashable so convert to a type which is.
COLOUR_NAME_AT_COORD = frozendict.frozendict(
{-1: 'purple', 0: 'blurple', 1: 'blue'})
COLOUR_AT_COORD = frozendict.frozendict({
coord: _STONE_COLOURS[name]
for coord, name in COLOUR_NAME_AT_COORD.items()})
_COORD_AT_COLOUR = frozendict.frozendict(
{colour_proto_to_hashable(v): k for k, v in COLOUR_AT_COORD.items()})
POTION_COLOUR_AT_PERCEIVED_POTION = frozendict.frozendict({
PerceivedPotion(0, 1): 'green',
PerceivedPotion(0, -1): 'red',
PerceivedPotion(1, 1): 'yellow',
PerceivedPotion(1, -1): 'orange',
PerceivedPotion(2, 1): 'turquoise',
PerceivedPotion(2, -1): 'pink',
})
_PERCEIVED_POTION_AT_POTION_COLOUR = frozendict.frozendict({
colour_proto_to_hashable(_POTION_COLOURS[v]): k
for k, v in POTION_COLOUR_AT_PERCEIVED_POTION.items()})
def to_stone_unity_properties(
perceived_stone: PerceivedStone, latent_stone: LatentStone
) -> alchemy_pb2.StoneProperties:
"""Convert a perceived and latent stone to StoneProperties."""
return alchemy_pb2.StoneProperties(
reward=15 if perceived_stone.reward > 2 else perceived_stone.reward,
latent=latent_stone_to_unity(latent_stone),
**perceptual_features(perceived_stone))
def unity_to_perceived_stone(
stone_properties: alchemy_pb2.StoneProperties
) -> PerceivedStone:
"""Convert StoneProperties to a perceived stone."""
size = _COORD_AT_SIZE[round(stone_properties.size, 1)]
roundness = _COORD_AT_ROUNDNESS[round(stone_properties.roundness, 1)]
colour = _COORD_AT_COLOUR[colour_proto_to_hashable(stone_properties.color)]
  # Store the perceived coordinates as floats, one entry per perceptual axis.
perceived_coords = np.array([0, 0, 0], dtype=np.float)
perceived_coords[AXIS_NUMBER[SIZE_TYPE]] = size
perceived_coords[AXIS_NUMBER[ROUNDNESS_TYPE]] = roundness
perceived_coords[AXIS_NUMBER[COLOR_TYPE]] = colour
latent_stone = _unity_to_latent_stone(stone_properties.latent)
return PerceivedStone(latent_stone.reward(), perceived_coords)
def _from_stone_unity_properties(
stone_properties: alchemy_pb2.StoneProperties,
rotation: np.ndarray
) -> Tuple[PerceivedStone, AlignedStone, LatentStone]:
"""Convert StoneProperties to a perceived and latent stone."""
latent_stone = _unity_to_latent_stone(stone_properties.latent)
perceived_stone = unity_to_perceived_stone(stone_properties)
aligned_stone = stones_and_potions.align(perceived_stone, rotation)
return perceived_stone, aligned_stone, latent_stone
def to_potion_unity_properties(
perceived_potion: PerceivedPotion, latent_potion: LatentPotion,
graph: graphs.Graph
) -> alchemy_pb2.PotionProperties:
"""Convert a perceived and latent potion and graph to PotionProperties."""
colour_name = POTION_COLOUR_AT_PERCEIVED_POTION[perceived_potion]
colour = get_colour_info((colour_name, _POTION_COLOURS[colour_name]))
reactions = set()
for startnode, endnodes in graph.edge_list.edges.items():
expected_end_coords = copy.deepcopy(startnode.coords)
expected_end_coords[latent_potion.latent_dim] = (
startnode.coords[latent_potion.latent_dim] + 2 *
latent_potion.latent_dir)
expected_end_node = graph.node_list.get_node_by_coords(
expected_end_coords)
if not expected_end_node:
continue
if expected_end_node in endnodes:
reactions.add((startnode.idx, expected_end_node.idx))
reactions = [alchemy_pb2.PotionReaction(from_stone_index=from_stone,
to_stone_index=to_stone)
for from_stone, to_stone in reactions]
sorted_reactions = sorted(
reactions, key=lambda reaction: reaction.from_stone_index)
return alchemy_pb2.PotionProperties(
label=latent_potion_to_unity(latent_potion), reward=0, color=colour,
glow_color=colour, reactions=sorted_reactions)
def _potions_from_potion_unity_properties(
potion: alchemy_pb2.PotionProperties
) -> Tuple[PerceivedPotion, LatentPotion]:
"""Convert the unity representation to a perceived and latent potion."""
return (unity_to_perceived_potion(potion),
_unity_to_latent_potion(potion.label))
def graphs_from_potion_unity_properties(
potions: Sequence[alchemy_pb2.PotionProperties]) -> graphs.Graph:
"""Convert a sequence of PotionProperties to a Graph."""
node_list = graphs.all_nodes_in_graph()
edge_list = graphs.EdgeList()
for i, potion in enumerate(potions):
_, latent = _potions_from_potion_unity_properties(potion)
utils_potion = stones_and_potions.Potion(
i, latent.latent_dim, latent.latent_dir)
for reaction in potion.reactions:
edge_list.add_edge(
node_list.get_node_by_idx(reaction.from_stone_index),
node_list.get_node_by_idx(reaction.to_stone_index),
utils_potion)
return graphs.Graph(node_list, edge_list)
def to_unity_chemistry(
chemistry: utils.Chemistry
) -> Tuple[alchemy_pb2.Chemistry, alchemy_pb2.RotationMapping]:
"""Convert from python types to unity Chemistry object."""
# Latent stones and potions are always in the same places.
latent_stones = stones_and_potions.possible_latent_stones()
latent_potions = stones_and_potions.possible_latent_potions()
# Apply the dimension swapping map between latent stones in unity and latent
# stones in python (see from_unity_chemistry for more explanation).
python_to_unity = PythonToUnityDimMap(chemistry)
python_latent_stones = [python_to_unity.apply_to_stone(latent_stone)
for latent_stone in latent_stones]
python_latent_potions = [python_to_unity.apply_to_potion(latent_potion)
for latent_potion in latent_potions]
# Apply the stone map to them to get perceptual stones.
aligned_stones = [chemistry.stone_map.apply_inverse(stone)
for stone in python_latent_stones]
perceived_stones = [
stones_and_potions.unalign(stone, chemistry.rotation)
for stone in aligned_stones]
unity_stones = [to_stone_unity_properties(perceived, latent)
for perceived, latent in zip(perceived_stones, latent_stones)]
# Apply the potion map to them to get perceptual potions.
perceived_potions = [chemistry.potion_map.apply_inverse(potion)
for potion in python_latent_potions]
unity_potions = [
to_potion_unity_properties(perceived, latent, python_to_unity.graph)
for perceived, latent in zip(perceived_potions, latent_potions)]
unity_chemistry = alchemy_pb2.Chemistry(
stones=unity_stones, potions=unity_potions)
rotation_mapping = rotation_to_unity(python_to_unity.rotation)
return unity_chemistry, rotation_mapping
def rotation_from_unity(
rotation_mapping: alchemy_pb2.RotationMapping
) -> np.ndarray:
"""Get the transformation to undo rotation from unity."""
# Rotate back
angles = [-int(rotation_mapping.rotation_angles.x),
-int(rotation_mapping.rotation_angles.y),
-int(rotation_mapping.rotation_angles.z)]
return stones_and_potions.rotation_from_angles(angles)
def rotation_to_unity(rotation: np.ndarray) -> alchemy_pb2.RotationMapping:
"""Convert the transformation to undo rotation to unity."""
angles = stones_and_potions.rotation_to_angles(rotation)
return alchemy_pb2.RotationMapping(rotation_angles=unity_types_pb2.Vector3(
**{axis: -round(a) for axis, a in zip('xyz', angles)}))
def potion_map_from_potions(
latent_potions: Sequence[LatentPotion],
perceived_potions: Sequence[PerceivedPotion]
) -> PotionMap:
"""Calculate potion map relating latent and perceived potions."""
dimension_map = [-1, -1, -1]
direction_map = [0, 0, 0]
for perceived_potion, latent_potion in zip(perceived_potions, latent_potions):
dimension_map[perceived_potion.perceived_dim] = latent_potion.latent_dim
if latent_potion.latent_dir == perceived_potion.perceived_dir:
direction_map[latent_potion.latent_dim] = 1
else:
direction_map[latent_potion.latent_dim] = -1
return PotionMap(dim_map=dimension_map, dir_map=direction_map)
def find_dim_map_and_stone_map(
chemistry: utils.Chemistry
) -> Tuple[np.ndarray, StoneMap, np.ndarray]:
"""Find a dimension map and stone map which map latent stones to perceived."""
latent_stones = stones_and_potions.possible_latent_stones()
aligned_stones = [chemistry.stone_map.apply_inverse(stone)
for stone in latent_stones]
perceived_stones = [stones_and_potions.unalign(stone, chemistry.rotation)
for stone in aligned_stones]
for dim_map in [np.eye(3, dtype=np.int)[p, :] for p in itertools.permutations(
[0, 1, 2])]:
for stone_map in stones_and_potions.possible_stone_maps():
sm = np.diag(stone_map.latent_pos_dir.astype(np.int))
# Since we do rotation before reflection in this case we must allow
# rotation forwards and backwards to get all cases.
# Because of the scaling this is not just the inverse matrix.
inverse_rotation = stones_and_potions.rotation_from_angles(
[-a for a in stones_and_potions.rotation_to_angles(
chemistry.rotation)])
for rotation in [chemistry.rotation, inverse_rotation]:
all_match = True
for ls, ps in zip(latent_stones, perceived_stones):
new_ls = np.matmul(dim_map, ls.latent_coords.astype(np.int))
ps_prime = np.matmul(sm, np.matmul(np.linalg.inv(rotation), new_ls))
if not all(abs(a - b) < 0.0001 for a, b in zip(
ps_prime, ps.perceived_coords.astype(np.int))):
all_match = False
break
if all_match:
return np.linalg.inv(dim_map), stone_map, rotation
assert False, (
'No dimension map and stone map takes latent stones to the passed '
'perceived stones with the passed rotation.')
def _apply_dim_map_to_graph(
dim_map: np.ndarray, graph: graphs.Graph
) -> graphs.Graph:
"""Swap latent dimensions in graph."""
edge_list = graphs.EdgeList()
for start_node, end_nodes in graph.edge_list.edges.items():
start_coords = np.matmul(dim_map, np.array(start_node.coords)).tolist()
new_start_node = graph.node_list.get_node_by_coords(start_coords)
for end_node, edge in end_nodes.items():
end_coords = np.matmul(dim_map, np.array(end_node.coords)).tolist()
new_end_node = graph.node_list.get_node_by_coords(end_coords)
new_potion = stones_and_potions.Potion(
edge[1].idx, np.where(dim_map[edge[1].dimension, :])[0][0],
edge[1].direction)
edge_list.add_edge(new_start_node, new_end_node, new_potion)
return graphs.Graph(graph.node_list, edge_list)
class PythonToUnityDimMap:
"""Convert from python method of mapping to unity method."""
def from_unity_chemistry(
chemistry: alchemy_pb2.Chemistry,
rotation_mapping: alchemy_pb2.RotationMapping
) -> utils.Chemistry:
"""Convert from unity Chemistry object to corresponding python types.
Args:
chemistry: A chemistry object received from the alchemy unity environment.
rotation_mapping: A rotation mapping object received from the alchemy unity
environment.
Returns:
A PotionMap describing the transformation from potion perceptual space to
latent space.
A StoneMap describing the transformation from stone aligned perceptual space
to latent space.
A Graph describing the available edges in latent space.
A np.ndarray describing the rotation from stone aligned perceptual space to
stone perceptual space.
"""
# In unity the latent stones are (possibly) rotated and then "perceptual
# mapping applicators" are applied to say how this is represented on screen,
# e.g. -1 in the first latent dimension is purple and +1 is blue.
# By only considering 7 possible rotations (0 rotation and 45 degrees
  # clockwise or anticlockwise about each axis) and just considering in what
# direction perceptual attributes change, when this is combined with the
# mapping of potion pairs to latent space dimensions and assigning a direction
# to that potion pair, we get all mappings which are 45 degrees offset on one
# axis (note that latent variables have the same effect on the reward so
# swapping latent space dimensions has no effect). We get duplicates because
# after rotating, one dimension of the max reward stone will have value 0 so
# reflecting about this does not change the value. However, the configuration
# is such that the task distribution is as it would be if we avoided
# duplicates.
# An alternative way to generate all these mappings without the duplicates
  # would be to take the stones' latent coordinates and first apply a mapping
# which changes the positive direction and then rotate these positions by 45
# degrees clockwise (excluding anticlockwise rotations).
# It is easier to run algorithms like the ideal observer assuming the second
  # breakdown of the mapping because the rotation does not affect the best
# action to take so we can take the perceived coordinates and undo the
# rotation using any plausible rotation (even if it is not the correct one)
# and then maintain a belief state over the remaining aspects of the
# chemistry and update the belief state if we find the rotation was wrong.
# We can switch between these equivalent breakdowns by possibly rotating in
# the opposite direction.
# From unity we get
# perceived_stone = sm * r * latent_stone
# where r rotates plus or minus 45 degrees and sm changes directions, we want
# perceived_stone = r_prime * sm * latent_stone
# where r_prime is rotating clockwise about the axis that r rotates around.
rotation = rotation_from_unity(rotation_mapping)
abs_rotation = stones_and_potions.rotation_from_angles(
[-abs(a) for a in stones_and_potions.rotation_to_angles(rotation)])
python_stones = [_from_stone_unity_properties(stone, abs_rotation)
for stone in chemistry.stones]
python_potions = [_potions_from_potion_unity_properties(potion)
for potion in chemistry.potions]
graph = graphs_from_potion_unity_properties(chemistry.potions)
# So sm_prime is diagonal with elements in {-1, 1} and dim_map is such that
# the sum of each row and each column is 1 with non zero elements 1.
# Let a := sm_prime * dim_map
# a := [a11 a12 a13]
# [a21 a22 a23]
# [a31 a32 a33]
# a * [1, 1, 1] = [a11 + a12 + a13, a21 + a22 + a23, a31 + a32 + a33]
sum_of_each_row = _get_aligned_coords_matching_latent(
python_stones, [1, 1, 1])
stone_map = StoneMap(pos_dir=sum_of_each_row)
sm_prime = np.diag(sum_of_each_row)
# a * [1, 1, 1] - a * [-1, 1, 1] = 2 * [a11, a21, a31]
first_column = ((sum_of_each_row - _get_aligned_coords_matching_latent(
python_stones, [-1, 1, 1]))/2).astype(np.int)
second_column = ((sum_of_each_row - _get_aligned_coords_matching_latent(
python_stones, [1, -1, 1]))/2).astype(np.int)
third_column = ((sum_of_each_row - _get_aligned_coords_matching_latent(
python_stones, [1, 1, -1]))/2).astype(np.int)
a = np.hstack((first_column.reshape((3, 1)), second_column.reshape((3, 1)),
third_column.reshape((3, 1))))
dim_map = np.rint(np.matmul(np.linalg.inv(sm_prime), a)).astype(np.int)
latent_stones = [latent_stone for _, _, latent_stone in python_stones]
aligned_stones = [aligned_stone for _, aligned_stone, _ in python_stones]
latent_stones = [_apply_dim_map_to_stone(dim_map, latent_stone)
for latent_stone in latent_stones]
latent_potions = [latent_potion for _, latent_potion in python_potions]
latent_potions = [_apply_dim_map_to_potion(dim_map, latent_potion)
for latent_potion in latent_potions]
perceived_potions = [perceived_potion
for perceived_potion, _ in python_potions]
graph = _apply_dim_map_to_graph(dim_map, graph)
for aligned_stone, latent_stone in zip(aligned_stones, latent_stones):
assert stone_map.apply(aligned_stone) == latent_stone, (
'Applying the stone map to the aligned stone did not give the '
'expected latent stone.\n{aligned_stone}\n{latent_stone}\n'
'{stone_map}\n{chemistry}'.format(
aligned_stone=aligned_stone, latent_stone=latent_stone,
stone_map=stone_map, chemistry=chemistry))
potion_map = potion_map_from_potions(latent_potions, perceived_potions)
for perceived_potion, latent_potion in zip(perceived_potions, latent_potions):
assert potion_map.apply(perceived_potion) == latent_potion, (
'Applying the potion map to the perceived potion did not give the '
        'expected latent potion.\n{perceived_potion}\n{latent_potion}\n'
'{potion_map}\n{chemistry}'.format(
perceived_potion=perceived_potion, latent_potion=latent_potion,
potion_map=potion_map, chemistry=chemistry))
return utils.Chemistry(potion_map, stone_map, graph, abs_rotation)
| 43.661996
| 80
| 0.740083
|
# Lint as python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Code to convert between unity types and python types."""
import copy
import itertools
from typing import Any, Dict, Sequence, Tuple
from dm_alchemy.protos import alchemy_pb2
from dm_alchemy.protos import hypercube_pb2
from dm_alchemy.types import graphs
from dm_alchemy.types import stones_and_potions
from dm_alchemy.types import utils
import frozendict
import numpy as np
from dm_alchemy.protos import color_info_pb2
from dm_alchemy.protos import unity_types_pb2
PotionMap = stones_and_potions.PotionMap
StoneMap = stones_and_potions.StoneMap
AlignedStone = stones_and_potions.AlignedStone
PerceivedStone = stones_and_potions.PerceivedStone
PerceivedPotion = stones_and_potions.PerceivedPotion
LatentStone = stones_and_potions.LatentStone
LatentPotion = stones_and_potions.LatentPotion
MapsAndGraph = Tuple[PotionMap, StoneMap, graphs.Graph]
COLOR_TYPE = alchemy_pb2.PerceptualMappingApplicator.Type.COLOR
SIZE_TYPE = alchemy_pb2.PerceptualMappingApplicator.Type.SIZE
ROUNDNESS_TYPE = alchemy_pb2.PerceptualMappingApplicator.Type.ROUNDNESS
# Colours are defined in AlchemyColors.asset
_STONE_COLOURS = frozendict.frozendict({
'purple': unity_types_pb2.Color(
r=0.52156866, g=0.22745098, b=0.6313726, a=1.0),
'blurple': unity_types_pb2.Color(
r=0.2608, g=0.2667, b=0.5941, a=1.0),
'blue': unity_types_pb2.Color(
r=0.0, g=0.30588236, b=0.5568628, a=1.0)
})
_POTION_COLOURS = frozendict.frozendict({
'green': unity_types_pb2.Color(
r=0.24394463, g=0.6911765, b=0.35806578, a=1.0),
'red': unity_types_pb2.Color(
r=0.9647059, g=0.015686275, b=0.06666667, a=1.0),
'yellow': unity_types_pb2.Color(
r=0.9411765, g=0.84705883, b=0.078431375, a=1.0),
'orange': unity_types_pb2.Color(
r=0.9764706, g=0.4, b=0.10980392, a=1.0),
'turquoise': unity_types_pb2.Color(
r=0.21176471, g=0.72156864, b=0.7411765, a=1.0),
'pink': unity_types_pb2.Color(
r=0.9843137, g=0.43529412, b=0.43529412, a=1.0)
})
# This is the order of perceived axes in unity.
PERCEIVED_AXIS = (COLOR_TYPE, SIZE_TYPE, ROUNDNESS_TYPE)
AXIS_NUMBER = frozendict.frozendict({
a: i for i, a in enumerate(PERCEIVED_AXIS)})
SIZE_NAME_AT_COORD = frozendict.frozendict(
{-1: 'small', 0: 'medium', 1: 'large'})
_STONE_SIZES = frozendict.frozendict(
{'small': 1.0, 'medium': 1.4, 'large': 1.8})
SIZE_AT_COORD = frozendict.frozendict(
{coord: _STONE_SIZES[name] for coord, name in SIZE_NAME_AT_COORD.items()})
_COORD_AT_SIZE = frozendict.frozendict({v: k for k, v in SIZE_AT_COORD.items()})
ROUNDNESS_NAME_AT_COORD = frozendict.frozendict(
{-1: 'pointy', 0: 'somewhat pointy', 1: 'round'})
_STONE_ROUNDNESSES = frozendict.frozendict(
{'pointy': 0.0, 'somewhat pointy': 0.5, 'round': 1.0})
ROUNDNESS_AT_COORD = frozendict.frozendict(
{coord: _STONE_ROUNDNESSES[name]
for coord, name in ROUNDNESS_NAME_AT_COORD.items()})
_COORD_AT_ROUNDNESS = frozendict.frozendict({
v: k for k, v in ROUNDNESS_AT_COORD.items()})
# The colour proto is not hashable so convert to a type which is.
def colour_proto_to_hashable(
colour: unity_types_pb2.Color) -> Tuple[float, float, float, float]:
return (round(colour.r, 2), round(colour.g, 2), round(colour.b, 2),
round(colour.a, 2))
COLOUR_NAME_AT_COORD = frozendict.frozendict(
{-1: 'purple', 0: 'blurple', 1: 'blue'})
COLOUR_AT_COORD = frozendict.frozendict({
coord: _STONE_COLOURS[name]
for coord, name in COLOUR_NAME_AT_COORD.items()})
_COORD_AT_COLOUR = frozendict.frozendict(
{colour_proto_to_hashable(v): k for k, v in COLOUR_AT_COORD.items()})
POTION_COLOUR_AT_PERCEIVED_POTION = frozendict.frozendict({
PerceivedPotion(0, 1): 'green',
PerceivedPotion(0, -1): 'red',
PerceivedPotion(1, 1): 'yellow',
PerceivedPotion(1, -1): 'orange',
PerceivedPotion(2, 1): 'turquoise',
PerceivedPotion(2, -1): 'pink',
})
_PERCEIVED_POTION_AT_POTION_COLOUR = frozendict.frozendict({
colour_proto_to_hashable(_POTION_COLOURS[v]): k
for k, v in POTION_COLOUR_AT_PERCEIVED_POTION.items()})
def get_colour_info(
name_and_colour: Tuple[str, unity_types_pb2.Color]
) -> color_info_pb2.ColorInfo:
return color_info_pb2.ColorInfo(
color=name_and_colour[1], name=name_and_colour[0])
def latent_stone_to_unity(
latent_stone: LatentStone) -> hypercube_pb2.HypercubeVertex:
return hypercube_pb2.HypercubeVertex(
index=latent_stone.index(),
coordinates=latent_stone.latent_coords.tolist())
def _unity_to_latent_stone(
latent: hypercube_pb2.HypercubeVertex) -> LatentStone:
# Use numpy object type to store python ints rather than numpy ints.
return LatentStone(np.array([int(coord) for coord in latent.coordinates],
dtype=np.object))
def perceptual_features(perceived_stone: PerceivedStone) -> Dict[str, Any]:
return {
'size': SIZE_AT_COORD[perceived_stone.perceived_coords[AXIS_NUMBER[
SIZE_TYPE]]],
'roundness': ROUNDNESS_AT_COORD[perceived_stone.perceived_coords[
AXIS_NUMBER[ROUNDNESS_TYPE]]],
'color': COLOUR_AT_COORD[perceived_stone.perceived_coords[AXIS_NUMBER[
COLOR_TYPE]]],
}
def to_stone_unity_properties(
perceived_stone: PerceivedStone, latent_stone: LatentStone
) -> alchemy_pb2.StoneProperties:
"""Convert a perceived and latent stone to StoneProperties."""
return alchemy_pb2.StoneProperties(
reward=15 if perceived_stone.reward > 2 else perceived_stone.reward,
latent=latent_stone_to_unity(latent_stone),
**perceptual_features(perceived_stone))
def unity_to_perceived_stone(
stone_properties: alchemy_pb2.StoneProperties
) -> PerceivedStone:
"""Convert StoneProperties to a perceived stone."""
size = _COORD_AT_SIZE[round(stone_properties.size, 1)]
roundness = _COORD_AT_ROUNDNESS[round(stone_properties.roundness, 1)]
colour = _COORD_AT_COLOUR[colour_proto_to_hashable(stone_properties.color)]
  # Store the perceived coordinates as floats, one entry per perceptual axis.
perceived_coords = np.array([0, 0, 0], dtype=np.float)
perceived_coords[AXIS_NUMBER[SIZE_TYPE]] = size
perceived_coords[AXIS_NUMBER[ROUNDNESS_TYPE]] = roundness
perceived_coords[AXIS_NUMBER[COLOR_TYPE]] = colour
latent_stone = _unity_to_latent_stone(stone_properties.latent)
return PerceivedStone(latent_stone.reward(), perceived_coords)
def _from_stone_unity_properties(
stone_properties: alchemy_pb2.StoneProperties,
rotation: np.ndarray
) -> Tuple[PerceivedStone, AlignedStone, LatentStone]:
"""Convert StoneProperties to a perceived and latent stone."""
latent_stone = _unity_to_latent_stone(stone_properties.latent)
perceived_stone = unity_to_perceived_stone(stone_properties)
aligned_stone = stones_and_potions.align(perceived_stone, rotation)
return perceived_stone, aligned_stone, latent_stone
def latent_potion_to_unity(
latent_potion: LatentPotion) -> hypercube_pb2.EdgeLabel:
if latent_potion.latent_dir == 1:
direction = hypercube_pb2.EdgeLabel.Direction.POSITIVE
else:
direction = hypercube_pb2.EdgeLabel.Direction.NEGATIVE
return hypercube_pb2.EdgeLabel(
dimension_index=latent_potion.latent_dim, direction=direction)
def _unity_to_latent_potion(
edge_label: hypercube_pb2.EdgeLabel) -> LatentPotion:
if edge_label.direction == hypercube_pb2.EdgeLabel.Direction.POSITIVE:
latent_dir = 1
else:
latent_dir = -1
return LatentPotion(
latent_dim=edge_label.dimension_index, latent_dir=latent_dir)
def to_potion_unity_properties(
perceived_potion: PerceivedPotion, latent_potion: LatentPotion,
graph: graphs.Graph
) -> alchemy_pb2.PotionProperties:
"""Convert a perceived and latent potion and graph to PotionProperties."""
colour_name = POTION_COLOUR_AT_PERCEIVED_POTION[perceived_potion]
colour = get_colour_info((colour_name, _POTION_COLOURS[colour_name]))
reactions = set()
for startnode, endnodes in graph.edge_list.edges.items():
expected_end_coords = copy.deepcopy(startnode.coords)
expected_end_coords[latent_potion.latent_dim] = (
startnode.coords[latent_potion.latent_dim] + 2 *
latent_potion.latent_dir)
expected_end_node = graph.node_list.get_node_by_coords(
expected_end_coords)
if not expected_end_node:
continue
if expected_end_node in endnodes:
reactions.add((startnode.idx, expected_end_node.idx))
reactions = [alchemy_pb2.PotionReaction(from_stone_index=from_stone,
to_stone_index=to_stone)
for from_stone, to_stone in reactions]
sorted_reactions = sorted(
reactions, key=lambda reaction: reaction.from_stone_index)
return alchemy_pb2.PotionProperties(
label=latent_potion_to_unity(latent_potion), reward=0, color=colour,
glow_color=colour, reactions=sorted_reactions)
def unity_to_perceived_potion(
potion: alchemy_pb2.PotionProperties
) -> PerceivedPotion:
return _PERCEIVED_POTION_AT_POTION_COLOUR[
colour_proto_to_hashable(potion.color.color)]
def _potions_from_potion_unity_properties(
potion: alchemy_pb2.PotionProperties
) -> Tuple[PerceivedPotion, LatentPotion]:
"""Convert the unity representation to a perceived and latent potion."""
return (unity_to_perceived_potion(potion),
_unity_to_latent_potion(potion.label))
def graphs_from_potion_unity_properties(
potions: Sequence[alchemy_pb2.PotionProperties]) -> graphs.Graph:
"""Convert a sequence of PotionProperties to a Graph."""
node_list = graphs.all_nodes_in_graph()
edge_list = graphs.EdgeList()
for i, potion in enumerate(potions):
_, latent = _potions_from_potion_unity_properties(potion)
utils_potion = stones_and_potions.Potion(
i, latent.latent_dim, latent.latent_dir)
for reaction in potion.reactions:
edge_list.add_edge(
node_list.get_node_by_idx(reaction.from_stone_index),
node_list.get_node_by_idx(reaction.to_stone_index),
utils_potion)
return graphs.Graph(node_list, edge_list)
def to_unity_chemistry(
chemistry: utils.Chemistry
) -> Tuple[alchemy_pb2.Chemistry, alchemy_pb2.RotationMapping]:
"""Convert from python types to unity Chemistry object."""
# Latent stones and potions are always in the same places.
latent_stones = stones_and_potions.possible_latent_stones()
latent_potions = stones_and_potions.possible_latent_potions()
# Apply the dimension swapping map between latent stones in unity and latent
# stones in python (see from_unity_chemistry for more explanation).
python_to_unity = PythonToUnityDimMap(chemistry)
python_latent_stones = [python_to_unity.apply_to_stone(latent_stone)
for latent_stone in latent_stones]
python_latent_potions = [python_to_unity.apply_to_potion(latent_potion)
for latent_potion in latent_potions]
# Apply the stone map to them to get perceptual stones.
aligned_stones = [chemistry.stone_map.apply_inverse(stone)
for stone in python_latent_stones]
perceived_stones = [
stones_and_potions.unalign(stone, chemistry.rotation)
for stone in aligned_stones]
unity_stones = [to_stone_unity_properties(perceived, latent)
for perceived, latent in zip(perceived_stones, latent_stones)]
# Apply the potion map to them to get perceptual potions.
perceived_potions = [chemistry.potion_map.apply_inverse(potion)
for potion in python_latent_potions]
unity_potions = [
to_potion_unity_properties(perceived, latent, python_to_unity.graph)
for perceived, latent in zip(perceived_potions, latent_potions)]
unity_chemistry = alchemy_pb2.Chemistry(
stones=unity_stones, potions=unity_potions)
rotation_mapping = rotation_to_unity(python_to_unity.rotation)
return unity_chemistry, rotation_mapping
def rotation_from_unity(
rotation_mapping: alchemy_pb2.RotationMapping
) -> np.ndarray:
"""Get the transformation to undo rotation from unity."""
# Rotate back
angles = [-int(rotation_mapping.rotation_angles.x),
-int(rotation_mapping.rotation_angles.y),
-int(rotation_mapping.rotation_angles.z)]
return stones_and_potions.rotation_from_angles(angles)
def rotation_to_unity(rotation: np.ndarray) -> alchemy_pb2.RotationMapping:
"""Convert the transformation to undo rotation to unity."""
angles = stones_and_potions.rotation_to_angles(rotation)
return alchemy_pb2.RotationMapping(rotation_angles=unity_types_pb2.Vector3(
**{axis: -round(a) for axis, a in zip('xyz', angles)}))
def potion_map_from_potions(
latent_potions: Sequence[LatentPotion],
perceived_potions: Sequence[PerceivedPotion]
) -> PotionMap:
"""Calculate potion map relating latent and perceived potions."""
dimension_map = [-1, -1, -1]
direction_map = [0, 0, 0]
for perceived_potion, latent_potion in zip(perceived_potions, latent_potions):
dimension_map[perceived_potion.perceived_dim] = latent_potion.latent_dim
if latent_potion.latent_dir == perceived_potion.perceived_dir:
direction_map[latent_potion.latent_dim] = 1
else:
direction_map[latent_potion.latent_dim] = -1
return PotionMap(dim_map=dimension_map, dir_map=direction_map)
def _get_aligned_coords_matching_latent(
python_stones: Sequence[Tuple[PerceivedStone, AlignedStone, LatentStone]],
latent_coords: Sequence[int]
) -> np.ndarray:
return [aligned_stone.aligned_coords.astype(np.int)
for _, aligned_stone, latent_stone in python_stones
if latent_stone.latent_coords.tolist() == latent_coords][0]
def find_dim_map_and_stone_map(
chemistry: utils.Chemistry
) -> Tuple[np.ndarray, StoneMap, np.ndarray]:
"""Find a dimension map and stone map which map latent stones to perceived."""
latent_stones = stones_and_potions.possible_latent_stones()
aligned_stones = [chemistry.stone_map.apply_inverse(stone)
for stone in latent_stones]
perceived_stones = [stones_and_potions.unalign(stone, chemistry.rotation)
for stone in aligned_stones]
for dim_map in [np.eye(3, dtype=np.int)[p, :] for p in itertools.permutations(
[0, 1, 2])]:
for stone_map in stones_and_potions.possible_stone_maps():
sm = np.diag(stone_map.latent_pos_dir.astype(np.int))
# Since we do rotation before reflection in this case we must allow
# rotation forwards and backwards to get all cases.
# Because of the scaling this is not just the inverse matrix.
inverse_rotation = stones_and_potions.rotation_from_angles(
[-a for a in stones_and_potions.rotation_to_angles(
chemistry.rotation)])
for rotation in [chemistry.rotation, inverse_rotation]:
all_match = True
for ls, ps in zip(latent_stones, perceived_stones):
new_ls = np.matmul(dim_map, ls.latent_coords.astype(np.int))
ps_prime = np.matmul(sm, np.matmul(np.linalg.inv(rotation), new_ls))
if not all(abs(a - b) < 0.0001 for a, b in zip(
ps_prime, ps.perceived_coords.astype(np.int))):
all_match = False
break
if all_match:
return np.linalg.inv(dim_map), stone_map, rotation
assert False, (
'No dimension map and stone map takes latent stones to the passed '
'perceived stones with the passed rotation.')
def _apply_dim_map_to_stone(
dim_map: np.ndarray, latent_stone: LatentStone
) -> LatentStone:
coords = np.rint(np.matmul(
dim_map, latent_stone.latent_coords.astype(np.int)))
return LatentStone(np.array([int(c) for c in coords], np.object))
def _apply_dim_map_to_potion(
dim_map: np.ndarray, latent_potion: LatentPotion
) -> LatentPotion:
return LatentPotion(
np.where(dim_map[latent_potion.latent_dim, :])[0][0],
latent_potion.latent_dir)
def _apply_dim_map_to_graph(
dim_map: np.ndarray, graph: graphs.Graph
) -> graphs.Graph:
"""Swap latent dimensions in graph."""
edge_list = graphs.EdgeList()
for start_node, end_nodes in graph.edge_list.edges.items():
start_coords = np.matmul(dim_map, np.array(start_node.coords)).tolist()
new_start_node = graph.node_list.get_node_by_coords(start_coords)
for end_node, edge in end_nodes.items():
end_coords = np.matmul(dim_map, np.array(end_node.coords)).tolist()
new_end_node = graph.node_list.get_node_by_coords(end_coords)
new_potion = stones_and_potions.Potion(
edge[1].idx, np.where(dim_map[edge[1].dimension, :])[0][0],
edge[1].direction)
edge_list.add_edge(new_start_node, new_end_node, new_potion)
return graphs.Graph(graph.node_list, edge_list)
class PythonToUnityDimMap:
"""Convert from python method of mapping to unity method."""
def __init__(self, chemistry: utils.Chemistry):
self._chemistry = chemistry
self._dim_map, self.stone_map, self.rotation = find_dim_map_and_stone_map(
chemistry)
self.graph = self._apply_to_graph(self._chemistry.graph)
self.potion_map = self._apply_to_potion_map(self._chemistry.potion_map)
def apply_to_stone(self, latent_stone: LatentStone) -> LatentStone:
return _apply_dim_map_to_stone(self._dim_map, latent_stone)
def apply_to_potion(self, latent_potion: LatentPotion) -> LatentPotion:
return _apply_dim_map_to_potion(self._dim_map, latent_potion)
def _apply_to_graph(self, graph: graphs.Graph) -> graphs.Graph:
return _apply_dim_map_to_graph(self._dim_map, graph)
def _apply_to_potion_map(self, potion_map: PotionMap) -> PotionMap:
latent_potions = stones_and_potions.possible_latent_potions()
new_latent_potions = [self.apply_to_potion(latent_potion)
for latent_potion in latent_potions]
perceived_potions = [potion_map.apply_inverse(latent_potion)
for latent_potion in latent_potions]
return potion_map_from_potions(new_latent_potions, perceived_potions)
def from_unity_chemistry(
chemistry: alchemy_pb2.Chemistry,
rotation_mapping: alchemy_pb2.RotationMapping
) -> utils.Chemistry:
"""Convert from unity Chemistry object to corresponding python types.
Args:
chemistry: A chemistry object received from the alchemy unity environment.
rotation_mapping: A rotation mapping object received from the alchemy unity
environment.
Returns:
A PotionMap describing the transformation from potion perceptual space to
latent space.
A StoneMap describing the transformation from stone aligned perceptual space
to latent space.
A Graph describing the available edges in latent space.
A np.ndarray describing the rotation from stone aligned perceptual space to
stone perceptual space.
"""
# In unity the latent stones are (possibly) rotated and then "perceptual
# mapping applicators" are applied to say how this is represented on screen,
# e.g. -1 in the first latent dimension is purple and +1 is blue.
# By only considering 7 possible rotations (0 rotation and 45 degrees
  # clockwise or anticlockwise about each axis) and just considering in what
# direction perceptual attributes change, when this is combined with the
# mapping of potion pairs to latent space dimensions and assigning a direction
# to that potion pair, we get all mappings which are 45 degrees offset on one
# axis (note that latent variables have the same effect on the reward so
# swapping latent space dimensions has no effect). We get duplicates because
# after rotating, one dimension of the max reward stone will have value 0 so
# reflecting about this does not change the value. However, the configuration
# is such that the task distribution is as it would be if we avoided
# duplicates.
# An alternative way to generate all these mappings without the duplicates
  # would be to take the stones' latent coordinates and first apply a mapping
# which changes the positive direction and then rotate these positions by 45
# degrees clockwise (excluding anticlockwise rotations).
# It is easier to run algorithms like the ideal observer assuming the second
  # breakdown of the mapping because the rotation does not affect the best
# action to take so we can take the perceived coordinates and undo the
# rotation using any plausible rotation (even if it is not the correct one)
# and then maintain a belief state over the remaining aspects of the
# chemistry and update the belief state if we find the rotation was wrong.
# We can switch between these equivalent breakdowns by possibly rotating in
# the opposite direction.
# From unity we get
# perceived_stone = sm * r * latent_stone
# where r rotates plus or minus 45 degrees and sm changes directions, we want
# perceived_stone = r_prime * sm * latent_stone
# where r_prime is rotating clockwise about the axis that r rotates around.
rotation = rotation_from_unity(rotation_mapping)
abs_rotation = stones_and_potions.rotation_from_angles(
[-abs(a) for a in stones_and_potions.rotation_to_angles(rotation)])
python_stones = [_from_stone_unity_properties(stone, abs_rotation)
for stone in chemistry.stones]
python_potions = [_potions_from_potion_unity_properties(potion)
for potion in chemistry.potions]
graph = graphs_from_potion_unity_properties(chemistry.potions)
# So sm_prime is diagonal with elements in {-1, 1} and dim_map is such that
# the sum of each row and each column is 1 with non zero elements 1.
# Let a := sm_prime * dim_map
# a := [a11 a12 a13]
# [a21 a22 a23]
# [a31 a32 a33]
# a * [1, 1, 1] = [a11 + a12 + a13, a21 + a22 + a23, a31 + a32 + a33]
sum_of_each_row = _get_aligned_coords_matching_latent(
python_stones, [1, 1, 1])
stone_map = StoneMap(pos_dir=sum_of_each_row)
sm_prime = np.diag(sum_of_each_row)
# a * [1, 1, 1] - a * [-1, 1, 1] = 2 * [a11, a21, a31]
first_column = ((sum_of_each_row - _get_aligned_coords_matching_latent(
python_stones, [-1, 1, 1]))/2).astype(np.int)
second_column = ((sum_of_each_row - _get_aligned_coords_matching_latent(
python_stones, [1, -1, 1]))/2).astype(np.int)
third_column = ((sum_of_each_row - _get_aligned_coords_matching_latent(
python_stones, [1, 1, -1]))/2).astype(np.int)
a = np.hstack((first_column.reshape((3, 1)), second_column.reshape((3, 1)),
third_column.reshape((3, 1))))
dim_map = np.rint(np.matmul(np.linalg.inv(sm_prime), a)).astype(np.int)
latent_stones = [latent_stone for _, _, latent_stone in python_stones]
aligned_stones = [aligned_stone for _, aligned_stone, _ in python_stones]
latent_stones = [_apply_dim_map_to_stone(dim_map, latent_stone)
for latent_stone in latent_stones]
latent_potions = [latent_potion for _, latent_potion in python_potions]
latent_potions = [_apply_dim_map_to_potion(dim_map, latent_potion)
for latent_potion in latent_potions]
perceived_potions = [perceived_potion
for perceived_potion, _ in python_potions]
graph = _apply_dim_map_to_graph(dim_map, graph)
for aligned_stone, latent_stone in zip(aligned_stones, latent_stones):
assert stone_map.apply(aligned_stone) == latent_stone, (
'Applying the stone map to the aligned stone did not give the '
'expected latent stone.\n{aligned_stone}\n{latent_stone}\n'
'{stone_map}\n{chemistry}'.format(
aligned_stone=aligned_stone, latent_stone=latent_stone,
stone_map=stone_map, chemistry=chemistry))
potion_map = potion_map_from_potions(latent_potions, perceived_potions)
for perceived_potion, latent_potion in zip(perceived_potions, latent_potions):
assert potion_map.apply(perceived_potion) == latent_potion, (
'Applying the potion map to the perceived potion did not give the '
'expected latent potion.{perceived_potion}\n{latent_potion}\n'
'{potion_map}\n{chemistry}'.format(
perceived_potion=perceived_potion, latent_potion=latent_potion,
potion_map=potion_map, chemistry=chemistry))
return utils.Chemistry(potion_map, stone_map, graph, abs_rotation)
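# A minimal self-contained numpy sketch (added for illustration; `_sm`, `_r`
# and `_r_prime` are hypothetical names, not this module's API). It checks
# the two identities used above: (1) a sign-flip matrix sm and a 45-degree
# rotation r satisfy sm @ r == r_prime @ sm, where r_prime rotates by the
# same angle in the adjusted direction, and (2) the column-extraction trick
# a @ [1, 1, 1] - a @ [-1, 1, 1] == 2 * a[:, 0].
import numpy as np

_theta = np.pi / 4  # 45 degrees about the z axis
_c, _s = np.cos(_theta), np.sin(_theta)
_r = np.array([[_c, -_s, 0.0], [_s, _c, 0.0], [0.0, 0.0, 1.0]])
_sm = np.diag([-1.0, 1.0, 1.0])  # flip the positive direction of axis 0
# Flipping one in-plane axis reverses the apparent rotation direction, so
# r_prime is the rotation by -theta about the same axis.
_r_prime = np.array([[_c, _s, 0.0], [-_s, _c, 0.0], [0.0, 0.0, 1.0]])
assert np.allclose(_sm @ _r, _r_prime @ _sm)
_a = _sm @ _r
_col0 = (_a @ np.ones(3) - _a @ np.array([-1.0, 1.0, 1.0])) / 2
assert np.allclose(_col0, _a[:, 0])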
| 3,794
| 0
| 377
|
3e437eb10c5579c54d13b48fb00dc5e65c50fb7b
| 8,319
|
py
|
Python
|
fastatomography/tomo/backends/astra_cpu.py
|
PhilippPelz/fasta-tomography
|
a75f0559c5912249d7c6e330b061ecad744556bf
|
[
"MIT"
] | 1
|
2021-06-07T14:08:26.000Z
|
2021-06-07T14:08:26.000Z
|
fastatomography/tomo/backends/astra_cpu.py
|
PhilippPelz/fasta-tomography
|
a75f0559c5912249d7c6e330b061ecad744556bf
|
[
"MIT"
] | null | null | null |
fastatomography/tomo/backends/astra_cpu.py
|
PhilippPelz/fasta-tomography
|
a75f0559c5912249d7c6e330b061ecad744556bf
|
[
"MIT"
] | null | null | null |
# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Backend for ASTRA using CPU."""
from __future__ import print_function, division, absolute_import
import numpy as np
try:
import astra
except ImportError:
pass
from odl.discr import DiscreteLp, DiscreteLpElement
from odl.tomo.backends.astra_setup import (
astra_projection_geometry, astra_volume_geometry, astra_data,
astra_projector, astra_algorithm)
from odl.tomo.geometry import Geometry
from odl.util import writable_array
__all__ = ('astra_cpu_forward_projector', 'astra_cpu_back_projector')
# TODO: use context manager when creating data structures
# TODO: is magnification scaling at the right place?
def astra_cpu_forward_projector(vol_data, geometry, proj_space, out=None):
"""Run an ASTRA forward projection on the given data using the CPU.
Parameters
----------
vol_data : `DiscreteLpElement`
Volume data to which the forward projector is applied
geometry : `Geometry`
Geometry defining the tomographic setup
proj_space : `DiscreteLp`
Space to which the calling operator maps
out : ``proj_space`` element, optional
Element of the projection space to which the result is written. If
``None``, an element in ``proj_space`` is created.
Returns
-------
out : ``proj_space`` element
Projection data resulting from the application of the projector.
If ``out`` was provided, the returned object is a reference to it.
"""
if not isinstance(vol_data, DiscreteLpElement):
raise TypeError('volume data {!r} is not a `DiscreteLpElement` '
'instance.'.format(vol_data))
if vol_data.space.impl != 'numpy':
raise TypeError("`vol_data.space.impl` must be 'numpy', got {!r}"
"".format(vol_data.space.impl))
if not isinstance(geometry, Geometry):
raise TypeError('geometry {!r} is not a Geometry instance'
''.format(geometry))
if not isinstance(proj_space, DiscreteLp):
raise TypeError('`proj_space` {!r} is not a DiscreteLp '
'instance.'.format(proj_space))
if proj_space.impl != 'numpy':
raise TypeError("`proj_space.impl` must be 'numpy', got {!r}"
"".format(proj_space.impl))
if vol_data.ndim != geometry.ndim:
raise ValueError('dimensions {} of volume data and {} of geometry '
'do not match'
''.format(vol_data.ndim, geometry.ndim))
if out is None:
out = proj_space.element()
else:
if out not in proj_space:
raise TypeError('`out` {} is neither None nor a '
'DiscreteLpElement instance'.format(out))
ndim = vol_data.ndim
# Create astra geometries
vol_geom = astra_volume_geometry(vol_data.space)
proj_geom = astra_projection_geometry(geometry)
# Create projector
if not all(s == vol_data.space.interp_byaxis[0]
for s in vol_data.space.interp_byaxis):
raise ValueError('volume interpolation must be the same in each '
'dimension, got {}'.format(vol_data.space.interp))
vol_interp = vol_data.space.interp
proj_id = astra_projector(vol_interp, vol_geom, proj_geom, ndim,
impl='cpu')
# Create ASTRA data structures
vol_data_arr = np.asarray(vol_data)
vol_id = astra_data(vol_geom, datatype='volume', data=vol_data_arr,
allow_copy=True)
with writable_array(out, dtype='float32', order='C') as out_arr:
sino_id = astra_data(proj_geom, datatype='projection', data=out_arr,
ndim=proj_space.ndim)
# Create algorithm
algo_id = astra_algorithm('forward', ndim, vol_id, sino_id, proj_id,
impl='cpu')
# Run algorithm
astra.algorithm.run(algo_id)
# Delete ASTRA objects
astra.algorithm.delete(algo_id)
astra.data2d.delete((vol_id, sino_id))
astra.projector.delete(proj_id)
return out
def astra_cpu_back_projector(proj_data, geometry, reco_space, out=None):
"""Run an ASTRA back-projection on the given data using the CPU.
Parameters
----------
proj_data : `DiscreteLpElement`
Projection data to which the back-projector is applied
geometry : `Geometry`
Geometry defining the tomographic setup
reco_space : `DiscreteLp`
Space to which the calling operator maps
out : ``reco_space`` element, optional
Element of the reconstruction space to which the result is written.
If ``None``, an element in ``reco_space`` is created.
Returns
-------
out : ``reco_space`` element
Reconstruction data resulting from the application of the backward
projector. If ``out`` was provided, the returned object is a
reference to it.
"""
if not isinstance(proj_data, DiscreteLpElement):
raise TypeError('projection data {!r} is not a DiscreteLpElement '
'instance'.format(proj_data))
if proj_data.space.impl != 'numpy':
    raise TypeError('`proj_data` must be a `numpy.ndarray` based '
                    'container, got `impl` {!r}'
                    ''.format(proj_data.space.impl))
if not isinstance(geometry, Geometry):
raise TypeError('geometry {!r} is not a Geometry instance'
''.format(geometry))
if not isinstance(reco_space, DiscreteLp):
raise TypeError('reconstruction space {!r} is not a DiscreteLp '
'instance'.format(reco_space))
if reco_space.impl != 'numpy':
raise TypeError("`reco_space.impl` must be 'numpy', got {!r}"
"".format(reco_space.impl))
if reco_space.ndim != geometry.ndim:
raise ValueError('dimensions {} of reconstruction space and {} of '
'geometry do not match'.format(
reco_space.ndim, geometry.ndim))
if out is None:
out = reco_space.element()
else:
if out not in reco_space:
raise TypeError('`out` {} is neither None nor a '
'DiscreteLpElement instance'.format(out))
ndim = proj_data.ndim
# Create astra geometries
vol_geom = astra_volume_geometry(reco_space)
proj_geom = astra_projection_geometry(geometry)
# Create ASTRA data structure
sino_id = astra_data(proj_geom, datatype='projection', data=proj_data,
allow_copy=True)
# Create projector
# TODO: implement with different schemes for angles and detector
if not all(s == proj_data.space.interp_byaxis[0]
for s in proj_data.space.interp_byaxis):
raise ValueError('data interpolation must be the same in each '
'dimension, got {}'
''.format(proj_data.space.interp_byaxis))
proj_interp = proj_data.space.interp
proj_id = astra_projector(proj_interp, vol_geom, proj_geom, ndim,
impl='cpu')
# Convert out to correct dtype and order if needed.
with writable_array(out, dtype='float32', order='C') as out_arr:
vol_id = astra_data(vol_geom, datatype='volume', data=out_arr,
ndim=reco_space.ndim)
# Create algorithm
algo_id = astra_algorithm('backward', ndim, vol_id, sino_id, proj_id,
impl='cpu')
# Run algorithm
astra.algorithm.run(algo_id)
# Weight the adjoint by appropriate weights
scaling_factor = float(proj_data.space.weighting.const)
scaling_factor /= float(reco_space.weighting.const)
out *= scaling_factor
# Delete ASTRA objects
astra.algorithm.delete(algo_id)
astra.data2d.delete((vol_id, sino_id))
astra.projector.delete(proj_id)
return out
if __name__ == '__main__':
from odl.util.testutils import run_doctests
run_doctests()
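# Hedged usage sketch (added for illustration; it assumes ODL with the ASTRA
# toolbox installed and follows ODL's public API as I understand it). It
# builds a small 2D parallel-beam problem, forward projects a phantom with
# the function above, and back-projects the resulting sinogram.
def _example_usage():
    import odl
    reco_space = odl.uniform_discr([-1, -1], [1, 1], [64, 64],
                                   dtype='float32')
    geometry = odl.tomo.parallel_beam_geometry(reco_space, num_angles=30)
    ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cpu')
    phantom = odl.phantom.shepp_logan(reco_space, modified=True)
    sino = astra_cpu_forward_projector(phantom, geometry, ray_trafo.range)
    reco = astra_cpu_back_projector(sino, geometry, reco_space)
    return sino, reco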
| 38.513889
| 78
| 0.63361
|
# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Backend for ASTRA using CPU."""
from __future__ import print_function, division, absolute_import
import numpy as np
try:
import astra
except ImportError:
pass
from odl.discr import DiscreteLp, DiscreteLpElement
from odl.tomo.backends.astra_setup import (
astra_projection_geometry, astra_volume_geometry, astra_data,
astra_projector, astra_algorithm)
from odl.tomo.geometry import Geometry
from odl.util import writable_array
__all__ = ('astra_cpu_forward_projector', 'astra_cpu_back_projector')
# TODO: use context manager when creating data structures
# TODO: is magnification scaling at the right place?
def astra_cpu_forward_projector(vol_data, geometry, proj_space, out=None):
"""Run an ASTRA forward projection on the given data using the CPU.
Parameters
----------
vol_data : `DiscreteLpElement`
Volume data to which the forward projector is applied
geometry : `Geometry`
Geometry defining the tomographic setup
proj_space : `DiscreteLp`
Space to which the calling operator maps
out : ``proj_space`` element, optional
Element of the projection space to which the result is written. If
``None``, an element in ``proj_space`` is created.
Returns
-------
out : ``proj_space`` element
Projection data resulting from the application of the projector.
If ``out`` was provided, the returned object is a reference to it.
"""
if not isinstance(vol_data, DiscreteLpElement):
raise TypeError('volume data {!r} is not a `DiscreteLpElement` '
'instance.'.format(vol_data))
if vol_data.space.impl != 'numpy':
raise TypeError("`vol_data.space.impl` must be 'numpy', got {!r}"
"".format(vol_data.space.impl))
if not isinstance(geometry, Geometry):
raise TypeError('geometry {!r} is not a Geometry instance'
''.format(geometry))
if not isinstance(proj_space, DiscreteLp):
raise TypeError('`proj_space` {!r} is not a DiscreteLp '
'instance.'.format(proj_space))
if proj_space.impl != 'numpy':
raise TypeError("`proj_space.impl` must be 'numpy', got {!r}"
"".format(proj_space.impl))
if vol_data.ndim != geometry.ndim:
raise ValueError('dimensions {} of volume data and {} of geometry '
'do not match'
''.format(vol_data.ndim, geometry.ndim))
if out is None:
out = proj_space.element()
else:
if out not in proj_space:
raise TypeError('`out` {} is neither None nor a '
'DiscreteLpElement instance'.format(out))
ndim = vol_data.ndim
# Create astra geometries
vol_geom = astra_volume_geometry(vol_data.space)
proj_geom = astra_projection_geometry(geometry)
# Create projector
if not all(s == vol_data.space.interp_byaxis[0]
for s in vol_data.space.interp_byaxis):
raise ValueError('volume interpolation must be the same in each '
'dimension, got {}'.format(vol_data.space.interp))
vol_interp = vol_data.space.interp
proj_id = astra_projector(vol_interp, vol_geom, proj_geom, ndim,
impl='cpu')
# Create ASTRA data structures
vol_data_arr = np.asarray(vol_data)
vol_id = astra_data(vol_geom, datatype='volume', data=vol_data_arr,
allow_copy=True)
with writable_array(out, dtype='float32', order='C') as out_arr:
sino_id = astra_data(proj_geom, datatype='projection', data=out_arr,
ndim=proj_space.ndim)
# Create algorithm
algo_id = astra_algorithm('forward', ndim, vol_id, sino_id, proj_id,
impl='cpu')
# Run algorithm
astra.algorithm.run(algo_id)
# Delete ASTRA objects
astra.algorithm.delete(algo_id)
astra.data2d.delete((vol_id, sino_id))
astra.projector.delete(proj_id)
return out
def astra_cpu_back_projector(proj_data, geometry, reco_space, out=None):
"""Run an ASTRA back-projection on the given data using the CPU.
Parameters
----------
proj_data : `DiscreteLpElement`
Projection data to which the back-projector is applied
geometry : `Geometry`
Geometry defining the tomographic setup
reco_space : `DiscreteLp`
Space to which the calling operator maps
out : ``reco_space`` element, optional
Element of the reconstruction space to which the result is written.
If ``None``, an element in ``reco_space`` is created.
Returns
-------
out : ``reco_space`` element
Reconstruction data resulting from the application of the backward
projector. If ``out`` was provided, the returned object is a
reference to it.
"""
if not isinstance(proj_data, DiscreteLpElement):
raise TypeError('projection data {!r} is not a DiscreteLpElement '
'instance'.format(proj_data))
if proj_data.space.impl != 'numpy':
    raise TypeError('`proj_data` must be a `numpy.ndarray` based '
                    'container, got `impl` {!r}'
                    ''.format(proj_data.space.impl))
if not isinstance(geometry, Geometry):
raise TypeError('geometry {!r} is not a Geometry instance'
''.format(geometry))
if not isinstance(reco_space, DiscreteLp):
raise TypeError('reconstruction space {!r} is not a DiscreteLp '
'instance'.format(reco_space))
if reco_space.impl != 'numpy':
raise TypeError("`reco_space.impl` must be 'numpy', got {!r}"
"".format(reco_space.impl))
if reco_space.ndim != geometry.ndim:
raise ValueError('dimensions {} of reconstruction space and {} of '
'geometry do not match'.format(
reco_space.ndim, geometry.ndim))
if out is None:
out = reco_space.element()
else:
if out not in reco_space:
raise TypeError('`out` {} is neither None nor a '
'DiscreteLpElement instance'.format(out))
ndim = proj_data.ndim
# Create astra geometries
vol_geom = astra_volume_geometry(reco_space)
proj_geom = astra_projection_geometry(geometry)
# Create ASTRA data structure
sino_id = astra_data(proj_geom, datatype='projection', data=proj_data,
allow_copy=True)
# Create projector
# TODO: implement with different schemes for angles and detector
if not all(s == proj_data.space.interp_byaxis[0]
for s in proj_data.space.interp_byaxis):
raise ValueError('data interpolation must be the same in each '
'dimension, got {}'
''.format(proj_data.space.interp_byaxis))
proj_interp = proj_data.space.interp
proj_id = astra_projector(proj_interp, vol_geom, proj_geom, ndim,
impl='cpu')
# Convert out to correct dtype and order if needed.
with writable_array(out, dtype='float32', order='C') as out_arr:
vol_id = astra_data(vol_geom, datatype='volume', data=out_arr,
ndim=reco_space.ndim)
# Create algorithm
algo_id = astra_algorithm('backward', ndim, vol_id, sino_id, proj_id,
impl='cpu')
# Run algorithm
astra.algorithm.run(algo_id)
# Weight the adjoint by appropriate weights
scaling_factor = float(proj_data.space.weighting.const)
scaling_factor /= float(reco_space.weighting.const)
out *= scaling_factor
# Delete ASTRA objects
astra.algorithm.delete(algo_id)
astra.data2d.delete((vol_id, sino_id))
astra.projector.delete(proj_id)
return out
if __name__ == '__main__':
from odl.util.testutils import run_doctests
run_doctests()
| 0
| 0
| 0
|
2032cd849767a64618fdc7306202ed98d257d8f4
| 1,960
|
py
|
Python
|
frappe/query_builder/terms.py
|
Don-Leopardo/frappe
|
39097b05a7a9904776a435ee2c3d7a579d429389
|
[
"MIT"
] | 3,755
|
2015-01-06T07:47:43.000Z
|
2022-03-31T20:54:23.000Z
|
frappe/query_builder/terms.py
|
Don-Leopardo/frappe
|
39097b05a7a9904776a435ee2c3d7a579d429389
|
[
"MIT"
] | 7,369
|
2015-01-01T19:59:41.000Z
|
2022-03-31T23:02:05.000Z
|
frappe/query_builder/terms.py
|
Don-Leopardo/frappe
|
39097b05a7a9904776a435ee2c3d7a579d429389
|
[
"MIT"
] | 2,685
|
2015-01-07T17:51:03.000Z
|
2022-03-31T23:16:24.000Z
|
from typing import Any, Dict, Optional
from pypika.terms import Function, ValueWrapper
from pypika.utils import format_alias_sql
| 39.2
| 138
| 0.764286
|
from typing import Any, Dict, Optional
from pypika.terms import Function, ValueWrapper
from pypika.utils import format_alias_sql
class NamedParameterWrapper():
def __init__(self, parameters: Dict[str, Any]):
self.parameters = parameters
def update_parameters(self, param_key: Any, param_value: Any, **kwargs):
self.parameters[param_key[2:-2]] = param_value
def get_sql(self, **kwargs):
return f'%(param{len(self.parameters) + 1})s'
class ParameterizedValueWrapper(ValueWrapper):
def get_sql(self, quote_char: Optional[str] = None, secondary_quote_char: str = "'", param_wrapper= None, **kwargs: Any) -> str:
if param_wrapper is None:
sql = self.get_value_sql(quote_char=quote_char, secondary_quote_char=secondary_quote_char, **kwargs)
return format_alias_sql(sql, self.alias, quote_char=quote_char, **kwargs)
else:
value_sql = self.get_value_sql(quote_char=quote_char, **kwargs) if not isinstance(self.value,int) else self.value
param_sql = param_wrapper.get_sql(**kwargs)
param_wrapper.update_parameters(param_key=param_sql, param_value=value_sql, **kwargs)
return format_alias_sql(param_sql, self.alias, quote_char=quote_char, **kwargs)
class ParameterizedFunction(Function):
def get_sql(self, **kwargs: Any) -> str:
with_alias = kwargs.pop("with_alias", False)
with_namespace = kwargs.pop("with_namespace", False)
quote_char = kwargs.pop("quote_char", None)
dialect = kwargs.pop("dialect", None)
param_wrapper = kwargs.pop("param_wrapper", None)
function_sql = self.get_function_sql(with_namespace=with_namespace, quote_char=quote_char, param_wrapper=param_wrapper, dialect=dialect)
if self.schema is not None:
function_sql = "{schema}.{function}".format(
schema=self.schema.get_sql(quote_char=quote_char, dialect=dialect, **kwargs),
function=function_sql,
)
if with_alias:
return format_alias_sql(function_sql, self.alias, quote_char=quote_char, **kwargs)
return function_sql
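# Hedged usage sketch (added for illustration; not part of the original
# file). With a param_wrapper, the value is collected into a named-parameter
# dict instead of being inlined into the SQL string; the exact placeholder
# text depends on the pypika version in use.
def _example_parameterized_sql():
	params = {}
	wrapper = NamedParameterWrapper(params)
	sql = ParameterizedValueWrapper("Guest").get_sql(param_wrapper=wrapper)
	# sql is now a placeholder such as '%(param1)s' and params maps
	# 'param1' to the quoted value.
	return sql, params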
| 1,590
| 51
| 186
|
2437b1fc5a17eb5303585ed2331bad10c365b40a
| 59
|
py
|
Python
|
src/qsiprep_analyses/utils/__init__.py
|
GalBenZvi/qsiprep_analyses
|
51512cffca218210f6b85e5eadd593b382963bbd
|
[
"Apache-2.0"
] | null | null | null |
src/qsiprep_analyses/utils/__init__.py
|
GalBenZvi/qsiprep_analyses
|
51512cffca218210f6b85e5eadd593b382963bbd
|
[
"Apache-2.0"
] | 6
|
2022-03-04T15:28:20.000Z
|
2022-03-30T09:37:12.000Z
|
src/qsiprep_analyses/utils/__init__.py
|
GalBenZvi/qsiprep_analyses
|
51512cffca218210f6b85e5eadd593b382963bbd
|
[
"Apache-2.0"
] | 2
|
2022-03-13T11:07:48.000Z
|
2022-03-13T11:50:40.000Z
|
"""
Utilities for the :mod:`qsiprep_analyses` package.
"""
| 14.75
| 50
| 0.694915
|
"""
Utilities for the :mod:`qsiprep_analyses` package.
"""
| 0
| 0
| 0
|
5bca9217161ce91c534256d2d0103754755ea4e6
| 2,240
|
py
|
Python
|
My_Account/migrations/0001_initial.py
|
CHESyrian/Syrians
|
8376e9bed6e3a03f536d8aacd523d630f6bc4345
|
[
"MIT"
] | null | null | null |
My_Account/migrations/0001_initial.py
|
CHESyrian/Syrians
|
8376e9bed6e3a03f536d8aacd523d630f6bc4345
|
[
"MIT"
] | null | null | null |
My_Account/migrations/0001_initial.py
|
CHESyrian/Syrians
|
8376e9bed6e3a03f536d8aacd523d630f6bc4345
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-04-22 21:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 44.8
| 133
| 0.612946
|
# Generated by Django 3.0.3 on 2020-04-22 21:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Sharing_Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Post', models.TextField()),
('Post_Date', models.DateTimeField(auto_now=True)),
('username', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Sharing_Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Image', models.ImageField(upload_to='users_profiles/uploaded_images/')),
('Image_Text', models.TextField(null=True)),
('Image_Date', models.DateTimeField(auto_now=True)),
('username', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile_Model',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Address', models.CharField(max_length=64, null=True)),
('Job', models.CharField(max_length=32, null=True)),
('Bio', models.CharField(max_length=120, null=True)),
('Number_Phone', models.CharField(max_length=16, null=True)),
('Profile_Image', models.ImageField(upload_to='users_profiles/profile_images/')),
('Cover_Image', models.ImageField(upload_to='users_profiles/cover_images/')),
('username', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 0
| 2,060
| 23
|
d00702f8ce3921e21728a7927615308f95062ef8
| 3,279
|
py
|
Python
|
refresh.py
|
pwillworth/dfkreport
|
ae10226430a3a74ac3c07ae888cab14dde778db8
|
[
"Apache-2.0"
] | 11
|
2022-01-18T17:36:12.000Z
|
2022-03-21T21:09:17.000Z
|
refresh.py
|
pwillworth/dfkreport
|
ae10226430a3a74ac3c07ae888cab14dde778db8
|
[
"Apache-2.0"
] | null | null | null |
refresh.py
|
pwillworth/dfkreport
|
ae10226430a3a74ac3c07ae888cab14dde778db8
|
[
"Apache-2.0"
] | 4
|
2022-01-18T18:37:48.000Z
|
2022-01-22T02:14:48.000Z
|
#!/usr/bin/env python3
import nets
import db
import contracts
from web3 import Web3
import prices
import logging
import logging.handlers
import jsonpickle
# Iterate through all transactions for account and run refresh actions
# Update the gas transaction fee amount and value assuming tx is on Harmony
if __name__ == "__main__":
main()
| 43.72
| 175
| 0.655688
|
#!/usr/bin/env python3
import nets
import db
import contracts
from web3 import Web3
import prices
import logging
import logging.handlers
import jsonpickle
def main():
handler = logging.handlers.RotatingFileHandler('../refresh.log', maxBytes=33554432, backupCount=10)
logging.basicConfig(handlers=[handler], level=logging.INFO, format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.info('Starting refresh process')
    # Connect to the right network that the txs are for
w3 = Web3(Web3.HTTPProvider(nets.hmy_web3))
if not w3.isConnected():
logging.error('Error: Critical w3 connection failure')
return 'Error: Blockchain connection failure.'
con = db.aConn()
cur = con.cursor()
cur.execute("SELECT DISTINCT account FROM transactions")
row = cur.fetchone()
while row != None:
logging.info('Starting refresh of account {0}'.format(row[0]))
refreshAccount(w3, con, row[0])
row = cur.fetchone()
con.close()
# Iterate through all transactions for account and run refresh actions
def refreshAccount(w3, con, account):
cur = con.cursor()
cur.execute("SELECT * FROM transactions WHERE account=%s", (account))
row = cur.fetchone()
while row != None:
if row[7] == None and account not in contracts.address_map:
addFee(w3, con, row[0], row[2], row[3], account)
row = cur.fetchone()
# Update the gas transaction fee amount and value assuming tx is on Harmony
def addFee(w3, con, tx, eventType, events, account):
# don't want to add gas to payment service tx or tavern sales/hires because gas is paid by other account
if (eventType != 'airdrops' or '0x6Ca68D6Df270a047b12Ba8405ec688B5dF42D50C' not in events) and (eventType != 'tavern' or ('sale' not in events and 'hire' not in events)):
try:
# sometimes they just don't exist yet
result = w3.eth.get_transaction(tx)
receipt = w3.eth.get_transaction_receipt(tx)
except Exception as err:
            logging.error('Failed to get transaction {0} {1}'.format(tx, str(err)))
return 'Error: Failed to get tx'
block = result['blockNumber']
timestamp = w3.eth.get_block(block)['timestamp']
txFee = Web3.fromWei(result['gasPrice'], 'ether') * receipt['gasUsed']
feeValue = prices.priceLookup(timestamp, '0xcF664087a5bB0237a0BAd6742852ec6c8d69A27a') * txFee
logging.info('updating gas data {0}: - {1}/{2}'.format(tx, txFee, feeValue))
# Update each event object inside
if events != None and events != '':
results = jsonpickle.decode(events)
if type(results) is list and len(results) > 0:
results[0].fiatFeeValue = feeValue
elif type(results) is not list:
results.fiatFeeValue = feeValue
events = jsonpickle.encode(results)
cur = con.cursor()
cur.execute("UPDATE transactions SET events=%s, network='harmony', fee=%s, feeValue=%s WHERE txHash=%s AND account=%s", (events, txFee, feeValue, tx, account))
con.commit()
else:
logging.info('skipping tx {0}, does not meet update criteria'.format(tx))
if __name__ == "__main__":
main()
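# Hedged arithmetic sketch (added for illustration; the gas price, gas used,
# and token price below are made-up values). It mirrors the fee computation
# in addFee: fee = fromWei(gasPrice, 'ether') * gasUsed, then the fiat fee
# value is the token price times that fee.
def _example_fee_math():
    from decimal import Decimal
    gas_price_wei = 30_000_000_000          # 30 gwei, illustrative
    gas_used = 150_000                      # illustrative
    tx_fee = Decimal(gas_price_wei) / Decimal(10 ** 18) * gas_used
    one_price_usd = Decimal('0.02')         # assumed ONE token price
    fee_value = one_price_usd * tx_fee
    assert tx_fee == Decimal('0.0045')
    return tx_fee, fee_value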
| 2,871
| 0
| 67
|
d57b9bd1fceae16ab680d68d444523487314cf4c
| 5,908
|
py
|
Python
|
examples/research/ftt-nas/fixed_point_plugins/fixed_point_patch_new.py
|
Harald-R/aw_nas
|
8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783
|
[
"MIT"
] | 195
|
2020-08-15T17:39:23.000Z
|
2022-02-28T07:48:03.000Z
|
examples/research/ftt-nas/fixed_point_plugins/fixed_point_patch_new.py
|
Harald-R/aw_nas
|
8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783
|
[
"MIT"
] | 22
|
2020-08-16T01:44:48.000Z
|
2022-03-12T00:46:54.000Z
|
examples/research/ftt-nas/fixed_point_plugins/fixed_point_patch_new.py
|
Harald-R/aw_nas
|
8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783
|
[
"MIT"
] | 27
|
2020-08-16T06:34:56.000Z
|
2022-03-06T18:16:52.000Z
|
#pylint: disable-all
"""
Script for patch SubCandidateNet/CNNGenotypeModel.forward
"""
from contextlib import contextmanager
import six
import numpy as np
import torch
from torch import nn
from nics_fix_pt.quant import quantitize
from aw_nas.weights_manager.super_net import SubCandidateNet
from aw_nas.final.cnn_model import CNNGenotypeModel
BITWIDTH = 8
FIX_METHOD = 1 # auto_fix
# ---- patch ----
## Here do not patch forward, as activation is not quantized in calls to `forward`,
## it's not meaningful to quantize the weights during the calls to `forward` too
## It must be patched, or ``backward through a graph second time'' error will occur.
## Let's reset all the module attributes to the original parameters in `_parameters`.
SubCandidateNet.old_forward = SubCandidateNet.forward
CNNGenotypeModel.old_forward = CNNGenotypeModel.forward
SubCandidateNet.forward = fix_forward
CNNGenotypeModel.forward = fix_forward
# only patch `forward_one_step_callback`, not forward
SubCandidateNet.old_forward_one_step_callback = SubCandidateNet.forward_one_step_callback
CNNGenotypeModel.old_forward_one_step_callback = CNNGenotypeModel.forward_one_step_callback
SubCandidateNet.forward_one_step_callback = fix_forward_one_step_callback
CNNGenotypeModel.forward_one_step_callback = fix_forward_one_step_callback
# ---- end patch ----
"""
Note there is a lot of randomness in the search process, so all the numbers are just a reference.
| | quantize | 30 eva | time quantize weight / | #quantize | ratio |
| | patch method | step time | time feature inject 1e-4 | calls | |
|---+--------------------------------------------------+-----------+--------------------------+-----------+-------|
| 1 | old | ~65 | 31.01/12.82 | ~68088 | 2.4 |
| 2 | new patch forward&fonestepcallback | ~75 | 28.68/13.05 | ~63954 | 2.2 |
| 3 | new patch fonestepcallback | ~60 | 14.19/12.90 | ~31997 | 1.1 |
| x | new patch forward(set original)&fonestepcallback | ~60 | - | ~31997 | - |
from 1->2, the quantization call reduction comes from avoiding quantizing unused params in one forward pass (`check_visited=True`)
and avoiding duplicated quantization calls when there are double connections in the rollout.
"""
| 48.826446
| 130
| 0.629824
|
#pylint: disable-all
"""
Script for patch SubCandidateNet/CNNGenotypeModel.forward
"""
from contextlib import contextmanager
import six
import numpy as np
import torch
from torch import nn
from nics_fix_pt.quant import quantitize
from aw_nas.weights_manager.super_net import SubCandidateNet
from aw_nas.final.cnn_model import CNNGenotypeModel
BITWIDTH = 8
FIX_METHOD = 1 # auto_fix
def _generate_default_fix_cfg(names, scale=0, bitwidth=8, method=0):
return {n: {
"method": torch.autograd.Variable(torch.IntTensor(np.array([method])), requires_grad=False),
"scale": torch.autograd.Variable(torch.IntTensor(np.array([scale])), requires_grad=False),
"bitwidth": torch.autograd.Variable(torch.IntTensor(np.array([bitwidth])), requires_grad=False)
} for n in names}
# ---- patch ----
## Here do not patch forward, as activation is not quantized in calls to `forward`,
## it's not meaningful to quantize the weights during the calls to `forward` too
## It must be patched, or ``backward through a graph second time'' error will occur.
## Let's reset all the module attributes to the original parameters in `_parameters`.
SubCandidateNet.old_forward = SubCandidateNet.forward
CNNGenotypeModel.old_forward = CNNGenotypeModel.forward
def fix_forward(self, *args, **kwargs):
with fix_params(self, original=True):
return self.old_forward(*args, **kwargs)
SubCandidateNet.forward = fix_forward
CNNGenotypeModel.forward = fix_forward
# only patch `forward_one_step_callback`, not forward
SubCandidateNet.old_forward_one_step_callback = SubCandidateNet.forward_one_step_callback
CNNGenotypeModel.old_forward_one_step_callback = CNNGenotypeModel.forward_one_step_callback
def fix_forward_one_step_callback(self, inputs, callback):
with fix_params(self):
return self.old_forward_one_step_callback(inputs, callback)
SubCandidateNet.forward_one_step_callback = fix_forward_one_step_callback
CNNGenotypeModel.forward_one_step_callback = fix_forward_one_step_callback
# ---- end patch ----
def quantize(self, fix_cfg, original=False):
for n, param in six.iteritems(self._parameters):
if not isinstance(param, (torch.Tensor, torch.autograd.Variable)):
continue
if not original:
# run quantize
param, _ = quantitize(param, fix_cfg.get(n, {}), {}, name=n)
object.__setattr__(self, n, param)
for n, param in six.iteritems(self._buffers):
if not isinstance(param, (torch.Tensor, torch.autograd.Variable)):
continue
if not original:
# run quantize
param, _ = quantitize(param, fix_cfg.get(n, {}), {}, name=n)
object.__setattr__(self, n, param)
def setback_buffer(self):
for n, param in six.iteritems(self._buffers):
if param is not None:
# set buffer back, as there will be no gradient, just in-place modification
# FIXME: For fixed-point batch norm,
# the running mean/var accumulattion is on quantitized mean/var,
# which means it might fail to update the running mean/var
# if the updating momentum is too small
self._buffers[n] = getattr(self, n)
@contextmanager
def fix_params(module, original=False):
if not hasattr(module, "_generated_fixed_cfgs"):
module._generated_fixed_cfgs = {}
for mod_prefix, mod in module.named_modules():
if isinstance(mod, (nn.Conv2d, nn.Linear)):
to_fix = ["weight", "bias"]
# elif isinstance(mod, nn.BatchNorm2d):
# to_fix = ["weight", "bias", "running_mean", "running_var"]
else:
to_fix = []
if to_fix:
module._generated_fixed_cfgs[mod_prefix] = _generate_default_fix_cfg(
to_fix, method=FIX_METHOD, bitwidth=BITWIDTH)
current_active_modules = dict(module.named_modules()) \
if not hasattr(module, "active_named_members")\
else dict(module.active_named_members(member="modules",
prefix="super_net",
check_visited=True))
# FIXME: possible multi-access in multi-gpu
to_fix_mods = set(module._generated_fixed_cfgs.keys()).intersection(
current_active_modules.keys())
for mod_prefix in to_fix_mods:
fix_cfg = module._generated_fixed_cfgs[mod_prefix]
mod = current_active_modules[mod_prefix]
quantize(mod, fix_cfg, original=original)
yield
if not original:
for mod_prefix in to_fix_mods:
mod = current_active_modules[mod_prefix]
setback_buffer(mod)
"""
Note there is a lot of randomness in the search process, so all the numbers are just a reference.
| | quantize | 30 eva | time quantize weight / | #quantize | ratio |
| | patch method | step time | time feature inject 1e-4 | calls | |
|---+--------------------------------------------------+-----------+--------------------------+-----------+-------|
| 1 | old | ~65 | 31.01/12.82 | ~68088 | 2.4 |
| 2 | new patch forward&fonestepcallback | ~75 | 28.68/13.05 | ~63954 | 2.2 |
| 3 | new patch fonestepcallback | ~60 | 14.19/12.90 | ~31997 | 1.1 |
| x | new patch forward(set original)&fonestepcallback | ~60 | - | ~31997 | - |
from 1->2, the quantization call reduction comes from avoiding quantizing unused params in one forward pass (`check_visited=True`)
and avoiding duplicated quantization calls when there are double connections in the rollout.
"""
| 3,287
| 0
| 135
|
eedd7919dde8c6f614677433f04639e63a253d58
| 2,942
|
py
|
Python
|
gland_classification/four_resolutions_model/model.py
|
onermustafaumit/MLNM
|
eef9a694fc616d68094c6dbf4d8fd4e45c842e2d
|
[
"MIT"
] | 1
|
2022-02-07T06:41:43.000Z
|
2022-02-07T06:41:43.000Z
|
gland_classification/four_resolutions_model/model.py
|
onermustafaumit/MLNM
|
eef9a694fc616d68094c6dbf4d8fd4e45c842e2d
|
[
"MIT"
] | null | null | null |
gland_classification/four_resolutions_model/model.py
|
onermustafaumit/MLNM
|
eef9a694fc616d68094c6dbf4d8fd4e45c842e2d
|
[
"MIT"
] | null | null | null |
import torchvision
import torch.nn as nn
| 45.261538
| 119
| 0.71312
|
import torchvision
import torch.nn as nn
class ResNet(nn.Module):
def __init__(self, pretrained=False, num_classes=2, num_intermediate_features=64):
super().__init__()
self.pretrained = pretrained
self.num_classes = num_classes
self.num_intermediate_features = num_intermediate_features
self.resnet = torchvision.models.resnet18(pretrained=pretrained)
self.in_features = self.resnet.fc.in_features
self.resnet.fc = nn.Linear(self.in_features, num_intermediate_features, bias=True)
self.fc2 = nn.Linear(num_intermediate_features, num_classes)
self.dropout = nn.Dropout(0.5)
def forward(self, x):
feature_vec = self.resnet(x)
feature_vec = self.dropout(feature_vec)
class_score_vec = self.fc2(feature_vec)
return feature_vec, class_score_vec
class Model(nn.Module):
def __init__(self, pretrained=False, num_classes=2, num_intermediate_features=64):
super().__init__()
self.pretrained = pretrained
self.num_classes = num_classes
self.num_intermediate_features = num_intermediate_features
self.resnet_high = ResNet(pretrained, num_classes, num_intermediate_features)
self.resnet_medium = ResNet(pretrained, num_classes, num_intermediate_features)
self.resnet_low = ResNet(pretrained, num_classes, num_intermediate_features)
self.resnet_low2 = ResNet(pretrained, num_classes, num_intermediate_features)
self.fc3 = nn.Linear(num_intermediate_features, 10)
self.fc4 = nn.Linear(10, num_classes)
self.dropout_high = nn.Dropout(0.5)
self.dropout_medium = nn.Dropout(0.5)
self.dropout_low = nn.Dropout(0.5)
self.dropout_low2 = nn.Dropout(0.5)
self.dropout_result = nn.Dropout(0.5)
self.dropout_fc3 = nn.Dropout(0.5)
def forward(self, x_high, x_medium, x_low, x_low2):
feature_vec_high, class_score_vec_high = self.resnet_high(x_high)
feature_vec_high = self.dropout_high(feature_vec_high)
feature_vec_medium, class_score_vec_medium = self.resnet_medium(x_medium)
feature_vec_medium = self.dropout_medium(feature_vec_medium)
feature_vec_low, class_score_vec_low = self.resnet_low(x_low)
feature_vec_low = self.dropout_low(feature_vec_low)
feature_vec_low2, class_score_vec_low2 = self.resnet_low2(x_low2)
feature_vec_low2 = self.dropout_low2(feature_vec_low2)
feature_vec = feature_vec_high + feature_vec_medium + feature_vec_low + feature_vec_low2
feature_vec = self.dropout_result(feature_vec)
feature_vec = self.fc3(feature_vec)
feature_vec = self.dropout_fc3(feature_vec)
class_score_vec = self.fc4(feature_vec)
return class_score_vec_high, class_score_vec_medium, class_score_vec_low, class_score_vec_low2, class_score_vec
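# Hedged usage sketch (added for illustration; batch size and crop size are
# assumptions). Four same-sized crops, one per resolution, produce per-branch
# scores plus a fused score of shape (batch, num_classes).
def _example_forward():
    import torch
    model = Model(pretrained=False, num_classes=2,
                  num_intermediate_features=64)
    crops = [torch.randn(2, 3, 224, 224) for _ in range(4)]
    *branch_scores, fused = model(*crops)
    assert fused.shape == (2, 2)
    return branch_scores, fused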
| 2,714
| 5
| 168
|
9dbbc46dc3ab82348e7ae0fc2b05ea58038bf2f9
| 5,019
|
py
|
Python
|
py/sym/messages/authz_pb2.py
|
symopsio/protobufs
|
5a33aa2c00116810e7e5a1d2b23f9134bb13eb10
|
[
"Apache-2.0"
] | null | null | null |
py/sym/messages/authz_pb2.py
|
symopsio/protobufs
|
5a33aa2c00116810e7e5a1d2b23f9134bb13eb10
|
[
"Apache-2.0"
] | null | null | null |
py/sym/messages/authz_pb2.py
|
symopsio/protobufs
|
5a33aa2c00116810e7e5a1d2b23f9134bb13eb10
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sym/messages/authz.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from sym.models import schema_pb2 as sym_dot_models_dot_schema__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='sym/messages/authz.proto',
package='sym.messages',
syntax='proto3',
serialized_options=b'Z%github.com/symopsio/types/go/messages',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x18sym/messages/authz.proto\x12\x0csym.messages\x1a\x17sym/models/schema.proto\"G\n\x05\x41uthz\x12\"\n\x06schema\x18\x01 \x01(\x0b\x32\x12.sym.models.Schema\x12\x0c\n\x04user\x18\x02 \x01(\t\x12\x0c\n\x04role\x18\x03 \x01(\t\"1\n\rAuthzResponse\x12\n\n\x02ok\x18\x01 \x01(\x08\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\tB\'Z%github.com/symopsio/types/go/messagesb\x06proto3'
,
dependencies=[sym_dot_models_dot_schema__pb2.DESCRIPTOR,])
_AUTHZ = _descriptor.Descriptor(
name='Authz',
full_name='sym.messages.Authz',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schema', full_name='sym.messages.Authz.schema', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user', full_name='sym.messages.Authz.user', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='role', full_name='sym.messages.Authz.role', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=138,
)
_AUTHZRESPONSE = _descriptor.Descriptor(
name='AuthzResponse',
full_name='sym.messages.AuthzResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='ok', full_name='sym.messages.AuthzResponse.ok', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='errorMessage', full_name='sym.messages.AuthzResponse.errorMessage', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=140,
serialized_end=189,
)
_AUTHZ.fields_by_name['schema'].message_type = sym_dot_models_dot_schema__pb2._SCHEMA
DESCRIPTOR.message_types_by_name['Authz'] = _AUTHZ
DESCRIPTOR.message_types_by_name['AuthzResponse'] = _AUTHZRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Authz = _reflection.GeneratedProtocolMessageType('Authz', (_message.Message,), {
'DESCRIPTOR' : _AUTHZ,
'__module__' : 'sym.messages.authz_pb2'
# @@protoc_insertion_point(class_scope:sym.messages.Authz)
})
_sym_db.RegisterMessage(Authz)
AuthzResponse = _reflection.GeneratedProtocolMessageType('AuthzResponse', (_message.Message,), {
'DESCRIPTOR' : _AUTHZRESPONSE,
'__module__' : 'sym.messages.authz_pb2'
# @@protoc_insertion_point(class_scope:sym.messages.AuthzResponse)
})
_sym_db.RegisterMessage(AuthzResponse)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 36.904412
| 397
| 0.75533
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sym/messages/authz.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from sym.models import schema_pb2 as sym_dot_models_dot_schema__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='sym/messages/authz.proto',
package='sym.messages',
syntax='proto3',
serialized_options=b'Z%github.com/symopsio/types/go/messages',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x18sym/messages/authz.proto\x12\x0csym.messages\x1a\x17sym/models/schema.proto\"G\n\x05\x41uthz\x12\"\n\x06schema\x18\x01 \x01(\x0b\x32\x12.sym.models.Schema\x12\x0c\n\x04user\x18\x02 \x01(\t\x12\x0c\n\x04role\x18\x03 \x01(\t\"1\n\rAuthzResponse\x12\n\n\x02ok\x18\x01 \x01(\x08\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\tB\'Z%github.com/symopsio/types/go/messagesb\x06proto3'
,
dependencies=[sym_dot_models_dot_schema__pb2.DESCRIPTOR,])
_AUTHZ = _descriptor.Descriptor(
name='Authz',
full_name='sym.messages.Authz',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schema', full_name='sym.messages.Authz.schema', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user', full_name='sym.messages.Authz.user', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='role', full_name='sym.messages.Authz.role', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=138,
)
_AUTHZRESPONSE = _descriptor.Descriptor(
name='AuthzResponse',
full_name='sym.messages.AuthzResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='ok', full_name='sym.messages.AuthzResponse.ok', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='errorMessage', full_name='sym.messages.AuthzResponse.errorMessage', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=140,
serialized_end=189,
)
_AUTHZ.fields_by_name['schema'].message_type = sym_dot_models_dot_schema__pb2._SCHEMA
DESCRIPTOR.message_types_by_name['Authz'] = _AUTHZ
DESCRIPTOR.message_types_by_name['AuthzResponse'] = _AUTHZRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Authz = _reflection.GeneratedProtocolMessageType('Authz', (_message.Message,), {
'DESCRIPTOR' : _AUTHZ,
'__module__' : 'sym.messages.authz_pb2'
# @@protoc_insertion_point(class_scope:sym.messages.Authz)
})
_sym_db.RegisterMessage(Authz)
AuthzResponse = _reflection.GeneratedProtocolMessageType('AuthzResponse', (_message.Message,), {
'DESCRIPTOR' : _AUTHZRESPONSE,
'__module__' : 'sym.messages.authz_pb2'
# @@protoc_insertion_point(class_scope:sym.messages.AuthzResponse)
})
_sym_db.RegisterMessage(AuthzResponse)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 0
| 0
| 0
|
65515bf65bebc383c9d94596460baa49da4a8f9c
| 2,506
|
py
|
Python
|
parser.py
|
winterYANGWT/STEFANN_PyTorch
|
bcee0ddbc286b4c83de0b2ea243091fb9c14fb90
|
[
"Apache-2.0"
] | 1
|
2021-07-11T04:47:18.000Z
|
2021-07-11T04:47:18.000Z
|
parser.py
|
mathemusician/STEFANN_PyTorch
|
82941bc859af424babcf1f23a3d973764226263b
|
[
"Apache-2.0"
] | 1
|
2021-08-11T16:37:02.000Z
|
2021-08-11T16:37:02.000Z
|
parser.py
|
mathemusician/STEFANN_PyTorch
|
82941bc859af424babcf1f23a3d973764226263b
|
[
"Apache-2.0"
] | 1
|
2021-07-10T04:26:33.000Z
|
2021-07-10T04:26:33.000Z
|
import os
import os.path as path
import itertools
import pandas as pd
import glob
import config
import random
FANNET_IMG_DIR='/mnt/Data/GoogleFontsSTEFANN/fannet'
COLORNET_IMG_DIR='/mnt/Data/GoogleFontsSTEFANN/colornet'
if __name__=='__main__':
process_fannet(path.join(FANNET_IMG_DIR,'train'),
'./Data/STEFANN/fannet_train.csv')
process_fannet(path.join(FANNET_IMG_DIR,'valid'),
'./Data/STEFANN/fannet_val.csv')
process_colornet(path.join(COLORNET_IMG_DIR,'train'),
'./Data/STEFANN/colornet_train.csv')
process_colornet(path.join(COLORNET_IMG_DIR,'valid'),
'./Data/STEFANN/colornet_val.csv')
| 31.721519
| 72
| 0.622107
|
import os
import os.path as path
import itertools
import pandas as pd
import glob
import config
import random
FANNET_IMG_DIR='/mnt/Data/GoogleFontsSTEFANN/fannet'
COLORNET_IMG_DIR='/mnt/Data/GoogleFontsSTEFANN/colornet'
def process_fannet(img_dir,save_path):
perms=itertools.product(os.listdir(img_dir),
list(config.SRC_CHRS),
list(config.TRGT_CHRS))
df=pd.DataFrame(columns=['src_img_path',
'trgt_label',
'trgt_img_path',
'font'])
item_list=[]
for perm in perms:
item={}
src_chr=str(ord(perm[1]))
trgt_chr=str(ord(perm[2]))
font=perm[0]
src_img_path=path.join(img_dir,font,src_chr+'.jpg')
trgt_img_path=path.join(img_dir,font,trgt_chr+'.jpg')
item['src_img_path']=src_img_path
item['trgt_label']=config.TRGT_CHRS.find(perm[2])
item['trgt_img_path']=trgt_img_path
item['font']=font
item_list.append(item)
random.shuffle(item_list)
df=df.append(item_list,ignore_index=True)
df.to_csv(save_path,index=False)
def process_colornet(img_dir,save_path):
input1_img_dir=path.join(img_dir,'input_color')
input2_img_dir=path.join(img_dir,'input_mask')
output_img_dir=path.join(img_dir,'output_color')
files_name=sorted(p.split('/')[-1] for p in \
glob.glob('{}/*{}'.format(input1_img_dir,'.jpg')))
df=pd.DataFrame(columns=['input_color',
'input_mask',
'output_color'])
item_list=[]
for file_name in files_name:
item={}
item['input_color']=path.join(input1_img_dir,file_name)
item['input_mask']=path.join(input2_img_dir,file_name)
item['output_color']=path.join(output_img_dir,file_name)
item_list.append(item)
random.shuffle(item_list)
df=df.append(item_list,ignore_index=True)
df.to_csv(save_path,index=False)
if __name__=='__main__':
process_fannet(path.join(FANNET_IMG_DIR,'train'),
'./Data/STEFANN/fannet_train.csv')
process_fannet(path.join(FANNET_IMG_DIR,'valid'),
'./Data/STEFANN/fannet_val.csv')
process_colornet(path.join(COLORNET_IMG_DIR,'train'),
'./Data/STEFANN/colornet_train.csv')
process_colornet(path.join(COLORNET_IMG_DIR,'valid'),
'./Data/STEFANN/colornet_val.csv')
| 1,762
| 0
| 46
|
7b8242216757295d93bacc284fc1accdd444067f
| 1,084
|
py
|
Python
|
js_code.py
|
luoshenshen/LibrarySeats
|
c9fbd4fb7c26d380ee17b765cb4b83b1f3bbb78c
|
[
"MIT"
] | 12
|
2021-06-03T03:26:45.000Z
|
2022-03-06T09:45:57.000Z
|
js_code.py
|
luoshenshen/LibrarySeats
|
c9fbd4fb7c26d380ee17b765cb4b83b1f3bbb78c
|
[
"MIT"
] | 5
|
2021-11-06T07:35:00.000Z
|
2022-03-13T06:02:58.000Z
|
js_code.py
|
luoshenshen/LibrarySeats
|
c9fbd4fb7c26d380ee17b765cb4b83b1f3bbb78c
|
[
"MIT"
] | 4
|
2021-06-07T09:46:31.000Z
|
2022-03-09T14:20:34.000Z
|
'''
@ project: LibrarySeats
@ file: test
@ user: 罗申申
@ email: luoshenshen@buaa.edu.cn
@ tool: PyCharm
@ time: 2021/5/24 14:27
'''
import re
import function
import execjs
import browser_tools
def verify_code_get(jsname,cookie,time):
    '''The code itself is simple; most of the effort went into analysing the JS.'''
url = "https://static.wechat.laixuanzuo.com/template/theme2/cache/layout/" + jsname + ".js"
pattern_js_bg = 'void 0\=\=\=.\&\&\(.\=""\);'
pattern_js_end = '.\.ajax_get'
pattern_js = pattern_js_bg + '.*' + pattern_js_end
pattern_js_res = '\+"\&".*\+"\&yzm\="'
exjs = network(url,cookie,time)
funjs = re.search(pattern_js, exjs).group(0)
funjs = funjs[19:-10]
resjs = re.search(pattern_js_res, exjs).group(0)
resultcommond = resjs[5:-14]
exjs8 = exjs
docjs = execjs.compile(exjs8 + funjs)
return docjs.eval(resultcommond)
| 23.565217
| 95
| 0.654059
|
'''
@ project: LibrarySeats
@ file: test
@ user: 罗申申
@ email: luoshenshen@buaa.edu.cn
@ tool: PyCharm
@ time: 2021/5/24 14:27
'''
import re
import function
import execjs
import browser_tools
def obtain_js(html):
js = '<script src="(.*?)"'
href = re.compile(js, re.S).findall(html)
return href
def verify_code_get(jsname,cookie,time):
    '''The code itself is simple; most of the effort went into analysing the JS.'''
url = "https://static.wechat.laixuanzuo.com/template/theme2/cache/layout/" + jsname + ".js"
pattern_js_bg = 'void 0\=\=\=.\&\&\(.\=""\);'
pattern_js_end = '.\.ajax_get'
pattern_js = pattern_js_bg + '.*' + pattern_js_end
pattern_js_res = '\+"\&".*\+"\&yzm\="'
exjs = network(url,cookie,time)
funjs = re.search(pattern_js, exjs).group(0)
funjs = funjs[19:-10]
resjs = re.search(pattern_js_res, exjs).group(0)
resultcommond = resjs[5:-14]
exjs8 = exjs
docjs = execjs.compile(exjs8 + funjs)
return docjs.eval(resultcommond)
def network(url,cookie,time):
return function.session_get(url=url, header=browser_tools.get_js_header(cookie,time)).text
| 195
| 0
| 46
|
9d5a188e29aa85bb09a92509714e910b4fa52596
| 1,678
|
py
|
Python
|
en16931/tests/test_invoice_line.py
|
invinet/python-en16931
|
f6671f86e8d578c3c82a48134426f89ec13b160c
|
[
"Apache-2.0"
] | 9
|
2018-07-09T10:34:27.000Z
|
2021-10-13T20:11:04.000Z
|
en16931/tests/test_invoice_line.py
|
invinet/python-en16931
|
f6671f86e8d578c3c82a48134426f89ec13b160c
|
[
"Apache-2.0"
] | null | null | null |
en16931/tests/test_invoice_line.py
|
invinet/python-en16931
|
f6671f86e8d578c3c82a48134426f89ec13b160c
|
[
"Apache-2.0"
] | 1
|
2022-02-07T15:30:53.000Z
|
2022-02-07T15:30:53.000Z
|
import pytest
from en16931.invoice_line import InvoiceLine
| 28.931034
| 62
| 0.585221
|
import pytest
from en16931.invoice_line import InvoiceLine
class TestInvoiceLine:
def test_initialization(self):
il = InvoiceLine(quantity=11, unit_code="EA", price=2,
item_name='test', currency="EUR",
tax_percent=0.21, tax_category="S")
assert il.is_valid()
assert il.currency == "EUR"
def test_invalid_currency(self):
il = InvoiceLine()
with pytest.raises(KeyError):
il.currency = "blah"
def test_no_taxes(self):
il = InvoiceLine()
assert il.tax is None
def test_creation(self):
il = InvoiceLine()
il.quantity = 11
il.price = 2
il.item_name = 'test'
il.tax_percent = 0.21
il.tax_category = "S"
assert il.is_valid()
def test_line_extension_amount(self):
il = InvoiceLine(quantity=11, unit_code="EA", price=2,
item_name='test', currency="EUR",
tax_percent=0.21, tax_category="S")
assert str(il.line_extension_amount) == '22.00'
def test_invalid_unit_code(self):
il = InvoiceLine()
with pytest.raises(ValueError):
il.unit_code = "ASF"
def test_invalid_price(self):
il = InvoiceLine()
with pytest.raises(ValueError):
il.price = "dasdas"
def test_invalid_line_extension_amount(self):
il = InvoiceLine()
with pytest.raises(ValueError):
il.line_extension_amount = "expensive"
def test_invalid_quantity(self):
il = InvoiceLine()
with pytest.raises(ValueError):
il.quantity = "dasdas"
| 1,350
| 1
| 266
|
9faa68d2b05b4a33cebe4f7dd86018471e582c98
| 3,336
|
py
|
Python
|
var_caller_mt.py
|
shimbalama/friday_sessions_QIMR
|
5f5c65fdea98b22df845b09e3a72dbc7989e8ed1
|
[
"MIT"
] | null | null | null |
var_caller_mt.py
|
shimbalama/friday_sessions_QIMR
|
5f5c65fdea98b22df845b09e3a72dbc7989e8ed1
|
[
"MIT"
] | null | null | null |
var_caller_mt.py
|
shimbalama/friday_sessions_QIMR
|
5f5c65fdea98b22df845b09e3a72dbc7989e8ed1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from multiprocessing import Pool, TimeoutError
from glob import glob
import gzip
import pysam
from Bio import SeqIO
from collections import Counter, defaultdict
import scipy.stats as stats
import operator
import pandas as pd
import argparse
import cProfile
import sys
def chunk_ref(args, chroms):
'''
Split ref into chunks for threading
'''
#make server mode where just takes one chrom and scatters with WDL
chunks = []
size = 100000 #just for testing, put back to 1mb
total_len = 0
for record in SeqIO.parse(args.ref, 'fasta'):
if record.id in chroms:
for i in range(0, len(record.seq), size):
if len(record.seq) > i + size:
end = i + size
else:#end of chrom
end = len(record.seq)
                chunks.append((record.id, i, end))
return chunks
if __name__ == '__main__':
main()
| 25.082707
| 121
| 0.555456
|
#!/usr/bin/env python3
from multiprocessing import Pool, TimeoutError
from glob import glob
import gzip
import pysam
from Bio import SeqIO
from collections import Counter, defaultdict
import scipy.stats as stats
import operator
import pandas as pd
import argparse
import cProfile
import sys
def main():
parser = argparse.ArgumentParser(description='somatic var caller')
required = parser.add_argument_group(
'Required',
'ref and pileups')
required.add_argument(
'-r',
'--ref',
type=str,
help='ref 37 or 38 [38]')
required.add_argument(
'-c',
'--cancer_pile',
type=str,
help='pileup of tumour/cancer')
required.add_argument(
'-n',
'--normal_pile',
type=str,
help='pileup of normal/blood')
optional = parser.add_argument_group(
'Optional',
'threads and chroms')
optional.add_argument(
'-x',
'--chrom',
type=str,
help='Which chromosomes to query. comma,separated,list or [all]',
default='all')
optional.add_argument(
'-t',
'--threads',
type=int,
help='Threads [5]',
default=8)
args = parser.parse_args()
if args.chrom == 'all':
chroms = ['chr' + str(i+1) for i in range(22)] + ['chrX', 'chrY']
else:
chroms = args.chrom.split(',')
chunks = chunk_ref(args, chroms)
print (f'len chunks {len(chunks)}')
d={}
with Pool(processes=args.threads) as pool:
tmp = [(args, chunk) for chunk in chunks]
res = pool.map(doit, tmp)
for vars in res:
d = {**d, **vars}
df = pd.DataFrame.from_dict(d, orient='index')
df.to_csv('test.tsv', sep='\t')
'''
Some useful code:
#Define what chroms you want
if args.chrom == 'all':
chroms = ['chr' + str(i+1) for i in range(22)] + ['chrX', 'chrY']
else:
chroms = args.chrom.split(',')
#Parse tabix
for pos in tabixfile.fetch('chr' + str(self.chrom), self.start, self.end):
tmp_d = dict(zip(keys, pos.split()))
...
# variant call with fisher
oddsratio, pvalue1 = stats.fisher_exact([
[tumour_count, normal_count],
[non_base_tumour, non_base_normal]],
alternative='greater')
'''
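# Self-contained example (added for illustration; the counts are made up) of
# the Fisher's exact call sketched above: test whether the alt-allele count
# in tumour is enriched relative to normal.
def _example_fisher():
    tumour_alt, normal_alt = 12, 1
    tumour_ref, normal_ref = 30, 41
    oddsratio, pvalue = stats.fisher_exact(
        [[tumour_alt, normal_alt], [tumour_ref, normal_ref]],
        alternative='greater')
    return oddsratio, pvalue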
def doit(tup):
#https://stackoverflow.com/questions/53890693/cprofile-causes-pickling-error-when-running-multiprocessing-python-code
args, genomic_region = tup
print (genomic_region)
def chunk_ref(args, chroms):
'''
Split ref into chunks for threading
'''
#make server mode where just takes one chrom and scatters with WDL
chunks = []
size = 100000 #just for testing, put back to 1mb
total_len = 0
for record in SeqIO.parse(args.ref, 'fasta'):
if record.id in chroms:
for i in range(0, len(record.seq), size):
if len(record.seq) > i + size:
end = i + size
else:#end of chrom
end = len(record.seq)
                chunks.append((record.id, i, end))
return chunks
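# Worked example (illustrative): for a single 250 kb chromosome 'chr1' and
# size = 100000, chunk_ref returns
# [('chr1', 0, 100000), ('chr1', 100000, 200000), ('chr1', 200000, 250000)]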
if __name__ == '__main__':
main()
| 2,328
| 0
| 46
|
dade49d1b215000562cd2d23b09812f5a692264c
| 713
|
py
|
Python
|
BOJ_Solved/BOJ-16928.py
|
CodingLeeSeungHoon/Python_Algorithm_TeamNote
|
1e92986999b45aa9951e12e67b23062e410e9b36
|
[
"MIT"
] | 7
|
2021-11-19T14:50:59.000Z
|
2022-02-25T20:00:20.000Z
|
BOJ_Solved/BOJ-16928.py
|
CodingLeeSeungHoon/Python_Algorithm_TeamNote
|
1e92986999b45aa9951e12e67b23062e410e9b36
|
[
"MIT"
] | null | null | null |
BOJ_Solved/BOJ-16928.py
|
CodingLeeSeungHoon/Python_Algorithm_TeamNote
|
1e92986999b45aa9951e12e67b23062e410e9b36
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Baekjoon 16928: Snakes and Ladders game
"""
import sys
from collections import deque
input = sys.stdin.readline
N, M = map(int, input().split())
graph = [*range(101)]
visited = [-1] * 101
for _ in range(N + M):
x, y = map(int, input().split())
graph[x] = y
bfs(graph, 1, visited)
print(visited[-1])
| 19.27027
| 63
| 0.504909
|
# coding=utf-8
"""
Baekjoon 16928: Snakes and Ladders game
"""
import sys
from collections import deque
input = sys.stdin.readline
def bfs(graph, start, visited):
    # Breadth-first search over the 100 board squares: each dice roll (1..6)
    # is one edge, and graph[] maps a landing square to its ladder/snake
    # destination, so visited[i] ends up as the minimum number of throws.
    queue = deque([start])
    visited[start] = 0
    while queue:
        v = queue.popleft()
        for i in range(1, 7):
            y = v + i
            if y > 100:
                continue
            y = graph[y]  # follow a ladder or snake, if any
            if visited[y] == -1 or visited[y] > visited[v] + 1:
                visited[y] = visited[v] + 1
                queue.append(y)
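# Worked example (illustrative): with no snakes or ladders the answer for
# square 100 is 17 throws (ceil(99 / 6)); a ladder from 2 to 100 makes it 1.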
N, M = map(int, input().split())
graph = [*range(101)]
visited = [-1] * 101
for _ in range(N + M):
x, y = map(int, input().split())
graph[x] = y
bfs(graph, 1, visited)
print(visited[-1])
| 376
| 0
| 23
|
b156fbcb3777c67bbda34c74c49e66b3b3d17cc7
| 4,001
|
py
|
Python
|
data/birthmark_xml_create.py
|
mitubaEX/MITUBASearcher
|
7d6eb92bcc608a27519d8e6e30b033dc784694f3
|
[
"Apache-2.0"
] | null | null | null |
data/birthmark_xml_create.py
|
mitubaEX/MITUBASearcher
|
7d6eb92bcc608a27519d8e6e30b033dc784694f3
|
[
"Apache-2.0"
] | 5
|
2017-09-12T15:41:33.000Z
|
2017-10-23T14:21:23.000Z
|
data/birthmark_xml_create.py
|
mitubaEX/MITUBASearcher
|
7d6eb92bcc608a27519d8e6e30b033dc784694f3
|
[
"Apache-2.0"
] | null | null | null |
import glob
import os
import sys
import csv
csv.field_size_limit(1000000000)
# -*- coding: utf-8 -*-
import codecs
files = []
cvfv = codecs.open("./birth_cvfv.xml","w",'utf-8')
fmc = codecs.open("./birth_fmc.xml","w",'utf-8')
fuc = codecs.open("./birth_fuc.xml","w",'utf-8')
_2gram = codecs.open("./birth_2gram.xml","w",'utf-8')
_3gram = codecs.open("./birth_3gram.xml","w",'utf-8')
_4gram = codecs.open("./birth_4gram.xml","w",'utf-8')
_5gram = codecs.open("./birth_5gram.xml","w",'utf-8')
_6gram = codecs.open("./birth_6gram.xml","w",'utf-8')
smc = codecs.open("./birth_smc.xml","w",'utf-8')
uc = codecs.open("./birth_uc.xml","w",'utf-8')
wsp = codecs.open("./birth_wsp.xml","w",'utf-8')
files = [cvfv, fmc, fuc, _2gram, _3gram, _4gram, _5gram, _6gram, smc, uc, wsp]
for j in files:
init(j)
tmp = glob.glob("./*.csv")
count = 0
for i in tmp:
reader = open(i).read().split('\n')
if '\0' not in open(i).read():
if reader is not None:
for row in reader:
row = row.split(',',3)
if len(row) >= 4:
                    row[0] = row[0].replace('\n',"").replace("&",'&amp;').replace('<','&lt;').replace(">",'&gt;').replace("\"",'&quot;').replace("\'",'&apos;')
                    row[1] = row[1].replace('\n',"").replace("&",'&amp;').replace('<','&lt;').replace(">",'&gt;').replace("\"",'&quot;').replace("\'",'&apos;')
                    row[2] = row[2].replace('\n',"").replace("&",'&amp;').replace('<','&lt;').replace(">",'&gt;').replace("\"",'&quot;').replace("\'",'&apos;')
                    row[3] = row[3].replace('\n',"").replace("&",'&amp;').replace('<','&lt;').replace(">",'&gt;').replace("\"",'&quot;').replace("\'",'&apos;')
if "cvfv" in row[2]:
writer(cvfv, row)
elif "fmc" in row[2]:
writer(fmc, row)
elif "fuc" in row[2]:
writer(fuc, row)
elif "2-gram" in str(i):
writer(_2gram, row)
elif "3-gram" in str(i):
writer(_3gram, row)
elif "4-gram" in str(i):
writer(_4gram, row)
elif "5-gram" in str(i):
writer(_5gram, row)
elif "6-gram" in str(i):
writer(_6gram, row)
elif "smc" in row[2]:
writer(smc, row)
elif "uc" in row[2]:
writer(uc, row)
elif "wsp" in row[2]:
writer(wsp, row)
for j in files:
finish_writer(j)
| 43.021505
| 198
| 0.48038
|
import glob
import os
import sys
import csv
csv.field_size_limit(1000000000)
# -*- coding: utf-8 -*-
import codecs
files = []
cvfv = codecs.open("./birth_cvfv.xml","w",'utf-8')
fmc = codecs.open("./birth_fmc.xml","w",'utf-8')
fuc = codecs.open("./birth_fuc.xml","w",'utf-8')
_2gram = codecs.open("./birth_2gram.xml","w",'utf-8')
_3gram = codecs.open("./birth_3gram.xml","w",'utf-8')
_4gram = codecs.open("./birth_4gram.xml","w",'utf-8')
_5gram = codecs.open("./birth_5gram.xml","w",'utf-8')
_6gram = codecs.open("./birth_6gram.xml","w",'utf-8')
smc = codecs.open("./birth_smc.xml","w",'utf-8')
uc = codecs.open("./birth_uc.xml","w",'utf-8')
wsp = codecs.open("./birth_wsp.xml","w",'utf-8')
files = [cvfv, fmc, fuc, _2gram, _3gram, _4gram, _5gram, _6gram, smc, uc, wsp]
def init(filename):
filename.write("<add>\n")
filename.write("<doc>\n")
def writer(filename, row):
if uc == filename:
filename.write("</doc>\n")
filename.write("<doc>\n")
filename.write("<field name=\"filename\">"+unicode(row[0], 'utf-8')+"</field>\n")
filename.write("<field name=\"place\">"+unicode(row[1], 'utf-8')+"</field>\n")
filename.write("<field name=\"barthmark\">"+unicode(row[2], 'utf-8')+"</field>\n")
if len(row[3]) <= 30000:
filename.write("<field name=\"data\">"+row[3].decode('utf-8').replace('<','<').replace(">",'>').replace("&",'&').replace("\"",'"').replace("\'",'&apo s;')+"</field>\n")
else:
filename.write("</doc>\n")
filename.write("<doc>\n")
filename.write("<field name=\"filename\">"+row[0]+"</field>\n")
filename.write("<field name=\"place\">"+row[1]+"</field>\n")
filename.write("<field name=\"barthmark\">"+row[2]+"</field>\n")
if len(row[3]) <= 30000:
filename.write("<field name=\"data\">"+row[3].decode('utf-8').replace('<','<').replace(">",'>').replace("&",'&').replace("\"",'"').replace("\'",'&apo s;')+"</field>\n")
def finish_writer(filename):
filename.write("</doc>\n")
filename.write("</add>\n")
for j in files:
init(j)
tmp = glob.glob("./*.csv")
count = 0
for i in tmp:
reader = open(i).read().split('\n')
if '\0' not in open(i).read():
if reader is not None:
for row in reader:
row = row.split(',',3)
if len(row) >= 4:
                    row[0] = row[0].replace('\n',"").replace("&",'&amp;').replace('<','&lt;').replace(">",'&gt;').replace("\"",'&quot;').replace("\'",'&apos;')
                    row[1] = row[1].replace('\n',"").replace("&",'&amp;').replace('<','&lt;').replace(">",'&gt;').replace("\"",'&quot;').replace("\'",'&apos;')
                    row[2] = row[2].replace('\n',"").replace("&",'&amp;').replace('<','&lt;').replace(">",'&gt;').replace("\"",'&quot;').replace("\'",'&apos;')
                    row[3] = row[3].replace('\n',"").replace("&",'&amp;').replace('<','&lt;').replace(">",'&gt;').replace("\"",'&quot;').replace("\'",'&apos;')
if "cvfv" in row[2]:
writer(cvfv, row)
elif "fmc" in row[2]:
writer(fmc, row)
elif "fuc" in row[2]:
writer(fuc, row)
elif "2-gram" in str(i):
writer(_2gram, row)
elif "3-gram" in str(i):
writer(_3gram, row)
elif "4-gram" in str(i):
writer(_4gram, row)
elif "5-gram" in str(i):
writer(_5gram, row)
elif "6-gram" in str(i):
writer(_6gram, row)
elif "smc" in row[2]:
writer(smc, row)
elif "uc" in row[2]:
writer(uc, row)
elif "wsp" in row[2]:
writer(wsp, row)
for j in files:
finish_writer(j)
| 1,249
| 0
| 69
|
95964d33b38741eebab8192e62f19d04248c6675
| 1,170
|
py
|
Python
|
setup.py
|
AmidBidee/Robot-Arm
|
cfacfc779b2f025846e9748167bcfb15ce207923
|
[
"MIT"
] | 1
|
2022-03-27T20:09:10.000Z
|
2022-03-27T20:09:10.000Z
|
setup.py
|
AmidBidee/Robot-Arm
|
cfacfc779b2f025846e9748167bcfb15ce207923
|
[
"MIT"
] | 4
|
2022-03-25T03:45:10.000Z
|
2022-03-29T14:31:16.000Z
|
setup.py
|
AmidBidee/RobotArm
|
cfacfc779b2f025846e9748167bcfb15ce207923
|
[
"MIT"
] | null | null | null |
import setuptools
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="robotarm", # Replace with your username
version="0.0.4",
author="<Aaron Ahmid Balogun>",
author_email="<amidbidee@gmail.com>",
description="<Template Setup.py package>",
long_description=long_description,
long_description_content_type="text/markdown",
url="<https://github.com/AmidBidee/RobotArm>",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=['gunicorn', 'flask', 'pyyaml', 'python-decouple', 'requests', 'psutil', 'tabulate'],
package_dir={"": "robotarm"},
packages=find_packages(
where='.',
include=['robotarm*'], # ["*"] by default
exclude=['robotarm.tests'], # empty by default
),
python_requires=">=3.6",
py_modules=['arm', 'robotarm', 'controllers', 'handlers', 'armservice'],
entry_points={
'console_scripts': [
            'arm=robotarm.arm:main',
],
},
)
| 21.272727
| 106
| 0.620513
|
import setuptools
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="robotarm", # Replace with your username
version="0.0.4",
author="<Aaron Ahmid Balogun>",
author_email="<amidbidee@gmail.com>",
description="<Template Setup.py package>",
long_description=long_description,
long_description_content_type="text/markdown",
url="<https://github.com/AmidBidee/RobotArm>",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=['gunicorn', 'flask', 'pyyaml', 'python-decouple', 'requests', 'psutil', 'tabulate'],
package_dir={"": "robotarm"},
packages=find_packages(
where='.',
include=['robotarm*'], # ["*"] by default
exclude=['robotarm.tests'], # empty by default
),
python_requires=">=3.6",
py_modules=['arm', 'robotarm', 'controllers', 'handlers', 'armservice'],
entry_points={
'console_scripts': [
            'arm=robotarm.arm:main',
],
},
)
| 0
| 0
| 0
|
51cb20a476045ed9a30f634816c937e0032634fa
| 5,628
|
py
|
Python
|
samples/snippets/publisher_test.py
|
deckikwok/python-pubsub
|
7e02bd8bc87676b0f38c90bc218dd4714f09f3a4
|
[
"Apache-2.0"
] | null | null | null |
samples/snippets/publisher_test.py
|
deckikwok/python-pubsub
|
7e02bd8bc87676b0f38c90bc218dd4714f09f3a4
|
[
"Apache-2.0"
] | null | null | null |
samples/snippets/publisher_test.py
|
deckikwok/python-pubsub
|
7e02bd8bc87676b0f38c90bc218dd4714f09f3a4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import uuid
import backoff
from google.api_core.exceptions import NotFound
from google.cloud import pubsub_v1
import mock
import pytest
import publisher
UUID = uuid.uuid4().hex
PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
TOPIC_ID = "publisher-test-topic-" + UUID
SUBSCRIPTION_ID = "publisher-test-subscription-" + UUID
# Allow 60s for tests to finish.
MAX_TIME = 60
@pytest.fixture(scope="module")
@pytest.fixture(scope="module")
@pytest.fixture(scope="module")
@pytest.fixture(scope="module")
| 29.621053
| 88
| 0.744314
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import uuid
import backoff
from google.api_core.exceptions import NotFound
from google.cloud import pubsub_v1
import mock
import pytest
import publisher
UUID = uuid.uuid4().hex
PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
TOPIC_ID = "publisher-test-topic-" + UUID
SUBSCRIPTION_ID = "publisher-test-subscription-" + UUID
# Allow 60s for tests to finish.
MAX_TIME = 60
@pytest.fixture(scope="module")
def publisher_client():
yield pubsub_v1.PublisherClient()
@pytest.fixture(scope="module")
def subscriber_client():
subscriber_client = pubsub_v1.SubscriberClient()
yield subscriber_client
# Close the subscriber client properly during teardown.
subscriber_client.close()
@pytest.fixture(scope="module")
def topic_path(publisher_client):
topic_path = publisher_client.topic_path(PROJECT_ID, TOPIC_ID)
try:
topic = publisher_client.get_topic(request={"topic": topic_path})
except NotFound:
topic = publisher_client.create_topic(request={"name": topic_path})
yield topic.name
try:
publisher_client.delete_topic(request={"topic": topic.name})
except NotFound:
pass
@pytest.fixture(scope="module")
def subscription_path(subscriber_client, topic_path):
subscription_path = subscriber_client.subscription_path(PROJECT_ID, SUBSCRIPTION_ID)
subscription = subscriber_client.create_subscription(
request={"name": subscription_path, "topic": topic_path}
)
yield subscription.name
try:
subscriber_client.delete_subscription(
request={"subscription": subscription_path}
)
except NotFound:
pass
def _make_sleep_patch():
    # Replace time.sleep so that a 60-second wait inside the code under test
    # becomes a 5-second wait followed by a sentinel RuntimeError, letting a
    # test break out of long polling loops quickly.
    real_sleep = time.sleep
    def new_sleep(period):
        if period == 60:
            real_sleep(5)
            raise RuntimeError("sigil")
        else:
            real_sleep(period)
    return mock.patch("time.sleep", new=new_sleep)
def test_create(publisher_client, capsys):
# The scope of `topic_path` is limited to this function.
topic_path = publisher_client.topic_path(PROJECT_ID, TOPIC_ID)
try:
publisher_client.delete_topic(request={"topic": topic_path})
except NotFound:
pass
publisher.create_topic(PROJECT_ID, TOPIC_ID)
out, _ = capsys.readouterr()
assert f"Created topic: {topic_path}" in out
def test_list(topic_path, capsys):
publisher.list_topics(PROJECT_ID)
out, _ = capsys.readouterr()
assert topic_path in out
def test_publish(topic_path, capsys):
publisher.publish_messages(PROJECT_ID, TOPIC_ID)
out, _ = capsys.readouterr()
assert f"Published messages to {topic_path}." in out
def test_publish_with_custom_attributes(topic_path, capsys):
publisher.publish_messages_with_custom_attributes(PROJECT_ID, TOPIC_ID)
out, _ = capsys.readouterr()
assert f"Published messages with custom attributes to {topic_path}." in out
def test_publish_with_batch_settings(topic_path, capsys):
publisher.publish_messages_with_batch_settings(PROJECT_ID, TOPIC_ID)
out, _ = capsys.readouterr()
assert f"Published messages with batch settings to {topic_path}." in out
def test_publish_with_flow_control_settings(topic_path, capsys):
publisher.publish_messages_with_flow_control_settings(PROJECT_ID, TOPIC_ID)
out, _ = capsys.readouterr()
assert f"Published messages with flow control settings to {topic_path}." in out
def test_publish_with_retry_settings(topic_path, capsys):
publisher.publish_messages_with_retry_settings(PROJECT_ID, TOPIC_ID)
out, _ = capsys.readouterr()
assert f"Published messages with retry settings to {topic_path}." in out
def test_publish_with_error_handler(topic_path, capsys):
publisher.publish_messages_with_error_handler(PROJECT_ID, TOPIC_ID)
out, _ = capsys.readouterr()
assert f"Published messages with error handler to {topic_path}." in out
def test_publish_with_ordering_keys(topic_path, capsys):
publisher.publish_with_ordering_keys(PROJECT_ID, TOPIC_ID)
out, _ = capsys.readouterr()
assert f"Published messages with ordering keys to {topic_path}." in out
def test_resume_publish_with_error_handler(topic_path, capsys):
publisher.resume_publish_with_ordering_keys(PROJECT_ID, TOPIC_ID)
out, _ = capsys.readouterr()
assert f"Resumed publishing messages with ordering keys to {topic_path}." in out
def test_detach_subscription(subscription_path, capsys):
publisher.detach_subscription(PROJECT_ID, SUBSCRIPTION_ID)
out, _ = capsys.readouterr()
assert f"{subscription_path} is detached." in out
def test_delete(publisher_client):
publisher.delete_topic(PROJECT_ID, TOPIC_ID)
@backoff.on_exception(backoff.expo, AssertionError, max_time=MAX_TIME)
def eventually_consistent_test():
with pytest.raises(Exception):
publisher_client.get_topic(
request={"topic": publisher_client.topic_path(PROJECT_ID, TOPIC_ID)}
)
eventually_consistent_test()
| 4,100
| 0
| 387
|
7b2a91c84bc4cb09ca9e0267db05e2ac0ace93e9
| 3,354
|
py
|
Python
|
newsapp/models.py
|
Esther-Anyona/four-one-one
|
6a5e019b35710941a669c1b49e993b683c99d615
|
[
"MIT"
] | null | null | null |
newsapp/models.py
|
Esther-Anyona/four-one-one
|
6a5e019b35710941a669c1b49e993b683c99d615
|
[
"MIT"
] | null | null | null |
newsapp/models.py
|
Esther-Anyona/four-one-one
|
6a5e019b35710941a669c1b49e993b683c99d615
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# CloudinaryImage("turtles.jpg").image(width=70, height=53, crop="scale")
"""
Models
"""
| 34.9375
| 173
| 0.720036
|
from django.db import models
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# CloudinaryImage("turtles.jpg").image(width=70, height=53, crop="scale")
"""
Models
"""
class Neighbourhood(models.Model):
neighbourhood_name = models.CharField(max_length=100)
location = models.CharField(max_length=50)
description = models.TextField()
occupants_count = models.IntegerField(null=True, blank=True)
admin = models.ForeignKey(User, on_delete=models.CASCADE)
neighbourhood_pic = CloudinaryField('hood-pic')
ambulance_contact = models.CharField(max_length=20, null=True, blank=True)
police_contact = models.CharField(max_length=20, null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
def create_neigbourhood(self):
self.save()
def get_neighbourhoods(self):
neighbourhoods = Neighbourhood.objects.all()
return neighbourhoods
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
member_id = models.CharField(max_length=10, blank=True)
profile_pic = CloudinaryField('profile-pic')
bio = models.TextField(blank=True)
neighbourhood_id = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)
email = models.CharField(max_length=100)
date_created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f'{self.user.username} Profile'
def save_profile(self):
super().save()
class Business(models.Model):
name = models.CharField(max_length=200, null=True, blank=True)
description = models.TextField(blank=True)
user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='Business_owner')
neighbourhood_id = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE, related_name='Hood')
business_email = models.CharField(max_length=100)
image = CloudinaryField('business_image', null=True)
date_created = models.DateTimeField(auto_now_add=True, null=True)
date_updated = models.DateTimeField(auto_now_add=True, null=True)
def __str__(self):
        return f'{self.name} Business'
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def find_business(cls,business_id):
business = cls.objects.get(id = business_id)
return business
    def update_business(self, id, name, description, user, neighbourhood_id, biz_email):
        update = Business.objects.filter(id = id).update(name = name, description = description, user = user, neighbourhood_id = neighbourhood_id, business_email = biz_email)
        return update
"""
methods
"""
# create_business()
# delete_business()
# find_business(business_id)
# update_business()
class Post(models.Model):
title = models.CharField(max_length=50, null=True)
post = models.TextField()
date_created = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='Author')
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE, related_name='Hood_post')
def __str__(self):
return f'{self.title} Post'
def save_post(self):
self.save()
def delete_post(self):
self.delete()
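# Hypothetical usage sketch, not part of the app: assumes migrations are
# applied and that a User `user` and its Profile `profile` already exist.
def _usage_sketch(user, profile):
    hood = Neighbourhood.objects.create(
        neighbourhood_name='Runda', location='Nairobi',
        description='Quiet estate', admin=user,
        neighbourhood_pic='hood.jpg')
    return Post.objects.create(
        title='Water outage', post='Water is back on Main Avenue.',
        author=profile, neighbourhood=hood)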
| 701
| 2,349
| 91
|
38bbd6e96323d1dc65e0352cdedb7d3a7f70fe9a
| 321
|
py
|
Python
|
setup.py
|
fiefdx/pytea
|
466f2e15506fc66e402437aa2cfeb8b2c9f19c64
|
[
"MIT"
] | 1
|
2016-09-04T10:24:27.000Z
|
2016-09-04T10:24:27.000Z
|
setup.py
|
fiefdx/pytea
|
466f2e15506fc66e402437aa2cfeb8b2c9f19c64
|
[
"MIT"
] | null | null | null |
setup.py
|
fiefdx/pytea
|
466f2e15506fc66e402437aa2cfeb8b2c9f19c64
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Created on 2014-07-16
@summary: Trellis TEA use pure python
@author: fiefdx
'''
from distutils.core import setup
setup(name='pytea',
version='1.0.2',
author = 'fiefdx',
author_email = 'fiefdx@gmail.com',
package_dir={'pytea': 'src'},
packages=['pytea'],
)
| 20.0625
| 40
| 0.595016
|
# -*- coding: utf-8 -*-
'''
Created on 2014-07-16
@summary: Trellis TEA use pure python
@author: fiefdx
'''
from distutils.core import setup
setup(name='pytea',
version='1.0.2',
author = 'fiefdx',
author_email = 'fiefdx@gmail.com',
package_dir={'pytea': 'src'},
packages=['pytea'],
)
| 0
| 0
| 0
|