| column | dtype | values |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 7 to 1.04M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4 to 247 |
| max_stars_repo_name | stringlengths | 4 to 125 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 78 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 | 1 to 368k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_path | stringlengths | 4 to 247 |
| max_issues_repo_name | stringlengths | 4 to 125 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 78 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_path | stringlengths | 4 to 247 |
| max_forks_repo_name | stringlengths | 4 to 125 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 78 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| content | stringlengths | 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | stringlengths | 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
hexsha: 500e861a5d9736a4f8277adb38a02df2b39dbf6e | size: 1,031 | ext: py | lang: Python
max_stars: addons/finetune_style.py @ geoffrey0822/multilevellabel_NN (faec3303dac2376d6e8a761632aca31bc3868413) | licenses: ["Apache-2.0"] | count: null | events: null / null
max_issues: addons/finetune_style.py @ geoffrey0822/multilevellabel_NN (faec3303dac2376d6e8a761632aca31bc3868413) | licenses: ["Apache-2.0"] | count: null | events: null / null
max_forks: addons/finetune_style.py @ geoffrey0822/multilevellabel_NN (faec3303dac2376d6e8a761632aca31bc3868413) | licenses: ["Apache-2.0"] | count: null | events: null / null
content:
import os,sys
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
prototxt_file=sys.argv[1]
to_prototxt_file=sys.argv[2]
hold_layers=sys.argv[3].split(',')
model=caffe_pb2.NetParameter()
with open(prototxt_file,'r') as f:
    txtf.Merge(f.read(),model)
    f.close()
active_layers={}
i=0
for layer in model.layer:
    if len(layer.param)>0:
        isActive=False
        for param in layer.param:
            isActive=param.lr_mult>0 or param.decay_mult>0
            if isActive:
                break
        active_layers[layer.name]=i
    i+=1
print active_layers
print model.layer[active_layers['cls']]
for lname in active_layers.keys():
    if lname in hold_layers:
        continue
    nParam=len(model.layer[active_layers[lname]].param)
    for n in range(nParam):
        model.layer[active_layers[lname]].param[n].lr_mult=0
        model.layer[active_layers[lname]].param[n].decay_mult=0
with open(to_prototxt_file,'w') as f:
    f.write(str(model))
    f.close()
print 'finished'
avg_line_length: 25.146341 | max_line_length: 63 | alphanum_fraction: 0.675073
original_content:
import os,sys
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
prototxt_file=sys.argv[1]
to_prototxt_file=sys.argv[2]
hold_layers=sys.argv[3].split(',')
model=caffe_pb2.NetParameter()
with open(prototxt_file,'r') as f:
    txtf.Merge(f.read(),model)
    f.close()
active_layers={}
i=0
for layer in model.layer:
    if len(layer.param)>0:
        isActive=False
        for param in layer.param:
            isActive=param.lr_mult>0 or param.decay_mult>0
            if isActive:
                break
        active_layers[layer.name]=i
    i+=1
print active_layers
print model.layer[active_layers['cls']]
for lname in active_layers.keys():
    if lname in hold_layers:
        continue
    nParam=len(model.layer[active_layers[lname]].param)
    for n in range(nParam):
        model.layer[active_layers[lname]].param[n].lr_mult=0
        model.layer[active_layers[lname]].param[n].decay_mult=0
with open(to_prototxt_file,'w') as f:
    f.write(str(model))
    f.close()
print 'finished'
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: d4a79cd608d45cd244b8fc0f37ecdbd2b9b20e1d | size: 6,134 | ext: py | lang: Python
max_stars: docarray/document/mixins/porting.py @ qdrant/docarray (acc76ca31e3344cc5dd7e42e6627951b91845ba3) | licenses: ["Apache-2.0"] | count: 591 | events: 2022-01-09T14:39:59.000Z / 2022-03-31T13:19:39.000Z
max_issues: docarray/document/mixins/porting.py @ qdrant/docarray (acc76ca31e3344cc5dd7e42e6627951b91845ba3) | licenses: ["Apache-2.0"] | count: 210 | events: 2022-01-10T07:59:29.000Z / 2022-03-31T14:49:18.000Z
max_forks: docarray/document/mixins/porting.py @ qdrant/docarray (acc76ca31e3344cc5dd7e42e6627951b91845ba3) | licenses: ["Apache-2.0"] | count: 40 | events: 2022-01-09T14:52:20.000Z / 2022-03-31T07:59:45.000Z
content:
import base64
import json
import pickle
from typing import Optional, TYPE_CHECKING, Type, Dict, Any, Union
from ...helper import compress_bytes, decompress_bytes
if TYPE_CHECKING:
    from ...typing import T
avg_line_length: 34.655367 | max_line_length: 104 | alphanum_fraction: 0.604989
original_content:
import base64
import json
import pickle
from typing import Optional, TYPE_CHECKING, Type, Dict, Any, Union
from ...helper import compress_bytes, decompress_bytes
if TYPE_CHECKING:
from ...typing import T
class PortingMixin:
@classmethod
def from_dict(
cls: Type['T'], obj: Dict, protocol: str = 'jsonschema', **kwargs
) -> 'T':
"""Convert a dict object into a Document.
:param obj: a Python dict object
:param protocol: `jsonschema` or `protobuf`
:param kwargs: extra key-value args pass to pydantic and protobuf parser.
:return: the parsed Document object
"""
if protocol == 'jsonschema':
from ..pydantic_model import PydanticDocument
return cls.from_pydantic_model(PydanticDocument.parse_obj(obj, **kwargs))
elif protocol == 'protobuf':
from google.protobuf import json_format
from ...proto.docarray_pb2 import DocumentProto
pb_msg = DocumentProto()
json_format.ParseDict(obj, pb_msg, **kwargs)
return cls.from_protobuf(pb_msg)
else:
return cls(obj)
@classmethod
def from_json(
cls: Type['T'],
obj: Union[str, bytes, bytearray],
protocol: str = 'jsonschema',
**kwargs,
) -> 'T':
"""Convert a JSON string into a Document.
:param obj: a valid JSON string
:param protocol: `jsonschema` or `protobuf`
:param kwargs: extra key-value args pass to pydantic and protobuf parser.
:return: the parsed Document object
"""
if protocol == 'jsonschema':
from ..pydantic_model import PydanticDocument
return cls.from_pydantic_model(PydanticDocument.parse_raw(obj, **kwargs))
elif protocol == 'protobuf':
from google.protobuf import json_format
from ...proto.docarray_pb2 import DocumentProto
pb_msg = DocumentProto()
json_format.Parse(obj, pb_msg, **kwargs)
return cls.from_protobuf(pb_msg)
else:
return cls.from_dict(json.loads(obj), protocol=protocol)
def to_dict(self, protocol: str = 'jsonschema', **kwargs) -> Dict[str, Any]:
"""Convert itself into a Python dict object.
:param protocol: `jsonschema` or `protobuf`
:param kwargs: extra key-value args pass to pydantic and protobuf dumper.
:return: the dumped Document as a dict object
"""
if protocol == 'jsonschema':
return self.to_pydantic_model().dict(**kwargs)
elif protocol == 'protobuf':
from google.protobuf.json_format import MessageToDict
return MessageToDict(
self.to_protobuf(),
**kwargs,
)
else:
raise ValueError(f'protocol=`{protocol}` is not supported')
def to_bytes(
self, protocol: str = 'pickle', compress: Optional[str] = None
) -> bytes:
if protocol == 'pickle':
bstr = pickle.dumps(self)
elif protocol == 'protobuf':
bstr = self.to_protobuf().SerializePartialToString()
else:
raise ValueError(
f'protocol={protocol} is not supported. Can be only `protobuf` or pickle protocols 0-5.'
)
return compress_bytes(bstr, algorithm=compress)
@classmethod
def from_bytes(
cls: Type['T'],
data: bytes,
protocol: str = 'pickle',
compress: Optional[str] = None,
) -> 'T':
"""Build Document object from binary bytes
:param data: binary bytes
:param protocol: protocol to use
:param compress: compress method to use
:return: a Document object
"""
bstr = decompress_bytes(data, algorithm=compress)
if protocol == 'pickle':
return pickle.loads(bstr)
elif protocol == 'protobuf':
from ...proto.docarray_pb2 import DocumentProto
pb_msg = DocumentProto()
pb_msg.ParseFromString(bstr)
return cls.from_protobuf(pb_msg)
else:
raise ValueError(
f'protocol={protocol} is not supported. Can be only `protobuf` or pickle protocols 0-5.'
)
def to_json(self, protocol: str = 'jsonschema', **kwargs) -> str:
"""Convert itself into a JSON string.
:param protocol: `jsonschema` or `protobuf`
:param kwargs: extra key-value args pass to pydantic and protobuf dumper.
:return: the dumped JSON string
"""
if protocol == 'jsonschema':
return self.to_pydantic_model().json(**kwargs)
elif protocol == 'protobuf':
from google.protobuf.json_format import MessageToJson
return MessageToJson(self.to_protobuf(), **kwargs)
else:
raise ValueError(f'protocol={protocol} is not supported.')
def to_base64(
self, protocol: str = 'pickle', compress: Optional[str] = None
) -> str:
"""Serialize a Document object into as base64 string
:param protocol: protocol to use
:param compress: compress method to use
:return: a base64 encoded string
"""
return base64.b64encode(self.to_bytes(protocol, compress)).decode('utf-8')
@classmethod
def from_base64(
cls: Type['T'],
data: str,
protocol: str = 'pickle',
compress: Optional[str] = None,
) -> 'T':
"""Build Document object from binary bytes
:param data: a base64 encoded string
:param protocol: protocol to use
:param compress: compress method to use
:return: a Document object
"""
return cls.from_bytes(base64.b64decode(data), protocol, compress)
def _to_stream_bytes(self, protocol, compress) -> bytes:
# 4 bytes (uint32)
doc_as_bytes = self.to_bytes(protocol=protocol, compress=compress)
# variable size bytes
len_doc_as_bytes = len(doc_as_bytes).to_bytes(4, 'big', signed=False)
return len_doc_as_bytes + doc_as_bytes
filtered:remove_function_no_docstring: 765 | filtered:remove_class_no_docstring: 5,135 | filtered:remove_delete_markers: 23

hexsha: 45f6e0cdfb3d2b0b20d8ce80caa54171ae3cfc46 | size: 195 | ext: py | lang: Python
max_stars: tableau_rest_api/methods/metrics.py @ Radabaugh/tableau_tools (8630ec4c5614ace8b08246f83dc80c330a8045b4) | licenses: ["MIT"] | count: 179 | events: 2016-07-06T19:34:39.000Z / 2021-03-16T02:30:27.000Z
max_issues: tableau_rest_api/methods/metrics.py @ Radabaugh/tableau_tools (8630ec4c5614ace8b08246f83dc80c330a8045b4) | licenses: ["MIT"] | count: 75 | events: 2016-07-28T16:17:44.000Z / 2021-03-29T12:22:32.000Z
max_forks: tableau_rest_api/methods/metrics.py @ chip-felton-montage/tableau_tools (60881a99a32c7d9e03afbb0e1161deefaa854904) | licenses: ["MIT"] | count: 83 | events: 2016-08-25T10:32:26.000Z / 2021-02-17T18:31:14.000Z
content:
from .rest_api_base import *
# First Metrics Methods appear in API 3.9
avg_line_length: 21.666667 | max_line_length: 60 | alphanum_fraction: 0.74359
original_content:
from .rest_api_base import *
# First Metrics Methods appear in API 3.9
class MetricsMethods39():
    def __init__(self, rest_api_base: TableauRestApiBase38):
        self.rest = rest_api_base
filtered:remove_function_no_docstring: 69 | filtered:remove_class_no_docstring: 4 | filtered:remove_delete_markers: 48

hexsha: d4dfe468b447f835214af93d10dfa13057a2239c | size: 458 | ext: py | lang: Python
max_stars: tests/factories/pipeline/pipeline.py @ TheLabbingProject/django_analyses (08cac40a32754a265b37524f08ec6160c69ebea8) | licenses: ["Apache-2.0"] | count: 1 | events: 2020-12-30T12:43:34.000Z / 2020-12-30T12:43:34.000Z
max_issues: tests/factories/pipeline/pipeline.py @ TheLabbingProject/django_analyses (08cac40a32754a265b37524f08ec6160c69ebea8) | licenses: ["Apache-2.0"] | count: 59 | events: 2019-12-25T13:14:56.000Z / 2021-07-22T12:24:46.000Z
max_forks: tests/factories/pipeline/pipeline.py @ TheLabbingProject/django_analyses (08cac40a32754a265b37524f08ec6160c69ebea8) | licenses: ["Apache-2.0"] | count: 2 | events: 2020-05-24T06:44:27.000Z / 2020-07-09T15:47:31.000Z
content:
from factory import Faker
from factory.django import DjangoModelFactory
avg_line_length: 35.230769 | max_line_length: 80 | alphanum_fraction: 0.733624
original_content:
from factory import Faker
from factory.django import DjangoModelFactory
class PipelineFactory(DjangoModelFactory):
    title = Faker("pystr", min_chars=3, max_chars=18)
    description = Faker("paragraph", nb_sentences=3, variable_nb_sentences=True)
    created = Faker("date_between", start_date="-30d", end_date="-15d")
    modified = Faker("date_between", start_date="-14d", end_date="today")
    class Meta:
        model = "django_analyses.Pipeline"
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 362 | filtered:remove_delete_markers: 23

hexsha: 916322b1e03ef17f05188b14b907317d24dead00 | size: 216 | ext: py | lang: Python
max_stars: src/year2021/day16b.py @ lancelote/advent_of_code (06dda6ca034bc1e86addee7798bb9b2a34ff565b) | licenses: ["Unlicense"] | count: 10 | events: 2017-12-11T17:54:52.000Z / 2021-12-09T20:16:30.000Z
max_issues: src/year2021/day16b.py @ lancelote/advent_of_code (06dda6ca034bc1e86addee7798bb9b2a34ff565b) | licenses: ["Unlicense"] | count: 260 | events: 2015-12-09T11:03:03.000Z / 2021-12-12T14:32:23.000Z
max_forks: src/year2021/day16b.py @ lancelote/advent_of_code (06dda6ca034bc1e86addee7798bb9b2a34ff565b) | licenses: ["Unlicense"] | count: null | events: null / null
content:
"""2021 - Day 16 Part 2: Packet Decoder."""
from src.year2021.day16a import BITS
avg_line_length: 24 | max_line_length: 43 | alphanum_fraction: 0.689815
original_content:
"""2021 - Day 16 Part 2: Packet Decoder."""
from src.year2021.day16a import BITS
def solve(task: str) -> int:
    system = BITS.from_hex(task)
    top_packet = system.read_packet()
    return top_packet.evaluate()
filtered:remove_function_no_docstring: 111 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 23

hexsha: e8017922857077673165e16e7b2111a57d751eea | size: 93 | ext: py | lang: Python
max_stars: Chapter 05/Praktikum-2/1-n.py @ icaksh/Python-Projects-Protek (dfd56ea5afc637a8850911a9296131652de383c5) | licenses: ["MIT"] | count: null | events: null / null
max_issues: Chapter 05/Praktikum-2/1-n.py @ icaksh/Python-Projects-Protek (dfd56ea5afc637a8850911a9296131652de383c5) | licenses: ["MIT"] | count: null | events: null / null
max_forks: Chapter 05/Praktikum-2/1-n.py @ icaksh/Python-Projects-Protek (dfd56ea5afc637a8850911a9296131652de383c5) | licenses: ["MIT"] | count: null | events: null / null
content:
banyakPerulangan = 10
i = 0
while (i < banyakPerulangan):
    print('Hello World')
    i += 1
avg_line_length: 18.6 | max_line_length: 29 | alphanum_fraction: 0.634409
original_content:
banyakPerulangan = 10
i = 0
while (i < banyakPerulangan):
    print('Hello World')
    i += 1
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: 251bbbc73707a4d8bb70aeffb163ed0511e6ba61 | size: 660 | ext: py | lang: Python
max_stars: matroska_cache/dep/tag.py @ kolypto/py-matroska-cache (b40030f97d463aac8e3a6f4b0e0e9f081dfc92b1) | licenses: ["MIT"] | count: null | events: null / null
max_issues: matroska_cache/dep/tag.py @ kolypto/py-matroska-cache (b40030f97d463aac8e3a6f4b0e0e9f081dfc92b1) | licenses: ["MIT"] | count: null | events: null / null
max_forks: matroska_cache/dep/tag.py @ kolypto/py-matroska-cache (b40030f97d463aac8e3a6f4b0e0e9f081dfc92b1) | licenses: ["MIT"] | count: null | events: null / null
content:
from .base import DependencyBase, dataclass
@dataclass
class Tag(DependencyBase):
""" Dependency on an arbitrary tag
Usage:
use this tag as a named signal to invalidate records
Example:
update_dashboard_for_admins = Tag('update-dashboard-for-admins')
cache.put(
'articles-list',
[...],
dependencies=[
update_dashboard_for_admins,
...
]
)
cache.invalidate(update_dashboard_for_admins)
"""
name: str
__slots__ = 'name',
PREFIX = 'tag'
avg_line_length: 20 | max_line_length: 72 | alphanum_fraction: 0.562121
original_content:
from .base import DependencyBase, dataclass
@dataclass
class Tag(DependencyBase):
""" Dependency on an arbitrary tag
Usage:
use this tag as a named signal to invalidate records
Example:
update_dashboard_for_admins = Tag('update-dashboard-for-admins')
cache.put(
'articles-list',
[...],
dependencies=[
update_dashboard_for_admins,
...
]
)
cache.invalidate(update_dashboard_for_admins)
"""
name: str
__slots__ = 'name',
PREFIX = 'tag'
def key(self) -> str:
return f'{self.PREFIX}:{self.name}'
filtered:remove_function_no_docstring: 44 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 27

hexsha: 4e3ce9f763c7ac23b63142ca0b09def2613ab552 | size: 3,644 | ext: py | lang: Python
max_stars: exam_organization/models.py @ andaeh/django-exam-organization-tool (fd269402cb74e4028f3aa65978da5a47e4d2f7ae) | licenses: ["Apache-2.0"] | count: null | events: null / null
max_issues: exam_organization/models.py @ andaeh/django-exam-organization-tool (fd269402cb74e4028f3aa65978da5a47e4d2f7ae) | licenses: ["Apache-2.0"] | count: 2 | events: 2021-09-11T13:42:08.000Z / 2022-03-12T01:01:37.000Z
max_forks: exam_organization/models.py @ andaeh/django-exam-organization-tool (fd269402cb74e4028f3aa65978da5a47e4d2f7ae) | licenses: ["Apache-2.0"] | count: null | events: null / null
content:
import os
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.dispatch import receiver
from django.db.models.signals import post_delete, post_save
from django.conf import settings
from django.core.validators import int_list_validator
from taggit.managers import TaggableManager
# Create your models here.
@receiver(post_save, sender=Task)
@receiver(post_delete, sender=Task)
avg_line_length: 32.828829 | max_line_length: 156 | alphanum_fraction: 0.693743
original_content:
import os
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.dispatch import receiver
from django.db.models.signals import post_delete, post_save
from django.conf import settings
from django.core.validators import int_list_validator
from taggit.managers import TaggableManager
# Create your models here.
class Faculty(models.Model):
short_name = models.CharField(max_length=10)
name = models.CharField(max_length=100)
class Meta:
verbose_name = "Ausbildungsrichtung"
verbose_name_plural = "Ausbildungsrichtungen"
def get_absolute_url(self):
return reverse('exam_organization:overview', kwargs={'model': self._meta.verbose_name_raw})
def __str__(self):
return self.name
class Grade(models.Model):
name = models.CharField(max_length=20)
class Meta:
verbose_name = "Jahrgangsstufe"
verbose_name_plural = "Jahrgangsstufen"
def get_absolute_url(self):
return reverse('exam_organization:overview', kwargs={'model': self._meta.verbose_name_raw})
def __str__(self):
return self.name
class Topic(models.Model):
short_name = models.CharField(max_length=10)
grade = models.ForeignKey(
Grade, on_delete=models.SET_NULL, null=True)
faculty = models.ForeignKey(
Faculty, on_delete=models.SET_NULL, null=True)
description = models.CharField(max_length=200)
class Meta:
verbose_name = "Lernbereich"
verbose_name_plural = "Lernbereiche"
def __str__(self):
return self.faculty.short_name + self.grade.name + ": " + self.description
class Task(models.Model):
headline = models.CharField(max_length=100, verbose_name="Überschrift")
topic = models.ManyToManyField(Topic, verbose_name="Lernbereiche", blank=True)
description = models.CharField(max_length=500, verbose_name="Beschreibung")
total_BE = models.CharField(
max_length=50, blank=True, validators=[int_list_validator], help_text="Pro Teilaufgabe, getrennt durch Kommata", verbose_name="Bewertungseinheiten")
task_text = models.TextField(null=True, blank=True, verbose_name="Aufgabentext")
slug = models.SlugField(unique=True, max_length=100, editable=True)
tags = TaggableManager()
created_by = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, related_name="created_by", editable=False)
edited_by = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, related_name="edited_by", editable=False)
class Meta:
verbose_name = "Aufgabe"
verbose_name_plural = "Aufgaben"
def get_absolute_url(self):
return reverse('exam_organization:overview', kwargs={'model': self._meta.verbose_name_raw})
def __str__(self):
return self.headline
@receiver(post_save, sender=Task)
def delete_pdf(sender, instance, *args, **kwargs):
try:
file = os.path.join(settings.MEDIA_ROOT, 'pdf',
str(instance.id) + '.pdf')
os.remove(file)
except:
pass
@receiver(post_delete, sender=Task)
def delete_pdf(sender, instance, *args, **kwargs):
try:
file = os.path.join(settings.MEDIA_ROOT, 'pdf',
str(instance.id) + '.pdf')
os.remove(file)
except:
pass
try:
images = os.listdir(os.path.join(settings.MEDIA_ROOT, 'latex'))
task_images = [f for f in images if f.startswith(str(instance.id))]
for image in task_images:
os.remove(os.path.join(settings.MEDIA_ROOT, 'latex', image))
except:
pass
filtered:remove_function_no_docstring: 1,154 | filtered:remove_class_no_docstring: 1,905 | filtered:remove_delete_markers: 136

hexsha: b4825201505425ba7ab5d67d7de5b45c05e857f4 | size: 432 | ext: py | lang: Python
max_stars: tests/test_utils.py @ suzukey/fitrate (b78625fc6479210b63df93a854d687641b080621) | licenses: ["MIT"] | count: null | events: null / null
max_issues: tests/test_utils.py @ suzukey/fitrate (b78625fc6479210b63df93a854d687641b080621) | licenses: ["MIT"] | count: 1 | events: 2020-09-14T22:42:40.000Z / 2020-09-14T22:42:40.000Z
max_forks: tests/test_utils.py @ suzukey/fitrate (b78625fc6479210b63df93a854d687641b080621) | licenses: ["MIT"] | count: null | events: null / null
content:
from fitrate.utils import gcd, nthrt, prod
avg_line_length: 21.6 | max_line_length: 42 | alphanum_fraction: 0.548611
original_content:
from fitrate.utils import gcd, nthrt, prod
def test_gcd() -> None:
    assert gcd(*(300, 400)) == 100
    assert gcd(*[200, 400, 1000]) == 200
    assert gcd(27, 123, 57, 255) == 3
def test_nthrt() -> None:
    assert nthrt(25, 2) == 5
    assert nthrt(27, 3) == 3
    assert nthrt(16, 4) == 2
def test_prod() -> None:
    assert prod(*[5, 10]) == 50
    assert prod(*(3.5, 4.5, 5)) == 78.75
    assert prod(3, 5, 2, 2) == 60
filtered:remove_function_no_docstring: 317 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 69

hexsha: ccc8ecdd8c0db0c16f54d60be75aad92f2e60bc2 | size: 375 | ext: py | lang: Python
max_stars: modulector/migrations/0010_auto_20201022_0445.py @ omics-datascience/modulector (357d8f6f8eab5d04b2357a08d177d75cbdad001a) | licenses: ["MIT"] | count: 2 | events: 2021-07-10T20:45:58.000Z / 2021-08-18T02:24:58.000Z
max_issues: modulector/migrations/0010_auto_20201022_0445.py @ omics-datascience/modulector (357d8f6f8eab5d04b2357a08d177d75cbdad001a) | licenses: ["MIT"] | count: 3 | events: 2021-11-07T23:18:58.000Z / 2021-11-22T23:17:35.000Z
max_forks: modulector/migrations/0010_auto_20201022_0445.py @ omics-datascience/modulector (357d8f6f8eab5d04b2357a08d177d75cbdad001a) | licenses: ["MIT"] | count: null | events: null / null
content:
# Generated by Django 3.0.8 on 2020-10-22 04:45
from django.db import migrations
avg_line_length: 19.736842 | max_line_length: 50 | alphanum_fraction: 0.597333
original_content:
# Generated by Django 3.0.8 on 2020-10-22 04:45
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('modulector', '0009_mirnadrugs_support'),
    ]
    operations = [
        migrations.RenameField(
            model_name='mirnadrugs',
            old_name='mirna',
            new_name='mature_mirna',
        ),
    ]
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 269 | filtered:remove_delete_markers: 23

hexsha: 8ae50b2a412cd79582d436e2ec46bea671d5277c | size: 468 | ext: py | lang: Python
max_stars: MicroPythonADC/adcloop.py @ Roger-random/ESP8266Tests (a1938083879042f53a87f5a8d2ac656a62f77281) | licenses: ["MIT"] | count: null | events: null / null
max_issues: MicroPythonADC/adcloop.py @ Roger-random/ESP8266Tests (a1938083879042f53a87f5a8d2ac656a62f77281) | licenses: ["MIT"] | count: null | events: null / null
max_forks: MicroPythonADC/adcloop.py @ Roger-random/ESP8266Tests (a1938083879042f53a87f5a8d2ac656a62f77281) | licenses: ["MIT"] | count: null | events: null / null
content:
import machine
import time
avg_line_length: 23.4 | max_line_length: 130 | alphanum_fraction: 0.570513
original_content:
import machine
import time
class adcloop:
    def __init__(self):
        self.min = 1025
        self.max = 0
        self.vals = list()
    def run(self):
        while True:
            val = machine.ADC(0).read()
            if self.max < val:
                self.max = val
            if self.min > val:
                self.min = val
            self.vals.append(val)
            print("Latest: {:4d} Max: {:4d} Min: {:4d} Average: {:4.2f}".format(val, self.max, self.min, sum(self.vals)/len(self.vals)))
            time.sleep(1)
filtered:remove_function_no_docstring: 376 | filtered:remove_class_no_docstring: -7 | filtered:remove_delete_markers: 72

hexsha: 5412a958b9304c6f8be80eb4facd2bfcfa635905 | size: 648 | ext: py | lang: Python
max_stars: utils/train.py @ csyhhu/attention-is-all-you-need-pytorch (5792c9714295b1a33d1ca074206ec223f436b954) | licenses: ["MIT"] | count: 1 | events: 2020-10-01T23:57:16.000Z / 2020-10-01T23:57:16.000Z
max_issues: utils/train.py @ csyhhu/attention-is-all-you-need-pytorch (5792c9714295b1a33d1ca074206ec223f436b954) | licenses: ["MIT"] | count: null | events: null / null
max_forks: utils/train.py @ csyhhu/attention-is-all-you-need-pytorch (5792c9714295b1a33d1ca074206ec223f436b954) | licenses: ["MIT"] | count: null | events: null / null
content:
import torch
import torch.nn as nn
from utils.miscellaneous import progress_bar
avg_line_length: 21.6 | max_line_length: 65 | alphanum_fraction: 0.589506
original_content:
import torch
import torch.nn as nn
from utils.miscellaneous import progress_bar
def evaluate(model, iterator, criterion):
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for batch_idx, batch in enumerate(iterator):
            src = batch.src
            trg = batch.trg
            output = model(src, trg, 0) #turn off teacher forcing
            output = output[1:].view(-1, output.shape[-1])
            trg = trg[1:].view(-1)
            loss = criterion(output, trg)
            epoch_loss += loss.item()
            progress_bar(batch_idx, len(iterator), 'Testing...')
    return epoch_loss / len(iterator)
filtered:remove_function_no_docstring: 545 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 23

hexsha: 6de1e62c4a84fc603defbbf1a354ab8b75173011 | size: 6,558 | ext: py | lang: Python
max_stars: odin/classes/visualizer_localization.py @ rnt-pmi/odin (8cfddf04f964393ef30217aa5f4aa61229d7e811) | licenses: ["Apache-2.0"] | count: 4 | events: 2021-01-09T10:46:31.000Z / 2021-12-16T14:38:06.000Z
max_issues: odin/classes/visualizer_localization.py @ rnt-pmi/odin (8cfddf04f964393ef30217aa5f4aa61229d7e811) | licenses: ["Apache-2.0"] | count: null | events: null / null
max_forks: odin/classes/visualizer_localization.py @ rnt-pmi/odin (8cfddf04f964393ef30217aa5f4aa61229d7e811) | licenses: ["Apache-2.0"] | count: 3 | events: 2021-01-09T10:46:15.000Z / 2021-05-11T01:33:30.000Z
content:
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon
from .dataset_localization import DatasetLocalization
from odin.utils import Iterator
from .visulizer_interface import VisualizerInterface
from odin.classes import strings as labels_str
avg_line_length: 47.868613 | max_line_length: 199 | alphanum_fraction: 0.582342
original_content:
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon
from .dataset_localization import DatasetLocalization
from odin.utils import Iterator
from .visulizer_interface import VisualizerInterface
from odin.classes import strings as labels_str
class VisualizerLocalization(VisualizerInterface):
def __init__(self, dataset: DatasetLocalization):
self.dataset = dataset
self.__colors = {}
category_ids = []
for c in self.dataset.get_categories_names():
self.__colors[c] = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
category_ids.append(self.dataset.get_category_id_from_name(c))
def __show_image(self, image_path, index):
im_id = self.__current_images[index]["id"]
print("Image with id:{}".format(im_id))
if not os.path.exists(image_path):
print("Image path does not exist: " + image_path )
else:
plt.figure(figsize=(10, 10))
img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
plt.imshow(img)
if self.__current_category is None:
annIds = self.dataset.coco_lib.getAnnIds(imgIds=[im_id])
elif type(self.__current_category) is int:
annIds = self.dataset.coco_lib.getAnnIds(imgIds=[im_id], catIds=[self.__current_category])
else:
annIds = self.dataset.coco_lib.getAnnIds(imgIds=[im_id], catIds=self.__current_category)
if self.__current_meta_anotation != None and self.__meta_annotation_value != None:
anns = [ann for ann in self.dataset.coco_lib.loadAnns(annIds) if
ann[self.__current_meta_anotation] == self.__meta_annotation_value]
else:
anns = [ann for ann in self.dataset.coco_lib.loadAnns(annIds)]
if len(anns) == 0:
plt.show()
return 0
if self.dataset.is_segmentation and 'segmentation' in anns[0]:
# TODO: move to another function
ax = plt.gca()
for ann in anns:
cat = self.dataset.get_category_name_from_id(ann['category_id'])
color = self.__colors[cat]
seg_points = ann["segmentation"]
for pol in seg_points:
poly = [[float(pol[i]), float(pol[i+1])] for i in range(0, len(pol), 2)]
np_poly = np.array(poly)
ax.add_patch(
Polygon(np_poly, linestyle='--', fill=False, facecolor='none', edgecolor=color, linewidth=2))
ax.text(x=seg_points[0][0], y=seg_points[0][1], s=ann['category_id'], color='white', fontsize=9, horizontalalignment='left',verticalalignment='top',bbox=dict(facecolor=color))
plt.imshow(img)
plt.axis('off')
plt.show()
else:
# TODO: move to another function
ax = plt.gca()
for ann in anns:
cat = self.dataset.get_category_name_from_id(ann['category_id'])
color = self.__colors[cat]
bbox_x, bbox_y, bbox_w, bbox_h = ann['bbox']
poly = [[bbox_x, bbox_y], [bbox_x, bbox_y + bbox_h], [bbox_x + bbox_w, bbox_y + bbox_h],
[bbox_x + bbox_w, bbox_y]]
np_poly = np.array(poly).reshape((4, 2))
ax.add_patch(Polygon(np_poly, linestyle='--', facecolor='none', edgecolor=color, linewidth=3))
ax.text(x=bbox_x, y=bbox_y, s=ann['category_id'], color='white', fontsize=9, horizontalalignment='left',verticalalignment='top',bbox=dict(facecolor=color))
plt.axis('off')
plt.show()
def visualize_annotations(self, categories=None):
categories_ds = self.dataset.get_categories_names()
if categories is None:
categories = categories_ds
images = self.dataset.get_images_id_with_path()
else:
images = []
for cat in categories:
if cat in categories_ds:
ii = self.dataset.get_images_id_with_path_for_category(cat)
images.extend(ii)
else:
print(labels_str.warn_incorrect_class)
category_ids = [self.dataset.get_category_id_from_name(c) for c in categories]
self.__start_iterator( images, category=category_ids)
def visualize_annotations_for_property(self, meta_annotation, meta_annotation_value):
images = self.dataset.get_images_id_with_path_with_property_value(meta_annotation,
meta_annotation_value)
self.__start_iterator(images, meta_annotation= meta_annotation, meta_annotation_value=meta_annotation_value)
def visualize_annotations_for_class_for_property(self, category, meta_annotation, meta_annotation_value):
if self.dataset.is_valid_category(category):
images = self.dataset.get_images_id_with_path_for_category_with_property_value(category, meta_annotation,
meta_annotation_value)
category_id = self.dataset.get_category_id_from_name(category)
self.__start_iterator(images, category=category_id, meta_annotation=meta_annotation, meta_annotation_value=meta_annotation_value)
else:
print(labels_str.warn_incorrect_class)
def __start_iterator(self, images, category=None, meta_annotation=None, meta_annotation_value=None):
self.__current_category = category
self.__current_images = images
self.__current_meta_anotation = meta_annotation
self.__meta_annotation_value = meta_annotation_value
paths = [img["path"] for img in images]
if len(paths) == 0:
print(labels_str.warn_no_images_criteria)
else:
iterator = Iterator(paths, show_name=False, image_display_function=self.__show_image)
iterator.start_iteration()
filtered:remove_function_no_docstring: 6,017 | filtered:remove_class_no_docstring: 29 | filtered:remove_delete_markers: 209

hexsha: acc7052fa3ee7bfdc9aa6aafe4fb59c11afa86c5 | size: 533 | ext: py | lang: Python
max_stars: NoteBooks/Curso de Python/Python/Data_Managment/Compresion de archivos/Formato_bz2.py @ Alejandro-sin/Learning_Notebooks (161d6bed4c7b1d171b45f61c0cc6fa91e9894aad) | licenses: ["MIT"] | count: 1 | events: 2021-02-26T13:12:22.000Z / 2021-02-26T13:12:22.000Z
max_issues: NoteBooks/Curso de Python/Python/Data_Managment/Compresion de archivos/Formato_bz2.py @ Alejandro-sin/Learning_Notebooks (161d6bed4c7b1d171b45f61c0cc6fa91e9894aad) | licenses: ["MIT"] | count: null | events: null / null
max_forks: NoteBooks/Curso de Python/Python/Data_Managment/Compresion de archivos/Formato_bz2.py @ Alejandro-sin/Learning_Notebooks (161d6bed4c7b1d171b45f61c0cc6fa91e9894aad) | licenses: ["MIT"] | count: null | events: null / null
content:
'''
Es un méotod de compresión muy sencillo.
Bonzip
'''
import bz2
#Cadena binaria
cadena = b'Este es eun ejemplo del formato de compresion Bonzip, sirve para comprimir TODO.'
cadena_comprimida = bz2.compress(cadena)
print(cadena_comprimida)#b"BZh91AY&SY\xbf\xb8:\xfa\x00\x00\x06\x15\x80@\x04\x08\x00.\xa3\xde@ \x00Hj\x9f\xaayL\xd3'\xa8 \xd4\xd3\xd2i\xa0hb)\xd3Aw\x8d2\x12\x8c\xf8\xc6\xa2q\xc2\x15\xf8-\x1a\xad\x85B\xc6`+\xe1\xfdr>\t\x12.\xe4\x8ap\xa1!\x7fpu\xf4"
#para descromprimir
print(bz2.decompress(cadena_comprimida))
avg_line_length: 26.65 | max_line_length: 246 | alphanum_fraction: 0.752345
original_content:
'''
Es un méotod de compresión muy sencillo.
Bonzip
'''
import bz2
#Cadena binaria
cadena = b'Este es eun ejemplo del formato de compresion Bonzip, sirve para comprimir TODO.'
cadena_comprimida = bz2.compress(cadena)
print(cadena_comprimida)#b"BZh91AY&SY\xbf\xb8:\xfa\x00\x00\x06\x15\x80@\x04\x08\x00.\xa3\xde@ \x00Hj\x9f\xaayL\xd3'\xa8 \xd4\xd3\xd2i\xa0hb)\xd3Aw\x8d2\x12\x8c\xf8\xc6\xa2q\xc2\x15\xf8-\x1a\xad\x85B\xc6`+\xe1\xfdr>\t\x12.\xe4\x8ap\xa1!\x7fpu\xf4"
#para descromprimir
print(bz2.decompress(cadena_comprimida))
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: a345ec5ac2fe536c17768881a75d838a62037de6 | size: 1,057 | ext: py | lang: Python
max_stars: src/message_based_responses.py @ LakshyaShastri/C45DiscordBot (624674ef2b9ef4d147d709fa5f23dafc0b9fcadb) | licenses: ["MIT"] | count: 2 | events: 2020-03-31T14:45:11.000Z / 2020-03-31T15:20:35.000Z
max_issues: src/message_based_responses.py @ LakshyaShastri/C45DiscordBot (624674ef2b9ef4d147d709fa5f23dafc0b9fcadb) | licenses: ["MIT"] | count: 26 | events: 2020-03-30T17:34:35.000Z / 2020-10-05T19:26:37.000Z
max_forks: src/message_based_responses.py @ LakshyaShastri/C45DiscordBot (624674ef2b9ef4d147d709fa5f23dafc0b9fcadb) | licenses: ["MIT"] | count: 1 | events: 2020-10-02T07:58:18.000Z / 2020-10-02T07:58:18.000Z
content:
import re
import random
avg_line_length: 31.088235 | max_line_length: 99 | alphanum_fraction: 0.599811
original_content:
import re
import random
def _match_word(word_to_match, message_content):
    return re.search("^.*(\\w*[\b ]?{}([. !?@]+\\w*| +)?)$".format(word_to_match), message_content)
def regex_based_response(message_content):
    response = []
    if _match_word("test", message_content):
        print('[DEBUG] Test hit!')
        response.append("Marks out")
        response.append("?")
    elif _match_word("papi", message_content):
        response.append(random.choice([
            "UWU DID SOMEBODY SAY P A P I",
            "Yas daddi 🤪",
            "Big P A P I Dave 😍"
        ]))
    elif _match_word("triggered", message_content):
        fl = open("./resources/triggered.lol", "r")
        msg = fl.readlines()
        index = random.randint(0, len(msg) - 1)
        response.append(msg[index])
    elif _match_word("vim", message_content):
        f = open("./resources/vim.txt", "r")
        response.append(f.readline())
    elif _match_word("eclipse", message_content):
        response.append("eclipse kaka, IDE's kaka")
    return response
filtered:remove_function_no_docstring: 991 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 46

hexsha: 2bc8856b29271b763a0ca748604553dbd4283306 | size: 10,681 | ext: py | lang: Python
max_stars: test/unit/spiderfoot/test_spiderfootplugin.py @ khiemtq-cyber/spiderfoot (66e671918853b0334931fd2fbabad0096d506726) | licenses: ["MIT"] | count: null | events: null / null
max_issues: test/unit/spiderfoot/test_spiderfootplugin.py @ khiemtq-cyber/spiderfoot (66e671918853b0334931fd2fbabad0096d506726) | licenses: ["MIT"] | count: null | events: null / null
max_forks: test/unit/spiderfoot/test_spiderfootplugin.py @ khiemtq-cyber/spiderfoot (66e671918853b0334931fd2fbabad0096d506726) | licenses: ["MIT"] | count: null | events: null / null
content:
# test_spiderfootplugin.py
import pytest
import unittest
from sflib import SpiderFoot
from spiderfoot import SpiderFootDb, SpiderFootEvent, SpiderFootPlugin, SpiderFootTarget
@pytest.mark.usefixtures
class TestSpiderFootPlugin(unittest.TestCase):
"""
Test SpiderFoot
"""
def test_init(self):
"""
Test __init__(self)
"""
sfp = SpiderFootPlugin()
self.assertIsInstance(sfp, SpiderFootPlugin)
def test_updateSocket(self):
"""
Test _updateSocket(self, sock)
"""
sfp = SpiderFootPlugin()
sfp._updateSocket(None)
self.assertEqual('TBD', 'TBD')
def test_clearListeners(self):
"""
Test clearListeners(self)
"""
sfp = SpiderFootPlugin()
sfp.clearListeners()
self.assertEqual('TBD', 'TBD')
def test_setup(self):
"""
Test setup(self, sf, userOpts=dict())
"""
sfp = SpiderFootPlugin()
sfp.setup(None)
sfp.setup(None, None)
self.assertEqual('TBD', 'TBD')
def test_enrichTargetargument_target_should_enrih_target(self):
"""
Test enrichTarget(self, target)
"""
sfp = SpiderFootPlugin()
sfp.enrichTarget(None)
self.assertEqual('TBD', 'TBD')
def test_setTarget_should_set_a_target(self):
"""
Test setTarget(self, target)
"""
sfp = SpiderFootPlugin()
target = SpiderFootTarget("spiderfoot.net", "INTERNET_NAME")
sfp.setTarget(target)
get_target = sfp.getTarget().targetValue
self.assertIsInstance(get_target, str)
self.assertEqual("spiderfoot.net", get_target)
def test_setTarget_argument_target_invalid_type_should_raise_TypeError(self):
"""
Test setTarget(self, target)
"""
sfp = SpiderFootPlugin()
invalid_types = [None, "", list(), dict(), int()]
for invalid_type in invalid_types:
with self.subTest(invalid_type=invalid_type):
with self.assertRaises(TypeError):
sfp.setTarget(invalid_type)
def test_set_dbhargument_dbh_should_set_database_handle(self):
"""
Test setDbh(self, dbh)
"""
sfdb = SpiderFootDb(self.default_options, False)
sfp = SpiderFootPlugin()
sfp.setDbh(sfdb)
self.assertIsInstance(sfp.__sfdb__, SpiderFootDb)
def test_setScanId_argument_id_should_set_a_scan_id(self):
"""
Test setScanId(self, id)
"""
sfp = SpiderFootPlugin()
scan_id = '1234'
sfp.setScanId(scan_id)
get_scan_id = sfp.getScanId()
self.assertIsInstance(get_scan_id, str)
self.assertEqual(scan_id, get_scan_id)
def test_setScanId_argument_id_invalid_type_should_raise_TypeError(self):
"""
Test setScanId(self, id)
"""
sfp = SpiderFootPlugin()
invalid_types = [None, list(), dict(), int()]
for invalid_type in invalid_types:
with self.subTest(invalid_type=invalid_type):
with self.assertRaises(TypeError):
sfp.setScanId(invalid_type)
def test_getScanId_should_return_a_string(self):
"""
Test getScanId(self)
"""
sfp = SpiderFootPlugin()
scan_id = 'example scan id'
sfp.setScanId(scan_id)
get_scan_id = sfp.getScanId()
self.assertIsInstance(get_scan_id, str)
self.assertEqual(scan_id, get_scan_id)
def test_getScanId_unitialised_scanid_should_raise_TypeError(self):
"""
Test getScanId(self)
"""
sfp = SpiderFootPlugin()
with self.assertRaises(TypeError):
sfp.getScanId()
def test_getTarget_should_return_a_string(self):
"""
Test getTarget(self)
"""
sfp = SpiderFootPlugin()
target = SpiderFootTarget("spiderfoot.net", "INTERNET_NAME")
sfp.setTarget(target)
get_target = sfp.getTarget().targetValue
self.assertIsInstance(get_target, str)
self.assertEqual("spiderfoot.net", get_target)
def test_getTarget_unitialised_target_should_raise(self):
"""
Test getTarget(self)
"""
sfp = SpiderFootPlugin()
with self.assertRaises(TypeError):
sfp.getTarget()
def test_register_listener(self):
"""
Test registerListener(self, listener)
"""
sfp = SpiderFootPlugin()
sfp.registerListener(None)
self.assertEqual('TBD', 'TBD')
def test_setOutputFilter_should_set_output_filter(self):
"""
Test setOutputFilter(self, types)
"""
sfp = SpiderFootPlugin()
output_filter = "test filter"
sfp.setOutputFilter("test filter")
self.assertEqual(output_filter, sfp.__outputFilter__)
def test_tempStorage_should_return_a_dict(self):
"""
Test tempStorage(self)
"""
sfp = SpiderFootPlugin()
temp_storage = sfp.tempStorage()
self.assertIsInstance(temp_storage, dict)
def test_notifyListeners_should_notify_listener_modules(self):
"""
Test notifyListeners(self, sfEvent)
"""
sfp = SpiderFootPlugin()
sfdb = SpiderFootDb(self.default_options, False)
sfp.setDbh(sfdb)
event_type = 'ROOT'
event_data = 'test data'
module = 'test module'
source_event = None
evt = SpiderFootEvent(event_type, event_data, module, source_event)
sfp.notifyListeners(evt)
self.assertEqual('TBD', 'TBD')
def test_notifyListeners_output_filter_matched_should_notify_listener_modules(self):
"""
Test notifyListeners(self, sfEvent)
"""
sfp = SpiderFootPlugin()
sfdb = SpiderFootDb(self.default_options, False)
sfp.setDbh(sfdb)
target = SpiderFootTarget("spiderfoot.net", "INTERNET_NAME")
sfp.setTarget(target)
event_type = 'ROOT'
event_data = 'test data'
module = 'test module'
source_event = None
evt = SpiderFootEvent(event_type, event_data, module, source_event)
event_type = 'test event type'
event_data = 'test data'
module = 'test module'
source_event = evt
evt = SpiderFootEvent(event_type, event_data, module, source_event)
sfp.__outputFilter__ = event_type
sfp.notifyListeners(evt)
self.assertEqual('TBD', 'TBD')
def test_notifyListeners_output_filter_unmatched_should_not_notify_listener_modules(self):
"""
Test notifyListeners(self, sfEvent)
"""
sfp = SpiderFootPlugin()
sfdb = SpiderFootDb(self.default_options, False)
sfp.setDbh(sfdb)
target = SpiderFootTarget("spiderfoot.net", "INTERNET_NAME")
sfp.setTarget(target)
event_type = 'ROOT'
event_data = 'test data'
module = 'test module'
source_event = None
evt = SpiderFootEvent(event_type, event_data, module, source_event)
event_type = 'test event type'
event_data = 'test data'
module = 'test module'
source_event = evt
evt = SpiderFootEvent(event_type, event_data, module, source_event)
sfp.__outputFilter__ = "example unmatched event type"
sfp.notifyListeners(evt)
self.assertEqual('TBD', 'TBD')
def test_notifyListeners_event_type_and_data_same_as_source_event_source_event_should_story_only(self):
"""
Test notifyListeners(self, sfEvent)
"""
sfp = SpiderFootPlugin()
sfdb = SpiderFootDb(self.default_options, False)
sfp.setDbh(sfdb)
event_type = 'ROOT'
event_data = 'test data'
module = 'test module'
source_event = None
evt = SpiderFootEvent(event_type, event_data, module, source_event)
event_type = 'test event type'
event_data = 'test data'
module = 'test module'
source_event = evt
evt = SpiderFootEvent(event_type, event_data, module, source_event)
source_event = evt
evt = SpiderFootEvent(event_type, event_data, module, source_event)
source_event = evt
evt = SpiderFootEvent(event_type, event_data, module, source_event)
sfp.notifyListeners(evt)
self.assertEqual('TBD', 'TBD')
def test_notifyListeners_argument_sfEvent_invalid_event_should_raise_TypeError(self):
"""
Test notifyListeners(self, sfEvent)
"""
sfp = SpiderFootPlugin()
invalid_types = [None, "", list(), dict(), int()]
for invalid_type in invalid_types:
with self.subTest(invalid_type=invalid_type):
with self.assertRaises(TypeError):
sfp.notifyListeners(invalid_type)
def test_checkForStop(self):
"""
Test checkForStop(self)
"""
sfp = SpiderFootPlugin()
sfp.__sfdb__ = DatabaseStub()
sfp.__scanId__ = 'example scan id'
# pseudo-parameterized test
scan_statuses = [
(None, False),
("anything", False),
("RUNNING", False),
("ABORT-REQUESTED", True)
]
for status, expectedReturnValue in scan_statuses:
returnValue = sfp.checkForStop()
self.assertEqual(returnValue, expectedReturnValue, status)
def test_watchedEvents_should_return_a_list(self):
"""
Test watchedEvents(self)
"""
sfp = SpiderFootPlugin()
watched_events = sfp.watchedEvents()
self.assertIsInstance(watched_events, list)
def test_producedEvents_should_return_a_list(self):
"""
Test producedEvents(self)
"""
sfp = SpiderFootPlugin()
produced_events = sfp.producedEvents()
self.assertIsInstance(produced_events, list)
def test_handleEvent(self):
"""
Test handleEvent(self, sfEvent)
"""
event_type = 'ROOT'
event_data = 'example event data'
module = ''
source_event = ''
evt = SpiderFootEvent(event_type, event_data, module, source_event)
sfp = SpiderFootPlugin()
sfp.handleEvent(evt)
def test_start(self):
"""
Test start(self)
"""
sf = SpiderFoot(self.default_options)
sfp = SpiderFootPlugin()
sfp.sf = sf
sfp.start()
avg_line_length: 28.712366 | max_line_length: 107 | alphanum_fraction: 0.614175
original_content:
# test_spiderfootplugin.py
import pytest
import unittest
from sflib import SpiderFoot
from spiderfoot import SpiderFootDb, SpiderFootEvent, SpiderFootPlugin, SpiderFootTarget
@pytest.mark.usefixtures
class TestSpiderFootPlugin(unittest.TestCase):
"""
Test SpiderFoot
"""
def test_init(self):
"""
Test __init__(self)
"""
sfp = SpiderFootPlugin()
self.assertIsInstance(sfp, SpiderFootPlugin)
def test_updateSocket(self):
"""
Test _updateSocket(self, sock)
"""
sfp = SpiderFootPlugin()
sfp._updateSocket(None)
self.assertEqual('TBD', 'TBD')
def test_clearListeners(self):
"""
Test clearListeners(self)
"""
sfp = SpiderFootPlugin()
sfp.clearListeners()
self.assertEqual('TBD', 'TBD')
def test_setup(self):
"""
Test setup(self, sf, userOpts=dict())
"""
sfp = SpiderFootPlugin()
sfp.setup(None)
sfp.setup(None, None)
self.assertEqual('TBD', 'TBD')
def test_enrichTargetargument_target_should_enrih_target(self):
"""
Test enrichTarget(self, target)
"""
sfp = SpiderFootPlugin()
sfp.enrichTarget(None)
self.assertEqual('TBD', 'TBD')
def test_setTarget_should_set_a_target(self):
"""
Test setTarget(self, target)
"""
sfp = SpiderFootPlugin()
target = SpiderFootTarget("spiderfoot.net", "INTERNET_NAME")
sfp.setTarget(target)
get_target = sfp.getTarget().targetValue
self.assertIsInstance(get_target, str)
self.assertEqual("spiderfoot.net", get_target)
def test_setTarget_argument_target_invalid_type_should_raise_TypeError(self):
"""
Test setTarget(self, target)
"""
sfp = SpiderFootPlugin()
invalid_types = [None, "", list(), dict(), int()]
for invalid_type in invalid_types:
with self.subTest(invalid_type=invalid_type):
with self.assertRaises(TypeError):
sfp.setTarget(invalid_type)
def test_set_dbhargument_dbh_should_set_database_handle(self):
"""
Test setDbh(self, dbh)
"""
sfdb = SpiderFootDb(self.default_options, False)
sfp = SpiderFootPlugin()
sfp.setDbh(sfdb)
self.assertIsInstance(sfp.__sfdb__, SpiderFootDb)
def test_setScanId_argument_id_should_set_a_scan_id(self):
"""
Test setScanId(self, id)
"""
sfp = SpiderFootPlugin()
scan_id = '1234'
sfp.setScanId(scan_id)
get_scan_id = sfp.getScanId()
self.assertIsInstance(get_scan_id, str)
self.assertEqual(scan_id, get_scan_id)
def test_setScanId_argument_id_invalid_type_should_raise_TypeError(self):
"""
Test setScanId(self, id)
"""
sfp = SpiderFootPlugin()
invalid_types = [None, list(), dict(), int()]
for invalid_type in invalid_types:
with self.subTest(invalid_type=invalid_type):
with self.assertRaises(TypeError):
sfp.setScanId(invalid_type)
def test_getScanId_should_return_a_string(self):
"""
Test getScanId(self)
"""
sfp = SpiderFootPlugin()
scan_id = 'example scan id'
sfp.setScanId(scan_id)
get_scan_id = sfp.getScanId()
self.assertIsInstance(get_scan_id, str)
self.assertEqual(scan_id, get_scan_id)
def test_getScanId_unitialised_scanid_should_raise_TypeError(self):
"""
Test getScanId(self)
"""
sfp = SpiderFootPlugin()
with self.assertRaises(TypeError):
sfp.getScanId()
def test_getTarget_should_return_a_string(self):
"""
Test getTarget(self)
"""
sfp = SpiderFootPlugin()
target = SpiderFootTarget("spiderfoot.net", "INTERNET_NAME")
sfp.setTarget(target)
get_target = sfp.getTarget().targetValue
self.assertIsInstance(get_target, str)
self.assertEqual("spiderfoot.net", get_target)
def test_getTarget_unitialised_target_should_raise(self):
"""
Test getTarget(self)
"""
sfp = SpiderFootPlugin()
with self.assertRaises(TypeError):
sfp.getTarget()
def test_register_listener(self):
"""
Test registerListener(self, listener)
"""
sfp = SpiderFootPlugin()
sfp.registerListener(None)
self.assertEqual('TBD', 'TBD')
def test_setOutputFilter_should_set_output_filter(self):
"""
Test setOutputFilter(self, types)
"""
sfp = SpiderFootPlugin()
output_filter = "test filter"
sfp.setOutputFilter("test filter")
self.assertEqual(output_filter, sfp.__outputFilter__)
def test_tempStorage_should_return_a_dict(self):
"""
Test tempStorage(self)
"""
sfp = SpiderFootPlugin()
temp_storage = sfp.tempStorage()
self.assertIsInstance(temp_storage, dict)
def test_notifyListeners_should_notify_listener_modules(self):
"""
Test notifyListeners(self, sfEvent)
"""
sfp = SpiderFootPlugin()
sfdb = SpiderFootDb(self.default_options, False)
sfp.setDbh(sfdb)
event_type = 'ROOT'
event_data = 'test data'
module = 'test module'
source_event = None
evt = SpiderFootEvent(event_type, event_data, module, source_event)
sfp.notifyListeners(evt)
self.assertEqual('TBD', 'TBD')
def test_notifyListeners_output_filter_matched_should_notify_listener_modules(self):
"""
Test notifyListeners(self, sfEvent)
"""
sfp = SpiderFootPlugin()
sfdb = SpiderFootDb(self.default_options, False)
sfp.setDbh(sfdb)
target = SpiderFootTarget("spiderfoot.net", "INTERNET_NAME")
sfp.setTarget(target)
event_type = 'ROOT'
event_data = 'test data'
module = 'test module'
source_event = None
evt = SpiderFootEvent(event_type, event_data, module, source_event)
event_type = 'test event type'
event_data = 'test data'
module = 'test module'
source_event = evt
evt = SpiderFootEvent(event_type, event_data, module, source_event)
sfp.__outputFilter__ = event_type
sfp.notifyListeners(evt)
self.assertEqual('TBD', 'TBD')
def test_notifyListeners_output_filter_unmatched_should_not_notify_listener_modules(self):
"""
Test notifyListeners(self, sfEvent)
"""
sfp = SpiderFootPlugin()
sfdb = SpiderFootDb(self.default_options, False)
sfp.setDbh(sfdb)
target = SpiderFootTarget("spiderfoot.net", "INTERNET_NAME")
sfp.setTarget(target)
event_type = 'ROOT'
event_data = 'test data'
module = 'test module'
source_event = None
evt = SpiderFootEvent(event_type, event_data, module, source_event)
event_type = 'test event type'
event_data = 'test data'
module = 'test module'
source_event = evt
evt = SpiderFootEvent(event_type, event_data, module, source_event)
sfp.__outputFilter__ = "example unmatched event type"
sfp.notifyListeners(evt)
self.assertEqual('TBD', 'TBD')
def test_notifyListeners_event_type_and_data_same_as_source_event_source_event_should_story_only(self):
"""
Test notifyListeners(self, sfEvent)
"""
sfp = SpiderFootPlugin()
sfdb = SpiderFootDb(self.default_options, False)
sfp.setDbh(sfdb)
event_type = 'ROOT'
event_data = 'test data'
module = 'test module'
source_event = None
evt = SpiderFootEvent(event_type, event_data, module, source_event)
event_type = 'test event type'
event_data = 'test data'
module = 'test module'
source_event = evt
evt = SpiderFootEvent(event_type, event_data, module, source_event)
source_event = evt
evt = SpiderFootEvent(event_type, event_data, module, source_event)
source_event = evt
evt = SpiderFootEvent(event_type, event_data, module, source_event)
sfp.notifyListeners(evt)
self.assertEqual('TBD', 'TBD')
def test_notifyListeners_argument_sfEvent_invalid_event_should_raise_TypeError(self):
"""
Test notifyListeners(self, sfEvent)
"""
sfp = SpiderFootPlugin()
invalid_types = [None, "", list(), dict(), int()]
for invalid_type in invalid_types:
with self.subTest(invalid_type=invalid_type):
with self.assertRaises(TypeError):
sfp.notifyListeners(invalid_type)
def test_checkForStop(self):
"""
Test checkForStop(self)
"""
sfp = SpiderFootPlugin()
class DatabaseStub:
def scanInstanceGet(self, scanId):
return [None, None, None, None, None, status]
sfp.__sfdb__ = DatabaseStub()
sfp.__scanId__ = 'example scan id'
# pseudo-parameterized test
scan_statuses = [
(None, False),
("anything", False),
("RUNNING", False),
("ABORT-REQUESTED", True)
]
for status, expectedReturnValue in scan_statuses:
returnValue = sfp.checkForStop()
self.assertEqual(returnValue, expectedReturnValue, status)
def test_watchedEvents_should_return_a_list(self):
"""
Test watchedEvents(self)
"""
sfp = SpiderFootPlugin()
watched_events = sfp.watchedEvents()
self.assertIsInstance(watched_events, list)
def test_producedEvents_should_return_a_list(self):
"""
Test producedEvents(self)
"""
sfp = SpiderFootPlugin()
produced_events = sfp.producedEvents()
self.assertIsInstance(produced_events, list)
def test_handleEvent(self):
"""
Test handleEvent(self, sfEvent)
"""
event_type = 'ROOT'
event_data = 'example event data'
module = ''
source_event = ''
evt = SpiderFootEvent(event_type, event_data, module, source_event)
sfp = SpiderFootPlugin()
sfp.handleEvent(evt)
def test_start(self):
"""
Test start(self)
"""
sf = SpiderFoot(self.default_options)
sfp = SpiderFootPlugin()
sfp.sf = sf
sfp.start()
filtered:remove_function_no_docstring: 75 | filtered:remove_class_no_docstring: -2 | filtered:remove_delete_markers: 65

hexsha: 7c20b7d1b3b1ead52022d57946dc953d553868a4 | size: 4,989 | ext: py | lang: Python
max_stars: pytorch_lightning/utilities/seed.py @ GabrielePicco/pytorch-lightning (0d6dfd42d8965347a258e3d20e83bddd344e718f) | licenses: ["Apache-2.0"] | count: 1 | events: 2022-01-08T14:06:36.000Z / 2022-01-08T14:06:36.000Z
max_issues: pytorch_lightning/utilities/seed.py @ GabrielePicco/pytorch-lightning (0d6dfd42d8965347a258e3d20e83bddd344e718f) | licenses: ["Apache-2.0"] | count: null | events: null / null
max_forks: pytorch_lightning/utilities/seed.py @ GabrielePicco/pytorch-lightning (0d6dfd42d8965347a258e3d20e83bddd344e718f) | licenses: ["Apache-2.0"] | count: 1 | events: 2022-01-08T14:06:27.000Z / 2022-01-08T14:06:27.000Z
content:
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to help with reproducibility of models. """
import logging
import os
import random
from typing import Optional
import numpy as np
import torch
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_7, rank_zero_warn
from pytorch_lightning.utilities.distributed import rank_zero_only
log = logging.getLogger(__name__)
def seed_everything(seed: Optional[int] = None, workers: bool = False) -> int:
"""
Function that sets seed for pseudo-random number generators in:
pytorch, numpy, python.random
In addition, sets the following environment variables:
- `PL_GLOBAL_SEED`: will be passed to spawned subprocesses (e.g. ddp_spawn backend).
- `PL_SEED_WORKERS`: (optional) is set to 1 if ```workers=True``.
Args:
seed: the integer value seed for global random state in Lightning.
If `None`, will read seed from `PL_GLOBAL_SEED` env variable
or select it randomly.
workers: if set to ``True``, will properly configure all dataloaders passed to the
Trainer with a ``worker_init_fn``. If the user already provides such a function
for their dataloaders, setting this argument will have no influence. See also:
:func:`~pytorch_lightning.utilities.seed.pl_worker_init_function`.
"""
max_seed_value = np.iinfo(np.uint32).max
min_seed_value = np.iinfo(np.uint32).min
try:
if seed is None:
seed = os.environ.get("PL_GLOBAL_SEED")
seed = int(seed)
except (TypeError, ValueError):
seed = _select_seed_randomly(min_seed_value, max_seed_value)
rank_zero_warn(f"No correct seed found, seed set to {seed}")
if not (min_seed_value <= seed <= max_seed_value):
rank_zero_warn(f"{seed} is not in bounds, numpy accepts from {min_seed_value} to {max_seed_value}")
seed = _select_seed_randomly(min_seed_value, max_seed_value)
# using `log.info` instead of `rank_zero_info`,
# so users can verify the seed is properly set in distributed training.
log.info(f"Global seed set to {seed}")
os.environ["PL_GLOBAL_SEED"] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ["PL_SEED_WORKERS"] = f"{int(workers)}"
return seed
def reset_seed() -> None:
"""
Reset the seed to the value that :func:`pytorch_lightning.utilities.seed.seed_everything` previously set.
If :func:`pytorch_lightning.utilities.seed.seed_everything` is unused, this function will do nothing.
"""
seed = os.environ.get("PL_GLOBAL_SEED", None)
workers = os.environ.get("PL_SEED_WORKERS", False)
if seed is not None:
seed_everything(int(seed), workers=bool(workers))
def pl_worker_init_function(worker_id: int, rank: Optional = None) -> None: # pragma: no cover
"""
The worker_init_fn that Lightning automatically adds to your dataloader if you previously set
set the seed with ``seed_everything(seed, workers=True)``.
See also the PyTorch documentation on
`randomness in DataLoaders <https://pytorch.org/docs/stable/notes/randomness.html#dataloader>`_.
"""
# implementation notes: https://github.com/pytorch/pytorch/issues/5059#issuecomment-817392562
global_rank = rank if rank is not None else rank_zero_only.rank
process_seed = torch.initial_seed()
# back out the base seed so we can use all the bits
base_seed = process_seed - worker_id
log.debug(
f'Initializing random number generators of process {global_rank} worker {worker_id} with base seed {base_seed}'
)
ss = np.random.SeedSequence([base_seed, worker_id, global_rank])
# use 128 bits (4 x 32-bit words)
np.random.seed(ss.generate_state(4))
# Spawn distinct SeedSequences for the PyTorch PRNG and the stdlib random module
torch_ss, stdlib_ss = ss.spawn(2)
# PyTorch 1.7 and above takes a 64-bit seed
dtype = np.uint64 if _TORCH_GREATER_EQUAL_1_7 else np.uint32
torch.manual_seed(torch_ss.generate_state(1, dtype=dtype)[0])
# use 128 bits expressed as an integer
stdlib_seed = (stdlib_ss.generate_state(2, dtype=np.uint64).astype(object) * [1 << 64, 1]).sum()
random.seed(stdlib_seed)
avg_line_length: 42.279661 | max_line_length: 119 | alphanum_fraction: 0.719583
original_content:
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to help with reproducibility of models. """
import logging
import os
import random
from typing import Optional
import numpy as np
import torch
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_7, rank_zero_warn
from pytorch_lightning.utilities.distributed import rank_zero_only
log = logging.getLogger(__name__)
def seed_everything(seed: Optional[int] = None, workers: bool = False) -> int:
"""
Function that sets seed for pseudo-random number generators in:
pytorch, numpy, python.random
In addition, sets the following environment variables:
- `PL_GLOBAL_SEED`: will be passed to spawned subprocesses (e.g. ddp_spawn backend).
- `PL_SEED_WORKERS`: (optional) is set to 1 if ```workers=True``.
Args:
seed: the integer value seed for global random state in Lightning.
If `None`, will read seed from `PL_GLOBAL_SEED` env variable
or select it randomly.
workers: if set to ``True``, will properly configure all dataloaders passed to the
Trainer with a ``worker_init_fn``. If the user already provides such a function
for their dataloaders, setting this argument will have no influence. See also:
:func:`~pytorch_lightning.utilities.seed.pl_worker_init_function`.
"""
max_seed_value = np.iinfo(np.uint32).max
min_seed_value = np.iinfo(np.uint32).min
try:
if seed is None:
seed = os.environ.get("PL_GLOBAL_SEED")
seed = int(seed)
except (TypeError, ValueError):
seed = _select_seed_randomly(min_seed_value, max_seed_value)
rank_zero_warn(f"No correct seed found, seed set to {seed}")
if not (min_seed_value <= seed <= max_seed_value):
rank_zero_warn(f"{seed} is not in bounds, numpy accepts from {min_seed_value} to {max_seed_value}")
seed = _select_seed_randomly(min_seed_value, max_seed_value)
# using `log.info` instead of `rank_zero_info`,
# so users can verify the seed is properly set in distributed training.
log.info(f"Global seed set to {seed}")
os.environ["PL_GLOBAL_SEED"] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ["PL_SEED_WORKERS"] = f"{int(workers)}"
return seed
def _select_seed_randomly(min_seed_value: int = 0, max_seed_value: int = 255) -> int:
return random.randint(min_seed_value, max_seed_value)
def reset_seed() -> None:
"""
Reset the seed to the value that :func:`pytorch_lightning.utilities.seed.seed_everything` previously set.
If :func:`pytorch_lightning.utilities.seed.seed_everything` is unused, this function will do nothing.
"""
seed = os.environ.get("PL_GLOBAL_SEED", None)
workers = os.environ.get("PL_SEED_WORKERS", False)
if seed is not None:
seed_everything(int(seed), workers=bool(workers))
def pl_worker_init_function(worker_id: int, rank: Optional = None) -> None: # pragma: no cover
"""
    The worker_init_fn that Lightning automatically adds to your dataloader if you previously
    set the seed with ``seed_everything(seed, workers=True)``.
See also the PyTorch documentation on
`randomness in DataLoaders <https://pytorch.org/docs/stable/notes/randomness.html#dataloader>`_.
"""
# implementation notes: https://github.com/pytorch/pytorch/issues/5059#issuecomment-817392562
global_rank = rank if rank is not None else rank_zero_only.rank
process_seed = torch.initial_seed()
# back out the base seed so we can use all the bits
base_seed = process_seed - worker_id
log.debug(
f'Initializing random number generators of process {global_rank} worker {worker_id} with base seed {base_seed}'
)
ss = np.random.SeedSequence([base_seed, worker_id, global_rank])
# use 128 bits (4 x 32-bit words)
np.random.seed(ss.generate_state(4))
# Spawn distinct SeedSequences for the PyTorch PRNG and the stdlib random module
torch_ss, stdlib_ss = ss.spawn(2)
# PyTorch 1.7 and above takes a 64-bit seed
dtype = np.uint64 if _TORCH_GREATER_EQUAL_1_7 else np.uint32
torch.manual_seed(torch_ss.generate_state(1, dtype=dtype)[0])
# use 128 bits expressed as an integer
stdlib_seed = (stdlib_ss.generate_state(2, dtype=np.uint64).astype(object) * [1 << 64, 1]).sum()
random.seed(stdlib_seed)
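A minimal usage sketch of the `seed_everything` defined above, assuming `pytorch_lightning` and `torch` are installed: seeding twice with the same value reproduces the same draws.
import torch
from pytorch_lightning.utilities.seed import seed_everything

seed_everything(42, workers=True)
first = torch.rand(3)
seed_everything(42, workers=True)
second = torch.rand(3)
assert torch.equal(first, second)  # identical seed -> identical random tensors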
| 122
| 0
| 23
|
97928beddf03c0eb483c6b3faad2fde0285120b9
| 4,790
|
py
|
Python
|
src/experiments/train_common.py
|
prakashchhipa/Depth-Contrast-Self-Supervised-Method
|
c68f2ea85063be3a63216985fbe806621174889b
|
[
"Apache-2.0"
] | null | null | null |
src/experiments/train_common.py
|
prakashchhipa/Depth-Contrast-Self-Supervised-Method
|
c68f2ea85063be3a63216985fbe806621174889b
|
[
"Apache-2.0"
] | null | null | null |
src/experiments/train_common.py
|
prakashchhipa/Depth-Contrast-Self-Supervised-Method
|
c68f2ea85063be3a63216985fbe806621174889b
|
[
"Apache-2.0"
] | null | null | null |
from distutils.log import error
import errno
import numpy as np
import json
import argparse
import time
from tqdm import tqdm
import cv2
import logging
import sys, os
import torch
import torchvision
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.transforms import transforms
from sklearn.metrics import f1_score
from sampler import BalancedBatchSampler
from models import Resnext_Model, Densenet_Model
from dataloader import MBV_Dataset
sys.path.append(os.path.dirname(__file__))
from train_util import Train_Util
sys.path.append(os.path.dirname(__file__))
from utils import *
from mbv_config import MBV_Config
import mbv_config
import argparse
if __name__ == "__main__":
train()
| 41.293103
| 320
| 0.746764
|
from distutils.log import error
import errno
import numpy as np
import json
import argparse
import time
from tqdm import tqdm
import cv2
import logging
import sys, os
import torch
import torchvision
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.transforms import transforms
from sklearn.metrics import f1_score
from sampler import BalancedBatchSampler
from models import Resnext_Model, Densenet_Model
from dataloader import MBV_Dataset
sys.path.append(os.path.dirname(__file__))
from train_util import Train_Util
sys.path.append(os.path.dirname(__file__))
from utils import *
from mbv_config import MBV_Config
import mbv_config
import argparse
def train():
parser = argparse.ArgumentParser(description='PyTorch MBV Training')
parser.add_argument('--lr', default=0.00001, type=float, help='learning rate')
parser.add_argument('--wd', default=5e-3, type=float, help='weight decay')
parser.add_argument('--architecture', default="resnext", type=str, help='architecture - resnext | densenet')
parser.add_argument('--machine', default=7, type=int, help='define gpu no.')
parser.add_argument('--patience', default=10, type=int, help='patience for learning rate change')
parser.add_argument('--batch_size', default=16, type=int, help='batch size')
parser.add_argument('--input_size', default=225, type=int, help='input image')
parser.add_argument('--epochs', default=100, type=int, help='epochs')
parser.add_argument('--description', default="fine_tune", type=str, help='experiment name | description')
    parser.add_argument('--data_path', default="fine_tune", type=str, help=' path for data of specific fold - Fold 0|1|2|3|4 ')
args = parser.parse_args()
batch_size = args.batch_size
image_size = args.input_size
LR = args.lr
patience = args.patience
weight_decay = args.wd
fold_root = args.data_path
device = torch.device(f"cuda:{args.machine}")
epochs = args.epochs
experiment_description = args.description
architecture = args.architecture
raw_train_transform = transforms.Compose([
transforms.RandomCrop((image_size,image_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485], std=[0.229])
])
ref_train_transform = transforms.Compose([
transforms.RandomCrop((image_size,image_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485], std=[0.229])
])
val_transform = transforms.Compose([
transforms.Resize((image_size*4,image_size*4)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485], std=[0.229])
])
train_raw_image_file = fold_root + 'X_raw_train.npy'
train_ref_image_file = fold_root + 'X_ref_train.npy'
train_label_file = fold_root + 'Y_train.npy'
val_raw_image_file = fold_root + 'X_raw_val.npy'
val_ref_image_file = fold_root + 'X_ref_val.npy'
val_label_file = fold_root + 'Y_val.npy'
train_dataset = MBV_Dataset(raw_train_file_path = train_raw_image_file , reflectance_train_file_path=train_ref_image_file, label_file_path=train_label_file, transform= [raw_train_transform, ref_train_transform])
train_loader = DataLoader(train_dataset, batch_size = batch_size, shuffle=True, sampler=None) #, sampler = BalancedBatchSampler(train_dataset)
val_dataset = MBV_Dataset(raw_train_file_path = val_raw_image_file , reflectance_train_file_path=val_ref_image_file, label_file_path=val_label_file, transform= [val_transform])
val_loader = DataLoader(val_dataset, batch_size = batch_size, shuffle=False, sampler=None)
if architecture == "resnext":
downstream_task_model = Resnext_Model( pretrained=True)
elif architecture == "densenet":
downstream_task_model = Densenet_Model(pretrained=True)
else:
raise error ("invalid architecture name")
downstream_task_model = downstream_task_model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(downstream_task_model.parameters(), lr=LR, weight_decay= weight_decay)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.1 ,patience=patience, min_lr= 5e-3)
writer = SummaryWriter(log_dir=mbv_config.tensorboard_path+experiment_description)
train_util = Train_Util(experiment_description = experiment_description,image_type=mbv_config.image_both, epochs = epochs, model=downstream_task_model, device=device, train_loader=train_loader, val_loader=val_loader, optimizer=optimizer, criterion=criterion, batch_size=batch_size,scheduler=scheduler, writer=writer)
train_util.train_and_evaluate()
if __name__ == "__main__":
train()
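As a side note on the scheduler wiring in `train()` above, the following self-contained sketch (a dummy linear model and a fabricated validation loss stand in for the real training loop) shows how `ReduceLROnPlateau` is stepped with the monitored metric rather than with the epoch number.
from torch import nn, optim

model = nn.Linear(4, 2)                               # stand-in for the ResNeXt/DenseNet models
optimizer = optim.Adam(model.parameters(), lr=1e-2, weight_decay=5e-3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.1, patience=2)
for epoch in range(8):
    val_loss = 1.0                                    # fabricated, non-improving validation loss
    scheduler.step(val_loss)                          # step with the monitored metric
    print(epoch, optimizer.param_groups[0]['lr'])     # LR drops by 10x once patience is exhausted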
| 3,970
| 0
| 23
|
b36a0b7dd2d9576d02a33fbac3594480be303cda
| 1,140
|
py
|
Python
|
spplib/cli/commands/cmd_vm.py
|
sppautomation/sppclient
|
d6d5f2c27877f03f68ef2716b204503b7764fe75
|
[
"Apache-2.0"
] | 7
|
2018-06-01T21:53:35.000Z
|
2021-06-02T13:50:56.000Z
|
spplib/cli/commands/cmd_vm.py
|
sppautomation/sppclient
|
d6d5f2c27877f03f68ef2716b204503b7764fe75
|
[
"Apache-2.0"
] | 7
|
2018-11-29T05:15:26.000Z
|
2021-03-31T13:39:30.000Z
|
spplib/cli/commands/cmd_vm.py
|
sppautomation/sppclient
|
d6d5f2c27877f03f68ef2716b204503b7764fe75
|
[
"Apache-2.0"
] | 8
|
2018-05-08T16:07:09.000Z
|
2020-04-05T09:39:39.000Z
|
import json
import click
from tabulate import tabulate
from spplib.cli import util
@click.group()
@util.pass_context
def cli(ctx, **kwargs):
"""VM resource.
"""
pass
@cli.command()
@util.pass_context
@click.argument('pattern')
| 22.8
| 95
| 0.647368
|
import json
import click
from tabulate import tabulate
from spplib.cli import util
@click.group()
@util.pass_context
def cli(ctx, **kwargs):
"""VM resource.
"""
pass
def get_sla_info(spp_session):
sla_policies = spp_session.get(restype='sla')['slapolicies']
return dict([(int(x['id']), x['name']) for x in sla_policies])
@cli.command()
@util.pass_context
@click.argument('pattern')
def search(ctx, pattern):
if not pattern:
raise Exception('VM pattern is required. ')
qparams = {'resourceType': 'vm', 'from': 'hlo', 'pageSize': '500'}
data = {'name': pattern, 'hypervisorType': 'vmware'}
sla_info = get_sla_info(ctx.spp_session)
resp = ctx.spp_session.post(restype='hypervisor', path='search', data=data, params=qparams)
table_info = []
for vm in resp['vms']:
sla_ids = set([x['storageProfileId'] for x in vm['copies']])
slas = set([sla_info[sla_id] for sla_id in sla_ids])
table_info.append((vm['name'], slas))
if not table_info:
return
print
click.echo_via_pager(tabulate(table_info, headers=["Name", "SLAs"]))
print
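A minimal, self-contained sketch of the output pattern the `search` command uses above — build rows, tabulate them, and page them through click — with hypothetical VM/SLA data instead of a live SPP session.
import click
from tabulate import tabulate

@click.command()
def demo():
    rows = [("vm-01", {"Gold"}), ("vm-02", {"Silver", "Bronze"})]  # hypothetical data
    click.echo_via_pager(tabulate(rows, headers=["Name", "SLAs"]))

if __name__ == "__main__":
    demo()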
| 847
| 0
| 46
|
2b8daba4bc8dba84de9f41bfcd785d9de888127b
| 344
|
py
|
Python
|
immobilien/admin.py
|
sahin88/Django_Rest_Framework_Redux_React_Estate_App_FullStack
|
10e31c4071bcebc0e4401f42084211d170b2ea56
|
[
"Unlicense"
] | null | null | null |
immobilien/admin.py
|
sahin88/Django_Rest_Framework_Redux_React_Estate_App_FullStack
|
10e31c4071bcebc0e4401f42084211d170b2ea56
|
[
"Unlicense"
] | null | null | null |
immobilien/admin.py
|
sahin88/Django_Rest_Framework_Redux_React_Estate_App_FullStack
|
10e31c4071bcebc0e4401f42084211d170b2ea56
|
[
"Unlicense"
] | null | null | null |
from django.contrib import admin
from .models import immobilien
admin.site.register(immobilien, immobilienAdmin)
# Register your models here.
| 21.5
| 61
| 0.718023
|
from django.contrib import admin
from .models import immobilien
class immobilienAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'date_joined', 'topseller')
list_display_links = ('id', 'name')
search_fields = ('name',)
list_per_page = 24
admin.site.register(immobilien, immobilienAdmin)
# Register your models here.
| 0
| 175
| 23
|
d74b93e88cabf9aa66652003dbd1b9b073dd3701
| 289
|
py
|
Python
|
css/email.py
|
Pipefehecar/repo_prueba
|
2d1f8c531e40dc3071d17da1cdb1dbfb98448537
|
[
"Apache-2.0"
] | 2
|
2018-04-13T02:04:25.000Z
|
2018-04-13T02:04:27.000Z
|
css/email.py
|
Pipefehecar/repo_prueba
|
2d1f8c531e40dc3071d17da1cdb1dbfb98448537
|
[
"Apache-2.0"
] | null | null | null |
css/email.py
|
Pipefehecar/repo_prueba
|
2d1f8c531e40dc3071d17da1cdb1dbfb98448537
|
[
"Apache-2.0"
] | null | null | null |
class Alarma():
"""clase del objeto alarma"""
| 16.055556
| 30
| 0.698962
|
class Alarma():
"""clase del objeto alarma"""
def __init__(self,fecha):
global fecha, destinatarios
self.fecha = fecha
pass
def adjuntar_archivo():
pass
def agregar_borrador():
pass
def corregir_error():
pass
def leer_email():
pass
def verificar_recibido():
pass
| 102
| 0
| 138
|
2feb18d20046f1329505a5aa39be610306b9d37c
| 1,339
|
py
|
Python
|
nova/compute/opts.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/compute/opts.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/compute/opts.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | 2
|
2017-07-20T17:31:34.000Z
|
2020-07-24T02:42:19.000Z
|
begin_unit
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may not'
nl|'\n'
comment|'# use this file except in compliance with the License. You may obtain a copy'
nl|'\n'
comment|'# of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS,'
nl|'\n'
comment|'# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.'
nl|'\n'
comment|'# See the License for the specific language governing permissions and'
nl|'\n'
comment|'# limitations under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'nova'
op|'.'
name|'compute'
op|'.'
name|'flavors'
newline|'\n'
name|'import'
name|'nova'
op|'.'
name|'compute'
op|'.'
name|'monitors'
newline|'\n'
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|list_opts
name|'def'
name|'list_opts'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
nl|'\n'
op|'('
string|"'DEFAULT'"
op|','
nl|'\n'
name|'nova'
op|'.'
name|'compute'
op|'.'
name|'flavors'
op|'.'
name|'flavor_opts'
op|','
nl|'\n'
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
dedent|''
endmarker|''
end_unit
| 17.166667
| 87
| 0.655713
|
begin_unit
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may not'
nl|'\n'
comment|'# use this file except in compliance with the License. You may obtain a copy'
nl|'\n'
comment|'# of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS,'
nl|'\n'
comment|'# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.'
nl|'\n'
comment|'# See the License for the specific language governing permissions and'
nl|'\n'
comment|'# limitations under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'nova'
op|'.'
name|'compute'
op|'.'
name|'flavors'
newline|'\n'
name|'import'
name|'nova'
op|'.'
name|'compute'
op|'.'
name|'monitors'
newline|'\n'
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|list_opts
name|'def'
name|'list_opts'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
nl|'\n'
op|'('
string|"'DEFAULT'"
op|','
nl|'\n'
name|'nova'
op|'.'
name|'compute'
op|'.'
name|'flavors'
op|'.'
name|'flavor_opts'
op|','
nl|'\n'
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
dedent|''
endmarker|''
end_unit
| 0
| 0
| 0
|
e003d37ef95a2f23468b50954b5d783dca79c69a
| 7,620
|
py
|
Python
|
scratch/rowe_model_opt3.py
|
Lefebvrelab/SpectralNeuralModels
|
35d34095f573713ea8beb061f0ec13929ce579e9
|
[
"MIT"
] | null | null | null |
scratch/rowe_model_opt3.py
|
Lefebvrelab/SpectralNeuralModels
|
35d34095f573713ea8beb061f0ec13929ce579e9
|
[
"MIT"
] | 8
|
2018-05-03T16:09:01.000Z
|
2019-04-20T18:44:46.000Z
|
scratch/rowe_model_opt3.py
|
Lefebvrelab/SpectralNeuralModels
|
35d34095f573713ea8beb061f0ec13929ce579e9
|
[
"MIT"
] | null | null | null |
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from numpy import pi,abs,exp,log,log10
from scipy import optimize
class RoweOptimization():
'''
Optimizing the Rowe Model onto a training set. The key parameters to adjust
are as follows:
- G_ee
- G_ei
- G_ese
- G_esre
- G_srs
- alpha
- beta
- t0
- A_EMG
- f_EMG
'''
def optimize(self, param_list, tol=None):
'''
Fits the model using the listed parameters.
'''
# Define the function w.r.t. the parameters. The vector P has the same
        # length as params, with 1-1 coordinate correspondence.
EEG_fun = lambda P: self.mod.update_and_compute_P(P, param_list, self.freqs)
chi_fun = lambda P: sum(((EEG_fun(P) - self.output) / self.output)**2)
# Get initial parameter values
P0 = []
for j in range(len(param_list)):
P0.append(getattr(self.mod, param_list[j]))
P0 = np.array(P0)
# Obtain the bounds for the optimization procedure w.r.t. the selected
# parameters.
bounds_list = []
for k in range(len(param_list)):
bound_attr_str = 'bound_' + param_list[k]
# Check if model has the bound attribute.
if not hasattr(self.mod, bound_attr_str):
bounds_list.append((None,None))
else:
bounds_list.append(tuple(getattr(self.mod, bound_attr_str)))
bounds_tuple = tuple(bounds_list)
# Initiate the optimization
result = optimize.minimize(chi_fun, P0, bounds=bounds_list, tol=tol)
return result
if __name__ == '__main__':
task = 'optimize'
if task == 'optimize':
# Get training data
text_file = np.loadtxt('EEG_data.csv', skiprows=1, delimiter=',')
freqs = text_file[1:,0]
powers = text_file[1:,1]*10**24
N = min(len(freqs), len(powers))
train_data = [(freqs[k], powers[k]) for k in range(N)]
rowe_opt = RoweOptimization(train=train_data)
param_list = ['G_ee',
'G_ei',
'G_ese',
'G_esre',
'G_srs',
'alpha',
'beta',
't0',
'A_EMG'
]
result = rowe_opt.optimize(param_list, tol=5)
model_powers = rowe_opt.mod.compute_P(freqs)
plt.plot(freqs, powers, 'r--',
freqs, model_powers)
plt.show()
elif task == 'graph':
freqs = np.linspace(0.2,100, num=50)
mod = Rowe2015Model()
EEG = mod.compute_P(freqs) - mod.compute_P_EEG(freqs)
df_EEG = pd.DataFrame(np.squeeze(EEG))
df_EEG.abs().plot(logx=True,logy=True)
| 28.973384
| 89
| 0.465354
|
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from numpy import pi,abs,exp,log,log10
from scipy import optimize
class Rowe2015Model():
def __init__(self):
# Constants
self.gamma_e = 116 # s^-1
self.r_e = 86 # mm
self.Q_max = 340 # s^-1
self.theta = 12.9 # mV
self.sigma = 3.8 #mV
self.phi_n = 10**-5 # s^-1
self.k0 = 10
self.G_rs = 0.1 # from Abeysuria 2015
self.G_re = 0.2 # from Abeysuria 2015
self.G_sn = 1 # Random <3 John
self.l_x = self.l_y = 0.5
self.fmax = 50
self.freq_min = 5.
self.freq_max = 100.
# Variable parameters
self.G_ee = 5.4
self.G_ei = -7.
self.G_ese = 5.6
self.G_esre = -2.8
self.G_srs = -0.6
self.alpha = 75 #s^-1
self.beta = 75*3.8 #s^-1
self.t0 = 84 # ms
self.A_EMG = 0.5E-12 #s^-1
self.f_EMG = 40 # Hz
# Variable bounds
self.bound_G_ee = [0., 20.]
self.bound_G_ei = [-40., 0.]
self.bound_G_ese = [0., 40.]
self.bound_G_esre = [-40., 0.]
self.bound_G_srs = [-14., -0.1]
self.bound_alpha = [10., 100.]
self.bound_beta = [100., 800.]
self.bound_t0 = [75., 140.]
self.bound_A_EMG = [0., 1E-12]
self.bound_f_EMG = [10., 50.]
def compute_L(self, omega):
alpha, beta = self.alpha, self.beta
L = (1 - 1j*omega/alpha)**-1 * (1 - 1j*omega/beta)**-1
return L
def compute_q2r2(self, omega):
gamma_e = self.gamma_e
G_ei, G_ee = self.G_ei, self.G_ee
G_ese, G_esre, G_srs = self.G_ese, self.G_esre, self.G_srs
t0 = self.t0
L = self.compute_L(omega)
term1 = (1 - 1j*omega / gamma_e)**2
coeff2 = (1 - G_ei * L)**-1
term2_1 = L * G_ee
term2_2 = (L**2 * G_ese + L**3 * G_esre) * exp(1j*omega*t0) / (1 - L**2 * G_srs)
term2 = term2_1 + term2_2
q2r2 = term1 - coeff2 * term2
return q2r2
def compute_k2r2(self, m, n):
k_x = 2*pi*m / self.l_x
k_y = 2*pi*n / self.l_y
k2r2 = (k_x**2 + k_y**2)*self.r_e**2
return k2r2
def compute_P_EEG(self, omega):
G_ei, G_ee = self.G_ei, self.G_ee
G_ese, G_esre, G_srs = self.G_ese, self.G_esre, self.G_srs
t0 = self.t0
r_e = self.r_e
k0 = self.k0
phi_n = self.phi_n
# Other Gs
G_sr = G_srs / self.G_rs
G_es = G_esre / (G_sr * self.G_re)
G_sn = self.G_sn
L = self.compute_L(omega)
q2r2 = self.compute_q2r2(omega)
term1 = G_es * G_sn * phi_n * L**2 * exp(1j*omega*t0/2)
term2 = (1 - G_srs * L**2) * (1 - G_ei * L)
term3 = 0
k_x = 2 * pi / self.l_x
k_y = 2 * pi / self.l_y
fmax = self.fmax
for m in np.arange(-fmax,fmax):
for n in np.arange(-fmax,fmax):
k2r2 = self.compute_k2r2(m,n)
k2 = k2r2 / r_e
Fk = exp(-k2 / k0**2)
term3 += abs(k2r2 + q2r2)**-2 * Fk * k_x * k_y
P_EEG = abs(term1)**2 * abs(term2)**2 * term3
return P_EEG
def compute_P(self, omega):
'''
Compute the power spectrum.
'''
A_EMG, f_EMG = self.A_EMG, self.f_EMG
mod_omega = omega / (2 * pi * f_EMG)
P_EMG = A_EMG * (mod_omega)**2 / (1 + mod_omega**2)**2
P_EEG = self.compute_P_EEG(omega)
return P_EEG + P_EMG
def update_and_compute_P(self, values, param_list, omega):
N = min(len(values), len(param_list))
for k in range(N):
setattr(self, param_list[k], values[k])
return self.compute_P(omega)
class RoweOptimization():
'''
Optimizing the Rowe Model onto a training set. The key parameters to adjust
are as follows:
- G_ee
- G_ei
- G_ese
- G_esre
- G_srs
- alpha
- beta
- t0
- A_EMG
- f_EMG
'''
def __init__(self, train=[]):
self.train = train
# Get frequencies
self.freqs = np.array([train[k][0] for k in range(len(train))])
self.output = np.array([train[k][1] for k in range(len(train))])
self.mod = Rowe2015Model()
def optimize(self, param_list, tol=None):
'''
Fits the model using the listed parameters.
'''
# Define the function w.r.t. the parameters. The vector P has the same
        # length as params, with 1-1 coordinate correspondence.
EEG_fun = lambda P: self.mod.update_and_compute_P(P, param_list, self.freqs)
chi_fun = lambda P: sum(((EEG_fun(P) - self.output) / self.output)**2)
# Get initial parameter values
P0 = []
for j in range(len(param_list)):
P0.append(getattr(self.mod, param_list[j]))
P0 = np.array(P0)
# Obtain the bounds for the optimization procedure w.r.t. the selected
# parameters.
bounds_list = []
for k in range(len(param_list)):
bound_attr_str = 'bound_' + param_list[k]
# Check if model has the bound attribute.
if not hasattr(self.mod, bound_attr_str):
bounds_list.append((None,None))
else:
bounds_list.append(tuple(getattr(self.mod, bound_attr_str)))
bounds_tuple = tuple(bounds_list)
# Initiate the optimization
result = optimize.minimize(chi_fun, P0, bounds=bounds_list, tol=tol)
return result
if __name__ == '__main__':
task = 'optimize'
if task == 'optimize':
# Get training data
text_file = np.loadtxt('EEG_data.csv', skiprows=1, delimiter=',')
freqs = text_file[1:,0]
powers = text_file[1:,1]*10**24
N = min(len(freqs), len(powers))
train_data = [(freqs[k], powers[k]) for k in range(N)]
rowe_opt = RoweOptimization(train=train_data)
param_list = ['G_ee',
'G_ei',
'G_ese',
'G_esre',
'G_srs',
'alpha',
'beta',
't0',
'A_EMG'
]
result = rowe_opt.optimize(param_list, tol=5)
model_powers = rowe_opt.mod.compute_P(freqs)
plt.plot(freqs, powers, 'r--',
freqs, model_powers)
plt.show()
elif task == 'graph':
freqs = np.linspace(0.2,100, num=50)
mod = Rowe2015Model()
EEG = mod.compute_P(freqs) - mod.compute_P_EEG(freqs)
df_EEG = pd.DataFrame(np.squeeze(EEG))
df_EEG.abs().plot(logx=True,logy=True)
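A toy restatement of the fitting pattern in `RoweOptimization.optimize` above: a relative chi-square is minimised with `scipy.optimize.minimize` under per-parameter bounds, here for a straight line rather than the full Rowe spectrum.
import numpy as np
from scipy import optimize

x = np.linspace(1, 10, 20)
y = 2.0 * x + 1.0                                             # synthetic "training" data
chi_fun = lambda p: np.sum(((p[0] * x + p[1] - y) / y) ** 2)  # relative chi-square
result = optimize.minimize(chi_fun, x0=[1.0, 0.0], bounds=[(0.0, 5.0), (-2.0, 2.0)])
print(result.x)                                               # close to [2.0, 1.0]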
| 3,761
| 611
| 57
|
d3530afd4b1ee97d2fb3a9b0e9d548a215b5bb8c
| 1,384
|
py
|
Python
|
src/graph_visualize.py
|
sawyerWeld/PrefGAN
|
295a165947fc300b2dd0754607076c780fbcafa1
|
[
"MIT"
] | null | null | null |
src/graph_visualize.py
|
sawyerWeld/PrefGAN
|
295a165947fc300b2dd0754607076c780fbcafa1
|
[
"MIT"
] | null | null | null |
src/graph_visualize.py
|
sawyerWeld/PrefGAN
|
295a165947fc300b2dd0754607076c780fbcafa1
|
[
"MIT"
] | null | null | null |
#graph_visualize.py
from graphviz import Digraph
import pairwise
# Given a vector of the form generated in pairwise.py for
# easy reading into NNs, produce a diagram of the represented graph
| 32.952381
| 81
| 0.609827
|
#graph_visualize.py
from graphviz import Digraph
import pairwise
# Given a vector of the form generated in pairwise.py for
# easy reading into NNs, produce a diagram of the represented graph
def vec_to_graph(vec, name='no_name_graph', save=False, fromTorch=True):
matrix = None
if fromTorch:
matrix = pairwise.vec_to_matrix(vec.numpy())
else:
matrix = pairwise.vec_to_matrix(vec)
n_cands = len(matrix[0])
dot = Digraph(comment='Preference Graph',format='png')
# init nodes
for i, row in enumerate(matrix):
dot.node(chr(i+97), 'alt {}'.format(i+1))
# init edges
for i, row in enumerate(matrix):
        # only care about the upper triangular part
li = row[i+1:]
for j, alt in enumerate(li):
# math got confusing
a = i+1
b = i+j+2
p_a = chr(a+96)
p_b = chr(b+96)
if alt == 1:
dot.edge(p_a, p_b)
elif alt == -1:
dot.edge(p_b, p_a)
file_output = '../diagrams/graph_views/{}'.format(name)
if save:
dot.render(file_output,view=False)
return dot
def vote_to_graph(vote, name='no_name_graph', save=False):
if 0 in vote:
raise Exception('There should be no 0 values in vote vector')
return vec_to_graph(pairwise.process_vote(vote), name, save, fromTorch=False)
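A tiny, hypothetical illustration of the graphviz pattern `vec_to_graph` relies on above: one node per alternative and one directed edge per pairwise preference.
from graphviz import Digraph

dot = Digraph(comment='Preference Graph', format='png')
for i in range(3):
    dot.node(chr(i + 97), 'alt {}'.format(i + 1))
dot.edge('a', 'b')   # alt 1 preferred to alt 2
dot.edge('b', 'c')   # alt 2 preferred to alt 3
print(dot.source)    # DOT text; dot.render('demo') would also write demo.png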
| 1,145
| 0
| 45
|
2d116e962789c83860d1307c9a4a5d5c894be102
| 3,449
|
py
|
Python
|
tests/corpora/test_parallel_text_corpus.py
|
johnml1135/machine.py
|
7bfb668b81d6af476f7ee797900e111c3027d542
|
[
"MIT"
] | 2
|
2021-09-14T15:41:14.000Z
|
2021-09-14T15:53:46.000Z
|
tests/corpora/test_parallel_text_corpus.py
|
johnml1135/machine.py
|
7bfb668b81d6af476f7ee797900e111c3027d542
|
[
"MIT"
] | 2
|
2021-11-04T09:12:26.000Z
|
2021-11-08T08:35:36.000Z
|
tests/corpora/test_parallel_text_corpus.py
|
johnml1135/machine.py
|
7bfb668b81d6af476f7ee797900e111c3027d542
|
[
"MIT"
] | 1
|
2021-11-03T14:45:11.000Z
|
2021-11-03T14:45:11.000Z
|
from machine.corpora import (
DictionaryTextAlignmentCorpus,
DictionaryTextCorpus,
MemoryText,
MemoryTextAlignmentCollection,
ParallelTextCorpus,
)
| 45.381579
| 103
| 0.76138
|
from machine.corpora import (
DictionaryTextAlignmentCorpus,
DictionaryTextCorpus,
MemoryText,
MemoryTextAlignmentCollection,
ParallelTextCorpus,
)
def test_texts_no_texts() -> None:
source_corpus = DictionaryTextCorpus()
target_corpus = DictionaryTextCorpus()
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus)
assert not any(parallel_corpus.texts)
def test_texts_no_missing_texts() -> None:
source_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"), MemoryText("text3"))
target_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"), MemoryText("text3"))
alignment_corpus = DictionaryTextAlignmentCorpus(
MemoryTextAlignmentCollection("text1"),
MemoryTextAlignmentCollection("text2"),
MemoryTextAlignmentCollection("text3"),
)
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus, alignment_corpus)
texts = parallel_corpus.texts
assert [t.id for t in texts] == ["text1", "text2", "text3"]
def test_texts_missing_text() -> None:
source_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"), MemoryText("text3"))
target_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text3"))
alignment_corpus = DictionaryTextAlignmentCorpus(
MemoryTextAlignmentCollection("text1"), MemoryTextAlignmentCollection("text3")
)
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus, alignment_corpus)
texts = parallel_corpus.texts
assert [t.id for t in texts] == ["text1", "text3"]
def test_get_texts_missing_target_text_all_source_segments() -> None:
source_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"), MemoryText("text3"))
target_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text3"))
alignment_corpus = DictionaryTextAlignmentCorpus(
MemoryTextAlignmentCollection("text1"), MemoryTextAlignmentCollection("text3")
)
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus, alignment_corpus)
texts = parallel_corpus.get_texts(all_source_segments=True)
assert [t.id for t in texts] == ["text1", "text2", "text3"]
def test_get_texts_missing_source_text_all_target_segments() -> None:
source_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text3"))
target_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"), MemoryText("text3"))
alignment_corpus = DictionaryTextAlignmentCorpus(
MemoryTextAlignmentCollection("text1"), MemoryTextAlignmentCollection("text3")
)
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus, alignment_corpus)
texts = parallel_corpus.get_texts(all_target_segments=True)
assert [t.id for t in texts] == ["text1", "text2", "text3"]
def test_get_texts_missing_source_and_target_text_all_source_and_target_segments() -> None:
source_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text3"))
target_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"))
alignment_corpus = DictionaryTextAlignmentCorpus(MemoryTextAlignmentCollection("text1"))
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus, alignment_corpus)
texts = parallel_corpus.get_texts(all_source_segments=True, all_target_segments=True)
assert [t.id for t in texts] == ["text1", "text2", "text3"]
| 3,137
| 0
| 138
|
5e7f49dd4d34480adfda849b4671baf126b95da5
| 523
|
py
|
Python
|
tests/test_vote.py
|
james-muriithi/pitches
|
e7da364f9525847c2ed8d88aea9a81a87c4c113b
|
[
"Unlicense"
] | null | null | null |
tests/test_vote.py
|
james-muriithi/pitches
|
e7da364f9525847c2ed8d88aea9a81a87c4c113b
|
[
"Unlicense"
] | null | null | null |
tests/test_vote.py
|
james-muriithi/pitches
|
e7da364f9525847c2ed8d88aea9a81a87c4c113b
|
[
"Unlicense"
] | null | null | null |
from unicodedata import name
import unittest
from app.models import Vote, User
| 32.6875
| 88
| 0.6826
|
from unicodedata import name
import unittest
from app.models import Vote, User
class TestVote(unittest.TestCase):
def setUp(self):
self.user = User(username="james", id=1, name="James Muriithi", email="m@a.com")
self.vote = Vote(vote=1, user=self.user, pitch_id=1)
def test_instance(self):
self.assertTrue(isinstance(self.vote, Vote))
def test_vote_user(self):
self.assertTrue(isinstance(self.vote.user, User))
self.assertEquals(self.vote.user.id, self.user.id)
| 324
| 13
| 107
|
cedaee8ae0d46d7b6db7e6e560962ac51696012d
| 2,076
|
py
|
Python
|
7/challenge2.py
|
roryeiffe/Adent-of-Code
|
80f123663fcf04bf5f0d6733807b4a2dd53bc68c
|
[
"MIT"
] | null | null | null |
7/challenge2.py
|
roryeiffe/Adent-of-Code
|
80f123663fcf04bf5f0d6733807b4a2dd53bc68c
|
[
"MIT"
] | null | null | null |
7/challenge2.py
|
roryeiffe/Adent-of-Code
|
80f123663fcf04bf5f0d6733807b4a2dd53bc68c
|
[
"MIT"
] | null | null | null |
import sys
# recursive search through bag dictionary:
# given a bag string, return only the name of the bag:
# ex: 1 light blue returns light blue
# given a bag string, return the quantity:
# ex: 1 light blue returns 1
f = open(sys.argv[1],"r")
L = []
for item in f:
L.append(item.strip())
# dictionary where key is bag and values are which bags can be stored:
bag_contains = dict()
for rule in L:
rule_list = rule.split(" ")
# this bag is always going to be the first 2 words:
this_bag = rule_list[0] + " " + rule_list[1]
# insert into dictionary:
if this_bag not in bag_contains:
bag_contains[this_bag] = []
i = 3
# loop through rest of list:
while i < len(rule_list):
word = rule_list[i].strip(",").strip(".")
# if we find a bag, grab the last 2 words (these will be the color)
if (word == "bag" or word == "bags"):
current_bag = rule_list[i-3] + " " + rule_list[i-2] + " " + rule_list[i-1]
# this means no bags
if get_bag(current_bag) != "no other":
bag_contains[this_bag].append(current_bag)
i += 1
print(search(bag_contains,"2 dark green"))
print(search(bag_contains,"1 shiny gold"))
| 29.239437
| 77
| 0.69316
|
import sys
# recursive search through bag dictionary:
def search(bag_contains, current):
children = bag_contains[get_bag(current)]
# base cases
# if this bag does not contain any other bags:
# each level is how many bags are in this current bag (including children)
# base case: since there are no children, we are just counting how many bags
# are in our current bag, and no children
if len(children) == 0:
print(current)
return int(get_num(current))
recursion = []
for child in children:
grandchildren = bag_contains[get_bag(child)]
if len(grandchildren) == 0:
recursion.append(int(get_num(child)))
else:
recursion.append(search(bag_contains,child))
print(get_bag(current), "has", recursion, "inner bags")
# make sure we count this current bag:
return int(get_num(current)) + int(get_num(current))*sum(recursion)
# given a bag string, return only the name of the bag:
# ex: 1 light blue returns light blue
def get_bag(bag_string):
return bag_string.split(" ")[1] + " " + bag_string.split(" ")[2]
# given a bag string, return the quantity:
# ex: 1 light blue returns 1
def get_num(bag_string):
return bag_string.split(" ")[0]
f = open(sys.argv[1],"r")
L = []
for item in f:
L.append(item.strip())
# dictionary where key is bag and values are which bags can be stored:
bag_contains = dict()
for rule in L:
rule_list = rule.split(" ")
# this bag is always going to be the first 2 words:
this_bag = rule_list[0] + " " + rule_list[1]
# insert into dictionary:
if this_bag not in bag_contains:
bag_contains[this_bag] = []
i = 3
# loop through rest of list:
while i < len(rule_list):
word = rule_list[i].strip(",").strip(".")
# if we find a bag, grab the last 2 words (these will be the color)
if (word == "bag" or word == "bags"):
current_bag = rule_list[i-3] + " " + rule_list[i-2] + " " + rule_list[i-1]
# this means no bags
if get_bag(current_bag) != "no other":
bag_contains[this_bag].append(current_bag)
i += 1
print(search(bag_contains,"2 dark green"))
print(search(bag_contains,"1 shiny gold"))
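The same recursion, restated compactly on a hypothetical rule set (a plain dict mapping each bag to its (count, inner bag) pairs), which may make the counting step easier to follow.
rules = {
    "shiny gold": [(1, "dark olive"), (2, "vibrant plum")],
    "dark olive": [],
    "vibrant plum": [],
}

def bags_inside(bag):
    # each inner bag counts itself (n) plus everything it contains (n * recursive count)
    return sum(n + n * bags_inside(inner) for n, inner in rules[bag])

print(bags_inside("shiny gold"))  # 3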
| 877
| 0
| 66
|
1bdbba7f4e63c6bef0d5a716b758d10956003075
| 326
|
py
|
Python
|
samples/WebApplication/OneUserScenario.py
|
jproudlo/PyModel
|
2ab0e2cf821807206725adaa425409b0c28929b7
|
[
"BSD-3-Clause"
] | 61
|
2015-01-29T16:18:51.000Z
|
2021-09-28T10:14:02.000Z
|
samples/WebApplication/OneUserScenario.py
|
vikstr/PyModel
|
4fff616fe0fd8342c91a42d9db5d4097a179dff8
|
[
"BSD-3-Clause"
] | 2
|
2015-02-04T11:57:53.000Z
|
2021-07-18T20:59:55.000Z
|
samples/WebApplication/OneUserScenario.py
|
vikstr/PyModel
|
4fff616fe0fd8342c91a42d9db5d4097a179dff8
|
[
"BSD-3-Clause"
] | 34
|
2015-02-04T12:00:29.000Z
|
2022-03-14T07:41:25.000Z
|
"One user repeatedly logs in, logs out. Allow interleaving with other actions"
from WebModel import Login, Logout
actions = (Login, Logout) # just these to allow interleaving
initial = 0
accepting = (0,)
graph = ((0, (Login, ( 'VinniPuhh', 'Correct' ), 'Success'), 1),
(1, (Logout, ( 'VinniPuhh', ), None), 0))
| 25.076923
| 79
| 0.650307
|
"One user repeatedly logs in, logs out. Allow interleaving with other actions"
from WebModel import Login, Logout
actions = (Login, Logout) # just these to allow interleaving
initial = 0
accepting = (0,)
graph = ((0, (Login, ( 'VinniPuhh', 'Correct' ), 'Success'), 1),
(1, (Logout, ( 'VinniPuhh', ), None), 0))
| 0
| 0
| 0
|
cf94635475a33564a16e558dee889bd78cf10571
| 542
|
py
|
Python
|
tests/test_statics.py
|
beastbikes/django-only-admin
|
c89b782b92edbb1f75151e71163c0708afacd4f9
|
[
"MIT"
] | 32
|
2016-11-24T08:33:10.000Z
|
2017-12-18T00:25:00.000Z
|
tests/test_statics.py
|
beastbikes/django-only-admin
|
c89b782b92edbb1f75151e71163c0708afacd4f9
|
[
"MIT"
] | 15
|
2016-11-30T08:28:56.000Z
|
2017-09-20T15:54:18.000Z
|
tests/test_statics.py
|
beastbikes/django-only-admin
|
c89b782b92edbb1f75151e71163c0708afacd4f9
|
[
"MIT"
] | 9
|
2016-11-25T02:14:24.000Z
|
2017-12-06T13:22:51.000Z
|
import os
from django.test import TestCase
from django.contrib.staticfiles import finders
| 27.1
| 62
| 0.710332
|
import os
from django.test import TestCase
from django.contrib.staticfiles import finders
class StaticTestCase(TestCase):
def test_font_awesome(self):
path = 'plugins/font-awesome/css/font-awesome.min.css'
absolute_path = finders.find(path)
assert absolute_path is not None
assert os.path.exists(absolute_path)
path = 'plugins/font-awesome/fonts/FontAwesome.otf'
absolute_path = finders.find(path)
assert absolute_path is not None
assert os.path.exists(absolute_path)
| 389
| 10
| 50
|
0c9884ea394c7fd5bab4fbd1dc9bebd53080457b
| 4,264
|
py
|
Python
|
FaceSwap/zad2.py
|
i-khan/FaceSwap
|
e9f5b0a95af7ba62ce6978a5c7a4511086948a82
|
[
"MIT"
] | null | null | null |
FaceSwap/zad2.py
|
i-khan/FaceSwap
|
e9f5b0a95af7ba62ce6978a5c7a4511086948a82
|
[
"MIT"
] | null | null | null |
FaceSwap/zad2.py
|
i-khan/FaceSwap
|
e9f5b0a95af7ba62ce6978a5c7a4511086948a82
|
[
"MIT"
] | null | null | null |
import dlib
import cv2
import numpy as np
import sys
import models
import NonLinearLeastSquares
import ImageProcessing
from drawing import *
import FaceRendering
import utils
import os
import subprocess
print "Press T to draw the keypoints and the 3D model"
print "Press R to start recording to a video file"
#you need to download shape_predictor_68_face_landmarks.dat from the link below and unpack it where the solution file is
#http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
#loading the keypoint detection model, the image and the 3D model
predictor_path = "../shape_predictor_68_face_landmarks.dat"
face_cvv_detector_path ="../mmod_human_face_detector.dat"
image_name = "../bnl/images/"+sys.argv[1]
#the smaller this value gets the faster the detection will work
#if it is too small, the user's face might not be detected
maxImageSizeForDetection = 960
#detector = dlib.cnn_face_detection_model_v1(face_cvv_detector_path)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
mean3DShape, blendshapes, mesh, idxs3D, idxs2D = utils.load3DFaceModel("../candide.npz")
projectionModel = models.OrthographicProjectionBlendshapes(blendshapes.shape[0])
modelParams = None
lockedTranslation = False
drawOverlay = False
cap = cv2.VideoCapture("../data/"+sys.argv[2]+".mp4")
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video stream or file")
cameraImg = cap.read()[1]
writer = None
if writer is None:
print "Starting video writer"
writer = cv2.VideoWriter("../bnl/videos/"+sys.argv[1]+"-out.avi", fourcc, 25,(cameraImg.shape[1], cameraImg.shape[0]))
if writer.isOpened():
print "Writer succesfully opened"
else:
writer = None
print "Writer opening failed"
textureImg = cv2.imread(image_name)
textureCoords = utils.getFaceTextureCoords(textureImg, mean3DShape, blendshapes, idxs2D, idxs3D, detector, predictor)
renderer = FaceRendering.FaceRenderer(cameraImg, textureImg, textureCoords, mesh)
while True:
cameraImg = cap.read()[1]
try:
shapes2D = utils.getFaceKeypoints(cameraImg, detector, predictor, maxImageSizeForDetection)
if shapes2D is not None:
for shape2D in shapes2D:
#3D model parameter initialization
modelParams = projectionModel.getInitialParameters(mean3DShape[:, idxs3D], shape2D[:, idxs2D])
#3D model parameter optimization
modelParams = NonLinearLeastSquares.GaussNewton(modelParams, projectionModel.residual, projectionModel.jacobian, ([mean3DShape[:, idxs3D], blendshapes[:, :, idxs3D]], shape2D[:, idxs2D]), verbose=0)
#rendering the model to an image
shape3D = utils.getShape3D(mean3DShape, blendshapes, modelParams)
renderedImg = renderer.render(shape3D)
#blending of the rendered face with the image
mask = np.copy(renderedImg[:, :, 0])
renderedImg = ImageProcessing.colorTransfer(cameraImg, renderedImg, mask)
cameraImg = ImageProcessing.blendImages(renderedImg, cameraImg, mask,0.1)
#drawing of the mesh and keypoints
if drawOverlay:
drawPoints(cameraImg, shape2D.T)
drawProjectedShape(cameraImg, [mean3DShape, blendshapes], projectionModel, mesh, modelParams, lockedTranslation)
if writer is not None:
writer.write(cameraImg)
cv2.imshow('image', cameraImg)
key = cv2.waitKey(1)
if key == 27:
break
if key == ord('t'):
drawOverlay = not drawOverlay
except:
print("An exception occurred")
break
print "Stopping video writer"
writer.release()
writer = None
os.chdir('C://Users/Asus/')
# ffmpeg -i 1569831566308.jpeg-out.avi -i ../../data/superVideo2.mp4 -map 0:0 -map 1:1 -shortest 1569831566308.jpeg-out.mp4
subprocess.call(["ffmpeg", "-i", "../bnl/videos/"+sys.argv[1]+"-out.avi", "-i ", "../data/"+sys.argv[2]+".mp4", "-map ", "0:0", "-map ", "1:1", "-shortest", "../bnl/videos/"+sys.argv[1]+"-out.mp4" ])
| 36.135593
| 214
| 0.692542
|
import dlib
import cv2
import numpy as np
import sys
import models
import NonLinearLeastSquares
import ImageProcessing
from drawing import *
import FaceRendering
import utils
import os
import subprocess
print "Press T to draw the keypoints and the 3D model"
print "Press R to start recording to a video file"
#you need to download shape_predictor_68_face_landmarks.dat from the link below and unpack it where the solution file is
#http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
#loading the keypoint detection model, the image and the 3D model
predictor_path = "../shape_predictor_68_face_landmarks.dat"
face_cvv_detector_path ="../mmod_human_face_detector.dat"
image_name = "../bnl/images/"+sys.argv[1]
#the smaller this value gets the faster the detection will work
#if it is too small, the user's face might not be detected
maxImageSizeForDetection = 960
#detector = dlib.cnn_face_detection_model_v1(face_cvv_detector_path)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
mean3DShape, blendshapes, mesh, idxs3D, idxs2D = utils.load3DFaceModel("../candide.npz")
projectionModel = models.OrthographicProjectionBlendshapes(blendshapes.shape[0])
modelParams = None
lockedTranslation = False
drawOverlay = False
cap = cv2.VideoCapture("../data/"+sys.argv[2]+".mp4")
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video stream or file")
cameraImg = cap.read()[1]
writer = None
if writer is None:
print "Starting video writer"
writer = cv2.VideoWriter("../bnl/videos/"+sys.argv[1]+"-out.avi", fourcc, 25,(cameraImg.shape[1], cameraImg.shape[0]))
if writer.isOpened():
print "Writer succesfully opened"
else:
writer = None
print "Writer opening failed"
textureImg = cv2.imread(image_name)
textureCoords = utils.getFaceTextureCoords(textureImg, mean3DShape, blendshapes, idxs2D, idxs3D, detector, predictor)
renderer = FaceRendering.FaceRenderer(cameraImg, textureImg, textureCoords, mesh)
while True:
cameraImg = cap.read()[1]
try:
shapes2D = utils.getFaceKeypoints(cameraImg, detector, predictor, maxImageSizeForDetection)
if shapes2D is not None:
for shape2D in shapes2D:
#3D model parameter initialization
modelParams = projectionModel.getInitialParameters(mean3DShape[:, idxs3D], shape2D[:, idxs2D])
#3D model parameter optimization
modelParams = NonLinearLeastSquares.GaussNewton(modelParams, projectionModel.residual, projectionModel.jacobian, ([mean3DShape[:, idxs3D], blendshapes[:, :, idxs3D]], shape2D[:, idxs2D]), verbose=0)
#rendering the model to an image
shape3D = utils.getShape3D(mean3DShape, blendshapes, modelParams)
renderedImg = renderer.render(shape3D)
#blending of the rendered face with the image
mask = np.copy(renderedImg[:, :, 0])
renderedImg = ImageProcessing.colorTransfer(cameraImg, renderedImg, mask)
cameraImg = ImageProcessing.blendImages(renderedImg, cameraImg, mask,0.1)
#drawing of the mesh and keypoints
if drawOverlay:
drawPoints(cameraImg, shape2D.T)
drawProjectedShape(cameraImg, [mean3DShape, blendshapes], projectionModel, mesh, modelParams, lockedTranslation)
if writer is not None:
writer.write(cameraImg)
cv2.imshow('image', cameraImg)
key = cv2.waitKey(1)
if key == 27:
break
if key == ord('t'):
drawOverlay = not drawOverlay
except:
print("An exception occurred")
break
print "Stopping video writer"
writer.release()
writer = None
os.chdir('C://Users/Asus/')
# ffmpeg -i 1569831566308.jpeg-out.avi -i ../../data/superVideo2.mp4 -map 0:0 -map 1:1 -shortest 1569831566308.jpeg-out.mp4
subprocess.call(["ffmpeg", "-i", "../bnl/videos/"+sys.argv[1]+"-out.avi", "-i ", "../data/"+sys.argv[2]+".mp4", "-map ", "0:0", "-map ", "1:1", "-shortest", "../bnl/videos/"+sys.argv[1]+"-out.mp4" ])
| 0
| 0
| 0
|
29895e9f595bd0e2bfb7657a094cd535ad76784c
| 586
|
py
|
Python
|
Desafios/Desafio98.py
|
Felix-xilef/Curso-de-Python
|
cdff7c7f3850e6326e274c8c1987b9e1a18ce910
|
[
"MIT"
] | null | null | null |
Desafios/Desafio98.py
|
Felix-xilef/Curso-de-Python
|
cdff7c7f3850e6326e274c8c1987b9e1a18ce910
|
[
"MIT"
] | null | null | null |
Desafios/Desafio98.py
|
Felix-xilef/Curso-de-Python
|
cdff7c7f3850e6326e274c8c1987b9e1a18ce910
|
[
"MIT"
] | null | null | null |
from auxiliar import receberInt
from time import sleep
# main
contador(1, 10, 1)
contador(10, 0, 2)
contador(receberInt('Digite o inicio: '), receberInt('Digite o fim: '), receberInt('Digite o passo: '))
input('\n\nPressione <enter> para continuar')
| 23.44
| 103
| 0.583618
|
from auxiliar import receberInt
from time import sleep
def contador(inicio, fim, passo):
print('-'*30)
print(f'Contagem de {inicio} até {fim} de {passo} em {passo}')
if inicio > fim:
passo *= -1
fim -= 1
else:
fim += 1
for i in range(inicio, fim, passo):
sleep(0.5)
print(f'{i} -> ', end='')
print('FIM')
print('-'*30)
# main
contador(1, 10, 1)
contador(10, 0, 2)
contador(receberInt('Digite o inicio: '), receberInt('Digite o fim: '), receberInt('Digite o passo: '))
input('\n\nPressione <enter> para continuar')
| 311
| 0
| 23
|
31311b055c16c8db5e97123b97dbdb3720180513
| 368
|
py
|
Python
|
blog/migrations/0030_auto_20190705_1244.py
|
Labbit-kw/hologram-project
|
708b773e932f6ad0f92d1d9e2e57cfbd8b17b933
|
[
"MIT"
] | null | null | null |
blog/migrations/0030_auto_20190705_1244.py
|
Labbit-kw/hologram-project
|
708b773e932f6ad0f92d1d9e2e57cfbd8b17b933
|
[
"MIT"
] | null | null | null |
blog/migrations/0030_auto_20190705_1244.py
|
Labbit-kw/hologram-project
|
708b773e932f6ad0f92d1d9e2e57cfbd8b17b933
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-07-05 12:44
from django.db import migrations
| 19.368421
| 47
| 0.589674
|
# Generated by Django 2.2.2 on 2019-07-05 12:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0029_auto_20190705_1240'),
]
operations = [
migrations.RenameField(
model_name='uploadboard',
old_name='file_title',
new_name='title',
),
]
| 0
| 262
| 23
|
7af1d31414874444295da4f2cff38f7fc177697e
| 4,889
|
py
|
Python
|
tx_rx_list_creator_milp.py
|
WiSig-dataset/wisig-subset-creation
|
bc8c6bd352a8c184a571e6b4c2d7a890cdfff3d8
|
[
"BSD-3-Clause"
] | 1
|
2022-03-17T07:44:24.000Z
|
2022-03-17T07:44:24.000Z
|
tx_rx_list_creator_milp.py
|
WiSig-dataset/wisig-subset-creation
|
bc8c6bd352a8c184a571e6b4c2d7a890cdfff3d8
|
[
"BSD-3-Clause"
] | null | null | null |
tx_rx_list_creator_milp.py
|
WiSig-dataset/wisig-subset-creation
|
bc8c6bd352a8c184a571e6b4c2d7a890cdfff3d8
|
[
"BSD-3-Clause"
] | null | null | null |
import pickle
import numpy as np
import gurobipy as gp
from gurobipy import GRB
from tqdm.notebook import trange, tqdm
| 41.786325
| 141
| 0.599305
|
import pickle
import numpy as np
import gurobipy as gp
from gurobipy import GRB
from tqdm.notebook import trange, tqdm
def solve_for_lists_milp(num_tx = None, min_sig = None, min_sig_low = None, satisfaction=1.0, verbose=False):
with open('data_summary.pkl', 'rb') as f:
d=pickle.load(f)
capture_date_list=d['capture_date_list']
tx_list= d['tx_list']
rx_list= d['rx_list']
mat_date=np.array(d['mat_date'])
mat_date_eq=np.array(d['mat_date_eq'])
n_tx = len(tx_list)
n_rx = len(rx_list)
num_days = 4;
if(num_tx is None):
raise ValueError("num_tx must be specified. Exiting...")
return
if(min_sig is None):
raise ValueError("min_sig must be specified. Exiting...")
return
if(min_sig_low is None): min_sig_low = 0;
M=3000 # Upper-bound to num_signals for any rx-tx pair
mat_date_thresh = np.double(mat_date > min_sig);
mat_date_eq_thresh = np.double(mat_date_eq > min_sig);
m = gp.Model("mip_dual_tol")
if(verbose): m.params.outputflag = 1
else: m.params.outputflag = 0
T = m.addMVar(shape=n_tx, vtype=GRB.BINARY, name="T")
R = m.addMVar(shape=(n_rx, 1), vtype=GRB.BINARY, name="R")
Y = m.addMVar(shape=(n_rx, n_tx), vtype=GRB.BINARY, name="Y")
min_sig_var = m.addMVar(shape = 1, vtype=GRB.INTEGER, name="min_sig_var")
Z = m.addMVar(shape=(n_rx, n_tx), vtype=GRB.INTEGER, name="Z")
Q = m.addMVar(shape=(num_days, n_rx, n_tx), vtype=GRB.BINARY, name="Q")
Q_eq = m.addMVar(shape=(num_days, n_rx, n_tx), vtype=GRB.BINARY, name="Q_eq")
for rxid in range(n_rx):
for txid in range(n_tx):
# Y[rxid][txid] = R[rxid] AND T[txid]
m.addConstr(Y[rxid][txid] <= T[txid])
m.addConstr(Y[rxid][txid] <= R[rxid, 0])
m.addConstr(Y[rxid][txid] >= T[txid] + R[rxid, 0] - 1)
# Z[rxid][txid] = 0 if Y[rxid][txid] == False. Z[rxid][txid] = min_sig if Y[rxid][txid] == True.
m.addConstr(Z[rxid][txid] <= Y[rxid][txid] * M)
m.addConstr(Z[rxid][txid] >= 0)
m.addConstr(Z[rxid][txid] <= min_sig_var[0])
m.addConstr(Z[rxid][txid] >= min_sig_var[0] - (1-Y[rxid][txid]) * M)
for day_id in range(num_days):
# Q[rxid][txid] = Y[rxid][txid] AND (mat_date[rxid][txid] > min_sig)
m.addConstr(Q[day_id][rxid][txid] <= Y[rxid][txid])
m.addConstr(Q[day_id][rxid][txid] <= mat_date_thresh[day_id][rxid][txid])
m.addConstr(Q[day_id][rxid][txid] >= Y[rxid][txid] + mat_date_thresh[day_id][rxid][txid] - 1)
# Q_eq[rxid][txid] = Y[rxid][txid] AND (mat_date_eq[rxid][txid] > min_sig)
m.addConstr(Q_eq[day_id][rxid][txid] <= Y[rxid][txid])
m.addConstr(Q_eq[day_id][rxid][txid] <= mat_date_eq_thresh[day_id][rxid][txid])
m.addConstr(Q_eq[day_id][rxid][txid] >= Y[rxid][txid] + mat_date_eq_thresh[day_id][rxid][txid] - 1)
# mat_date[day_id][rxid][txid] >= min_sig, if R[rxid] == True AND T[txid] == True
m.addConstr(mat_date[day_id][rxid][txid] * Y[rxid][txid] - Z[rxid][txid] >= 0)
# mat_date_eq[day_id][rxid][txid] >= min_sig, if R[rxid] == True AND T[txid] == True
m.addConstr(mat_date_eq[day_id][rxid][txid] * Y[rxid][txid] - Z[rxid][txid] >= 0)
m.addConstr(T.sum() == num_tx) # This can be made m.addConstr(T.sum() >= num_tx) at the expense of larger solve times, but larger dataset
m.addConstr(min_sig_var >= min_sig_low)
for i in range(n_rx):
for day_id in range(num_days):
m.addConstr(Q[day_id, i, :].sum() >= R[i] * num_tx * satisfaction)
m.addConstr(Q_eq[day_id, i, :].sum() >= R[i] * num_tx * satisfaction)
# dual-objective optimization. maximixing n_rx has higher priority over maximizing min_sig
m.setObjectiveN(R.sum(), 0, 10)
m.setObjectiveN(min_sig_var - min_sig_low, 1, 0)
m.ModelSense = GRB.MAXIMIZE
m.optimize()
if m.status == GRB.OPTIMAL:
min_sig_value = min_sig_var.X;
R_V, T_V = np.array(R.X, dtype=bool).squeeze(), np.array(T.X, dtype=bool)
# min_satisfaction = min(np.min((mat_date[:,:,T_V][:,R_V,:]>satisfaction).sum(axis=2) / np.sum(T_V)),
# np.min((mat_date_eq[:,:,T_V][:,R_V,:]>satisfaction).sum(axis=2) / np.sum(T_V)));
print(f"Found optimal solution with {np.sum(R_V)} Rx")
else:
print("Could not find optimal solution.")
T_V = np.array(T.X, dtype=bool)
R_V = np.array(R.X, dtype=bool).squeeze()
op_tx_list = apply_list(tx_list,T_V)
op_rx_list = apply_list(rx_list,R_V)
return op_tx_list,op_rx_list
def apply_list(lst, bool_list):
return [lst[ii] for ii in range(bool_list.size) if bool_list[ii]]
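A minimal sketch of the linearization trick used throughout the constraints above — encoding y = a AND b for binary variables with three inequalities — assuming gurobipy and a Gurobi license are available.
import gurobipy as gp
from gurobipy import GRB

m = gp.Model("and_demo")
m.params.outputflag = 0
a = m.addVar(vtype=GRB.BINARY, name="a")
b = m.addVar(vtype=GRB.BINARY, name="b")
y = m.addVar(vtype=GRB.BINARY, name="y")
m.addConstr(y <= a)            # y can only be 1 if a is 1 ...
m.addConstr(y <= b)            # ... and b is 1
m.addConstr(y >= a + b - 1)    # y must be 1 when both are 1
m.setObjective(y, GRB.MAXIMIZE)
m.optimize()
print(a.X, b.X, y.X)           # 1.0 1.0 1.0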
| 4,719
| 0
| 46
|
b84078c4cb83dfb19f774e9116faa6561ca0090b
| 15,948
|
py
|
Python
|
alphapept/matching.py
|
PatrickvanZalm/alphapept
|
58dbe8b75c1a5384f970893c7cc9b7c929a1cfe2
|
[
"Apache-2.0"
] | null | null | null |
alphapept/matching.py
|
PatrickvanZalm/alphapept
|
58dbe8b75c1a5384f970893c7cc9b7c929a1cfe2
|
[
"Apache-2.0"
] | null | null | null |
alphapept/matching.py
|
PatrickvanZalm/alphapept
|
58dbe8b75c1a5384f970893c7cc9b7c929a1cfe2
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/09_matching.ipynb (unless otherwise specified).
__all__ = ['calculate_distance', 'calib_table', 'align', 'calculate_deltas', 'align_files', 'align_datasets',
'get_probability', 'match_datasets']
# Cell
import pandas as pd
import numpy as np
def calculate_distance(table_1: pd.DataFrame, table_2: pd.DataFrame, offset_dict: dict, calib: bool = False) -> (list, int):
"""Calculate the distance between two precursors for different columns
Distance can either be relative or absolute.
An example for a minimal offset_dict is: offset_dict = {'mass':'absolute'}
Args:
        table_1 (pd.DataFrame): Dataframe with precursor data.
        table_2 (pd.DataFrame): Dataframe with precursor data.
offset_dict (dict): Dictionary with column names and how the distance should be calculated.
calib (bool): Flag to indicate that distances should be calculated on calibrated columns. Defaults to False.
Raises:
KeyError: If either table_1 or table_2 is not indexed by precursor
"""
if table_1.index.name != 'precursor':
raise KeyError('table_1 is not indexed by precursor')
if table_2.index.name != 'precursor':
raise KeyError('table_2 is not indexed by precursor')
shared_precursors = list(set(table_1.index).intersection(set(table_2.index)))
table_1_ = table_1.loc[shared_precursors]
table_2_ = table_2.loc[shared_precursors]
table_1_ = table_1_.groupby('precursor').mean()
table_2_ = table_2_.groupby('precursor').mean()
deltas = []
for col in offset_dict:
if calib:
col_ = col+'_calib'
else:
col_ = col
if offset_dict[col] == 'absolute':
deltas.append(np.nanmedian(table_1_[col_] - table_2_[col_]))
elif offset_dict[col] == 'relative':
deltas.append(np.nanmedian((table_1_[col_] - table_2_[col_]) / (table_1_[col_] + table_2_[col_]) * 2))
else:
raise NotImplementedError(f"Calculating delta for {offset_dict[col_]} not implemented.")
return deltas, len(shared_precursors)
# Cell
def calib_table(table: pd.DataFrame, delta: pd.Series, offset_dict: dict):
"""
Apply offset to a table. Different operations for offsets exist.
Offsets will be saved with a '_calib'-suffix. If this does not already exist,
it will be created.
Args:
        table (pd.DataFrame): Dataframe with data.
        delta (pd.Series): Series containing the offset.
offset_dict (dict): Dictionary with column names and how the distance should be calculated.
Raises:
        NotImplementedError: If the type of conversion is not implemented.
"""
for col in offset_dict:
if (col not in table.columns) and (col+'_apex' in table.columns):
col_ = col+'_apex'
else:
col_ = col
if offset_dict[col] == 'absolute':
table[col+'_calib'] = table[col_]-delta[col]
elif offset_dict[col] == 'relative':
table[col+'_calib'] = (1-delta[col_])*table[col]
else:
raise NotImplementedError(offset_dict[col])
# Cell
import logging
from sklearn.linear_model import LinearRegression
def align(deltas: pd.DataFrame, filenames: list, weights:np.ndarray=None) -> np.ndarray:
"""Align multiple datasets.
This function creates a matrix to represent the shifts from each dataset to another.
This effectively is an overdetermined equation system and is solved with a linear regression.
Args:
deltas (pd.DataFrame): Distances from each dataset to another.
        filenames (list): The filenames of the datasets that were compared.
weights (np.ndarray, optional): Distances can be weighted by their number of shared elements. Defaults to None.
Returns:
np.ndarray: alignment values.
"""
matrix = []
for i in range(len(deltas)):
start, end = deltas.index[i]
start_idx = filenames.index(start)
end_idx = filenames.index(end)
lines = np.zeros(len(filenames)-1)
lines[start_idx:end_idx] = 1
matrix.append(lines)
# Remove nan values
not_nan = ~deltas.isnull().any(axis=1)
matrix = np.array(matrix)
matrix = matrix[not_nan]
deltas_ = deltas[not_nan]
if len(deltas) < matrix.shape[1]:
logging.info('Low overlap between datasets detected. Alignment may fail.')
if weights is not None:
reg = LinearRegression(fit_intercept=False).fit(matrix, deltas_.values, sample_weight = weights[not_nan])
score= reg.score(matrix, deltas_.values)
else:
reg = LinearRegression(fit_intercept=False).fit(matrix, deltas_.values)
score= reg.score(matrix, deltas_.values)
logging.info(f"Regression score is {score}")
x = reg.predict(np.eye(len(filenames)-1))
return x
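# Sketch (not part of the original module; reuses the numpy / LinearRegression imports above):
# a hypothetical three-file example of the idea in `align` — the pairwise deltas form an
# overdetermined system matrix @ x = deltas in the unknown per-step shifts, solved with an
# intercept-free least-squares fit.
def _align_toy_example():
    matrix = np.array([[1, 0],   # delta(file0, file1)
                       [0, 1],   # delta(file1, file2)
                       [1, 1]])  # delta(file0, file2) spans both steps
    deltas = np.array([0.5, -0.2, 0.3])
    reg = LinearRegression(fit_intercept=False).fit(matrix, deltas)
    return reg.predict(np.eye(2))  # per-step shifts, roughly [0.5, -0.2]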
# Cell
import alphapept.io
import os
from typing import Callable
def calculate_deltas(combos: list, calib:bool = False, callback:Callable=None) -> (pd.DataFrame, np.ndarray, dict):
"""Wrapper function to calculate the distances of multiple files.
In here, we define the offset_dict to make a relative comparison for mz and mobility and absolute for rt.
    TODO: This function could be sped up by parallelization
Args:
combos (list): A list containing tuples of filenames that should be compared.
calib (bool): Boolean flag to indicate distance should be calculated on calibrated data.
callback (Callable): A callback function to track progress.
Returns:
pd.DataFrame: Dataframe containing the deltas of the files
np.ndarray: Numpy array containing the weights of each comparison (i.e. number of shared elements)
        dict: Offset dictionary which was used for comparing.
"""
offset_dict = {}
deltas = pd.DataFrame()
weights = []
for i, combo in enumerate(combos):
file1 = os.path.splitext(combo[0])[0] + '.ms_data.hdf'
file2 = os.path.splitext(combo[1])[0] + '.ms_data.hdf'
df_1 = alphapept.io.MS_Data_File(file1).read(dataset_name="peptide_fdr").set_index('precursor')
df_2 = alphapept.io.MS_Data_File(file2).read(dataset_name="peptide_fdr").set_index('precursor')
if not offset_dict:
offset_dict = {'mz':'relative', 'rt':'absolute'}
if 'mobility' in df_1.columns:
logging.info("Also using mobility for calibration.")
offset_dict['mobility'] = 'relative'
cols = list(offset_dict.keys())
if len(deltas) == 0:
deltas = pd.DataFrame(columns = cols)
dists, weight = calculate_distance(df_1, df_2, offset_dict, calib = calib)
deltas = deltas.append(pd.DataFrame([dists], columns = cols, index=[combo]))
weights.append(weight)
if callback:
callback((i+1)/len(combos))
return deltas, np.array(weights), offset_dict
# Cell
import pandas as pd
from itertools import combinations
import numpy as np
import os
import functools
#There is no unit test for align_files and align_datasets as they are wrappers and should be covered by the quick_test
def align_files(filenames: list, alignment: pd.DataFrame, offset_dict: dict):
"""
Wrapper function that aligns a list of files.
Args:
filenames (list): A list with raw file names.
alignment (pd.DataFrame): A pandas dataframe containing the alignment information.
offset_dict (dict): Dictionary with column names and how the distance should be calculated.
"""
for idx, filename in enumerate(filenames):
file = os.path.splitext(filename)[0] + '.ms_data.hdf'
for column in ['peptide_fdr', 'feature_table']:
df = alphapept.io.MS_Data_File(file).read(dataset_name=column)
calib_table(df, alignment.iloc[idx], offset_dict)
logging.info(f"Saving {file} - {column}.")
ms_file = alphapept.io.MS_Data_File(file, is_overwritable=True)
ms_file.write(df, dataset_name=column)
def align_datasets(settings:dict, callback:callable=None):
"""
    Wrapper function that aligns all experimental files specified in a settings file.
Args:
        settings (dict): Settings dictionary; the raw file paths are read from it.
callback (Callable): Callback function to indicate progress.
"""
filenames = settings['experiment']['file_paths']
if callback:
cb = functools.partial(progress_wrapper, 0, 2)
else:
cb = None
if len(filenames) > 1:
combos = list(combinations(filenames, 2))
deltas, weights, offset_dict = calculate_deltas(combos, callback=cb)
cols = list(offset_dict.keys())
before_sum = deltas.abs().sum().to_dict()
before_mean = deltas.abs().mean().to_dict()
logging.info(f'Total deviation before calibration {before_sum}')
logging.info(f'Mean deviation before calibration {before_mean}')
logging.info(f'Solving equation system')
alignment = pd.DataFrame(align(deltas, filenames, weights), columns = cols)
alignment = pd.concat([pd.DataFrame(np.zeros((1, alignment.shape[1])), columns= cols), alignment])
alignment -= alignment.mean()
logging.info(f'Solving equation system complete.')
logging.info(f'Applying offset')
align_files(filenames, -alignment, offset_dict)
if cb:
cb = functools.partial(progress_wrapper, 1, 2)
deltas, weights, offset_dict = calculate_deltas(combos, calib=True, callback=cb)
after_sum = deltas.abs().sum().to_dict()
after_mean = deltas.abs().mean().to_dict()
logging.info(f'Total deviation after calibration {after_sum}')
logging.info(f'Mean deviation after calibration {after_mean}')
change_sum = {k:v/before_sum[k] for k,v in after_sum.items()}
change_mean = {k:v/before_mean[k] for k,v in after_mean.items()}
logging.info(f'Change (after/before) total deviation {change_sum}')
logging.info(f'Change (after/before) mean deviation {change_mean}')
else:
logging.info('Only 1 dataset present. Skipping alignment.')
# Cell
from scipy import stats
def get_probability(df: pd.DataFrame, ref: pd.DataFrame, sigma:pd.DataFrame, index:int)-> float:
"""Probablity estimate of a transfered identification using the Mahalanobis distance.
The function calculates the probability that a feature is a reference feature.
    The reference features contain std deviations so that a probability can be estimated.
    It is required that the data frames are matched, meaning that the first entry in df matches the first entry in ref.
Args:
        df (pd.DataFrame): Dataset containing transferred features
ref (pd.DataFrame): Dataset containing reference features
sigma (pd.DataFrame): Dataset containing the standard deviations of the reference features
        index (int): Index into the dataframes that should be compared
Returns:
        float: Probability estimate derived from the Mahalanobis distance
"""
sigma = sigma.iloc[index].values
sigma = sigma*np.eye(len(sigma))
mu = ref.iloc[index].values
x = df.iloc[index].values
try:
m_dist_x = np.dot((x-mu).transpose(), np.linalg.inv(sigma))
m_dist_x = np.dot(m_dist_x, (x-mu))
_ = stats.chi2.cdf(m_dist_x, len(mu))
except Exception as e:
_ = np.nan
return _
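# Editorial example (not part of the original notebook): a hypothetical call of
# get_probability() for a single matched feature with two calibrated dimensions.
# All numbers are invented; pandas is imported locally to keep the sketch self-contained.
def _example_get_probability():
    import pandas as pd
    df = pd.DataFrame({'mz_calib': [500.02], 'rt_calib': [10.1]})   # transferred feature
    ref = pd.DataFrame({'mz_calib': [500.00], 'rt_calib': [10.0]})  # reference feature
    sigma = pd.DataFrame({'mz_calib': [0.05], 'rt_calib': [0.2]})   # reference std deviations
    # small return values indicate that the transferred feature lies close to the reference
    return get_probability(df, ref, sigma, 0)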
# Cell
from sklearn.neighbors import KDTree
from .utils import assemble_df
# This function is a wrapper function and currently has no unit test
# The function will be revised when implementing issue #255: https://github.com/MannLabs/alphapept/issues/255
def match_datasets(settings:dict, callback:Callable = None):
"""Match datasets: Wrapper function to match datasets based on a settings file.
Args:
        settings (dict): Dictionary containing specifications of the run
callback (Callable): Callback function to indicate progress.
"""
if len(settings['experiment']['file_paths']) > 2:
xx = alphapept.utils.assemble_df(settings, field='peptide_fdr')
base_col = ['precursor']
alignment_cols = ['mz_calib','rt_calib']
extra_cols = ['score','decoy','target']
if 'mobility' in xx.columns:
alignment_cols += ['mobility_calib']
use_mobility = True
else:
use_mobility = False
grouped = xx[base_col + alignment_cols + extra_cols].groupby('precursor').mean()
std_ = xx[base_col + alignment_cols].groupby('precursor').std()
grouped[[_+'_std' for _ in alignment_cols]] = std_
std_range = np.nanmedian(std_.values, axis=0)
match_p_min = settings['matching']['match_p_min']
match_d_min = settings['matching']['match_d_min']
filenames = settings['experiment']['file_paths']
lookup_dict = xx.set_index('precursor')[['sequence']].to_dict()
for idx, filename in enumerate(filenames):
file = os.path.splitext(filename)[0] + '.ms_data.hdf'
df = alphapept.io.MS_Data_File(file).read(dataset_name='peptide_fdr')
features = alphapept.io.MS_Data_File(file).read(dataset_name='feature_table')
features['feature_idx'] = features.index
matching_set = set(grouped.index) - set(df['precursor'])
logging.info(f'Trying to match file {file} with database of {len(matching_set):,} unidentified candidates')
mz_range = std_range[0]
rt_range = std_range[1]
tree_points = features[alignment_cols].values
tree_points[:,0] = tree_points[:,0]/mz_range
tree_points[:,1] = tree_points[:,1]/rt_range
query_points = grouped.loc[matching_set][alignment_cols].values
query_points[:,0] = query_points[:,0]/mz_range
query_points[:,1] = query_points[:,1]/rt_range
if use_mobility:
logging.info("Using mobility")
i_range = std_range[2]
tree_points[:,2] = tree_points[:,2]/i_range
query_points[:,2] = query_points[:,2]/i_range
matching_tree = KDTree(tree_points, metric="minkowski")
dist, idx = matching_tree.query(query_points, k=1)
matched = features.iloc[idx[:,0]]
for _ in extra_cols:
matched[_] = grouped.loc[matching_set, _].values
to_keep = dist < match_d_min
matched = matched[to_keep]
ref = grouped.loc[matching_set][alignment_cols][to_keep]
sigma = std_.loc[matching_set][to_keep]
logging.info(f'{len(matched):,} possible features for matching based on distance of {match_d_min}')
matched['matching_p'] = [get_probability(matched[alignment_cols], ref, sigma, i) for i in range(len(matched))]
matched['precursor'] = grouped.loc[matching_set][to_keep].index.values
matched = matched[matched['matching_p']< match_p_min]
logging.info(f'{len(matched):,} possible features for matching based on probability of {match_p_min}')
matched['type'] = 'matched'
for _ in lookup_dict.keys():
matched[_] = [lookup_dict[_][x] for x in matched['precursor']]
df['type'] = 'msms'
df['matching_p'] = np.nan
shared_columns = set(matched.columns).intersection(set(df.columns))
df_ = pd.concat([df, matched[shared_columns]], ignore_index=True)
logging.info(f"Saving {file} - peptide_fdr.")
ms_file = alphapept.io.MS_Data_File(file, is_overwritable=True)
ms_file.write(df_, dataset_name='peptide_fdr')
else:
logging.info('Less than 3 datasets present. Skipping matching.')
| 36.746544
| 124
| 0.658891
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/09_matching.ipynb (unless otherwise specified).
__all__ = ['calculate_distance', 'calib_table', 'align', 'calculate_deltas', 'align_files', 'align_datasets',
'get_probability', 'match_datasets']
# Cell
import pandas as pd
import numpy as np
def calculate_distance(table_1: pd.DataFrame, table_2: pd.DataFrame, offset_dict: dict, calib: bool = False) -> (list, int):
"""Calculate the distance between two precursors for different columns
Distance can either be relative or absolute.
An example for a minimal offset_dict is: offset_dict = {'mass':'absolute'}
Args:
        table_1 (pd.DataFrame): Dataframe with precursor data.
        table_2 (pd.DataFrame): Dataframe with precursor data.
offset_dict (dict): Dictionary with column names and how the distance should be calculated.
calib (bool): Flag to indicate that distances should be calculated on calibrated columns. Defaults to False.
Raises:
KeyError: If either table_1 or table_2 is not indexed by precursor
"""
if table_1.index.name != 'precursor':
raise KeyError('table_1 is not indexed by precursor')
if table_2.index.name != 'precursor':
raise KeyError('table_2 is not indexed by precursor')
shared_precursors = list(set(table_1.index).intersection(set(table_2.index)))
table_1_ = table_1.loc[shared_precursors]
table_2_ = table_2.loc[shared_precursors]
table_1_ = table_1_.groupby('precursor').mean()
table_2_ = table_2_.groupby('precursor').mean()
deltas = []
for col in offset_dict:
if calib:
col_ = col+'_calib'
else:
col_ = col
if offset_dict[col] == 'absolute':
deltas.append(np.nanmedian(table_1_[col_] - table_2_[col_]))
elif offset_dict[col] == 'relative':
deltas.append(np.nanmedian((table_1_[col_] - table_2_[col_]) / (table_1_[col_] + table_2_[col_]) * 2))
else:
raise NotImplementedError(f"Calculating delta for {offset_dict[col_]} not implemented.")
return deltas, len(shared_precursors)
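# Editorial example (not part of the original notebook): a hypothetical call of
# calculate_distance() on two tiny precursor tables. The precursor names and values
# are invented for illustration only.
def _example_calculate_distance():
    t1 = pd.DataFrame({'precursor': ['PEP1', 'PEP2'], 'mz': [500.0, 600.0], 'rt': [10.0, 20.0]}).set_index('precursor')
    t2 = pd.DataFrame({'precursor': ['PEP1', 'PEP2'], 'mz': [500.1, 600.1], 'rt': [10.5, 20.5]}).set_index('precursor')
    # 'mz' distance is relative (2 * diff / sum), 'rt' distance is a plain median difference
    dists, n_shared = calculate_distance(t1, t2, {'mz': 'relative', 'rt': 'absolute'})
    return dists, n_shared  # n_shared == 2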
# Cell
def calib_table(table: pd.DataFrame, delta: pd.Series, offset_dict: dict):
"""
Apply offset to a table. Different operations for offsets exist.
Offsets will be saved with a '_calib'-suffix. If this does not already exist,
it will be created.
Args:
table_1 (pd.DataFrame): Dataframe with data.
        delta (pd.Series): Series containing the offset.
offset_dict (dict): Dictionary with column names and how the distance should be calculated.
Raises:
        NotImplementedError: If the type of conversion is not implemented.
"""
for col in offset_dict:
if (col not in table.columns) and (col+'_apex' in table.columns):
col_ = col+'_apex'
else:
col_ = col
if offset_dict[col] == 'absolute':
table[col+'_calib'] = table[col_]-delta[col]
elif offset_dict[col] == 'relative':
table[col+'_calib'] = (1-delta[col_])*table[col]
else:
raise NotImplementedError(offset_dict[col])
# Cell
import logging
from sklearn.linear_model import LinearRegression
def align(deltas: pd.DataFrame, filenames: list, weights:np.ndarray=None) -> np.ndarray:
"""Align multiple datasets.
This function creates a matrix to represent the shifts from each dataset to another.
This effectively is an overdetermined equation system and is solved with a linear regression.
Args:
deltas (pd.DataFrame): Distances from each dataset to another.
        filenames (list): The filenames of the datasets that were compared.
weights (np.ndarray, optional): Distances can be weighted by their number of shared elements. Defaults to None.
Returns:
np.ndarray: alignment values.
"""
matrix = []
for i in range(len(deltas)):
start, end = deltas.index[i]
start_idx = filenames.index(start)
end_idx = filenames.index(end)
lines = np.zeros(len(filenames)-1)
lines[start_idx:end_idx] = 1
matrix.append(lines)
# Remove nan values
not_nan = ~deltas.isnull().any(axis=1)
matrix = np.array(matrix)
matrix = matrix[not_nan]
deltas_ = deltas[not_nan]
if len(deltas) < matrix.shape[1]:
logging.info('Low overlap between datasets detected. Alignment may fail.')
if weights is not None:
reg = LinearRegression(fit_intercept=False).fit(matrix, deltas_.values, sample_weight = weights[not_nan])
score= reg.score(matrix, deltas_.values)
else:
reg = LinearRegression(fit_intercept=False).fit(matrix, deltas_.values)
score= reg.score(matrix, deltas_.values)
logging.info(f"Regression score is {score}")
x = reg.predict(np.eye(len(filenames)-1))
return x
# Cell
import alphapept.io
import os
from typing import Callable
def calculate_deltas(combos: list, calib:bool = False, callback:Callable=None) -> (pd.DataFrame, np.ndarray, dict):
"""Wrapper function to calculate the distances of multiple files.
In here, we define the offset_dict to make a relative comparison for mz and mobility and absolute for rt.
    TODO: This function could be sped up by parallelization
Args:
combos (list): A list containing tuples of filenames that should be compared.
calib (bool): Boolean flag to indicate distance should be calculated on calibrated data.
callback (Callable): A callback function to track progress.
Returns:
pd.DataFrame: Dataframe containing the deltas of the files
np.ndarray: Numpy array containing the weights of each comparison (i.e. number of shared elements)
        dict: Offset dictionary which was used for comparing.
"""
offset_dict = {}
deltas = pd.DataFrame()
weights = []
for i, combo in enumerate(combos):
file1 = os.path.splitext(combo[0])[0] + '.ms_data.hdf'
file2 = os.path.splitext(combo[1])[0] + '.ms_data.hdf'
df_1 = alphapept.io.MS_Data_File(file1).read(dataset_name="peptide_fdr").set_index('precursor')
df_2 = alphapept.io.MS_Data_File(file2).read(dataset_name="peptide_fdr").set_index('precursor')
if not offset_dict:
offset_dict = {'mz':'relative', 'rt':'absolute'}
if 'mobility' in df_1.columns:
logging.info("Also using mobility for calibration.")
offset_dict['mobility'] = 'relative'
cols = list(offset_dict.keys())
if len(deltas) == 0:
deltas = pd.DataFrame(columns = cols)
dists, weight = calculate_distance(df_1, df_2, offset_dict, calib = calib)
deltas = deltas.append(pd.DataFrame([dists], columns = cols, index=[combo]))
weights.append(weight)
if callback:
callback((i+1)/len(combos))
return deltas, np.array(weights), offset_dict
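# Editorial example (not part of the original notebook): how the `combos` argument is
# typically built before calling calculate_deltas (align_datasets below does the same).
# The file names are placeholders.
def _example_build_combos():
    from itertools import combinations
    files = ['runA.raw', 'runB.raw', 'runC.raw']
    # every unordered pair of files is compared once
    return list(combinations(files, 2))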
# Cell
import pandas as pd
from itertools import combinations
import numpy as np
import os
import functools
#There is no unit test for align_files and align_datasets as they are wrappers and should be covered by the quick_test
def align_files(filenames: list, alignment: pd.DataFrame, offset_dict: dict):
"""
Wrapper function that aligns a list of files.
Args:
filenames (list): A list with raw file names.
alignment (pd.DataFrame): A pandas dataframe containing the alignment information.
offset_dict (dict): Dictionary with column names and how the distance should be calculated.
"""
for idx, filename in enumerate(filenames):
file = os.path.splitext(filename)[0] + '.ms_data.hdf'
for column in ['peptide_fdr', 'feature_table']:
df = alphapept.io.MS_Data_File(file).read(dataset_name=column)
calib_table(df, alignment.iloc[idx], offset_dict)
logging.info(f"Saving {file} - {column}.")
ms_file = alphapept.io.MS_Data_File(file, is_overwritable=True)
ms_file.write(df, dataset_name=column)
def align_datasets(settings:dict, callback:callable=None):
"""
    Wrapper function that aligns all experimental files specified in a settings file.
Args:
        settings (dict): Settings dictionary; the raw file paths are read from it.
callback (Callable): Callback function to indicate progress.
"""
filenames = settings['experiment']['file_paths']
if callback:
def progress_wrapper(current, step, n_steps):
callback(step+current/n_steps)
cb = functools.partial(progress_wrapper, 0, 2)
else:
cb = None
if len(filenames) > 1:
combos = list(combinations(filenames, 2))
deltas, weights, offset_dict = calculate_deltas(combos, callback=cb)
cols = list(offset_dict.keys())
before_sum = deltas.abs().sum().to_dict()
before_mean = deltas.abs().mean().to_dict()
logging.info(f'Total deviation before calibration {before_sum}')
logging.info(f'Mean deviation before calibration {before_mean}')
logging.info(f'Solving equation system')
alignment = pd.DataFrame(align(deltas, filenames, weights), columns = cols)
alignment = pd.concat([pd.DataFrame(np.zeros((1, alignment.shape[1])), columns= cols), alignment])
alignment -= alignment.mean()
logging.info(f'Solving equation system complete.')
logging.info(f'Applying offset')
align_files(filenames, -alignment, offset_dict)
if cb:
cb = functools.partial(progress_wrapper, 1, 2)
deltas, weights, offset_dict = calculate_deltas(combos, calib=True, callback=cb)
after_sum = deltas.abs().sum().to_dict()
after_mean = deltas.abs().mean().to_dict()
logging.info(f'Total deviation after calibration {after_sum}')
logging.info(f'Mean deviation after calibration {after_mean}')
change_sum = {k:v/before_sum[k] for k,v in after_sum.items()}
change_mean = {k:v/before_mean[k] for k,v in after_mean.items()}
logging.info(f'Change (after/before) total deviation {change_sum}')
logging.info(f'Change (after/before) mean deviation {change_mean}')
else:
logging.info('Only 1 dataset present. Skipping alignment.')
# Cell
from scipy import stats
def get_probability(df: pd.DataFrame, ref: pd.DataFrame, sigma:pd.DataFrame, index:int)-> float:
"""Probablity estimate of a transfered identification using the Mahalanobis distance.
The function calculates the probability that a feature is a reference feature.
    The reference features contain std deviations so that a probability can be estimated.
    It is required that the data frames are matched, meaning that the first entry in df matches the first entry in ref.
Args:
        df (pd.DataFrame): Dataset containing transferred features
ref (pd.DataFrame): Dataset containing reference features
sigma (pd.DataFrame): Dataset containing the standard deviations of the reference features
        index (int): Index into the dataframes that should be compared
Returns:
        float: Probability estimate derived from the Mahalanobis distance
"""
sigma = sigma.iloc[index].values
sigma = sigma*np.eye(len(sigma))
mu = ref.iloc[index].values
x = df.iloc[index].values
try:
m_dist_x = np.dot((x-mu).transpose(), np.linalg.inv(sigma))
m_dist_x = np.dot(m_dist_x, (x-mu))
_ = stats.chi2.cdf(m_dist_x, len(mu))
except Exception as e:
_ = np.nan
return _
# Cell
from sklearn.neighbors import KDTree
from .utils import assemble_df
# This function is a wrapper function and currently has no unit test
# The function will be revised when implementing issue #255: https://github.com/MannLabs/alphapept/issues/255
def match_datasets(settings:dict, callback:Callable = None):
"""Match datasets: Wrapper function to match datasets based on a settings file.
Args:
        settings (dict): Dictionary containing specifications of the run
callback (Callable): Callback function to indicate progress.
"""
if len(settings['experiment']['file_paths']) > 2:
xx = alphapept.utils.assemble_df(settings, field='peptide_fdr')
base_col = ['precursor']
alignment_cols = ['mz_calib','rt_calib']
extra_cols = ['score','decoy','target']
if 'mobility' in xx.columns:
alignment_cols += ['mobility_calib']
use_mobility = True
else:
use_mobility = False
grouped = xx[base_col + alignment_cols + extra_cols].groupby('precursor').mean()
std_ = xx[base_col + alignment_cols].groupby('precursor').std()
grouped[[_+'_std' for _ in alignment_cols]] = std_
std_range = np.nanmedian(std_.values, axis=0)
match_p_min = settings['matching']['match_p_min']
match_d_min = settings['matching']['match_d_min']
filenames = settings['experiment']['file_paths']
lookup_dict = xx.set_index('precursor')[['sequence']].to_dict()
for idx, filename in enumerate(filenames):
file = os.path.splitext(filename)[0] + '.ms_data.hdf'
df = alphapept.io.MS_Data_File(file).read(dataset_name='peptide_fdr')
features = alphapept.io.MS_Data_File(file).read(dataset_name='feature_table')
features['feature_idx'] = features.index
matching_set = set(grouped.index) - set(df['precursor'])
logging.info(f'Trying to match file {file} with database of {len(matching_set):,} unidentified candidates')
mz_range = std_range[0]
rt_range = std_range[1]
tree_points = features[alignment_cols].values
tree_points[:,0] = tree_points[:,0]/mz_range
tree_points[:,1] = tree_points[:,1]/rt_range
query_points = grouped.loc[matching_set][alignment_cols].values
query_points[:,0] = query_points[:,0]/mz_range
query_points[:,1] = query_points[:,1]/rt_range
if use_mobility:
logging.info("Using mobility")
i_range = std_range[2]
tree_points[:,2] = tree_points[:,2]/i_range
query_points[:,2] = query_points[:,2]/i_range
matching_tree = KDTree(tree_points, metric="minkowski")
dist, idx = matching_tree.query(query_points, k=1)
matched = features.iloc[idx[:,0]]
for _ in extra_cols:
matched[_] = grouped.loc[matching_set, _].values
to_keep = dist < match_d_min
matched = matched[to_keep]
ref = grouped.loc[matching_set][alignment_cols][to_keep]
sigma = std_.loc[matching_set][to_keep]
logging.info(f'{len(matched):,} possible features for matching based on distance of {match_d_min}')
matched['matching_p'] = [get_probability(matched[alignment_cols], ref, sigma, i) for i in range(len(matched))]
matched['precursor'] = grouped.loc[matching_set][to_keep].index.values
matched = matched[matched['matching_p']< match_p_min]
logging.info(f'{len(matched):,} possible features for matching based on probability of {match_p_min}')
matched['type'] = 'matched'
for _ in lookup_dict.keys():
matched[_] = [lookup_dict[_][x] for x in matched['precursor']]
df['type'] = 'msms'
df['matching_p'] = np.nan
shared_columns = set(matched.columns).intersection(set(df.columns))
df_ = pd.concat([df, matched[shared_columns]], ignore_index=True)
logging.info(f"Saving {file} - peptide_fdr.")
ms_file = alphapept.io.MS_Data_File(file, is_overwritable=True)
ms_file.write(df_, dataset_name='peptide_fdr')
else:
logging.info('Less than 3 datasets present. Skipping matching.')
| 67
| 0
| 30
|
e20006aa1bcdf9766118cc8327dfb3893735dd0f
| 5,147
|
py
|
Python
|
web_journal/service/filesystem.py
|
pete88b/web_journal
|
7a6cd88d13f109836118c2a025a1c69d6172283e
|
[
"Apache-2.0"
] | null | null | null |
web_journal/service/filesystem.py
|
pete88b/web_journal
|
7a6cd88d13f109836118c2a025a1c69d6172283e
|
[
"Apache-2.0"
] | null | null | null |
web_journal/service/filesystem.py
|
pete88b/web_journal
|
7a6cd88d13f109836118c2a025a1c69d6172283e
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: 40b_service_filesystem.ipynb (unless otherwise specified).
__all__ = ['sort_posts', 'posts_list_to_dict', 'migrate', 'ServiceFilesystem', 'before_request', 'after_request',
'init_service']
# Cell
import json,uuid,datetime,re
from pathlib import Path
from operator import attrgetter
# Cell
# Cell
def posts_list_to_dict(posts,key='id'):
"Convert a list of dictionaries to a dictionary of dictionaries"
return {post[key]:post for post in posts}
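# Editorial example (not part of the original module): a minimal, hypothetical usage
# of posts_list_to_dict. The post dictionaries are invented for illustration.
def _example_posts_list_to_dict():
    posts = [{'id': 'a1', 'title': 'first'}, {'id': 'b2', 'title': 'second'}]
    # keyed by 'id' by default: {'a1': {...}, 'b2': {...}}
    return posts_list_to_dict(posts)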
# Cell
# Cell
# TODO: DRY
# Cell
# Cell
# Cell
| 37.569343
| 113
| 0.631436
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 40b_service_filesystem.ipynb (unless otherwise specified).
__all__ = ['sort_posts', 'posts_list_to_dict', 'migrate', 'ServiceFilesystem', 'before_request', 'after_request',
'init_service']
# Cell
import json,uuid,datetime,re
from pathlib import Path
from operator import attrgetter
# Cell
def sort_posts(posts):
return sorted(posts, key=lambda post: (post['created'],post['last_updated'],post['id']), reverse=True)
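# Editorial example (not part of the original module): sort_posts orders posts
# newest-first by creation time, then last update, then id. The posts are invented.
def _example_sort_posts():
    posts = [
        {'id': 'a', 'created': '2021-01-01 10:00:00', 'last_updated': '2021-01-01 10:00:00'},
        {'id': 'b', 'created': '2021-02-01 09:00:00', 'last_updated': '2021-02-01 09:00:00'},
    ]
    return sort_posts(posts)  # the newer post 'b' comes first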
# Cell
def posts_list_to_dict(posts,key='id'):
"Convert a list of dictionaries to a dictionary of dictionaries"
return {post[key]:post for post in posts}
# Cell
def migrate(data_dir,output_dir=None):
posts_file_re=re.compile(r'posts-(?:\d{13}).json')
data_dir=Path(data_dir)
output_dir=data_dir if output_dir is None else Path(output_dir)
for f_name in data_dir.iterdir():
if not posts_file_re.fullmatch(f_name.name): continue
with open(f_name) as f: posts = json.load(f)
if not posts: continue
print('migrating',f_name,'to',output_dir)
if not 'status' in posts[0]:
for post in posts:
post['last_updated']=post['created']
post['status']=50 if post['is_deleted']==0 else 20
del post['is_deleted']
for post in posts:
post['id']=str(post['id'])
# subsequent migrations might do something like
# if not 'other_key' in posts[0]: ...
with open(output_dir/f_name.name,'w') as f: json.dump(posts,f)
# Cell
class ServiceFilesystem:
# TODO: DRY
def __init__(self,data_dir):
self.data_dir=Path(data_dir)
self.data_dir.mkdir(parents=True,exist_ok=True)
def read_user_by_id(self,id):
if (self.data_dir/'users.json').is_file():
with open(self.data_dir/'users.json') as f:
for user in json.load(f):
if user['id']==id: return user
return None
def read_user_by_username(self,username):
if (self.data_dir/'users.json').is_file():
with open(self.data_dir/'users.json') as f:
for user in json.load(f):
if user['username']==username: return user
return None
def create_user(self,username,password):
users=[]
if (self.data_dir/'users.json').is_file():
with open(self.data_dir/'users.json') as f: users=json.load(f)
id=uuid.uuid4().hex
users.append(dict(id=id,username=username,password=password))
with open(self.data_dir/'users.json','w') as f: json.dump(users,f)
return id
def _add_username(self,post):
# TODO: check how slow this is ...
user=self.read_user_by_id(post['author_id'])
post['username']='Unknown user' if user is None else user['username']
return post
def _posts(self,author_id):
if (self.data_dir/f'posts-{author_id}.json').is_file():
with open(self.data_dir/f'posts-{author_id}.json') as f:
return json.load(f)
return []
def read_posts_by_author_id(self,author_id):
return [self._add_username(p) for p in self._posts(author_id) if p['status']>30]
def read_post_by_id(self,author_id,id):
for post in self._posts(author_id):
if post['id']==id: return self._add_username(post)
return None
def _now(self):
return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
def create_post(self,author_id,title,body):
posts=self._posts(author_id)
id,_now=uuid.uuid4().hex,self._now()
posts.insert(0,dict(id=id,author_id=author_id,title=title,body=body,
created=_now,last_updated=_now,status=50))
with open(self.data_dir/f'posts-{author_id}.json','w') as f: json.dump(posts,f)
return id
def update_post_by_id(self,author_id,id,keys,values):
posts=self._posts(author_id)
for post in posts:
if post['id']==id:
for key,value in zip(keys,values): post[key]=value
post['last_updated']=self._now()
with open(self.data_dir/f'posts-{author_id}.json','w') as f: json.dump(posts,f)
return post
return None
def prepare_posts_file_by_author_id(self,author_id):
if (self.data_dir/f'posts-{author_id}.json').is_file():
return self.data_dir,f'posts-{author_id}.json'
return None,None
def upload_posts_from_file(self,author_id,file):
# TODO: handle non-json formats
with open(file) as f:
posts=json.load(f)
for post in posts: post['author_id']=author_id
posts=posts_list_to_dict(posts)
posts.update(posts_list_to_dict(self._posts(author_id)))
posts=sort_posts(posts.values())
with open(self.data_dir/f'posts-{author_id}.json','w') as f: json.dump(posts,f)
# Cell
def before_request(app):
return ServiceFilesystem(app.config['DATA_DIR'])
# Cell
def after_request(app,service):
pass
# Cell
def init_service(app):
print('service.filesystem.init_service')
| 4,097
| 3
| 482
|
cf4b895823276a02926235217abb1d4e591964cc
| 1,468
|
py
|
Python
|
core_dev/_collections/graph/graph.py
|
alexzanderr/_core-dev
|
831f69dad524e450c4243b1dd88f26de80e1d444
|
[
"MIT"
] | null | null | null |
core_dev/_collections/graph/graph.py
|
alexzanderr/_core-dev
|
831f69dad524e450c4243b1dd88f26de80e1d444
|
[
"MIT"
] | null | null | null |
core_dev/_collections/graph/graph.py
|
alexzanderr/_core-dev
|
831f69dad524e450c4243b1dd88f26de80e1d444
|
[
"MIT"
] | null | null | null |
"""
graph.py
useful graph collection
author: @alexzander
"""
# python
import os
# core package
from core.system import *
from core.json__ import *
from exceptions import *
from core.path__ import *
# from exceptions import * (same thing, it works)
| 27.185185
| 97
| 0.583787
|
"""
graph.py
useful graph collection
author: @alexzander
"""
# python
import os
# core package
from core.system import *
from core.json__ import *
from exceptions import *
from core.path__ import *
# from exceptions import * (same thing, it works)
class Graph:
def __init__(self, edges_json: list):
self.nodes = []
self.graph_adj_dict = {}
self.edges = list(map(tuple, edges_json))
for edge in edges_json:
if edge[0] in self.graph_adj_dict:
self.graph_adj_dict[edge[0]].append(edge[1])
else:
self.graph_adj_dict[edge[0]] = [edge[1]]
self.nodes.append(edge[0])
def degree(self, node: int):
for __node, adj_nodes in self.graph_adj_dict.items():
if __node == node:
return len(adj_nodes)
raise VertexNotFoundError
def get_adj_list_represenation(self):
graph_representation = ""
for index, vertex in enumerate(self.graph_adj_dict.keys()):
if index == len(self.graph_adj_dict.keys()) - 1:
graph_representation += "{} -> {}".format(vertex, self.graph_adj_dict[vertex])
else:
graph_representation += "{} -> {}\n".format(vertex, self.graph_adj_dict[vertex])
return graph_representation
def __str__(self):
return self.get_adj_list_represenation()
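# Editorial example (not part of the original module): a minimal, hypothetical usage
# of the Graph class. The edge list is invented for illustration.
def _example_graph_usage():
    g = Graph([[1, 2], [1, 3], [2, 3]])
    print(g)            # adjacency-list representation, one vertex per line
    return g.degree(1)  # 2 -> vertex 1 has two outgoing edges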
| 1,047
| -9
| 138
|
1e33a26347f208b9cd6f46e08a1083723e916c43
| 1,262
|
py
|
Python
|
tools/lib/formats/default/lineHandler.py
|
pfloos/QUESTDB_website
|
720fb41c42f50e3614cf406fa6cade594f9dd526
|
[
"BSD-3-Clause"
] | 2
|
2020-10-29T19:41:52.000Z
|
2021-08-12T04:28:33.000Z
|
tools/lib/formats/default/lineHandler.py
|
rdguerrerom/QUESTDB_website
|
bebcfdd9596ca90f9c1ca210a68569b767fdfbce
|
[
"BSD-3-Clause"
] | 1
|
2020-11-20T10:06:44.000Z
|
2020-11-20T10:06:44.000Z
|
tools/lib/formats/default/lineHandler.py
|
rdguerrerom/QUESTDB_website
|
bebcfdd9596ca90f9c1ca210a68569b767fdfbce
|
[
"BSD-3-Clause"
] | 2
|
2020-11-16T14:46:02.000Z
|
2020-11-24T15:56:47.000Z
|
from ..formatHandlerBase import formatHandlerBase
from ..formatName import formatName
from ...data import dataFileBase,DataType,method,excitationValue,datafileSelector
from ...utils import getValFromCell
import numpy as np
@formatName("line")
| 38.242424
| 119
| 0.666403
|
from ..formatHandlerBase import formatHandlerBase
from ..formatName import formatName
from ...data import dataFileBase,DataType,method,excitationValue,datafileSelector
from ...utils import getValFromCell
import numpy as np
@formatName("line")
class lineHandler(formatHandlerBase):
def readFromTable(self,table):
datalist=list()
for col in range(1,np.size(table,1)):
col=table[:,col]
mymolecule=str(col[0])
mymethod=method(str(col[2]),str(col[1]))
initialState=self.TexOps.initialStates[mymolecule]
finsts=dataFileBase.convertState(table[3:,0],initialState,default=self.TexOps.defaultType,commands=self.Commands)
datacls=dict()
for index,cell in enumerate(col[3:]):
if str(cell)!="":
val,unsafe=getValFromCell(cell)
finst=finsts[index]
dt=finst[1]
if dt in datacls:
data=datacls[dt]
else:
cl=datafileSelector(dt)
data=cl()
datacls[dt]=data
data.molecule=mymolecule
data.method=mymethod
data.excitations.append(excitationValue(initialState,finst[0],val,type=finst[2],isUnsafe=unsafe))
for value in datacls.values():
datalist.append(value)
return datalist
| 958
| 16
| 46
|
72b828a699b17bd111dc3d200cb7028c6164204b
| 1,356
|
py
|
Python
|
setup.py
|
karstenw/FMPasteBox
|
c84aa860401051ed369d8559afa83f572a2bd729
|
[
"BSD-2-Clause"
] | 2
|
2021-07-14T10:07:13.000Z
|
2021-11-14T17:59:18.000Z
|
setup.py
|
karstenw/FMPasteBox
|
c84aa860401051ed369d8559afa83f572a2bd729
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
karstenw/FMPasteBox
|
c84aa860401051ed369d8559afa83f572a2bd729
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Script for building FMPasteBox
Usage:
python setup.py py2app
"""
from distutils.core import setup
from setuptools.extension import Extension
import py2app
import FMPasteBoxVersion
setup(
name = FMPasteBoxVersion.appname,
version = FMPasteBoxVersion.version,
description = FMPasteBoxVersion.description,
long_description = FMPasteBoxVersion.longdescription,
author = FMPasteBoxVersion.author,
app=[{
'script': "FMPasteBox.py",
"plist": {
"NSPrincipalClass": 'NSApplication',
"CFBundleIdentifier": FMPasteBoxVersion.bundleID,
"CFBundleName": FMPasteBoxVersion.appnameshort,
"CFBundleSignature": FMPasteBoxVersion.creator,
"CFBundleShortVersionString": FMPasteBoxVersion.version,
"CFBundleGetInfoString": FMPasteBoxVersion.description,
"NSHumanReadableCopyright": FMPasteBoxVersion.copyright,
}
}],
data_files=[
"English.lproj/MainMenu.nib",
"English.lproj/Preferences.nib",
#"English.lproj/FMPasteBoxDocument.nib",
"+icon/FMPasteBox.icns",
#"+icon/FMPasteBoxFile.icns",
],
options={
"py2app": {
"iconfile": "+icon/FMPasteBox.icns",
# "packages": [],
"excludes": ["TkInter", 'Tcl', 'Tk'],
}
} )
| 27.12
| 68
| 0.634218
|
"""
Script for building FMPasteBox
Usage:
python setup.py py2app
"""
from distutils.core import setup
from setuptools.extension import Extension
import py2app
import FMPasteBoxVersion
setup(
name = FMPasteBoxVersion.appname,
version = FMPasteBoxVersion.version,
description = FMPasteBoxVersion.description,
long_description = FMPasteBoxVersion.longdescription,
author = FMPasteBoxVersion.author,
app=[{
'script': "FMPasteBox.py",
"plist": {
"NSPrincipalClass": 'NSApplication',
"CFBundleIdentifier": FMPasteBoxVersion.bundleID,
"CFBundleName": FMPasteBoxVersion.appnameshort,
"CFBundleSignature": FMPasteBoxVersion.creator,
"CFBundleShortVersionString": FMPasteBoxVersion.version,
"CFBundleGetInfoString": FMPasteBoxVersion.description,
"NSHumanReadableCopyright": FMPasteBoxVersion.copyright,
}
}],
data_files=[
"English.lproj/MainMenu.nib",
"English.lproj/Preferences.nib",
#"English.lproj/FMPasteBoxDocument.nib",
"+icon/FMPasteBox.icns",
#"+icon/FMPasteBoxFile.icns",
],
options={
"py2app": {
"iconfile": "+icon/FMPasteBox.icns",
# "packages": [],
"excludes": ["TkInter", 'Tcl', 'Tk'],
}
} )
| 0
| 0
| 0
|
0c0e45b33a5297c3f61ad03da499611f67da31e8
| 9,755
|
py
|
Python
|
src/visualization/visualize.py
|
siebeniris/superresolution
|
2eec93029c1332720ba17d5747ec9aee19bc0c63
|
[
"MIT"
] | null | null | null |
src/visualization/visualize.py
|
siebeniris/superresolution
|
2eec93029c1332720ba17d5747ec9aee19bc0c63
|
[
"MIT"
] | null | null | null |
src/visualization/visualize.py
|
siebeniris/superresolution
|
2eec93029c1332720ba17d5747ec9aee19bc0c63
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import os
import sys, glob, itertools, math
from typing import List, Dict
from PIL import Image, ImageFont, ImageDraw
import torch
from ..srcnn.srcnn_run import SRCNNPreProcessor, SRCNNPreProcessorGen
from src.srgan.srgan_module import Generator as SRGANGenerator
from ..srcnn.srcnn_module import *
supportedModels = {
"srcnn": SRCNN,
"srcnn-bnorm": SRCNNBatchNorm,
"srcnn-residual": SRCNNR,
"srgan": lambda: SRGANGenerator(scale_factor=2),
}
class PytorchSRVisulizator:
"""
Visualizes pytorch nn.models:
* model files should contain only the state dict, because the Model classes can't be pickled at runtime.
    * model dir must follow this hierarchy:
modeldir
| model.pth #final model file
| checkpoints
| model_epoch_{}.pth # '{}' is replaced with epoch number
"""
@staticmethod
if __name__ == "__main__":
parser = argparse.ArgumentParser("SRCNN visualizer")
PytorchSRVisulizator.add_arguments_to(parser)
config = parser.parse_args()
print(config)
visualizer = PytorchSRVisulizator(config)
visualizer.visualize()
| 32.088816
| 108
| 0.526602
|
from __future__ import print_function
import argparse
import os
import sys, glob, itertools, math
from typing import List, Dict
from PIL import Image, ImageFont, ImageDraw
import torch
from ..srcnn.srcnn_run import SRCNNPreProcessor, SRCNNPreProcessorGen
from src.srgan.srgan_module import Generator as SRGANGenerator
from ..srcnn.srcnn_module import *
supportedModels = {
"srcnn": SRCNN,
"srcnn-bnorm": SRCNNBatchNorm,
"srcnn-residual": SRCNNR,
"srgan": lambda: SRGANGenerator(scale_factor=2),
}
class PytorchSRVisulizator:
"""
Visualizes pytorch nn.models:
* model files should contain only the state dict, because the Model classes can't be pickled at runtime.
    * model dir must follow this hierarchy:
modeldir
| model.pth #final model file
| checkpoints
| model_epoch_{}.pth # '{}' is replaced with epoch number
"""
def __init__(self, config):
self.config = config
@staticmethod
def add_arguments_to(parser: argparse.ArgumentParser):
parser.add_argument(
"-i",
"--input",
nargs="?",
type=argparse.FileType("r"),
default=sys.stdin,
help="List of input images, will be read by default from stdin.",
)
parser.add_argument(
"-t",
"--model_type",
required=True,
default="srcnn",
choices=supportedModels.keys(),
)
parser.add_argument(
"-m", "--model_dir", type=str, required=True, help="Model directory to use"
)
parser.add_argument(
"-s",
"--scale_factor",
type=int,
default=2.0,
help="Factor by which super resolution is needed",
)
parser.add_argument(
"-l",
"--inc_lowres",
type=bool,
const=True,
nargs="?",
default=False,
help="Whether lowres images should be included.",
)
parser.add_argument(
"-o",
"--inc_highres",
type=bool,
const=True,
nargs="?",
default=False,
help="Whether highres images should be included.",
)
parser.add_argument(
"--merge_to",
type=str,
default=None,
help="Merges all generated images to specified filename.",
)
parser.add_argument(
"-c",
"--columns",
type=int,
default=None,
help="Generated images per line. Defaults to one line per input image.",
)
parser.add_argument(
"--checkpoints",
type=str,
nargs="?",
const="::",
default=None,
help="Slice used checkpoints using start:stop:step python syntax.",
)
parser.add_argument(
"-a",
"--annotate",
type=bool,
const=True,
nargs="?",
default=False,
help="Whether images should be labeled.",
)
parser.add_argument(
"-out", "--outdir", type=str, help="output directory of the images"
)
parser.add_argument(
'-no', '--no_downscale', nargs='?', const=True, default=False,
help='Whether downscaling preprocessing should not be applied.'
)
return parser
def model_final(self, model_dir):
return model_dir + "model.pth"
def model_checkpoints(self, model_dir):
return (glob.glob(model_dir + "checkpoints/*.pth"),)
def epoch_from_model_checkpoint_name(self):
return lambda x: int(
os.path.basename(x).replace("model_epoch_", "").replace(".pth", "")
)
def visualize(self):
images_dict = {
line.strip(): [Image.open(line.strip()).convert('RGB')] for line in self.config.input
}
if self.config.checkpoints:
models = sorted(
glob.glob(self.config.model_dir + "checkpoints/*.pth"),
key=self.epoch_from_model_checkpoint_name(),
)
sliced = slice(
*[
                    None if x == "" else int(x)
for x in self.config.checkpoints.split(":")
]
)
models = models[sliced]
else:
models = [self.model_final(self.config.model_dir)]
model_constr = supportedModels[config.model_type]
print("Using models with {!s}:".format(model_constr()))
for m in models:
print("\t" + m)
# we assume images have all the same size and need to be downscaled first
target_size = list(images_dict.values())[0][0].size[0]
if config.no_downscale:
target_size = int(target_size*config.scale_factor)
print("Scaling images to size {!s}:".format(target_size))
print("\n".join(["\t" + img for img in images_dict.keys()]))
if self.config.inc_lowres:
proc = SRCNNPreProcessorGen(
model_constr(),
models[0],
self.config.scale_factor,
not config.no_downscale,
use_gpu=torch.cuda.is_available(),
target_size=target_size,
)
for key, images in images_dict.items():
low_image = proc.lowres(images[0])
images_dict[key].append((low_image))
for model in models:
proc = SRCNNPreProcessorGen(
model_constr(),
model,
self.config.scale_factor,
not config.no_downscale,
use_gpu=torch.cuda.is_available(),
target_size=target_size,
srgan=config.model_type == 'srgan'
)
for key, images in images_dict.items():
result_image = proc.apply(images[0])
images_dict[key].append(result_image)
if not self.config.inc_highres:
images_dict = {key: value[1:] for key, value in images_dict.items()}
def combine_to_single_image(images, labels: List[str] = []) -> Image:
if config.columns:
columns = config.columns
else:
columns = len(models) + len(
list(
filter(
lambda x: x,
[self.config.inc_lowres, self.config.inc_highres],
)
)
)
rows = math.ceil(len(images) / float(columns))
height, width = rows * target_size, columns * target_size
result = Image.new("RGB", (width, height))
font = ImageFont.truetype("reports/B612Mono-Regular.ttf", 32)
draw = ImageDraw.Draw(result)
image_slots = [
(w, h)
for h, w in itertools.product(
range(0, target_size * rows, target_size),
range(0, target_size * columns, target_size),
)
][
: len(images)
] # take only n slots
for image, (w_index, h_index), label in itertools.zip_longest(
images, image_slots, labels
):
print(*image.size)
result.paste(image, (w_index, h_index))
if label:
draw.text((w_index, h_index), label, (255, 0, 0), font=font)
return result
def gen_labels(file, images):
labels = list(len(images) * [""])
labels[0] += "\n".join(os.path.basename(file).split("-")) + "\n"
index = 0
if config.inc_highres:
labels[index] += "high_res "
index += 1
if config.inc_lowres:
labels[index] += "low_res "
index += 1
if config.checkpoints:
for i, model in enumerate(models, index):
labels[i] += "e" + str(
self.epoch_from_model_checkpoint_name()(model)
)
return labels
if not self.config.outdir:
out_dir = self.config.model_dir + "results/"
else:
out_dir = self.config.outdir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if self.config.merge_to is not None:
fin_images = []
fin_labels = []
for file, images in images_dict.items():
fin_images.extend(images)
if self.config.annotate:
fin_labels.extend(gen_labels(file, images))
result_image = combine_to_single_image(fin_images, labels=fin_labels)
result_image.save(out_dir + config.merge_to)
print("Saved image to '{}'.".format(out_dir + config.merge_to))
else:
for file, images in images_dict.items():
out_file = out_dir + os.path.basename(file)
combine_params = [images]
if config.annotate:
combine_params.append(gen_labels(file, images))
result_image = combine_to_single_image(*combine_params)
result_image.save(out_file)
print("Saved images to '{}'.".format(out_dir))
if __name__ == "__main__":
parser = argparse.ArgumentParser("SRCNN visualizer")
PytorchSRVisulizator.add_arguments_to(parser)
config = parser.parse_args()
print(config)
visualizer = PytorchSRVisulizator(config)
visualizer.visualize()
| 8,426
| 0
| 161
|
bb3a71e89f46206d090a6a89fb38b7d1c5ab5c34
| 4,883
|
py
|
Python
|
tests/test_store2hdf5.py
|
Matioz/humblerl
|
8000cf60f2baa8b80927275c15147b6de199d75a
|
[
"MIT"
] | null | null | null |
tests/test_store2hdf5.py
|
Matioz/humblerl
|
8000cf60f2baa8b80927275c15147b6de199d75a
|
[
"MIT"
] | null | null | null |
tests/test_store2hdf5.py
|
Matioz/humblerl
|
8000cf60f2baa8b80927275c15147b6de199d75a
|
[
"MIT"
] | null | null | null |
import h5py
import numpy as np
from .. import Transition
from ..callbacks import StoreStates2Hdf5
HDF5_PATH = "/tmp/test_humblerl_callback.hdf5"
class TestStoreTransitions2Hdf5(object):
"""Test callback on 3D (e.g. images) and continuous states."""
| 40.02459
| 94
| 0.584067
|
import h5py
import numpy as np
from .. import Transition
from ..callbacks import StoreStates2Hdf5
HDF5_PATH = "/tmp/test_humblerl_callback.hdf5"
class TestStoreTransitions2Hdf5(object):
"""Test callback on 3D (e.g. images) and continuous states."""
def test_images_states(self):
ACTION_SPACE = np.array([1, 2, 3])
STATE_SPACE = np.zeros((8, 8, 3, 2))
STATE_SPACE[:] = np.array([0, 255])
STATE_SPACE_SHAPE = STATE_SPACE.shape[:-1]
MIN_TRANSITIONS = 96
CHUNK_SIZE = 48
N_TRANSITIONS = 1024
callback = StoreStates2Hdf5(STATE_SPACE_SHAPE, HDF5_PATH,
shuffle=False, min_transitions=MIN_TRANSITIONS,
chunk_size=CHUNK_SIZE, dtype=np.uint8)
transitions = []
for idx in range(N_TRANSITIONS):
transition = Transition(
state=np.random.randint(0, 256, size=(8, 8, 3)),
action=np.random.choice(ACTION_SPACE),
reward=np.random.normal(0, 1),
next_state=np.random.randint(0, 256, size=(8, 8, 3)),
is_terminal=(idx + 1) % 16 == 0
)
transitions.append(transition)
callback.on_step_taken(idx, transition, None)
callback.on_loop_end(False)
h5py_file = h5py.File(HDF5_PATH, "r")
assert h5py_file.attrs["N_TRANSITIONS"] == N_TRANSITIONS
assert h5py_file.attrs["N_GAMES"] == N_TRANSITIONS // 16
assert h5py_file.attrs["CHUNK_SIZE"] == CHUNK_SIZE
assert np.all(h5py_file.attrs["STATE_SHAPE"] == STATE_SPACE_SHAPE)
for idx, transition in enumerate(transitions):
assert np.all(h5py_file['states'][idx] == transition.state)
def test_continous_states(self):
ACTION_SPACE = np.array([1, 2, 3])
STATE_SPACE = np.zeros((4, 2))
STATE_SPACE[:] = np.array([-1, 1])
STATE_SPACE_SHAPE = STATE_SPACE.shape[:-1]
MIN_TRANSITIONS = 96
CHUNK_SIZE = 48
N_TRANSITIONS = 1024
callback = StoreStates2Hdf5(STATE_SPACE_SHAPE, HDF5_PATH,
shuffle=False, min_transitions=MIN_TRANSITIONS,
chunk_size=CHUNK_SIZE, dtype=np.float)
transitions = []
for idx in range(N_TRANSITIONS):
transition = Transition(
state=np.random.uniform(STATE_SPACE.T[0], STATE_SPACE.T[1]),
action=np.random.choice(ACTION_SPACE),
reward=np.random.normal(0, 1),
next_state=np.random.uniform(STATE_SPACE.T[0], STATE_SPACE.T[1]),
is_terminal=(idx + 1) % 16 == 0
)
transitions.append(transition)
callback.on_step_taken(idx, transition, None)
callback.on_loop_end(False)
h5py_file = h5py.File(HDF5_PATH, "r")
assert h5py_file.attrs["N_TRANSITIONS"] == N_TRANSITIONS
assert h5py_file.attrs["N_GAMES"] == N_TRANSITIONS // 16
assert h5py_file.attrs["CHUNK_SIZE"] == CHUNK_SIZE
assert np.all(h5py_file.attrs["STATE_SHAPE"] == STATE_SPACE_SHAPE)
for idx, transition in enumerate(transitions):
assert np.all(h5py_file['states'][idx] == transition.state)
def test_shuffle_chunks(self):
ACTION_SPACE = np.array([1, 2, 3])
STATE_SPACE = np.zeros((4, 2))
STATE_SPACE[:] = np.array([-1, 1])
STATE_SPACE_SHAPE = STATE_SPACE.shape[:-1]
MIN_TRANSITIONS = 48
CHUNK_SIZE = 48
N_TRANSITIONS = 48
callback = StoreStates2Hdf5(STATE_SPACE_SHAPE, HDF5_PATH,
shuffle=True, min_transitions=MIN_TRANSITIONS,
chunk_size=CHUNK_SIZE, dtype=np.float)
states = []
next_states = []
transitions = []
for idx in range(N_TRANSITIONS):
states.append(np.random.uniform(STATE_SPACE.T[0], STATE_SPACE.T[1]).tolist())
next_states.append(np.random.uniform(STATE_SPACE.T[0], STATE_SPACE.T[1]).tolist())
transitions.append((np.random.choice(ACTION_SPACE), np.random.normal(0, 1), 0))
callback.on_step_taken(idx, Transition(
state=states[-1],
action=transitions[-1][0],
reward=transitions[-1][1],
next_state=next_states[-1],
is_terminal=transitions[-1][2]
), None)
in_order = True
h5py_file = h5py.File(HDF5_PATH, "r")
for idx in range(N_TRANSITIONS):
state = h5py_file['states'][idx]
idx_target = states.index(state.tolist())
if idx != idx_target:
in_order = False
assert np.all(h5py_file['states'][idx] == states[idx_target])
assert not in_order, "Data isn't shuffled!"
| 4,545
| 0
| 81
|
c84164ddec73f5647fd324062b3b890f6b6bfb9b
| 3,362
|
py
|
Python
|
tests/test_accounts.py
|
zcking/oink
|
95f07567ecda214172c29684d0ee5f67da5b99cf
|
[
"MIT"
] | 3
|
2017-10-15T21:24:14.000Z
|
2021-06-02T21:15:28.000Z
|
tests/test_accounts.py
|
zach-king/oink
|
95f07567ecda214172c29684d0ee5f67da5b99cf
|
[
"MIT"
] | 1
|
2018-06-26T03:48:55.000Z
|
2018-06-26T03:48:55.000Z
|
tests/test_accounts.py
|
zach-king/oink
|
95f07567ecda214172c29684d0ee5f67da5b99cf
|
[
"MIT"
] | null | null | null |
'''
File: test_accounts.py
Author: Zachary King
Defines unit tests for accounts.py.
Tests all account features.
'''
from __future__ import print_function
import unittest
import os
from oink import accounts, db
class TestAccounts(unittest.TestCase):
'''Defines unit tests for adding and removing accounts.'''
def setUp(self):
        '''Set up testing database.'''
# Creates the testing database
os.mkdir('testdb')
with open(r'testdb\oink.db', 'w') as fout:
pass
db.connect('testdb')
accounts.setup()
def tearDown(self):
'''Destroys the testing database.'''
db.disconnect()
os.remove(r'testdb\oink.db')
os.rmdir('testdb')
def test_add_new_account(self):
'''Test for adding a new account.'''
cur = db.cursor()
# Add a new account
accounts.add_account(12345, 'TestAddAccount', 100.00, '2017-1-1')
self.assertEqual(cur.execute('SELECT COUNT(*) FROM accounts').fetchone()[0], 1)
accounts.add_account(54321, 'TestAddAccount2', 0.01, '2017-1-1')
self.assertEqual(cur.execute('SELECT COUNT(*) FROM accounts').fetchone()[0], 2)
def test_remove_account(self):
'''Test to remove an existing account.'''
cur = db.cursor()
# Insert a new account
cur.execute('INSERT INTO accounts VALUES (024, "TestRemoveAccount", 0.00, "2017-1-1")')
self.assertEqual(cur.rowcount, 1)
        # Remove the account
accounts.delete('TestRemoveAccount')
cur.execute('SELECT COUNT(*) FROM accounts')
self.assertEqual(cur.fetchone()[0], 0)
def test_add_null_account_number(self):
'''Tests NOT NULL constraint of database for account number'''
# Try to insert NULL as account number
with self.assertRaises(ValueError):
accounts.add_account(None, 'TestNullNumAccount', 0.0, '2017-1-1')
with self.assertRaises(ValueError):
accounts.add_account('', 'TestNullNumAccount', 0.0, '2017-1-1')
def test_add_null_account_name(self):
'''Tests NOT NULL constraint of database for account name'''
# Try to insert NULL as account name
with self.assertRaises(ValueError):
accounts.add_account(987, None, 0.0, '2017-1-1')
with self.assertRaises(ValueError):
accounts.add_account(789, '', 0.0, '2017-1-1')
def test_add_null_start_balance(self):
        '''Tests NOT NULL constraint of database for account starting balance'''
# Try to insert NULL as account starting balance
with self.assertRaises(ValueError):
accounts.add_account(111, 'TestNullStartBalanceAccount', None, '2017-1-1')
def test_add_negative_start_balance(self):
        '''Tests inserting a negative starting balance for a new account'''
with self.assertRaises(ValueError):
accounts.add_account(222, 'TestNegativeStartingBalance', -100.0, '2017-1-1')
def test_add_null_created_date(self):
'''Tests NOT NULL constraint for account created_on'''
with self.assertRaises(ValueError):
accounts.add_account(333, 'TestNullCreatedOn', 0.0, '')
with self.assertRaises(ValueError):
accounts.add_account(333, 'TestNullCreatedOn', 0.0, None)
if __name__ == '__main__':
unittest.main()
| 33.959596
| 95
| 0.646936
|
'''
File: test_accounts.py
Author: Zachary King
Defines unit tests for accounts.py.
Tests all account features.
'''
from __future__ import print_function
import unittest
import os
from oink import accounts, db
class TestAccounts(unittest.TestCase):
'''Defines unit tests for adding and removing accounts.'''
def setUp(self):
        '''Set up testing database.'''
# Creates the testing database
os.mkdir('testdb')
with open(r'testdb\oink.db', 'w') as fout:
pass
db.connect('testdb')
accounts.setup()
def tearDown(self):
'''Destroys the testing database.'''
db.disconnect()
os.remove(r'testdb\oink.db')
os.rmdir('testdb')
def test_add_new_account(self):
'''Test for adding a new account.'''
cur = db.cursor()
# Add a new account
accounts.add_account(12345, 'TestAddAccount', 100.00, '2017-1-1')
self.assertEqual(cur.execute('SELECT COUNT(*) FROM accounts').fetchone()[0], 1)
accounts.add_account(54321, 'TestAddAccount2', 0.01, '2017-1-1')
self.assertEqual(cur.execute('SELECT COUNT(*) FROM accounts').fetchone()[0], 2)
def test_remove_account(self):
'''Test to remove an existing account.'''
cur = db.cursor()
# Insert a new account
cur.execute('INSERT INTO accounts VALUES (024, "TestRemoveAccount", 0.00, "2017-1-1")')
self.assertEqual(cur.rowcount, 1)
        # Remove the account
accounts.delete('TestRemoveAccount')
cur.execute('SELECT COUNT(*) FROM accounts')
self.assertEqual(cur.fetchone()[0], 0)
def test_add_null_account_number(self):
'''Tests NOT NULL constraint of database for account number'''
# Try to insert NULL as account number
with self.assertRaises(ValueError):
accounts.add_account(None, 'TestNullNumAccount', 0.0, '2017-1-1')
with self.assertRaises(ValueError):
accounts.add_account('', 'TestNullNumAccount', 0.0, '2017-1-1')
def test_add_null_account_name(self):
'''Tests NOT NULL constraint of database for account name'''
# Try to insert NULL as account name
with self.assertRaises(ValueError):
accounts.add_account(987, None, 0.0, '2017-1-1')
with self.assertRaises(ValueError):
accounts.add_account(789, '', 0.0, '2017-1-1')
def test_add_null_start_balance(self):
        '''Tests NOT NULL constraint of database for account starting balance'''
# Try to insert NULL as account starting balance
with self.assertRaises(ValueError):
accounts.add_account(111, 'TestNullStartBalanceAccount', None, '2017-1-1')
def test_add_negative_start_balance(self):
        '''Tests inserting a negative starting balance for a new account'''
with self.assertRaises(ValueError):
accounts.add_account(222, 'TestNegativeStartingBalance', -100.0, '2017-1-1')
def test_add_null_created_date(self):
'''Tests NOT NULL constraint for account created_on'''
with self.assertRaises(ValueError):
accounts.add_account(333, 'TestNullCreatedOn', 0.0, '')
with self.assertRaises(ValueError):
accounts.add_account(333, 'TestNullCreatedOn', 0.0, None)
if __name__ == '__main__':
unittest.main()
| 0
| 0
| 0
|
8c4c0dc6efacc5b204fcaf8a7dd7a84ae0e43270
| 466
|
py
|
Python
|
rpna/core/migrations/0007_profile_change_default.py
|
code-for-good-wm/rooseveltparkna-alerts
|
bb22150141dd9448004a04df66d9ce0f353a3c10
|
[
"MIT"
] | 1
|
2021-11-07T19:00:32.000Z
|
2021-11-07T19:00:32.000Z
|
rpna/core/migrations/0007_profile_change_default.py
|
code-for-good-wm/rooseveltparkna-alerts
|
bb22150141dd9448004a04df66d9ce0f353a3c10
|
[
"MIT"
] | null | null | null |
rpna/core/migrations/0007_profile_change_default.py
|
code-for-good-wm/rooseveltparkna-alerts
|
bb22150141dd9448004a04df66d9ce0f353a3c10
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-11-07 02:05
from django.db import migrations, models
| 22.190476
| 65
| 0.590129
|
# Generated by Django 3.2.9 on 2021-11-07 02:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0006_profile_fix_nulls"),
]
operations = [
migrations.AlterField(
model_name="profile",
name="neighborhood_updates",
field=models.BooleanField(
default=True, verbose_name="Neighborhood Updates"
),
),
]
| 0
| 352
| 23
|
c66c4b547d80fddf7c83780bd317fd0de4bbb554
| 1,174
|
py
|
Python
|
siptrackweb/views/device/utils.py
|
lalusvipi/siptrackweb
|
e4d2882595a40d3dbb1bb74c1838937988726d8e
|
[
"BSD-2-Clause"
] | 38
|
2015-03-18T08:05:35.000Z
|
2021-11-09T10:57:54.000Z
|
siptrackweb/views/device/utils.py
|
lalusvipi/siptrackweb
|
e4d2882595a40d3dbb1bb74c1838937988726d8e
|
[
"BSD-2-Clause"
] | 29
|
2015-06-25T11:28:07.000Z
|
2019-11-30T21:15:30.000Z
|
siptrackweb/views/device/utils.py
|
lalusvipi/siptrackweb
|
e4d2882595a40d3dbb1bb74c1838937988726d8e
|
[
"BSD-2-Clause"
] | 13
|
2015-03-18T06:57:46.000Z
|
2021-06-22T10:38:49.000Z
|
from siptrackweb.views import helpers
| 46.96
| 116
| 0.539182
|
from siptrackweb.views import helpers
def make_device_association_list(device):
ret = []
for assoc in device.listAssociations(include = ['device', 'device category']):
if assoc.class_name == 'device category':
path = [{'path': '/display/%s/' % assoc.oid, 'name': assoc.attributes.get('name', '[device category]')}]
else:
path = helpers.make_browsable_path(assoc,
['device category', 'device tree'],
include_root = False)
ent = {'obj': assoc, 'path': path, 'type': 'association'}
ret.append(ent)
for ref in device.listReferences(include = ['device', 'device category']):
if ref.class_name == 'device category':
path = [{'path': '/display/%s/' % ref.oid, 'name': ref.attributes.get('name', '[device category]')}]
else:
path = helpers.make_browsable_path(ref,
['device category', 'device tree'],
include_root = False)
ent = {'obj': ref, 'path': path, 'type': 'reference'}
ret.append(ent)
ret.sort()
return ret
| 1,113
| 0
| 23
|
3f1716b49e21337ee8e79019daeb46f75cdd36f3
| 1,316
|
py
|
Python
|
setup.py
|
WaltWh/w2n
|
be5157949e6d0e07d13084ae3dcee0267c6f1711
|
[
"MIT"
] | null | null | null |
setup.py
|
WaltWh/w2n
|
be5157949e6d0e07d13084ae3dcee0267c6f1711
|
[
"MIT"
] | null | null | null |
setup.py
|
WaltWh/w2n
|
be5157949e6d0e07d13084ae3dcee0267c6f1711
|
[
"MIT"
] | null | null | null |
import re
from setuptools import setup
version = ""
with open("w2n/__init__.py") as f:
search = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE)
if search is not None:
version = search.group(1)
else:
raise RuntimeError("Could not grab version string")
if not version:
raise RuntimeError("version is not set")
with open("README.rst") as f:
readme = f.read()
setup(
name='longsphinx-word2number',
packages=['w2n'],
version=version,
license='MIT',
    description='Convert number words e.g. three hundred and forty two to numbers (342). '
'Forked for https://github.com/WaltWh/LongSphinx, not intended for public use.',
author='Akshay Nagpal & Walt Whiteside',
author_email='tinman@mage.city',
url='https://github.com/WaltWh/w2n', # use the URL to the GitHub repo
download_url='https://github.com/WaltWh/w2n/tarball/1.2',
keywords=['numbers', 'convert', 'words'], # arbitrary keywords
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.10'
],
long_description=readme,
long_description_content_type="text/x-rst",
python_requires=">=3.10.0", # Probably not, but I haven't tested anything else
test_suite="unit_testing"
)
| 31.333333
| 96
| 0.651216
|
import re
from setuptools import setup
version = ""
with open("w2n/__init__.py") as f:
search = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE)
if search is not None:
version = search.group(1)
else:
raise RuntimeError("Could not grab version string")
if not version:
raise RuntimeError("version is not set")
with open("README.rst") as f:
readme = f.read()
setup(
name='longsphinx-word2number',
packages=['w2n'],
version=version,
license='MIT',
description='Convert number words eg. three hundred and forty two to numbers (342). '
'Forked for https://github.com/WaltWh/LongSphinx, not intended for public use.',
author='Akshay Nagpal & Walt Whiteside',
author_email='tinman@mage.city',
url='https://github.com/WaltWh/w2n', # use the URL to the GitHub repo
download_url='https://github.com/WaltWh/w2n/tarball/1.2',
keywords=['numbers', 'convert', 'words'], # arbitrary keywords
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.10'
],
long_description=readme,
long_description_content_type="text/x-rst",
python_requires=">=3.10.0", # Probably not, but I haven't tested anything else
test_suite="unit_testing"
)
| 0
| 0
| 0
|
e0f36937cc4182603ec4de89ec2b3cd9d5f9e53c
| 5,995
|
py
|
Python
|
regression/train.py
|
Mariappan/examples
|
2c75018b743a26b6495ddba70100e557aeda11b8
|
[
"Apache-2.0"
] | null | null | null |
regression/train.py
|
Mariappan/examples
|
2c75018b743a26b6495ddba70100e557aeda11b8
|
[
"Apache-2.0"
] | null | null | null |
regression/train.py
|
Mariappan/examples
|
2c75018b743a26b6495ddba70100e557aeda11b8
|
[
"Apache-2.0"
] | null | null | null |
import joblib
import logging
from typing import Tuple
from catboost import CatBoostRegressor
from environs import Env
import numpy as np
import pandas as pd
from sklearn import metrics
from bedrock_client.bedrock.analyzer import ModelTask, ModelTypes
from bedrock_client.bedrock.analyzer.model_analyzer import ModelAnalyzer
from bedrock_client.bedrock.api import BedrockApi
from bedrock_client.bedrock.metrics.collector import (
BaselineMetricCollector,
FeatureHistogramCollector,
InferenceHistogramCollector
)
from bedrock_client.bedrock.metrics.encoder import MetricEncoder
env = Env()
OUTPUT_MODEL_PATH = env("OUTPUT_MODEL_PATH")
TRAIN_DATA_PATH = env("TRAIN_DATA_PATH")
TEST_DATA_PATH = env("TEST_DATA_PATH")
CONFIG_FAI = {
"large_rings": {
"group_a": [1],
"group_a_name": "Large",
"group_b": [0],
"group_b_name": "Small"
}
}
def load_dataset(filepath: str,
target: str) -> Tuple[pd.core.frame.DataFrame,
np.ndarray]:
"""
Loads the dataset and returns the features as a pandas dataframe and
the target variable as a numpy array.
:param filepath: Path to load the data
:type filepath: str
:param target: Target variable
:type target: str
:return: The features pandas dataframe and the target numpy array
:rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray]
"""
df = pd.read_csv(filepath).drop('Type', axis=1) # Removes 'Type' column
df['large_rings'] = (df['Rings'] > 10).astype(int)
# Ensure nothing missing
original_len = len(df)
df.dropna(how="any", axis=0, inplace=True)
num_rows_dropped = original_len - len(df)
if num_rows_dropped > 0:
print(f"Warning - dropped {num_rows_dropped} rows with NA data.")
y = df[target].values
df.drop(target, axis=1, inplace=True)
return df, y
def train_catboost_model(X: pd.core.frame.DataFrame,
y: np.ndarray,
verbose: bool = False) -> CatBoostRegressor:
"""
    Trains a CatBoost regression model on the given features and target.
:param X: Features for training
:type X: pandas.core.frame.DataFrame
:param y: Target variable
:type y: numpy.ndarray
:param verbose: Whether to print additional info
:type verbose: bool
:return: Trained CatBoostRegressor model
:rtype: catboost.CatBoostRegressor
"""
verbose and print('\nTRAIN\nScaling...')
model = CatBoostRegressor(iterations=100,
learning_rate=0.1,
depth=5)
verbose and print('Fitting...')
model.fit(X, y)
verbose and print('Done training.')
return model
def compute_log_metrics(model: CatBoostRegressor,
x_test: pd.core.frame.DataFrame,
y_test: np.ndarray):
"""
    Computes, prints and logs metrics.
:param model: Trained CatBoostRegressor model
:type model: catboost.CatBoostRegressor
:param x_test: Features for testing
:type x_test: pandas.core.frame.DataFrame
:param y_test: Target variable data for testing
:type y_test: numpy.ndarray
    :return: Test set predictions
    :rtype: numpy.ndarray
"""
y_pred = model.predict(x_test)
mae = metrics.mean_absolute_error(y_test, y_pred)
mse = metrics.mean_squared_error(y_test, y_pred)
r2_score = metrics.r2_score(y_test, y_pred)
print("\nEVALUATION\n"
f"\tMean absolute error = {mae:.4f}\n"
f"\tMean squared error = {mse:.4f}\n"
f"\tR2 regression score function = {r2_score:.4f}\n")
# Bedrock Logger: captures model metrics
bedrock = BedrockApi(logging.getLogger(__name__))
bedrock.log_metric("MAE", mae)
bedrock.log_metric("MSE", mse)
bedrock.log_metric("R2", r2_score)
return y_pred
if __name__ == '__main__':
main()
| 30.431472
| 76
| 0.639867
|
import joblib
import logging
from typing import Tuple
from catboost import CatBoostRegressor
from environs import Env
import numpy as np
import pandas as pd
from sklearn import metrics
from bedrock_client.bedrock.analyzer import ModelTask, ModelTypes
from bedrock_client.bedrock.analyzer.model_analyzer import ModelAnalyzer
from bedrock_client.bedrock.api import BedrockApi
from bedrock_client.bedrock.metrics.collector import (
BaselineMetricCollector,
FeatureHistogramCollector,
InferenceHistogramCollector
)
from bedrock_client.bedrock.metrics.encoder import MetricEncoder
env = Env()
OUTPUT_MODEL_PATH = env("OUTPUT_MODEL_PATH")
TRAIN_DATA_PATH = env("TRAIN_DATA_PATH")
TEST_DATA_PATH = env("TEST_DATA_PATH")
CONFIG_FAI = {
"large_rings": {
"group_a": [1],
"group_a_name": "Large",
"group_b": [0],
"group_b_name": "Small"
}
}
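# Illustrative note (assumption, not stated in the original script): the fairness
# config above compares rows whose engineered `large_rings` flag is 1 ("Large",
# i.e. Rings > 10 after load_dataset runs) against rows where the flag is 0
# ("Small"), e.g.
#   CONFIG_FAI["large_rings"]["group_a_name"]   # -> "Large"
#   CONFIG_FAI["large_rings"]["group_b"]        # -> [0]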
def load_dataset(filepath: str,
target: str) -> Tuple[pd.core.frame.DataFrame,
np.ndarray]:
"""
Loads the dataset and returns the features as a pandas dataframe and
the target variable as a numpy array.
:param filepath: Path to load the data
:type filepath: str
:param target: Target variable
:type target: str
:return: The features pandas dataframe and the target numpy array
:rtype: tuple[pandas.core.frame.DataFrame, numpy.ndarray]
"""
df = pd.read_csv(filepath).drop('Type', axis=1) # Removes 'Type' column
df['large_rings'] = (df['Rings'] > 10).astype(int)
# Ensure nothing missing
original_len = len(df)
df.dropna(how="any", axis=0, inplace=True)
num_rows_dropped = original_len - len(df)
if num_rows_dropped > 0:
print(f"Warning - dropped {num_rows_dropped} rows with NA data.")
y = df[target].values
df.drop(target, axis=1, inplace=True)
return df, y
def train_catboost_model(X: pd.core.frame.DataFrame,
y: np.ndarray,
verbose: bool = False) -> CatBoostRegressor:
"""
    Trains a CatBoost regression model on the given features and target.
:param X: Features for training
:type X: pandas.core.frame.DataFrame
:param y: Target variable
:type y: numpy.ndarray
:param verbose: Whether to print additional info
:type verbose: bool
:return: Trained CatBoostRegressor model
:rtype: catboost.CatBoostRegressor
"""
verbose and print('\nTRAIN\nScaling...')
model = CatBoostRegressor(iterations=100,
learning_rate=0.1,
depth=5)
verbose and print('Fitting...')
model.fit(X, y)
verbose and print('Done training.')
return model
def compute_log_metrics(model: CatBoostRegressor,
x_test: pd.core.frame.DataFrame,
y_test: np.ndarray):
"""
    Computes, prints and logs metrics.
:param model: Trained CatBoostRegressor model
:type model: catboost.CatBoostRegressor
:param x_test: Features for testing
:type x_test: pandas.core.frame.DataFrame
:param y_test: Target variable data for testing
:type y_test: numpy.ndarray
    :return: Test set predictions
    :rtype: numpy.ndarray
"""
y_pred = model.predict(x_test)
mae = metrics.mean_absolute_error(y_test, y_pred)
mse = metrics.mean_squared_error(y_test, y_pred)
r2_score = metrics.r2_score(y_test, y_pred)
print("\nEVALUATION\n"
f"\tMean absolute error = {mae:.4f}\n"
f"\tMean squared error = {mse:.4f}\n"
f"\tR2 regression score function = {r2_score:.4f}\n")
# Bedrock Logger: captures model metrics
bedrock = BedrockApi(logging.getLogger(__name__))
bedrock.log_metric("MAE", mae)
bedrock.log_metric("MSE", mse)
bedrock.log_metric("R2", r2_score)
return y_pred
def main():
x_train, y_train = load_dataset(
filepath=TRAIN_DATA_PATH,
target='ShellWeight'
)
x_test, y_test = load_dataset(
filepath=TEST_DATA_PATH,
target='ShellWeight'
)
print('X (train)')
print(x_train)
model = train_catboost_model(x_train,
y_train,
verbose=True)
# Save trained model
feature_names = x_train.columns.tolist()
print("\nSAMPLE FEATURES")
print({
feature_name: str(x_train[feature_name][0])
for feature_name in feature_names
})
joblib.dump([feature_names, model], OUTPUT_MODEL_PATH)
print('\nSaved feature names and catboost regression model.')
y_pred = compute_log_metrics(model,
x_test,
y_test)
# Save feature and inferance distribution
train_predicted = model.predict(x_train).flatten().tolist()
collectors = [
FeatureHistogramCollector(
data=x_train.iteritems(),
discrete={6, 7}, # Specify which column indices are discrete
),
InferenceHistogramCollector(data=train_predicted,
is_discrete=False)
# Specify inference as discrete
]
encoder = MetricEncoder(collectors=collectors)
with open(BaselineMetricCollector.DEFAULT_HISTOGRAM_PATH, "wb") as f:
f.write(encoder.as_text())
print('Saved feature and inference distribution.')
# Train Shap model and calculate xafai metrics
analyzer = (
ModelAnalyzer(model,
model_name='catboost_model',
model_type=ModelTypes.TREE,
model_task=ModelTask.REGRESSION)
.test_features(x_test)
)
(
analyzer
.fairness_config(CONFIG_FAI)
.test_labels(y_test)
.test_inference(y_pred)
)
analyzer.analyze()
print('Saved Shap model and XAI for regression.')
if __name__ == '__main__':
main()
| 1,994
| 0
| 23
|
2df7f47663759c2dc57b8f7fa714d0205f4b1432
| 10,059
|
py
|
Python
|
Scripts/GenCode_CNN_301.py
|
ShepherdCode/Soars2021
|
ab4f304eaa09e52d260152397a6c53d7a05457da
|
[
"MIT"
] | 1
|
2021-08-16T14:49:04.000Z
|
2021-08-16T14:49:04.000Z
|
Scripts/GenCode_CNN_301.py
|
ShepherdCode/Soars2021
|
ab4f304eaa09e52d260152397a6c53d7a05457da
|
[
"MIT"
] | null | null | null |
Scripts/GenCode_CNN_301.py
|
ShepherdCode/Soars2021
|
ab4f304eaa09e52d260152397a6c53d7a05457da
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # PC/NC classification by CNN
#
# The convolutional neural network (CNN) was invented for image processing.
# We can use Conv1D layers for processing string sequences.
# How well does CNN work on human RNA as a binary classifier of protein-coding/non-coding?
#
# Assume user downloaded files from GenCode 38 [FTP](http://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_38/)
# to a subdirectory called data.
#
# The learning curve is strange: up then down. I suspect the CNN is learning the fact that we padded sequences with T to make them uniform length.
# In[1]:
import time
show_time()
# In[2]:
PC_FILENAME='gencode.v38.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz'
# In[3]:
PC_SEQUENCES=20000 # how many protein-coding sequences
NC_SEQUENCES=20000 # how many non-coding sequences
PC_TESTS=1000
NC_TESTS=1000
BASES=1000 # how long is each sequence
ALPHABET=4 # how many different letters are possible
INPUT_SHAPE_2D = (BASES,ALPHABET,1) # Conv2D needs 3D inputs
INPUT_SHAPE = (BASES,ALPHABET) # Conv1D needs 2D inputs
FILTERS = 32 # how many different patterns the model looks for
NEURONS = 32
DROP_RATE = 0.2
WIDTH = 3 # how wide each pattern is, in bases
STRIDE_2D = (1,1) # For Conv2D how far in each direction
STRIDE = 1 # For Conv1D, how far between pattern matches, in bases
EPOCHS=50 # how many times to train on all the data
SPLITS=5 # SPLITS=3 means train on 2/3 and validate on 1/3
FOLDS=3 # train the model this many times (range 1 to SPLITS)
# In[4]:
import sys
import csv
try:
from google.colab import drive
IN_COLAB = True
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_gen.py')
with open('RNA_gen.py', 'w') as f:
f.write(r.text)
from RNA_gen import *
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
with open('RNA_describe.py', 'w') as f:
f.write(r.text)
from RNA_describe import *
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_prep.py')
with open('RNA_prep.py', 'w') as f:
f.write(r.text)
from RNA_prep import *
except:
print("CoLab not working. On my PC, use relative paths.")
IN_COLAB = False
DATAPATH='../data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.RNA_gen import *
from SimTools.RNA_describe import *
from SimTools.RNA_prep import *
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
if not assert_imported_RNA_gen():
print("ERROR: Cannot use RNA_gen.")
if not assert_imported_RNA_prep():
print("ERROR: Cannot use RNA_prep.")
# In[5]:
from os import listdir
#from zipfile import ZipFile
import gzip
import numpy as np
import pandas as pd
from scipy import stats # mode
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import Conv1D,Conv2D
from keras.layers import Flatten,MaxPooling1D,MaxPooling2D
from keras.losses import BinaryCrossentropy
# tf.keras.losses.BinaryCrossentropy
import matplotlib.pyplot as plt
from matplotlib import colors
mycmap = colors.ListedColormap(['red','blue']) # list color for label 0 then 1
np.set_printoptions(precision=2)
# In[6]:
# In[7]:
PC_FULLPATH=DATAPATH+PC_FILENAME
NC_FULLPATH=DATAPATH+NC_FILENAME
pcdf=load_gencode(PC_FULLPATH,1)
print("PC seqs loaded:",len(pcdf))
ncdf=load_gencode(NC_FULLPATH,0)
print("NC seqs loaded:",len(ncdf))
# In[8]:
# In[9]:
from sklearn.model_selection import train_test_split
pc_all = pcdf['sequence']
nc_all = ncdf['sequence']
# The split function also shuffles
pc_train,pc_test=train_test_split(pc_all,test_size=0.10,random_state=1234)
nc_train,nc_test=train_test_split(nc_all,test_size=0.10,random_state=1234)
# In[10]:
# Use code from our SimTools library.
UNIFORM_LENGTH=1000
MAXIMUM_LENGTH=2000
pc_seqs=uniform_length(pc_train,UNIFORM_LENGTH,MAXIMUM_LENGTH-500)
print("PC seqs ready:",len(pc_seqs))
nc_seqs=uniform_length(nc_train,UNIFORM_LENGTH,MAXIMUM_LENGTH+2000)
print("NC seqs ready:",len(nc_seqs))
X,y = prepare_inputs_len_x_alphabet(pc_seqs,nc_seqs,ALPHABET) # shuffles
print("Data ready")
# In[11]:
model = make_DNN()
print(model.summary())
# In[12]:
from keras.callbacks import ModelCheckpoint
# In[13]:
show_time()
do_cross_validation(X,y)
show_time()
# In[13]:
| 32.344051
| 146
| 0.67283
|
#!/usr/bin/env python
# coding: utf-8
# # PC/NC classification by CNN
#
# The convolutional neural network (CNN) was invented for image processing.
# We can use Conv1D layers for processing string sequences.
# How well does CNN work on human RNA as a binary classifier of protein-coding/non-coding?
#
# Assume user downloaded files from GenCode 38 [FTP](http://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_38/)
# to a subdirectory called data.
#
# The learning curve is strange: up then down. I suspect the CNN is learning the fact that we padded sequences with T to make them uniform length.
# In[1]:
import time
def show_time():
t = time.time()
s = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
print(s)
show_time()
# In[2]:
PC_FILENAME='gencode.v38.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz'
# In[3]:
PC_SEQUENCES=20000 # how many protein-coding sequences
NC_SEQUENCES=20000 # how many non-coding sequences
PC_TESTS=1000
NC_TESTS=1000
BASES=1000 # how long is each sequence
ALPHABET=4 # how many different letters are possible
INPUT_SHAPE_2D = (BASES,ALPHABET,1) # Conv2D needs 3D inputs
INPUT_SHAPE = (BASES,ALPHABET) # Conv1D needs 2D inputs
FILTERS = 32 # how many different patterns the model looks for
NEURONS = 32
DROP_RATE = 0.2
WIDTH = 3 # how wide each pattern is, in bases
STRIDE_2D = (1,1) # For Conv2D how far in each direction
STRIDE = 1 # For Conv1D, how far between pattern matches, in bases
EPOCHS=50 # how many times to train on all the data
SPLITS=5 # SPLITS=3 means train on 2/3 and validate on 1/3
FOLDS=3 # train the model this many times (range 1 to SPLITS)
# In[4]:
import sys
import csv
try:
from google.colab import drive
IN_COLAB = True
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_gen.py')
with open('RNA_gen.py', 'w') as f:
f.write(r.text)
from RNA_gen import *
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
with open('RNA_describe.py', 'w') as f:
f.write(r.text)
from RNA_describe import *
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_prep.py')
with open('RNA_prep.py', 'w') as f:
f.write(r.text)
from RNA_prep import *
except:
print("CoLab not working. On my PC, use relative paths.")
IN_COLAB = False
DATAPATH='../data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.RNA_gen import *
from SimTools.RNA_describe import *
from SimTools.RNA_prep import *
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
if not assert_imported_RNA_gen():
print("ERROR: Cannot use RNA_gen.")
if not assert_imported_RNA_prep():
print("ERROR: Cannot use RNA_prep.")
# In[5]:
from os import listdir
#from zipfile import ZipFile
import gzip
import numpy as np
import pandas as pd
from scipy import stats # mode
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import Conv1D,Conv2D
from keras.layers import Flatten,MaxPooling1D,MaxPooling2D
from keras.losses import BinaryCrossentropy
# tf.keras.losses.BinaryCrossentropy
import matplotlib.pyplot as plt
from matplotlib import colors
mycmap = colors.ListedColormap(['red','blue']) # list color for label 0 then 1
np.set_printoptions(precision=2)
# In[6]:
def load_gencode(filename,label):
DEFLINE='>'
DELIM='|'
EMPTY=''
labels=[] # usually 1 for protein-coding or 0 for non-coding
seqs=[] # usually string of ACGT
lens=[] # sequence length
ids=[] # GenCode transcript ID, always starts ENST
one_seq = EMPTY
one_id = None
# Use gzip 'r' mode to open file in read-only mode.
# Use gzip 't' mode to read each line of text as type string.
with gzip.open (filename,'rt') as infile:
for line in infile:
if line[0]==DEFLINE:
# Save the previous sequence if one exists.
if not one_seq == EMPTY:
labels.append(label)
seqs.append(one_seq)
lens.append(len(one_seq))
ids.append(one_id)
# Get ready to read the next sequence.
# Parse a GenCode defline that is formatted like this:
# >transcript_ID|gene_ID|other_fields other_info|other_info
one_id = line[1:].split(DELIM)[0]
one_seq = EMPTY
else:
# Continue loading sequence lines till next defline.
additional = line.rstrip()
one_seq = one_seq + additional
# Don't forget to save the last sequence after end-of-file.
if not one_seq == EMPTY:
labels.append(label)
seqs.append(one_seq)
lens.append(len(one_seq))
ids.append(one_id)
df1=pd.DataFrame(ids,columns=['tid'])
df2=pd.DataFrame(labels,columns=['class'])
df3=pd.DataFrame(seqs,columns=['sequence'])
df4=pd.DataFrame(lens,columns=['seqlen'])
df=pd.concat((df1,df2,df3,df4),axis=1)
return df
# In[7]:
PC_FULLPATH=DATAPATH+PC_FILENAME
NC_FULLPATH=DATAPATH+NC_FILENAME
pcdf=load_gencode(PC_FULLPATH,1)
print("PC seqs loaded:",len(pcdf))
ncdf=load_gencode(NC_FULLPATH,0)
print("NC seqs loaded:",len(ncdf))
# In[8]:
def uniform_length(seqs,hardlen,too_big):
newseqs=[]
pad='T'*hardlen
too_small=200 ## by definition of long non-coding RNA
for seq in seqs:
size=len(seq)
if (size>too_small and size<too_big):
big=seq+pad
little=big[:hardlen]
newseqs.append(little)
return newseqs
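# Illustrative sketch (not part of the original notebook, variable name is
# hypothetical): demonstrate the padding behaviour that the learning-curve
# comment at the top worries about. With hardlen=1000, a 250-base transcript
# passes the >200-base filter, is right-padded with 750 'T' characters, and is
# truncated to exactly 1000 bases.
_demo_padded = uniform_length(['A' * 250], 1000, 2000)   # toy input
assert len(_demo_padded[0]) == 1000 and _demo_padded[0].endswith('T')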
# In[9]:
from sklearn.model_selection import train_test_split
pc_all = pcdf['sequence']
nc_all = ncdf['sequence']
# The split function also shuffles
pc_train,pc_test=train_test_split(pc_all,test_size=0.10,random_state=1234)
nc_train,nc_test=train_test_split(nc_all,test_size=0.10,random_state=1234)
# In[10]:
# Use code from our SimTools library.
UNIFORM_LENGTH=1000
MAXIMUM_LENGTH=2000
pc_seqs=uniform_length(pc_train,UNIFORM_LENGTH,MAXIMUM_LENGTH-500)
print("PC seqs ready:",len(pc_seqs))
nc_seqs=uniform_length(nc_train,UNIFORM_LENGTH,MAXIMUM_LENGTH+2000)
print("NC seqs ready:",len(nc_seqs))
X,y = prepare_inputs_len_x_alphabet(pc_seqs,nc_seqs,ALPHABET) # shuffles
print("Data ready")
# In[11]:
def make_DNN():
print("make_DNN")
print("input shape:",INPUT_SHAPE)
dnn = Sequential()
#dnn.add(Embedding(input_dim=INPUT_SHAPE,output_dim=INPUT_SHAPE))
dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same",
input_shape=INPUT_SHAPE))
dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
dnn.add(MaxPooling1D())
dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
dnn.add(MaxPooling1D())
dnn.add(Flatten())
dnn.add(Dense(NEURONS,activation="sigmoid",dtype=np.float32))
dnn.add(Dropout(DROP_RATE))
dnn.add(Dense(1,activation="sigmoid",dtype=np.float32))
dnn.compile(optimizer='adam',
loss=BinaryCrossentropy(from_logits=False),
metrics=['accuracy']) # add to default metrics=loss
dnn.build(input_shape=INPUT_SHAPE)
#ln_rate = tf.keras.optimizers.Adam(learning_rate = LN_RATE)
#bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
#model.compile(loss=bc, optimizer=ln_rate, metrics=["accuracy"])
return dnn
model = make_DNN()
print(model.summary())
# In[12]:
from keras.callbacks import ModelCheckpoint
def do_cross_validation(X,y):
cv_scores = []
fold=0
mycallbacks = [ModelCheckpoint(
filepath=MODELPATH, save_best_only=True,
monitor='val_accuracy', mode='max')]
splitter = KFold(n_splits=SPLITS) # this does not shuffle
for train_index,valid_index in splitter.split(X):
if fold < FOLDS:
fold += 1
X_train=X[train_index] # inputs for training
y_train=y[train_index] # labels for training
X_valid=X[valid_index] # inputs for validation
y_valid=y[valid_index] # labels for validation
print("MODEL")
# Call constructor on each CV. Else, continually improves the same model.
            model = make_DNN()
print("FIT") # model.fit() implements learning
start_time=time.time()
history=model.fit(X_train, y_train,
epochs=EPOCHS,
verbose=1, # ascii art while learning
callbacks=mycallbacks, # called at end of each epoch
validation_data=(X_valid,y_valid))
end_time=time.time()
elapsed_time=(end_time-start_time)
print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
# print(history.history.keys()) # all these keys will be shown in figure
pd.DataFrame(history.history).plot(figsize=(8,5))
plt.grid(True)
plt.gca().set_ylim(0,1) # any losses > 1 will be off the scale
plt.show()
# In[13]:
show_time()
do_cross_validation(X,y)
show_time()
# In[13]:
| 4,856
| 0
| 114
|
5d00cd12b85bb0a5d6ff531e4050eaf2b25a92f6
| 137
|
py
|
Python
|
setup.py
|
perillaseed/PyPunchP2P
|
a85fb41cc6be96463a8fa3f9ffd7c37f378c5910
|
[
"MIT"
] | 1
|
2015-04-25T07:49:37.000Z
|
2015-04-25T07:49:37.000Z
|
setup.py
|
perillaseed/PyPunchP2P
|
a85fb41cc6be96463a8fa3f9ffd7c37f378c5910
|
[
"MIT"
] | null | null | null |
setup.py
|
perillaseed/PyPunchP2P
|
a85fb41cc6be96463a8fa3f9ffd7c37f378c5910
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
import py2exe
setup(console=['client.py'])
setup(console=['server.py'])
setup(console=['stun.py'])
| 22.833333
| 33
| 0.715328
|
from distutils.core import setup
import py2exe
setup(console=['client.py'])
setup(console=['server.py'])
setup(console=['stun.py'])
| 0
| 0
| 0
|
ed617c413c97581a929329b03f0040b599be7555
| 1,088
|
py
|
Python
|
examples/pyramid_backbone_redis_chat/chatter3/scripts/populate.py
|
benthomasson/gevent-socketio
|
0f9bd2744af033b7cba57bfd5b82106592e9f667
|
[
"BSD-3-Clause"
] | 625
|
2015-01-05T04:11:59.000Z
|
2022-03-14T13:29:59.000Z
|
examples/pyramid_backbone_redis_chat/chatter3/scripts/populate.py
|
benthomasson/gevent-socketio
|
0f9bd2744af033b7cba57bfd5b82106592e9f667
|
[
"BSD-3-Clause"
] | 53
|
2015-01-30T07:55:45.000Z
|
2021-02-28T10:50:34.000Z
|
examples/pyramid_backbone_redis_chat/chatter3/scripts/populate.py
|
benthomasson/gevent-socketio
|
0f9bd2744af033b7cba57bfd5b82106592e9f667
|
[
"BSD-3-Clause"
] | 213
|
2015-01-05T10:18:51.000Z
|
2022-01-23T08:57:38.000Z
|
#!/usr/bin/env python
import os
import sys
from pyramid.config import Configurator
from sqlalchemy import engine_from_config
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from chatter3.models import Base
from pyramid.paster import (
get_appsettings,
setup_logging,
)
DBSession = scoped_session(sessionmaker())
here = os.path.dirname(__file__)
if __name__ == "__main__": # pragma: no cover
main()
| 20.923077
| 57
| 0.693934
|
#!/usr/bin/env python
import os
import sys
from pyramid.config import Configurator
from sqlalchemy import engine_from_config
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from chatter3.models import Base
from pyramid.paster import (
get_appsettings,
setup_logging,
)
DBSession = scoped_session(sessionmaker())
here = os.path.dirname(__file__)
def usage(argv): # pragma: no cover
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv): # pragma: no cover
if len(argv) != 2:
usage(argv)
config_uri = argv[1]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
config = Configurator(
settings=settings
)
config.include('chatter.models')
engine = engine_from_config(settings, 'sqlalchemy.')
Base.metadata.bind = engine
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
if __name__ == "__main__": # pragma: no cover
main()
| 589
| 0
| 46
|
5ab2de7a3b4f2a0c3e272c15eeead137590295a0
| 1,630
|
py
|
Python
|
toolbox/custom_metrics.py
|
dear-anastasia/oil-field-modelling
|
fd5000225de4554564d184c5129322cda27958fa
|
[
"BSD-3-Clause"
] | 6
|
2020-08-27T14:37:36.000Z
|
2022-02-28T04:43:37.000Z
|
toolbox/custom_metrics.py
|
dear-anastasia/oil-field-modelling
|
fd5000225de4554564d184c5129322cda27958fa
|
[
"BSD-3-Clause"
] | null | null | null |
toolbox/custom_metrics.py
|
dear-anastasia/oil-field-modelling
|
fd5000225de4554564d184c5129322cda27958fa
|
[
"BSD-3-Clause"
] | 3
|
2020-11-08T15:13:43.000Z
|
2022-01-05T21:24:37.000Z
|
import numpy as np
import tensorflow as tf
| 33.265306
| 78
| 0.552147
|
import numpy as np
import tensorflow as tf
from keras import backend as K  # Keras backend (assumed, TF1-era), used for K.* calls in _mean_iou
def _mean_iou(y_true, y_pred):
prec = []
for t in np.arange(0.5, 1.0, 0.05):
tensor = y_pred > t
y_pred_ = tf.cast(tensor, tf.int32)
score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([up_opt]):
score = tf.identity(score)
prec.append(score)
return K.mean(K.stack(prec), axis=0)
def _get_iou_vector(A, B):
batch_size = A.shape[0]
metric = []
for batch in range(batch_size):
t, p = A[batch] > 0, B[batch] > 0
# if np.count_nonzero(t) == 0 and np.count_nonzero(p) > 0:
# metric.append(0)
# continue
# if np.count_nonzero(t) >= 1 and np.count_nonzero(p) == 0:
# metric.append(0)
# continue
# if np.count_nonzero(t) == 0 and np.count_nonzero(p) == 0:
# metric.append(1)
# continue
intersection = np.logical_and(t, p)
union = np.logical_or(t, p)
iou = (np.sum(intersection > 0) + 1e-10) / (np.sum(union > 0) + 1e-10)
thresholds = np.arange(0.5, 1, 0.05)
s = []
for thresh in thresholds:
s.append(iou > thresh)
metric.append(np.mean(s))
return np.mean(metric)
def my_iou_metric(label, pred):
    return tf.py_func(_get_iou_vector, [label, pred > 0.5], tf.float64)
def my_iou_metric_2(label, pred):
    return tf.py_func(_get_iou_vector, [label, pred > 0], tf.float64)
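# Illustrative sketch (assumed usage, not part of the original module; the
# _demo_* names are hypothetical): for a single 2x2 mask pair, intersection = 2
# and union = 3 give IoU ~ 0.667, which clears 4 of the 10 thresholds in
# arange(0.5, 1, 0.05), so the thresholded metric scores 0.4.
_demo_true = np.array([[[0, 1], [1, 1]]])   # one ground-truth mask
_demo_pred = np.array([[[0, 1], [1, 0]]])   # one predicted mask
assert abs(_get_iou_vector(_demo_true, _demo_pred) - 0.4) < 1e-6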
| 1,493
| 0
| 92
|
b5ec4357a8609ecfac4ad2393bae54409aeac7a2
| 30,691
|
py
|
Python
|
_JNJ/Media_Defaults.py
|
Tapyr/tapyr
|
4235fba6dce169fe747cce4d17d88dcf4a3f9f1d
|
[
"BSD-3-Clause"
] | 6
|
2016-12-10T17:51:10.000Z
|
2021-10-11T07:51:48.000Z
|
_JNJ/Media_Defaults.py
|
Tapyr/tapyr
|
4235fba6dce169fe747cce4d17d88dcf4a3f9f1d
|
[
"BSD-3-Clause"
] | null | null | null |
_JNJ/Media_Defaults.py
|
Tapyr/tapyr
|
4235fba6dce169fe747cce4d17d88dcf4a3f9f1d
|
[
"BSD-3-Clause"
] | 3
|
2020-03-29T07:37:03.000Z
|
2021-01-21T16:08:40.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2019 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
# This module is part of the package JNJ.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# JNJ.Media_Defaults
#
# Purpose
# Provide defaults for media fragments in html/*.media
#
# Revision Dates
# 1-Jan-2011 (CT) Creation
# 2-Jan-2011 (CT) Creation continued
# 4-Jan-2011 (CT) Creation continued..
# 14-Jan-2011 (CT) Use `GTW.Parameters.Definition` to allow lazy
# references with `P` and `P_dict`
# 22-Mar-2011 (CT) `afs`
# 22-Mar-2011 (CT) `afs` continued
# 29-Mar-2011 (CT) `afs` continued..
# 30-Mar-2011 (CT) `afs` continued...
# 19-May-2011 (CT) `afs` continued....
# 16-Oct-2011 (MG) `Debugger` added
# 24-Oct-2011 (CT) `color_spec_heading_rev` added
# 24-Oct-2011 (CT) `tablesorter` added
# 24-Nov-2011 (CT) Add `color_spec_selected_rev`
# 23-Feb-2012 (CT) Add `color_status_{bad,good,missing}`, `afs.status_size`
# 23-Feb-2012 (CT) Add `color_bg_bad` and `color_bg_missing`
# 1-Mar-2012 (CT) Change `pg_main_max_width` from `Em (50)` to `Em (45)`
# 8-Mar-2012 (CT) Add `color_bg_menu`
# 10-Aug-2012 (CT) Add `color_spec_sub_heading`, `color_spec_term`
# 7-Mar-2013 (CT) Add `border_added` and `border_deleted`
# 3-Apr-2013 (CT) Add `Rule`, `Rule.clearfix`
# 8-Apr-2013 (CT) Add lots of rules to `Rule`, e.g., `disabled`, `focus`...
# 7-Jan-2014 (CT) Add some more muted colors
# 21-Jan-2014 (CT) Add `breakpoint`
# 20-Feb-2014 (CT) Add `Rule.pg_nav_show`, `.pg_nav_show_a`
# 20-Feb-2014 (CT) Add `breakpoint.broad` and `.very_narrow`
# 8-Apr-2014 (CT) Improve `clearfix`
# 12-Apr-2014 (CT) Use `Border.P`, not `P_Border`
# 15-May-2014 (CT) Add `color_lightest_grey`
# 15-May-2014 (CT) Add `breakpoint.quite_narrow`
# 9-Jul-2014 (CT) Add `Rule.rotate_45_left`
# 26-Aug-2014 (CT) Add `pure` parameters
# 3-Sep-2014 (CT) Add `Rule.hidden`
# 3-Dec-2014 (CT) Add `color.alphabet_max_contrast_colors` and
# `color.kellys_max_contrast_colors`
# 16-Jan-2015 (CT) Change `nav_col.width` and `col_padding` to unit `Rem`
# 23-Jan-2015 (CT) Add `color_jnd_grey`, reduce contrast of `color_spec_row1`
# 23-Jan-2015 (CT) Factor `line_height*` parameters
# 15-Feb-2015 (CT) Add `menu_icon`
# 24-Mar-2015 (CT) Add `Rule.input_focus`
# 8-Apr-2015 (CT) Change `font_family_print` to "serif" to void font
# substitution by the printer
# 2-Dec-2015 (CT) Fix `Rule.visited`
# 2-Dec-2015 (CT) Change `nav_col.color_spec_link_current.background_color`
# 31-Dec-2015 (CT) Change `pg_nav_show` to allow embedded rel-nav buttons
# 11-Oct-2016 (CT) Import from `CHJ`, not `GTW`
# 27-Dec-2016 (CT) Add `breakpoint.really_narrow`
# 12-Jan-2017 (CT) Add `breakpoint.supports_two_column`
# 13-Jan-2017 (CT) Add `body_margin`, `hidden_collapse`, `target_visible`,
# `visible`
# 16-Jan-2017 (CT) Add `border_double`, `border_simple_light`
# 16-Jan-2017 (CT) Change `nav_col` to `nav.main`; modernize nav styling
# 16-Jan-2017 (CT) Add `nav.header`, `nav.rel`
# 19-Jan-2017 (CT) Add `Rule.main_nav_off`, `.main_nav_on`
# 10-May-2017 (CT) Add system-specific fonts to `font_family_normal`
# 28-May-2017 (CT) Add missing `"` in `font_family_normal`
# 29-Mar-2018 (CT) Add `system-ui` to `font_family_normal`
# 15-Apr-2019 (CT) Add `nord colors`
# ««revision-date»»···
#--
from _CHJ._CSS.import_CSS import *
from _CHJ.Parameters import \
( Definition, P, P_dict, Rule_Definition
, Rule, Rule_Attr, Rule_Child, Rule_Class, Rule_Pseudo, Rule_Sibling
)
Color.formatter = RGB_X
class Media_Defaults (Definition) :
"""Provide defaults for CSS fragments in html/*.css"""
# end class color
color_bg_bad = RGB_X ("#FFEEEE")
color_bg_menu = RGB_X ("#DDEEFF")
color_bg_missing = RGB_X ("#FFFFBB")
color_desc = RGB_X ("#666666")
color_focus = SVG_Color ("yellow")
color_heading = RGB_X ("#34444D")
color_heading_closed = RGB_X ("#56666E")
color_heading_sub = RGB_X ("#78888F")
color_jnd_grey = RGB_X ("#F8F8F8")
color_lightest_grey = RGB_X ("#F6F6F6")
color_lighter_grey = RGB_X ("#EDEDED")
color_light_grey = RGB_X ("#DEDEDE")
color_border_grey = RGB_X ("#CCCCCC")
color_medium_grey = RGB_X ("#BEBEBE")
color_half_grey = RGB_X ("#888888")
color_dark_grey = RGB_X ("#444444")
color_darker_grey = RGB_X ("#222222")
color_selected = RGB_X ("#FF6633")
color_status_bad = RGB_X ("#FF6666")
color_status_good = RGB_X ("#AAEEAA")
color_status_missing = RGB_X ("#FFDD00")
color_target = RGB_X ("#FF6633")
css_arrow_color = SVG_Color ("red")
css_arrow_width = Em (1./2)
block_margin_bottom = Em (1./2)
body_margin = TRBL (Em (0.2), Em (0.5))
border_added = "1px solid " + P.color.m_red
border_button = "2px outset " + P.color_medium_grey
border_deleted = "1px solid " + P.color.m_blue
border_double = "2px solid " + P.color_medium_grey
border_double_light = "2px solid " + P.color_light_grey
border_selected = "2px solid " + P.color_selected
border_simple = "1px solid " + P.color_medium_grey
border_simple_light = "1px solid " + P.color_light_grey
button_spec = P_dict \
( border = P.border_button
, cursor = "pointer"
, margin = TRBL0 (t = Em (1./4), b = Em (3./4))
, padding = TRBL (Em (0.5), Em (1.5))
, ** Border (radius = Px (10))
)
col_padding = Rem (0.3125) ### Px (5)
background_color = P.color.p_white
link_color = RGB_X ("#0000EE")
no_link_color = RGB_X ("#333333")
text_color = RGB_X ("#000033")
visited_color = RGB_X ("#551A8B")
# end class afs
class breakpoint (Definition) :
"""Breakpoints for responsive rules"""
really_narrow = P_dict \
( max_width = Px (360)
)
very_narrow = P_dict \
( max_width = Px (420)
)
quite_narrow = P_dict \
( max_width = Px (480)
)
narrow = P_dict \
( max_width = Px (680)
)
small_device = P_dict \
( max_device_width = Px (767)
)
supports_two_column = P_dict \
( min_width = Px (768)
)
broad = P_dict \
( min_width = Px (1280)
)
wide = P_dict \
( min_width = Px (1600)
)
# end class breakpoint
# end class cal
# end class menu_icon
# end class nav
class pure (Definition) :
"""Parameters of `pure` css as of version v0.4.2."""
input_focus_border_color = RGB_X ("#129FEA")
label_width = Em (10.0)
label_margin_right = Em (1.0)
aside_indent = label_width + label_margin_right
# end class pure
# end class tablesorter
# end class Rule
color_spec_error = P_dict \
( background_color = P.background_color
, color = P.color_status_bad
)
color_spec_gallery_heading = P_dict \
( background_color = RGB_P (50, 75, 100)
, color = P.nav.main.background_color
)
color_spec_heading = P_dict \
( background_color = P.background_color
, color = P.color_selected
)
color_spec_normal = P_dict \
( background_color = P.background_color
, color = P.text_color
)
color_spec_pg_head = P_dict \
( background_color = P.background_color
, color = RGB_X ("#0200DE")
)
color_spec_row1 = color_spec_meta = P_dict \
( background_color = "transparent"
, color = P.text_color
)
color_spec_row2 = color_spec_message = P_dict \
( background_color = P.color_lightest_grey
, color = P.text_color
)
color_spec_selected = P_dict \
( background_color = SVG_Color ("yellow")
, color = SVG_Color ("red")
)
color_spec_selected_rev= P_dict \
( background_color = SVG_Color ("red")
, color = SVG_Color ("yellow")
)
color_spec_strong = P_dict \
( background_color = P.background_color
, color = SVG_Color ("blue")
)
color_spec_sub_heading = P_dict \
( background_color = P.background_color
, color = P.color_half_grey
)
color_spec_term = P_dict \
( background_color = RGB_X ("#E6E6E6")
, color = P.color_dark_grey
)
del_spec = P_dict \
( text_decoration = "line-through"
# XXX ???
)
font_family_normal = \
( """"system-ui","-apple-system", BlinkMacSystemFont, """
""""Segoe UI", Roboto, "Fira Sans", """
""""Lucida Grande", verdana, sans-serif"""
)
font_family_pre = \
""""Lucida Sans Typewriter", "Lucida Console", "Courier New", Courier, monospace"""
font_family_print = "serif"
### Don't use specific fonts for `print` because font substitution done
### by a printer can look terribly ugly
font_spec_normal = P_dict \
( font_family = P.font_family_normal
, font_style = "normal"
, font_weight = "normal"
, line_height = P.line_height_normal
)
font_spec_print = P_dict \
( font_spec_normal
, font_family = P.font_family_print
)
font_spec_em = P_dict \
( font_spec_normal
, font_weight = "bold"
)
font_spec_pre = P_dict \
( font_spec_normal
, font_family = P.font_family_pre
)
grid_table_border = "3px ridge gray"
h1_font_size = Percent (125)
h1_font_weight = "bold"
hbox_spec = P_dict \
( display = "block"
, overflow = "hidden"
)
hr_spec = P_dict \
( border = 0
, border_top = "1px solid #CCC"
, display = "block"
, height = Px (1)
, margin = TRBL (Em (1), 0)
, padding = 0
)
input_margin = TRBL (Em (0.1), 0)
input_padding = Em (0.2)
ins_spec = P_dict \
( text_decoration = "none"
# XXX ???
)
line_height_heading = 2.00
line_height_larger = 1.875
line_height_normal = 1.44
line_height_input = 1.143
outline_focus = "2px solid " + P.color_focus
outline_target = "2px dotted " + P.color_target
pg_body_margin_lr = Em (3.6)
pg_head_height = "auto"
pg_header_nav_height = Rem (3.5)
pg_main_max_width = Em (45)
pg_main_min_width = Em (15)
pg_short_nav_font_size = Rem (1.20)
pg_short_nav_top = Em (3)
thumbnail_size = Px (155)
thumbnail_selected_color = P.color_selected
# end class Debugger
# end class Media_Defaults
### __END__ JNJ.Media_Defaults
| 36.107059
| 91
| 0.470105
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2019 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
# This module is part of the package JNJ.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# JNJ.Media_Defaults
#
# Purpose
# Provide defaults for media fragments in html/*.media
#
# Revision Dates
# 1-Jan-2011 (CT) Creation
# 2-Jan-2011 (CT) Creation continued
# 4-Jan-2011 (CT) Creation continued..
# 14-Jan-2011 (CT) Use `GTW.Parameters.Definition` to allow lazy
# references with `P` and `P_dict`
# 22-Mar-2011 (CT) `afs`
# 22-Mar-2011 (CT) `afs` continued
# 29-Mar-2011 (CT) `afs` continued..
# 30-Mar-2011 (CT) `afs` continued...
# 19-May-2011 (CT) `afs` continued....
# 16-Oct-2011 (MG) `Debugger` added
# 24-Oct-2011 (CT) `color_spec_heading_rev` added
# 24-Oct-2011 (CT) `tablesorter` added
# 24-Nov-2011 (CT) Add `color_spec_selected_rev`
# 23-Feb-2012 (CT) Add `color_status_{bad,good,missing}`, `afs.status_size`
# 23-Feb-2012 (CT) Add `color_bg_bad` and `color_bg_missing`
# 1-Mar-2012 (CT) Change `pg_main_max_width` from `Em (50)` to `Em (45)`
# 8-Mar-2012 (CT) Add `color_bg_menu`
# 10-Aug-2012 (CT) Add `color_spec_sub_heading`, `color_spec_term`
# 7-Mar-2013 (CT) Add `border_added` and `border_deleted`
# 3-Apr-2013 (CT) Add `Rule`, `Rule.clearfix`
# 8-Apr-2013 (CT) Add lots of rules to `Rule`, e.g., `disabled`, `focus`...
# 7-Jan-2014 (CT) Add some more muted colors
# 21-Jan-2014 (CT) Add `breakpoint`
# 20-Feb-2014 (CT) Add `Rule.pg_nav_show`, `.pg_nav_show_a`
# 20-Feb-2014 (CT) Add `breakpoint.broad` and `.very_narrow`
# 8-Apr-2014 (CT) Improve `clearfix`
# 12-Apr-2014 (CT) Use `Border.P`, not `P_Border`
# 15-May-2014 (CT) Add `color_lightest_grey`
# 15-May-2014 (CT) Add `breakpoint.quite_narrow`
# 9-Jul-2014 (CT) Add `Rule.rotate_45_left`
# 26-Aug-2014 (CT) Add `pure` parameters
# 3-Sep-2014 (CT) Add `Rule.hidden`
# 3-Dec-2014 (CT) Add `color.alphabet_max_contrast_colors` and
# `color.kellys_max_contrast_colors`
# 16-Jan-2015 (CT) Change `nav_col.width` and `col_padding` to unit `Rem`
# 23-Jan-2015 (CT) Add `color_jnd_grey`, reduce contrast of `color_spec_row1`
# 23-Jan-2015 (CT) Factor `line_height*` parameters
# 15-Feb-2015 (CT) Add `menu_icon`
# 24-Mar-2015 (CT) Add `Rule.input_focus`
# 8-Apr-2015 (CT) Change `font_family_print` to "serif" to void font
# substitution by the printer
# 2-Dec-2015 (CT) Fix `Rule.visited`
# 2-Dec-2015 (CT) Change `nav_col.color_spec_link_current.background_color`
# 31-Dec-2015 (CT) Change `pg_nav_show` to allow embedded rel-nav buttons
# 11-Oct-2016 (CT) Import from `CHJ`, not `GTW`
# 27-Dec-2016 (CT) Add `breakpoint.really_narrow`
# 12-Jan-2017 (CT) Add `breakpoint.supports_two_column`
# 13-Jan-2017 (CT) Add `body_margin`, `hidden_collapse`, `target_visible`,
# `visible`
# 16-Jan-2017 (CT) Add `border_double`, `border_simple_light`
# 16-Jan-2017 (CT) Change `nav_col` to `nav.main`; modernize nav styling
# 16-Jan-2017 (CT) Add `nav.header`, `nav.rel`
# 19-Jan-2017 (CT) Add `Rule.main_nav_off`, `.main_nav_on`
# 10-May-2017 (CT) Add system-specific fonts to `font_family_normal`
# 28-May-2017 (CT) Add missing `"` in `font_family_normal`
# 29-Mar-2018 (CT) Add `system-ui` to `font_family_normal`
# 15-Apr-2019 (CT) Add `nord colors`
# ««revision-date»»···
#--
from _CHJ._CSS.import_CSS import *
from _CHJ.Parameters import \
( Definition, P, P_dict, Rule_Definition
, Rule, Rule_Attr, Rule_Child, Rule_Class, Rule_Pseudo, Rule_Sibling
)
Color.formatter = RGB_X
class Media_Defaults (Definition) :
"""Provide defaults for CSS fragments in html/*.css"""
class color (Definition) :
### color alphabet
### http://eleanormaclure.files.wordpress.com/2011/03/colour-coding.pdf
###
### The RGB color values were taken with gcolor2's color picker from
### colour-coding.pdf
abc_a = RGB_X ("#C5A1CA")
abc_b = RGB_X ("#486EB5")
abc_c = RGB_X ("#82411D")
abc_d = RGB_X ("#401A57")
abc_e = RGB_X ("#1F1E1E")
abc_f = RGB_X ("#335A36")
abc_g = RGB_X ("#78C259")
abc_h = RGB_X ("#EDC99A")
abc_i = RGB_X ("#7F8080")
abc_j = RGB_X ("#BEDEAE")
abc_k = RGB_X ("#877A2F")
abc_l = RGB_X ("#A5C43A")
abc_m = RGB_X ("#9B247F")
abc_n = RGB_X ("#253777")
abc_o = RGB_X ("#E1A131")
abc_p = RGB_X ("#E6A4B5")
abc_q = RGB_X ("#50662E")
abc_r = RGB_X ("#CF2128")
abc_s = RGB_X ("#A4DBDF")
abc_t = RGB_X ("#56968C")
abc_u = RGB_X ("#E0E77B")
abc_v = RGB_X ("#584EA0")
abc_w = RGB_X ("#7D1416")
abc_x = RGB_X ("#F3F190")
abc_y = RGB_X ("#ECDA43")
abc_z = RGB_X ("#D55428")
### muted colors
m_aqua = RGB_X ("#7FDBFF")
m_black = RGB_X ("#111111")
m_blue = RGB_X ("#0088DD")
m_grey = RGB_X ("#AAAAAA")
m_fuchsia = RGB_X ("#F012BE")
m_green = RGB_X ("#00AA00")
m_lime = RGB_X ("#01FF70")
m_maroon = RGB_X ("#85144B")
m_navy = RGB_X ("#001F3F")
m_red = RGB_X ("#CC3333")
m_olive = RGB_X ("#3D9970")
m_orange = RGB_X ("#FFA022")
m_pink = RGB_X ("#DD4499")
m_purple = RGB_X ("#AA33BB")
m_silver = RGB_X ("#DDDDDD")
m_teal = RGB_X ("#33CCCC")
m_white = RGB_X ("#EEEEEE")
m_yellow = RGB_X ("#FFF00F")
### nord colors
### https://www.nordtheme.com/docs/colors-and-palettes
nord0 = n_polar_light_0 = RGB_X ("#2e3440")
nord1 = n_polar_light_1 = RGB_X ("#3b4252")
nord2 = n_polar_light_2 = RGB_X ("#434c5e")
nord3 = n_polar_light_3 = RGB_X ("#4c566a")
nord4 = n_snow_storm_0 = RGB_X ("#d8dee9")
nord5 = n_snow_storm_1 = RGB_X ("#e5e9f0")
nord6 = n_snow_storm_2 = RGB_X ("#eceff4")
nord7 = n_frost_0 = RGB_X ("#8fbcbb")
nord8 = n_frost_1 = RGB_X ("#88c0d0")
nord9 = n_frost_2 = RGB_X ("#81a1c1")
nord10 = n_frost_3 = RGB_X ("#5e81ac")
nord11 = n_aurora_0 = RGB_X ("#bf616a")
nord12 = n_aurora_1 = RGB_X ("#d08770")
nord13 = n_aurora_2 = RGB_X ("#ebcb8b")
nord14 = n_aurora_3 = RGB_X ("#a3be8c")
nord15 = n_aurora_4 = RGB_X ("#b48ead")
### pure colors
p_black = SVG_Color ("black")
p_blue = SVG_Color ("blue")
p_cyan = SVG_Color ("cyan")
p_gray = p_grey = SVG_Color ("gray")
p_green = SVG_Color ("green")
p_lime = SVG_Color ("lime")
p_magenta = SVG_Color ("magenta")
p_maroon = SVG_Color ("maroon")
p_navy = SVG_Color ("navy")
p_olive = SVG_Color ("olive")
p_purple = SVG_Color ("purple")
p_red = SVG_Color ("red")
p_teal = SVG_Color ("teal")
p_white = SVG_Color ("white")
p_yellow = SVG_Color ("yellow")
### Kelly's 22 colors of maximum contrast
### http://www.iscc.org/pdf/PC54_1724_001.pdf
### http://burgess-studio.co.uk/colour/
### http://eleanormaclure.files.wordpress.com/2011/03/colour-coding.pdf
###
### The order of colors in Kelly's list was planned so that there
### would be maximum contrast between colors in a set if the required
### number of colors were always selected in order from the top. So a
### set of five colors should be white, black, yellow, purple, and
### orange. And if seven colors were required, light blue and red
### should be added. Kelly took care of the needs of people with
### defective color vision. The first nine colors would be maximally
### different for such people as well as for people with normal
### vision. These nine colors are also readily distinguishable by
### color name.
###
### The RGB color values were taken with gcolor2's color picker from
### colour-coding.pdf
k_white = RGB_X ("#FFFFFF") # 1
k_black = RGB_X ("#1F1E1E") # 2
k_yellow = RGB_X ("#EBCD3F") # 3
k_purple = RGB_X ("#6F308B") # 4
k_orange = RGB_X ("#DB6A28") # 5
k_light_blue = RGB_X ("#98CEE6") # 6
k_red = RGB_X ("#B91F36") # 7
k_buff = RGB_X ("#C1BC82") # 8
k_gray = k_grey = RGB_X ("#7F8080") # 9 ———————————
k_green = RGB_X ("#62A647") # 10
k_purplish_pink = RGB_X ("#D386B1") # 11
k_blue = RGB_X ("#4578B4") # 12
k_yellowish_pink = RGB_X ("#DD8565") # 13
k_violet = RGB_X ("#493896") # 14
k_orange_yellow = RGB_X ("#E1A131") # 15
k_purplish_red = RGB_X ("#91278B") # 16
k_greenish_yellow = RGB_X ("#E9E857") # 17
k_reddish_brown = RGB_X ("#7D1716") # 18
k_yellow_green = RGB_X ("#93AD3C") # 19
k_yellowish_brown = RGB_X ("#6E3515") # 20
k_reddish_orange = RGB_X ("#D12D27") # 21
k_olive_green = RGB_X ("#2C3617") # 22
kellys_max_contrast_colors = \
[ k_white
, k_black
, k_yellow
, k_purple
, k_orange
, k_light_blue
, k_red
, k_buff
, k_gray
, k_green
, k_purplish_pink
, k_blue
, k_yellowish_pink
, k_violet
, k_orange_yellow
, k_purplish_red
, k_greenish_yellow
, k_reddish_brown
, k_yellow_green
, k_yellowish_brown
, k_reddish_orange
, k_olive_green
]
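        ### Illustrative example (not part of the original parameter set): because
        ### the list is ordered for maximum contrast, a palette of N colors is
        ### simply the first N entries, e.g. the first five are white, black,
        ### yellow, purple, and orange.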
### The order in the following list tries to maximize contrast,
### analogously to Kelly's list
alphabet_max_contrast_colors = \
[ abc_e
, abc_y
, abc_d
, abc_z
, abc_s
, abc_r
, abc_x
, abc_i
, abc_g
, abc_p
, abc_b
, abc_h
, abc_v
, abc_o
, abc_m
, abc_u
, abc_w
, abc_l
, abc_c
, abc_a
, abc_q
, abc_j
, abc_n
, abc_k
, abc_f
, abc_t
]
# end class color
color_bg_bad = RGB_X ("#FFEEEE")
color_bg_menu = RGB_X ("#DDEEFF")
color_bg_missing = RGB_X ("#FFFFBB")
color_desc = RGB_X ("#666666")
color_focus = SVG_Color ("yellow")
color_heading = RGB_X ("#34444D")
color_heading_closed = RGB_X ("#56666E")
color_heading_sub = RGB_X ("#78888F")
color_jnd_grey = RGB_X ("#F8F8F8")
color_lightest_grey = RGB_X ("#F6F6F6")
color_lighter_grey = RGB_X ("#EDEDED")
color_light_grey = RGB_X ("#DEDEDE")
color_border_grey = RGB_X ("#CCCCCC")
color_medium_grey = RGB_X ("#BEBEBE")
color_half_grey = RGB_X ("#888888")
color_dark_grey = RGB_X ("#444444")
color_darker_grey = RGB_X ("#222222")
color_selected = RGB_X ("#FF6633")
color_status_bad = RGB_X ("#FF6666")
color_status_good = RGB_X ("#AAEEAA")
color_status_missing = RGB_X ("#FFDD00")
color_target = RGB_X ("#FF6633")
css_arrow_color = SVG_Color ("red")
css_arrow_width = Em (1./2)
block_margin_bottom = Em (1./2)
body_margin = TRBL (Em (0.2), Em (0.5))
border_added = "1px solid " + P.color.m_red
border_button = "2px outset " + P.color_medium_grey
border_deleted = "1px solid " + P.color.m_blue
border_double = "2px solid " + P.color_medium_grey
border_double_light = "2px solid " + P.color_light_grey
border_selected = "2px solid " + P.color_selected
border_simple = "1px solid " + P.color_medium_grey
border_simple_light = "1px solid " + P.color_light_grey
button_spec = P_dict \
( border = P.border_button
, cursor = "pointer"
, margin = TRBL0 (t = Em (1./4), b = Em (3./4))
, padding = TRBL (Em (0.5), Em (1.5))
, ** Border (radius = Px (10))
)
col_padding = Rem (0.3125) ### Px (5)
background_color = P.color.p_white
link_color = RGB_X ("#0000EE")
no_link_color = RGB_X ("#333333")
text_color = RGB_X ("#000033")
visited_color = RGB_X ("#551A8B")
class afs (Definition) :
block_margin_bottom = Em (0.1)
border_spec_input = Border.P \
( color = P.R.color_dark_grey
, style = "solid"
, width = Px (1)
)
border_spec_readonly = Border.P \
( color = P.R.color_medium_grey
, style = "solid"
, width = Px (2)
)
border_spec_section = Border.P \
( color = P.R.color_darker_grey
, style = "solid"
, width = TRBL0 (l = Px (2), default = Px (1))
)
color_spec_desc = P_dict \
( background_color = P.R.color_desc
, color = P.R.background_color
)
color_spec_heading = P_dict \
( background_color = P.R.color_heading
, color = P.R.background_color
)
color_spec_heading_closed = P_dict \
( background_color = P.R.color_heading_closed
, color = P.R.background_color
)
color_spec_heading_sub = P_dict \
( background_color = P.R.color_heading_sub
, color = P.R.background_color
)
color_spec_label = P_dict \
( background_color = "inherit"
, color = P.R.text_color
)
color_spec_optional = P.R.color_spec_normal
color_spec_necessary = P_dict \
( background_color = P.R.color_light_grey
, color = P.R.text_color
)
color_spec_readonly = P_dict \
( background_color = P.R.color_light_grey
, color = P.R.color_dark_grey
)
color_spec_required = P_dict \
( background_color = P.R.color_medium_grey
, color = P.R.text_color
)
header_padding = TRBL (Em (0.2), Em (0.2), Em (0.3), Em (0.5))
status_size = Px (12)
# end class afs
class breakpoint (Definition) :
"""Breakpoints for responsive rules"""
really_narrow = P_dict \
( max_width = Px (360)
)
very_narrow = P_dict \
( max_width = Px (420)
)
quite_narrow = P_dict \
( max_width = Px (480)
)
narrow = P_dict \
( max_width = Px (680)
)
small_device = P_dict \
( max_device_width = Px (767)
)
supports_two_column = P_dict \
( min_width = Px (768)
)
broad = P_dict \
( min_width = Px (1280)
)
wide = P_dict \
( min_width = Px (1600)
)
# end class breakpoint
class cal (Definition) :
date_bg = P.R.color_medium_grey
date_padding = TRBL0 (r = Em (1./4), b = Em (1./10))
event_bg = RGB_X ("#FFF8AF")
font_size = Em (0.7)
line_height = 1.5
holiday_bg = RGB_X ("#CCFFFF")
month_color = RGB_X ("#777777")
weekend_color = P.R.color.m_blue
week_bg = P.R.color_heading
week_color = RGB_8 (255, 153, 0)
week_height = Em (8)
# end class cal
class menu_icon (Definition) :
color = P.R.color_selected
line_width = Rem (0.1875)
margin_ab = Em (0.4375)
margin_m = Em (0.125)
width = Em (1.25)
# end class menu_icon
class nav (Definition) :
border_radius = Em (0.625) ### Px (10)
class header (Definition) :
a_color = P.T.color_target
a_margin = 0
a_padding = Em (1)
hbt_sep = Em (1)
# end class header
class main (Definition) :
a_font_size = "small"
a_font_size_current = Em (0.875)
background_color = P.T.background_color
button_spec = P_dict \
( border = P.T.border_button
, color = P.link_color
, margin = 0
, text_align = "center"
)
color_no_link = P.T.color_heading
color_spec_heading = P_dict \
( background_color = P.T.color_heading
, color = P.background_color
)
color_spec_heading_rev = P_dict \
( background_color = P.background_color
, color = P.T.color_heading
)
color_spec_label = P_dict \
( background_color = P.background_color
, color = P.T.text_color
)
color_spec_link = P_dict \
( background_color = P.background_color
, color = P.link_color
)
color_spec_link_current = P_dict \
( background_color = P.T.color_lighter_grey
, color = P.T.color_heading
)
color_spec_no_link = P_dict \
( background_color = P.background_color
, color = P.color_no_link
, opacity = 0.7
)
color_spec_section_current = P_dict \
( background_color = P.background_color
, color = P.T.color_heading
)
color_spec_visited = P_dict \
( color_spec_link
, color = P.visited_color
)
color_spec_web_link_hover = P_dict \
( background_color = P.link_color
, color = P.background_color
)
li_left = Em (0.75)
line_height_larger = 1.50
line_height_normal = 1.35
link_color = P.T.color_selected
mark_color_link = P.T.css_arrow_color
mark_color_section = P.T.color_heading
mark_width = Em (0.40)
max_button_width = Em (20)
padding = TRBL (Rem (1./4), Rem (0.625), 0)
visited_color = P.T.color_heading
vert_padding = Rem (0.2)
width = Rem (11.875) + 3 * P.T.col_padding
# end class main
class rel (Definition) :
a_color = P.T.color_target
a_margin = Em (0.5)
a_padding = Em (1)
# end class rel
# end class nav
class pure (Definition) :
"""Parameters of `pure` css as of version v0.4.2."""
input_focus_border_color = RGB_X ("#129FEA")
label_width = Em (10.0)
label_margin_right = Em (1.0)
aside_indent = label_width + label_margin_right
# end class pure
class tablesorter (Definition) :
color_marker = P.R.background_color
margin_top = Px (8)
opacity = 0.75
width = Px (5)
# end class tablesorter
class Rule (Rule_Definition) :
clearfix = Rule_Pseudo \
( "after"
, clear = "both"
, content = "' '"
, display = "table"
# http://nicolasgallagher.com/micro-clearfix-hack/
)
disabled = Rule_Class \
( "disabled"
, opacity = 0.5
)
focus = Rule_Pseudo \
( "focus"
, P.R.color_spec_selected
)
focus_outline = Rule_Pseudo \
( "focus"
, P.R.color_spec_selected
, outline = P.R.outline_focus
)
hidden = Rule \
( display = "none"
, visibility = "hidden"
)
hidden_collapse = Rule \
( display = "none"
, visibility = "collapse"
)
hover = Rule_Pseudo \
( "hover"
, P.R.color_spec_selected
)
hover_rev = Rule_Pseudo \
( "hover"
, P.R.color_spec_selected_rev
)
input_focus = Rule_Pseudo \
( "focus"
, background_color = "inherit"
, border_color = P.R.pure.input_focus_border_color
, color = RGB_X ("#0078E7")
, outline = P.R.pure.input_focus_border_color
)
link = Rule_Pseudo \
( "link"
, color = P.R.link_color
)
main_nav_off = Rule \
( height = 0
, padding = 0
, width = 0
)
main_nav_on = Rule \
( height = "auto"
, padding = P.R.nav.main.padding
, width = "auto"
)
rotate_45_left = Rule \
( Transform ("rotate(-45deg)")
, display = "inline-block"
)
row_even = Rule_Pseudo \
( "nth-child(2n)"
, P.R.color_spec_row2
)
row_odd = Rule_Pseudo \
( "nth-child(2n+1)"
, P.R.color_spec_row1
)
target = Rule_Pseudo \
( "target"
, P.R.color_spec_selected
)
target_outline = Rule_Pseudo \
( "target"
, P.R.color_spec_selected
, outline = P.R.outline_focus
)
target_visible = Rule_Pseudo \
( "target"
, visibility = "visible"
)
target_visible_block = Rule_Pseudo \
( "target"
, display = "block"
, visibility = "visible"
)
visible = Rule_Pseudo \
( "target"
, visibility = "visible"
)
visible_block = Rule_Pseudo \
( display = "block"
, visibility = "visible"
)
visited = Rule_Pseudo \
( "visited"
, color = P.R.visited_color
)
# end class Rule
color_spec_error = P_dict \
( background_color = P.background_color
, color = P.color_status_bad
)
color_spec_gallery_heading = P_dict \
( background_color = RGB_P (50, 75, 100)
, color = P.nav.main.background_color
)
color_spec_heading = P_dict \
( background_color = P.background_color
, color = P.color_selected
)
color_spec_normal = P_dict \
( background_color = P.background_color
, color = P.text_color
)
color_spec_pg_head = P_dict \
( background_color = P.background_color
, color = RGB_X ("#0200DE")
)
color_spec_row1 = color_spec_meta = P_dict \
( background_color = "transparent"
, color = P.text_color
)
color_spec_row2 = color_spec_message = P_dict \
( background_color = P.color_lightest_grey
, color = P.text_color
)
color_spec_selected = P_dict \
( background_color = SVG_Color ("yellow")
, color = SVG_Color ("red")
)
color_spec_selected_rev= P_dict \
( background_color = SVG_Color ("red")
, color = SVG_Color ("yellow")
)
color_spec_strong = P_dict \
( background_color = P.background_color
, color = SVG_Color ("blue")
)
color_spec_sub_heading = P_dict \
( background_color = P.background_color
, color = P.color_half_grey
)
color_spec_term = P_dict \
( background_color = RGB_X ("#E6E6E6")
, color = P.color_dark_grey
)
del_spec = P_dict \
( text_decoration = "line-through"
# XXX ???
)
font_family_normal = \
( """"system-ui","-apple-system", BlinkMacSystemFont, """
""""Segoe UI", Roboto, "Fira Sans", """
""""Lucida Grande", verdana, sans-serif"""
)
font_family_pre = \
""""Lucida Sans Typewriter", "Lucida Console", "Courier New", Courier, monospace"""
font_family_print = "serif"
### Don't use specific fonts for `print` because font substitution done
### by a printer can look terribly ugly
font_spec_normal = P_dict \
( font_family = P.font_family_normal
, font_style = "normal"
, font_weight = "normal"
, line_height = P.line_height_normal
)
font_spec_print = P_dict \
( font_spec_normal
, font_family = P.font_family_print
)
font_spec_em = P_dict \
( font_spec_normal
, font_weight = "bold"
)
font_spec_pre = P_dict \
( font_spec_normal
, font_family = P.font_family_pre
)
grid_table_border = "3px ridge gray"
h1_font_size = Percent (125)
h1_font_weight = "bold"
hbox_spec = P_dict \
( display = "block"
, overflow = "hidden"
)
hr_spec = P_dict \
( border = 0
, border_top = "1px solid #CCC"
, display = "block"
, height = Px (1)
, margin = TRBL (Em (1), 0)
, padding = 0
)
input_margin = TRBL (Em (0.1), 0)
input_padding = Em (0.2)
ins_spec = P_dict \
( text_decoration = "none"
# XXX ???
)
line_height_heading = 2.00
line_height_larger = 1.875
line_height_normal = 1.44
line_height_input = 1.143
outline_focus = "2px solid " + P.color_focus
outline_target = "2px dotted " + P.color_target
pg_body_margin_lr = Em (3.6)
pg_head_height = "auto"
pg_header_nav_height = Rem (3.5)
pg_main_max_width = Em (45)
pg_main_min_width = Em (15)
pg_short_nav_font_size = Rem (1.20)
pg_short_nav_top = Em (3)
thumbnail_size = Px (155)
thumbnail_selected_color = P.color_selected
class Debugger (Definition) :
background_color = P.R.background_color
console_border_color = P.R.color_border_grey
console_background_color = P.R.color_lightest_grey
console_text_color = P.R.color.m_black
form_text_color = P.R.color_dark_grey
traceback_background_color = P.R.background_color
traceback_text_color = P.R.background_color
# end class Debugger
# end class Media_Defaults
### __END__ JNJ.Media_Defaults
| 0
| 18,191
| 216
|
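The Media_Defaults block above composes base parameter dictionaries into derived ones (for example `font_spec_em` extends `font_spec_normal` with a bold weight). A minimal sketch of that merge pattern, using a hypothetical `p_dict` helper rather than the real JNJ `P_dict` class, whose actual API is not shown here:

# Hypothetical stand-in for the P_dict merge pattern: positional
# arguments are base dicts merged in order, keyword arguments override.
def p_dict(*bases, **overrides):
    merged = {}
    for base in bases:
        merged.update(base)
    merged.update(overrides)
    return merged

font_spec_normal = p_dict(
    font_family="verdana, sans-serif",
    font_style="normal",
    font_weight="normal",
    line_height=1.44,
)
# Derived spec: identical to normal except for the weight,
# mirroring how font_spec_em is built above.
font_spec_em = p_dict(font_spec_normal, font_weight="bold")
print(font_spec_em["font_weight"])  # bold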
7e8520464ef78923e2bcd35f840d3b3c36c23689
| 76
|
py
|
Python
|
Exercise_4_9.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
Exercise_4_9.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
Exercise_4_9.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
#list comprehension
cubes = [cube**3 for cube in range(1,11)]
print(cubes)
| 15.2
| 41
| 0.710526
|
#list comprehension
cubes = [cube**3 for cube in range(1,11)]
print(cubes)
| 0
| 0
| 0
|
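The exercise above builds the list of cubes with a comprehension; for comparison, a minimal sketch of the equivalent explicit loop:

# Equivalent explicit loop for the comprehension above
cubes = []
for cube in range(1, 11):
    cubes.append(cube ** 3)
print(cubes)  # [1, 8, 27, 64, 125, 216, 343, 512, 729, 1000]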
00380a4e96afe9b21aff8fb8b7107dd165f0876b
| 38,092
|
py
|
Python
|
src/gui.py
|
mgely/scriptq
|
7339abfdefac8a1df6f3c0928e6f22bae9620e05
|
[
"MIT"
] | null | null | null |
src/gui.py
|
mgely/scriptq
|
7339abfdefac8a1df6f3c0928e6f22bae9620e05
|
[
"MIT"
] | 1
|
2020-06-10T08:08:24.000Z
|
2020-06-10T08:08:24.000Z
|
src/gui.py
|
mgely/scriptq
|
7339abfdefac8a1df6f3c0928e6f22bae9620e05
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import messagebox, filedialog, simpledialog, scrolledtext
from tkinter import PhotoImage
from tkinter import ttk
import subprocess
from threading import Thread
import time
import tempfile
from os import path
import sys
try:
from . import settings
except ImportError:
# Running from source
import settings
# Contains the graphics featured on the buttons
graphics_directory = path.join(path.dirname(__file__), "graphics")
'''
Hierarchy of Tkinter frames:
GuiWindow.master = tk.Tk()
GuiWindow(ttk.Frame)
BatchingFrame.frame(ttk.Frame)
BatchingFrame.vbar(ttk.Scrollbar)
BatchingFrame(tk.Canvas)
BatchingFrame.canvas_content(tk.Frame)
Note: this is an amalgamation of code from different projects.
It could probably be simplified dramatically.
'''
class GuiWindow(ttk.Frame):
'''
Highest level window and frame.
Here we set the name and size of the window,
and prepare for hosting the actual content of the program.
If unittest is set to True, we don't start the
mainloop, but wait for a script to trigger
events and manually update the GUI.
'''
class BatchingFrame(tk.Canvas):
'''
This is the place where the widgets corresponding
to scripts live.
It is also the brain of the program, and controls
the state of the different scripts, the launching of
scripts, etc...
'''
def remove_all(self):
'''
Removes all the Scripts, from the last to the first,
excluding the topmost InsertWidget
'''
for position in range(len(self.scripts) - 1, 0, -1):
self.remove(position)
def build_output_window(self):
'''
Shows the output window which contains the
continuously updated output of the currently
running script (stdout and stderr).
Or, if no script is running, contains
the content of the latest run script.
'''
if self.output_window_visible:
# the output is already visible
# in this case bring the window to the top
self.output_window.lift()
self.output_window.attributes("-topmost", True)
self.output_window.attributes("-topmost", False)
return
# Open up the output window
self.output_window = tk.Toplevel(self.master)
self.output_window.title("Script queuer | Output")
self.output_window.geometry("400x400")
# Keep track of the window being visible
self.output_window_visible = True
# Window size cannot be reduced beyond
# a certain size
self.output_window.minsize(200, 150)
# When closing the window run self.on_closing_output_window
self.output_window.protocol("WM_DELETE_WINDOW",
self.on_closing_output_window)
# Put a scrollable text region in it, and make it stretchable
self.output_text_widget = ScrolledLabel(self.output_window)
self.output_text_widget.grid(column=0, row=0, sticky='news')
self.output_window.rowconfigure(0, weight=1)
self.output_window.columnconfigure(0, weight=1)
# Add a button to toggle following the output / autoscrolling
b = ToggleAutoscrollButton(self.output_window, text='Autoscroll')
b.grid(column=0, row=1, sticky='nws')
if self.running_script is not None:
# If a script is running (or has already run),
# show its log in the newly opened window
self.output_text_widget.insert(self.running_script.log)
self.scroll_output_window_down()
def on_closing_output_window(self):
'''
Function called when the output window is closed
'''
# Keep track of the window state
self.output_window_visible = False
# Close the window
self.output_window.destroy()
def insert(self, position, script_path=None):
'''
Will insert a new script after the row indicated by
the input integer `position`.
Optionally one can specify the script path (for
unittesting purposes).
'''
if script_path is None:
# If no script path was provided
# prompt user for file name
if self.latest_searched_directory == None:
script_path = filedialog.askopenfilename()
else:
# If a script was already inserted,
# open the file prompt at the same directory
script_path = filedialog.askopenfilename(
initialdir=self.latest_searched_directory)
if script_path == "":
# User cancelled
return
# keep track of the directory the user navigated to
self.latest_searched_directory = path.dirname(script_path)
# Creates a new script widget, by default it will be queued
sw = ScriptWidget(self, script_path=script_path, state='queued')
# add it to the list of scripts
self.scripts.insert(position + 1, sw)
# update the scripts states and graphical information
self.update_script_widgets()
def move(self, position, new_position=None):
'''
Move a script from a position (row `position`) to a
new position (after row `new_position`).
The new position will be chosen in a popup window
by the user, or given as a kwarg (for unittesting purposes).
'''
if new_position is None:
# No position was given: prompt user
# with a popup window
# Determine message to be displayed in popup
if self.state == 'running':
# If running, do not allow script to be placed in first position
# (above the script which is running)
message = " 1 = place below row 1\n2 = place below row 2\n etc..."
minvalue = 1
else:
# If stopped
message = " 0 = place first \n 1 = place below row 1\n etc..."
minvalue = 0
# Open popup window
new_position = tk.simpledialog.askinteger("Move to",
message,
parent=self.master,
minvalue=minvalue,
maxvalue=len(
self.scripts))
if new_position is None:
# User cancelled
return
# the position the user sees does not
# take into account the rows of "done" scripts
# position_0 is the position of the first "not done"
# script.
new_position += self.position_0
# Insert the script at the new position
self.scripts.insert(new_position, self.scripts[position])
# Remove the script at the old position
if new_position > position:
self.scripts.pop(position)
else:
# If the script is moved up, then
# the old position is actually +1
self.scripts.pop(position + 1)
# Update script states and graphical information
self.update_script_widgets()
def remove(self, position):
'''
Remove a script from a position.
'''
# Destroy the ScriptWidget object
self.scripts[position].destroy()
# Remove it from the self.scripts list
self.scripts.pop(position)
# Update script states and graphical information
self.update_script_widgets()
def run(self, position):
'''
Run the script located at row `position`
'''
# Useful information about the script to be run
self.running_script = self.scripts[position]
script_path = self.scripts[position].script_path
self.running_script_position = position
self.running_script.log = ''
# Delete the contents of the output window
if self.output_window_visible:
self.output_text_widget.clear()
# Start the script and
# setup the communication
# with subprocess
self.start_script_process(script_path)
# Start the periodic monitoring of the script,
# to capture the output, but also detect the end/error
self.after(self.t_output_monitoring, self.monitor_script_process)
# Update the states of this object and the script
self.state = 'running'
self.running_script.state = 'running'
# Update script states and graphical information
self.update_script_widgets()
def start_script_process(self, script):
'''
Start the script subprocess
--- the -u option forces the stdout and stderr streams to be
unbuffered, which allows us to collect these outputs in real time,
rather than waiting until the end of the script
--- the cwd is chosen to be the folder in which
the script is located
'''
self.script_process = subprocess.Popen(['python', '-u', script],
cwd=path.dirname(script),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
'''
This list will be populated
with contents of the subprocess
`stdout` by the `reader` function in a
separate thread.
It's the bridge between the subprocess
and the log file and output window.
'''
self.line_buffer = []
# in a separate thread, collect the output
# of the subprocess and load it into
# line_buffer list
self.buffer_filling_thread = Thread(target=reader,
args=(self.script_process.stdout,
self.line_buffer))
self.buffer_filling_thread.daemon = True
self.buffer_filling_thread.start()
def write_to_output(self, to_write):
'''
write `to_write` both to the output window
and to the log file of the running scripts
'''
if self.output_window_visible:
self.output_text_widget.insert(to_write)
self.running_script.log += to_write
def monitor_script_process(self):
'''
Whilst the script is running, copy what the
`reader` function has put into the `self.line_buffer`
and write it to the output and log files.
This function will detect when the running script crashed
or ended, append the output/log accordingly, and
run the next queued script.
'''
# write all contents of the `self.line_buffer` list
# to the output/log
while self.line_buffer:
self.write_to_output(self.line_buffer.pop(0).decode("utf-8"))
# if autoscroll is activated, scroll the output window
# to the latest written
if self.output_window.follow:
self.scroll_output_window_down()
# poll checks on the status of the subprocess
poll = self.script_process.poll()
if poll is None:
# Hasn't crashed or ended
# monitor again in a time `self.t_output_monitoring`
self.after(self.t_output_monitoring, self.monitor_script_process)
else:
self.treat_end_of_script(poll)
self.treat_next_queued_script(poll)
def treat_end_of_script(self, poll):
'''
Called whenever a script crashes or ends.
Appends the output/log to give maximum
information to the user about causes of crashes.
'''
if poll != 0:
# Something went wrong
while True:
# Get Error Log and write to output/log
line = self.script_process.stderr.readline()
if not line:
break
else:
self.write_to_output(line.decode("utf-8"))
# Scroll the output window to the bottom
self.scroll_output_window_down()
# If `self.state` is stopped, then it's the user
# who interrupted the script, write this into the output/log
if self.state == 'stopped':
self.write_to_output(self.interrupted_error_message)
# Scroll the output window to the bottom
self.scroll_output_window_down()
def treat_next_queued_script(self, poll):
'''
Called when a script crashes or ends,
to carry out the actions which follow:
- starting a new queued script if the
script ended/crashed on its own
- stopping the run if the user forced a stop
- notifying the user via email if enabled
'''
if poll != 0 and self.state == 'stopped':
# User interrupted the script
# The script is stopped and made ready to go again
self.running_script.state = 'ready'
# It is also duplicated and marked above as a
# stopped script, so that the user may also inspect the
# logging file
stopped = self.running_script
duplicate = ScriptWidget(self,
script_path=stopped.script_path,
state='ended')
duplicate.success = 'stopped'
duplicate.log = stopped.log
self.scripts.insert(self.running_script_position, duplicate)
# Update script states and graphical information
self.update_script_widgets()
else:
if poll != 0:
# Script stopped because of an error
self.running_script.state = 'ended'
self.running_script.success = 'failed'
elif poll == 0:
# Script successfully ended
self.running_script.state = 'ended'
self.running_script.success = 'done'
if settings.gmail_notifications['enable']:
self.gmail_notify()
if self.running_script_position + 1 < len(self.scripts):
# more scripts are queued: run the next one
self.run(position=self.running_script_position + 1)
else:
# no more scripts to be run: just update visual information
self.state = 'stopped'
self.update_script_widgets()
def stop(self):
'''
Triggered by a user clicking the stop button
all one needs to do is set the state to `stopped`
and force the script to stop, the automatic
monitoring of the running script in `monitor_script_process`
will take care of the following actions
'''
self.state = 'stopped'
# Interrupt process
self.script_process.kill()
def update_script_widgets(self):
'''
Updates the states of the ScriptWidget objects
and updates the graphical information displayed.
All is determined by the `self.states` list
and the `self.state` variable.
'''
# The self.scripts list should never be empty
# as a failsafe we always populate it in that case
# with the insert widget
if len(self.scripts) == 0:
self.scripts = [InsertWidget(self)]
return
# The row is a property of the non-done scripts
# it is displayed in the GUI starting from 1
row = 1
for i, s in enumerate(self.scripts):
# All scripts are given a position, running from 0 upwards
# this is not necessarily the same as the row and acts
# as a unique identifier of the script
s.position = i
# Scripts which are done are given no row information
s.row = None
if s.state in ['running', 'ready', 'queued'] or row > 1:
if row == 1:
# First script running/to-run
# Helps in converting rows given by the user
# to the position identifier of a script
self.position_0 = i
# Since this is the first script which has not already been run
# it should be either running or stopped
if self.state == 'running':
s.state = 'running'
self.running_script = s
self.running_script_position = i
elif self.state == 'stopped':
s.state = 'ready'
elif row > 1:
# this script is lower down the queue:
# if it was just moved, for example, we should
# adjust its state accordingly
s.state = 'queued'
# These non-done scripts are given a row
s.row = row
row += 1
for i, s in enumerate(self.scripts):
# Place the script in the grid
s.grid(row=i, column=0, sticky='news')
# Populate it with buttons etc...
s.add_widgets()
# Adjust the scrollable region of the GUI
self.update()
self.config(scrollregion=self.bbox("all"))
def build_gridframe(self):
"""
This frame will be divided into a grid hosting the
canvas, scrollbars, (and potentially a menubar in the future if needed)
"""
self.frame = ttk.Frame()
# Places the Frame widget self.frame in the parent
# in a grid
self.frame.grid()
# Configure the frames grid
self.frame.grid(sticky="nswe") # make frame container sticky
self.frame.rowconfigure(0, weight=1) # make canvas expandable in x
self.frame.columnconfigure(0, weight=1) # make canvas expandable in y
def build_menubar(self):
"""
Builds the File, Edit, ... menu bar situated at the top of
the window.
Not used for the moment...
"""
# initialize the menubar object
self.menubar = tk.Menu(self.frame)
####################################
# FILE cascade menu build
####################################
# add new item to the menubar
menu = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label="File", menu=menu)
####################################
# VIEW cascade menu build
####################################
# add new item to the menubar
menu = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label="View", menu=menu)
# add cascade menu items
menu.add_command(label="Output", command=self.build_output_window)
# Add the menubar to the application
self.master.config(menu=self.menubar)
def build_scrollbars(self):
"""
Builds a vertical scrollbar and places
it in the window
"""
self.vbar = ttk.Scrollbar(self.frame, orient="vertical")
self.vbar.grid(row=0, column=1, sticky="ns")
def build_canvas(self):
"""
Initializes the canvas from which this object inherits and
places it in the grid of our frame
"""
tk.Canvas.__init__(
self,
self.frame,
bd=0,
highlightthickness=0,
yscrollcommand=self.vbar.set,
confine=False,
bg="white",
)
self.grid(row=0, column=0, sticky="nswe")
def configure_scrollbars(self):
"""
Define what functions the scrollbars should call
when we interact with them, and make scrolling
on the mouse do something similar
"""
self.vbar.configure(command=self.scroll_y)
self.bind("<MouseWheel>", self.scroll_y_wheel)
def scroll_y(self, *args, **kwargs):
"""
Is called when the user interacts with the vertical scroll bar
"""
# stop from scrolling up beyond a certain point
if float(args[1]) < 0:
args = (args[0], "0")
# shift canvas vertically
self.yview(*args)
time.sleep(0.01)
# Update scrollable area
self.update()
self.config(scrollregion=self.bbox("all"))
def scroll_y_wheel(self, event):
"""
Triggered by the user scrolling (in combination with no particular key presses).
"""
# Determine which direction the user is scrolling
# on Windows, event.delta also has a different
# amplitude depending on how fast the user is scrolling,
# but we ignore that
if event.num == 5 or event.delta < 0:
direction = 1
if event.num == 4 or event.delta > 0:
direction = -1
# Move the canvas appropriately, and stop
# the user from scrolling too far out
if direction == 1:
if self.canvasy(self.winfo_height()) < 2 * self.bbox("all")[3]:
self.yview_scroll(direction, tk.UNITS)
elif direction == -1:
if self.canvasy(0) > self.bbox("all")[1]:
self.yview_scroll(direction, tk.UNITS)
self.update()
# if we scroll above the top row, move a little down..
if self.canvasy(0) < self.bbox("all")[1]:
self.yview_moveto(0)
# Update the scrollable region
self.update()
self.config(scrollregion=self.bbox("all"))
def build_canvas_content(self):
'''
Build a window which will contain the widgets
'''
self.canvas_content = tk.Frame(self)
self.create_window((0, 0),
window=self.canvas_content,
anchor='nw',
width=1000)
self.canvas_content.columnconfigure(0, weight=1)
class ScriptWidget(tk.Frame):
'''
Widget (tkinter frame) in which are stored all the graphical
elements and information about a script.
'''
def next_script_state(self):
'''
Returns the state of the script below the current
one. Returns None if this is the last script.
'''
try:
return self.parent.scripts[self.position + 1].state
except IndexError:
# This script is last in line
return None
def add_widgets(self):
'''
Builds all graphical elements
depending on the state and information
about the script.
'''
# remove all previously built graphical elements
for w in self.all_widgets:
w.destroy()
self.all_widgets = []
##################
# INSERT BUTTON
##################
if self.next_script_state() in ['ready', 'queued', None]:
b = ImageButton(
self,
image='insert.gif',
command=(lambda: self.parent.insert(self.position)))
else:
b = ImageButton(self, image='half_blank.gif')
b.config(state=tk.DISABLED)
b.grid(row=0, column=0, sticky='swe', padx=(5, 0))
self.all_widgets.append(b)
##################
# ROW LABEL
##################
if self.state == 'ended':
l = ImageLabel(self, image='blank.gif', compound=tk.CENTER)
else:
l = ImageLabel(self,
image='blank.gif',
compound=tk.CENTER,
text=self.row)
l.grid(row=0, column=1, sticky='new')
self.all_widgets.append(l)
##################
# STATE LABEL
##################
if self.state == 'ended':
text = self.success
else:
text = self.state
b = ImageLabel(self,
text=text,
image='label_' + self.state + self.success + ".gif",
compound=tk.CENTER)
b.grid(row=0, column=2, sticky='new')
self.all_widgets.append(b)
##################
# REMOVE BUTTON
##################
if self.state == 'running':
b = ImageButton(self, image='blank.gif')
b.config(state=tk.DISABLED)
else:
b = ImageButton(
self,
image='remove.gif',
command=(lambda: self.parent.remove(self.position)))
b.grid(row=0, column=3, sticky='new', pady=self.pady)
self.all_widgets.append(b)
##################
# MOVE BUTTON
##################
if self.state in ['queued', 'ready']:
b = ImageButton(self,
image='move.gif',
command=(lambda: self.parent.move(self.position)))
else:
b = ImageButton(self, image='blank.gif')
b.config(state=tk.DISABLED)
b.grid(row=0, column=4, sticky='new', pady=self.pady)
self.all_widgets.append(b)
##################
# RUN/STOP BUTTON
##################
if self.state == 'running':
b = ImageButton(self, image='stop.gif', command=self.parent.stop)
elif self.state == 'ready':
b = ImageButton(self,
image='run.gif',
command=(lambda: self.parent.run(self.position)))
else:
b = ImageButton(self, image='blank.gif')
b.config(state=tk.DISABLED)
b.grid(row=0, column=5, sticky='new', pady=self.pady)
self.all_widgets.append(b)
##################
# LOG/OUTPUT BUTTON
##################
if self.state == 'ended':
b = ImageButton(self,
text="view log",
command=self.view_log,
image='blank.gif',
compound=tk.CENTER)
elif self.state in ['running', 'ready']:
b = ImageButton(self,
text="view output",
command=self.parent.build_output_window,
image='blank.gif',
compound=tk.CENTER)
else:
b = ImageButton(self,
text="",
command=self.parent.build_output_window,
image='blank.gif',
compound=tk.CENTER)
b.config(state=tk.DISABLED)
self.all_widgets.append(b)
b.grid(row=0, column=6, sticky='ne', pady=self.pady, padx=(2, 10))
##################
# SCRIPT PATH LABEL
##################
b = tk.Label(
self,
text=self.script_path,
anchor=tk.W,
)
b.grid(row=0,
column=7,
columnspan=1,
sticky='new',
pady=self.pady,
padx=(0, 40))
self.columnconfigure(7, weight=1)
self.all_widgets.append(b)
self.update()
# Wrap the path text
b.config(wraplength=b.winfo_width() - 50)
class InsertWidget(ScriptWidget):
'''Like Script Widget, but with just an insert button.
'''
def add_widgets(self):
'''
Add the graphical elements of the widget
'''
if self.next_script_state() in ['ready', 'queued', None]:
b = ImageButton(
self,
image='insert.gif',
command=(lambda: self.parent.insert(self.position)))
else:
b = ImageButton(self, image='half_blank.gif')
b.config(state=tk.DISABLED)
b.grid(row=0, column=0, sticky='swe', padx=(5, 0))
class ImageButton(ttk.Button):
'''Wrapper around the ttk.Button class
which automates the importation of the
button's picture.
'''
class ImageLabel(ttk.Label):
"""docstring for ImageButton"""
class ToggleAutoscrollButton(tk.Radiobutton):
"""Button which turns auto scrolling on and off.
"""
def click(self):
'''
Called upon clicking the button
'''
if self.state.get():
# If autoscrolling is on
self.config(value=False)
self.parent.follow = False
else:
# If autoscrolling is off
self.config(value=True)
self.state.set(True)
self.parent.follow = True
class ScrolledLabel(scrolledtext.ScrolledText):
"""wrapper around scrolledtext, to make
the text read-only
"""
def reader(f, buffer):
'''Utility function running in a thread
which transfers any lines from the
pipe `f` into the list `buffer`
'''
while True:
line = f.readline()
if line:
buffer.append(line)
else:
break
if __name__ == '__main__':
GuiWindow()
| 33.590829
| 88
| 0.55891
|
import tkinter as tk
from tkinter import messagebox, filedialog, simpledialog, scrolledtext
from tkinter import PhotoImage
from tkinter import ttk
import subprocess
from threading import Thread
import time
import tempfile
from os import path
import sys
try:
from . import settings
except ImportError:
# Running from source
import settings
# Contains the graphics featured on the buttons
graphics_directory = path.join(path.dirname(__file__), "graphics")
'''
Hierarchy of Tkinter frames:
GuiWindow.master = tk.Tk()
GuiWindow(ttk.Frame)
BatchingFrame.frame(ttk.Frame)
BatchingFrame.vbar(ttk.Scrollbar)
BatchingFrame(tk.Canvas)
BatchingFrame.canvas_content(tk.Frame)
Note: this is an amalgamation of code from different projects.
It could probably be simplified dramatically.
'''
class GuiWindow(ttk.Frame):
'''
Highest level window and frame.
Here we set the name and size of the window,
and prepare for hosting the actual content of the program.
If unittest is set to True, we don't start the
mainloop, but wait for a script to trigger
events and manually update the GUI.
'''
def __init__(self, unittesting=False):
# Initialize the frame, inside the root window (tk.Tk())
ttk.Frame.__init__(self, master=tk.Tk())
# Set the name to appear in the title bar
self.master.title("Script queuer")
# Set the initial size of the window in pixels
self.master.geometry("1000x400")
# Only resizable in the y direction
self.master.resizable(False, True)
# Make the frame an expandable grid
self.master.rowconfigure(0, weight=1)
self.master.columnconfigure(0, weight=1)
# Populate that grid with the batching frame
self.bf = BatchingFrame(self.master)
# Bring scriptq to the front
self.master.lift()
self.master.attributes("-topmost", True)
self.master.attributes("-topmost", False)
if unittesting:
self.update()
else:
try:
self.mainloop()
except Exception as e:
messagebox.showinfo(
"Oops.. The script queuer crashed with error\n" + str(e))
class BatchingFrame(tk.Canvas):
'''
This is the place where the widgets corresponding
to scripts live.
It is also the brain of the program, and controls
the state of the different scripts, the launching of
scripts, etc...
'''
def __init__(self, master, **kwargs):
# Master is the highest level,
# root window here tk.Tk()
self.master = master
# When opening a script, this is
# where the window will open to by default
self.latest_searched_directory = None
# currently or latest running ScriptWidget
self.running_script = None
# Build another frame, which will contain
# the canvas and the scrollbar
self.build_gridframe()
# We may want to have a menubar to add functionality
# in the future, so I'm keeping this commented out.
# self.build_menubar()
# Add the vertical scrollbar
self.build_scrollbars()
# Add the canvas which will host the
# Frame containing the script widgets
# Note: a canvas is necessary here to make the
# whole GUI scrollable.
# (there's probably a simpler alternative though..)
self.build_canvas()
# configure vertical scrollbar
self.configure_scrollbars()
# build a window in the canvas which
# will contain the ScriptWidgets
self.build_canvas_content()
# if True, the output window is built and visible
self.output_window_visible = False
# Build the output window
self.build_output_window()
# This determines how often we collect and display
# the output of a running script
self.t_output_monitoring = 100 #ms
# Either 'stopped' or 'running'
self.state = 'stopped'
# When we press stop, this message is appended
# to the log of the script output
self.interrupted_error_message = 'INTERRUPTED BY SCRIPT QUEUER'
# Default opening screen
# We start just with the insertion widget
self.scripts = [
InsertWidget(self),
]
# Useful function which goes through the list
# self.scripts, and displays the corresponding graphical content
self.update_script_widgets()
def remove_all(self):
'''
Removes all the Scripts, from the last to the first,
excluding the topmost InsertWidget
'''
for position in range(len(self.scripts) - 1, 0, -1):
self.remove(position)
def build_output_window(self):
'''
Shows the output window which contains the
continuously updated output of the currently
running script (stdout and stderr).
Or, if no script is running, contains
the content of the latest run script.
'''
if self.output_window_visible:
# the output is already visible
# in this case bring the window to the top
self.output_window.lift()
self.output_window.attributes("-topmost", True)
self.output_window.attributes("-topmost", False)
return
# Open up the output window
self.output_window = tk.Toplevel(self.master)
self.output_window.title("Script queuer | Output")
self.output_window.geometry("400x400")
# Keep track of the window being visible
self.output_window_visible = True
# Window size cannot be reduced beyond
# a certain size
self.output_window.minsize(200, 150)
# When closing the window run self.on_closing_output_window
self.output_window.protocol("WM_DELETE_WINDOW",
self.on_closing_output_window)
# Put a scrollable text region in it, and make it stretchable
self.output_text_widget = ScrolledLabel(self.output_window)
self.output_text_widget.grid(column=0, row=0, sticky='news')
self.output_window.rowconfigure(0, weight=1)
self.output_window.columnconfigure(0, weight=1)
# Add a button to toggle following the output / autoscrolling
b = ToggleAutoscrollButton(self.output_window, text='Autoscroll')
b.grid(column=0, row=1, sticky='nws')
if self.running_script is not None:
# If a script is running (or has already run),
# show its log in the newly opened window
self.output_text_widget.insert(self.running_script.log)
self.scroll_output_window_down()
def on_closing_output_window(self):
'''
Function called when the output window is closed
'''
# Keep track of the window state
self.output_window_visible = False
# Close the window
self.output_window.destroy()
def insert(self, position, script_path=None):
'''
Will insert a new script after the row indicated by
the input integer `position`.
Optionally one can specify the script path (for
unittesting purposes).
'''
if script_path is None:
# If no script path was provided
# prompt user for file name
if self.latest_searched_directory == None:
script_path = filedialog.askopenfilename()
else:
# If a script was already inserted,
# open the file prompt at the same directory
script_path = filedialog.askopenfilename(
initialdir=self.latest_searched_directory)
if script_path == "":
# User cancelled
return
# keep track of the directory the user navigated to
self.latest_searched_directory = path.dirname(script_path)
# Creates a new script widget, by default it will be queued
sw = ScriptWidget(self, script_path=script_path, state='queued')
# add it to the list of scripts
self.scripts.insert(position + 1, sw)
# update the scripts states and graphical information
self.update_script_widgets()
def move(self, position, new_position=None):
'''
Move a script from a position (row `position`) to a
new position (after row `new_position`).
The new position will be chosen in a popup window
by the user, or given as a kwarg (for unittesting purposes).
'''
if new_position is None:
# No position was given: prompt user
# with a popup window
# Determine message to be displayed in popup
if self.state == 'running':
# If running, do not allow script to be placed in first position
# (above the script which is running)
message = " 1 = place below row 1\n2 = place below row 2\n etc..."
minvalue = 1
else:
# If stopped
message = " 0 = place first \n 1 = place below row 1\n etc..."
minvalue = 0
# Open popup window
new_position = tk.simpledialog.askinteger("Move to",
message,
parent=self.master,
minvalue=minvalue,
maxvalue=len(
self.scripts))
if new_position is None:
# User cancelled
return
# the position the user sees does not
# take into account the rows of "done" scripts
# position_0 is the position of the first "not done"
# script.
new_position += self.position_0
# Insert the script at the new position
self.scripts.insert(new_position, self.scripts[position])
# Remove the script at the old position
if new_position > position:
self.scripts.pop(position)
else:
# If the script is moved up, then
# the old position is actually +1
self.scripts.pop(position + 1)
# Update script states and graphical information
self.update_script_widgets()
def remove(self, position):
'''
Remove a script from a position.
'''
# Destroy the ScriptWidget object
self.scripts[position].destroy()
# Remove it from the self.scripts list
self.scripts.pop(position)
# Update script states and graphical information
self.update_script_widgets()
def run(self, position):
'''
Run the script located at row `position`
'''
# Useful information about the script to be run
self.running_script = self.scripts[position]
script_path = self.scripts[position].script_path
self.running_script_position = position
self.running_script.log = ''
# Delete the contents of the output window
if self.output_window_visible:
self.output_text_widget.clear()
# Start the script and
# setup the communication
# with subprocess
self.start_script_process(script_path)
# Start the periodic monitoring of the script,
# to capture the output, but also detect the end/error
self.after(self.t_output_monitoring, self.monitor_script_process)
# Update the states of this object and the script
self.state = 'running'
self.running_script.state = 'running'
# Update script states and graphical information
self.update_script_widgets()
def start_script_process(self, script):
'''
Start the script subprocess
--- the -u option forces the stdout and stderr streams to be
unbuffered, which allows us to collect these outputs in real time,
rather than waiting until the end of the script
--- the cwd is chosen to be the folder in which
the script is located
'''
self.script_process = subprocess.Popen(['python', '-u', script],
cwd=path.dirname(script),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
'''
This list will be populated
with contents of the subprocess
`stdout` by the `reader` function in a
separate thread.
It's the bridge between the subprocess
and the log file and output window.
'''
self.line_buffer = []
# in a separate thread, collect the output
# of the subprocess and load it into
# line_buffer list
self.buffer_filling_thread = Thread(target=reader,
args=(self.script_process.stdout,
self.line_buffer))
self.buffer_filling_thread.daemon = True
self.buffer_filling_thread.start()
def write_to_output(self, to_write):
'''
write `to_write` both to the output window
and to the log file of the running scripts
'''
if self.output_window_visible:
self.output_text_widget.insert(to_write)
self.running_script.log += to_write
def scroll_output_window_down(self):
if self.output_window_visible:
self.output_text_widget.see("end")
def monitor_script_process(self):
'''
Whilst the script is running, copy what the
`reader` function has put into the `self.line_buffer`
and write it to the output and log files.
This function will detect when the running script crashed
or ended, append the output/log accordingly, and
run the next queued script.
'''
# write all contents of the `self.line_buffer` list
# to the output/log
while self.line_buffer:
self.write_to_output(self.line_buffer.pop(0).decode("utf-8"))
# if autoscroll is activated, scroll the output window
# to the latest written
if self.output_window.follow:
self.scroll_output_window_down()
# poll checks on the status of the subprocess
poll = self.script_process.poll()
if poll is None:
# Hasn't crashed or ended
# monitor again in a time `self.t_output_monitoring`
self.after(self.t_output_monitoring, self.monitor_script_process)
else:
self.treat_end_of_script(poll)
self.treat_next_queued_script(poll)
def treat_end_of_script(self, poll):
'''
Called whenever a script crashes or ends.
Appends the output/log to give maximum
information to the user about causes of crashes.
'''
if poll != 0:
# Something went wrong
while True:
# Get Error Log and write to output/log
line = self.script_process.stderr.readline()
if not line:
break
else:
self.write_to_output(line.decode("utf-8"))
# Scroll the output window to the bottom
self.scroll_output_window_down()
# If `self.state` is stopped, then it's the user
# who interrupted the script, write this into the output/log
if self.state == 'stopped':
self.write_to_output(self.interrupted_error_message)
# Scroll the output window to the bottom
self.scroll_output_window_down()
def treat_next_queued_script(self, poll):
'''
Called when a script crashes or ends,
to carry out the actions which follow:
- starting a new queued script if the
script ended/crashed on its own
- stopping the run if the user forced a stop
- notifying the user via email if enabled
'''
if poll != 0 and self.state == 'stopped':
# User interrupted the script
# The script is stopped and made ready to go again
self.running_script.state = 'ready'
# It is also duplicated and marked above as a
# stopped script, so that the user may also inspect the
# logging file
stopped = self.running_script
duplicate = ScriptWidget(self,
script_path=stopped.script_path,
state='ended')
duplicate.success = 'stopped'
duplicate.log = stopped.log
self.scripts.insert(self.running_script_position, duplicate)
# Update script states and graphical information
self.update_script_widgets()
else:
if poll != 0:
# Script stopped because of an error
self.running_script.state = 'ended'
self.running_script.success = 'failed'
elif poll == 0:
# Script successfully ended
self.running_script.state = 'ended'
self.running_script.success = 'done'
if settings.gmail_notifications['enable']:
self.gmail_notify()
if self.running_script_position + 1 < len(self.scripts):
# more scripts are queued: run the next one
self.run(position=self.running_script_position + 1)
else:
# no more scripts to be run: just update visual information
self.state = 'stopped'
self.update_script_widgets()
def gmail_notify(self):
try:
import smtplib
message = 'Subject: [scriptq] script %s\n\n' % self.running_script.success
message += "Path -- %s\n" % self.running_script.script_path
message += "Status -- %s\n" % self.running_script.success
message += "Log -- \n%s" % self.running_script.log
# creates SMTP session
s = smtplib.SMTP('smtp.gmail.com', 587)
# start TLS for security
s.starttls()
# Authentication
s.login(settings.gmail_notifications['sender_email'],
settings.gmail_notifications['sender_password'])
# sending the mail
s.sendmail(settings.gmail_notifications['sender_email'],
settings.gmail_notifications['receiver_emails'],
message)
# terminating the session
s.quit()
except Exception as e:
messagebox.showinfo(
"Sending notification email failed with error:\n" + str(e))
def stop(self):
'''
Triggered by a user clicking the stop button
all one needs to do is set the state to `stopped`
and force the script to stop, the automatic
monitoring of the running script in `monitor_script_process`
will take care of the following actions
'''
self.state = 'stopped'
# Interrupt process
self.script_process.kill()
def update_script_widgets(self):
'''
Updates the states of the ScriptWidget objects
and updates the graphical information displayed.
All is determined by the `self.states` list
and the `self.state` variable.
'''
# The self.scripts list should never be empty
# as a failsafe we always populate it in that case
# with the insert widget
if len(self.scripts) == 0:
self.scripts = [InsertWidget(self)]
return
# The row is a property of the non-done scripts
# it is displayed in the GUI starting from 1
row = 1
for i, s in enumerate(self.scripts):
# All scripts are given a position, running from 0 upwards
# this is not necessarily the same as the row and acts
# as a unique identifier of the script
s.position = i
# Scripts which are done are given no row information
s.row = None
if s.state in ['running', 'ready', 'queued'] or row > 1:
if row == 1:
# First script running/to-run
# Helps in converting rows given by the user
# to the position identifier of a script
self.position_0 = i
# Since this is the first script which has not already been run
# it should be either running or stopped
if self.state == 'running':
s.state = 'running'
self.running_script = s
self.running_script_position = i
elif self.state == 'stopped':
s.state = 'ready'
elif row > 1:
# this script is lower down the queue:
# if it was just moved, for example, we should
# adjust its state accordingly
s.state = 'queued'
# These non-done scripts are given a row
s.row = row
row += 1
for i, s in enumerate(self.scripts):
# Place the script in the grid
s.grid(row=i, column=0, sticky='news')
# Populate it with buttons etc...
s.add_widgets()
# Adjust the scrollable region of the GUI
self.update()
self.config(scrollregion=self.bbox("all"))
def build_gridframe(self):
"""
This frame will be divided into a grid hosting the
canvas, scrollbars, (and potentially a menubar in the future if needed)
"""
self.frame = ttk.Frame()
# Places the Frame widget self.frame in the parent
# in a grid
self.frame.grid()
# Configure the frames grid
self.frame.grid(sticky="nswe") # make frame container sticky
self.frame.rowconfigure(0, weight=1) # make canvas expandable in x
self.frame.columnconfigure(0, weight=1) # make canvas expandable in y
def build_menubar(self):
"""
Builds the File, Edit, ... menu bar situated at the top of
the window.
Not used for the moment...
"""
# initialize the menubar object
self.menubar = tk.Menu(self.frame)
####################################
# FILE cascade menu build
####################################
# add new item to the menubar
menu = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label="File", menu=menu)
####################################
# VIEW cascade menu build
####################################
# add new item to the menubar
menu = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label="View", menu=menu)
# add cascade menu items
menu.add_command(label="Output", command=self.build_output_window)
# Add the menubar to the application
self.master.config(menu=self.menubar)
def build_scrollbars(self):
"""
Builds a vertical scrollbar and places
it in the window
"""
self.vbar = ttk.Scrollbar(self.frame, orient="vertical")
self.vbar.grid(row=0, column=1, sticky="ns")
def build_canvas(self):
"""
Initializes the canvas from which this object inherits and
places it in the grid of our frame
"""
tk.Canvas.__init__(
self,
self.frame,
bd=0,
highlightthickness=0,
yscrollcommand=self.vbar.set,
confine=False,
bg="white",
)
self.grid(row=0, column=0, sticky="nswe")
def configure_scrollbars(self):
"""
Define what functions the scrollbars should call
when we interact with them, and make scrolling
on the mouse do something similar
"""
self.vbar.configure(command=self.scroll_y)
self.bind("<MouseWheel>", self.scroll_y_wheel)
def scroll_y(self, *args, **kwargs):
"""
Is called when the user interacts with the vertical scroll bar
"""
# stop from scrolling up beyond a certain point
if float(args[1]) < 0:
args = (args[0], "0")
# shift canvas vertically
self.yview(*args)
time.sleep(0.01)
# Update scrollable area
self.update()
self.config(scrollregion=self.bbox("all"))
def scroll_y_wheel(self, event):
"""
Triggered by the user scrolling (in combination with no particular key presses).
"""
# Determine which direction the user is scrolling
# on Windows, event.delta also has a different
# amplitude depending on how fast the user is scrolling,
# but we ignore that
if event.num == 5 or event.delta < 0:
direction = 1
if event.num == 4 or event.delta > 0:
direction = -1
# Move the canvas appropriately, and stop
# the user from scrolling too far out
if direction == 1:
if self.canvasy(self.winfo_height()) < 2 * self.bbox("all")[3]:
self.yview_scroll(direction, tk.UNITS)
elif direction == -1:
if self.canvasy(0) > self.bbox("all")[1]:
self.yview_scroll(direction, tk.UNITS)
self.update()
# if we scroll above the top row, move a little down..
if self.canvasy(0) < self.bbox("all")[1]:
self.yview_moveto(0)
# Update the scrollable region
self.update()
self.config(scrollregion=self.bbox("all"))
def build_canvas_content(self):
'''
Build a window which will contain the widgets
'''
self.canvas_content = tk.Frame(self)
self.create_window((0, 0),
window=self.canvas_content,
anchor='nw',
width=1000)
self.canvas_content.columnconfigure(0, weight=1)
class ScriptWidget(tk.Frame):
'''
Widget (tkinter frame) in which are stored all the graphical
elements and information about a script.
'''
def __init__(self, parent, script_path=None, state=None, success=''):
super(ScriptWidget, self).__init__(parent.canvas_content)
# A reference to the canvas in which
# the widget is placed
self.parent = parent
'''
string representing the state
of the script, can be one of:
- ended
- ready (waiting for user to click run)
- running
- queued
'''
self.state = state
'''
Is not None only if the script is ended.
Can then be one of:
- done (ran successfully)
- failed (there was an error in the script)
- stopped (the user interrupted the script)
'''
self.success = success
# Full, absolute path to the script
self.script_path = script_path
# Row of the script displayed in the GUI
# None if the script has ended,
# 1 and above if not
self.row = None
# Position of the script regardless of the state
# Goes from 0 up
self.position = None
# Vertical padding of the graphical elements
self.pady = (1, 1)
# Stores all the widgets displayed
self.all_widgets = []
def next_script_state(self):
'''
Returns the state of the script below the current
one. Returns None if this is the last script.
'''
try:
return self.parent.scripts[self.position + 1].state
except IndexError:
# This script is last in line
return None
def add_widgets(self):
'''
Builds all graphical elements
depending on the state and information
about the script.
'''
# remove all previously built graphical elements
for w in self.all_widgets:
w.destroy()
self.all_widgets = []
##################
# INSERT BUTTON
##################
if self.next_script_state() in ['ready', 'queued', None]:
b = ImageButton(
self,
image='insert.gif',
command=(lambda: self.parent.insert(self.position)))
else:
b = ImageButton(self, image='half_blank.gif')
b.config(state=tk.DISABLED)
b.grid(row=0, column=0, sticky='swe', padx=(5, 0))
self.all_widgets.append(b)
##################
# ROW LABEL
##################
if self.state == 'ended':
l = ImageLabel(self, image='blank.gif', compound=tk.CENTER)
else:
l = ImageLabel(self,
image='blank.gif',
compound=tk.CENTER,
text=self.row)
l.grid(row=0, column=1, sticky='new')
self.all_widgets.append(l)
##################
# STATE LABEL
##################
if self.state == 'ended':
text = self.success
else:
text = self.state
b = ImageLabel(self,
text=text,
image='label_' + self.state + self.success + ".gif",
compound=tk.CENTER)
b.grid(row=0, column=2, sticky='new')
self.all_widgets.append(b)
##################
# REMOVE BUTTON
##################
if self.state == 'running':
b = ImageButton(self, image='blank.gif')
b.config(state=tk.DISABLED)
else:
b = ImageButton(
self,
image='remove.gif',
command=(lambda: self.parent.remove(self.position)))
b.grid(row=0, column=3, sticky='new', pady=self.pady)
self.all_widgets.append(b)
##################
# MOVE BUTTON
##################
if self.state in ['queued', 'ready']:
b = ImageButton(self,
image='move.gif',
command=(lambda: self.parent.move(self.position)))
else:
b = ImageButton(self, image='blank.gif')
b.config(state=tk.DISABLED)
b.grid(row=0, column=4, sticky='new', pady=self.pady)
self.all_widgets.append(b)
##################
# RUN/STOP BUTTON
##################
if self.state == 'running':
b = ImageButton(self, image='stop.gif', command=self.parent.stop)
elif self.state == 'ready':
b = ImageButton(self,
image='run.gif',
command=(lambda: self.parent.run(self.position)))
else:
b = ImageButton(self, image='blank.gif')
b.config(state=tk.DISABLED)
b.grid(row=0, column=5, sticky='new', pady=self.pady)
self.all_widgets.append(b)
##################
# LOG/OUTPUT BUTTON
##################
if self.state == 'ended':
b = ImageButton(self,
text="view log",
command=self.view_log,
image='blank.gif',
compound=tk.CENTER)
elif self.state in ['running', 'ready']:
b = ImageButton(self,
text="view output",
command=self.parent.build_output_window,
image='blank.gif',
compound=tk.CENTER)
else:
b = ImageButton(self,
text="",
command=self.parent.build_output_window,
image='blank.gif',
compound=tk.CENTER)
b.config(state=tk.DISABLED)
self.all_widgets.append(b)
b.grid(row=0, column=6, sticky='ne', pady=self.pady, padx=(2, 10))
##################
# SCRIPT PATH LABEL
##################
b = tk.Label(
self,
text=self.script_path,
anchor=tk.W,
)
b.grid(row=0,
column=7,
columnspan=1,
sticky='new',
pady=self.pady,
padx=(0, 40))
self.columnconfigure(7, weight=1)
self.all_widgets.append(b)
self.update()
# Wrap the path text
b.config(wraplength=b.winfo_width() - 50)
def view_log(self):
# Open up the output window
self.log_window = tk.Toplevel(self.parent.master)
self.log_window.title("Script queuer | Log | " + self.script_path)
# Opening size of the window
self.log_window.geometry("400x400")
# Minimum size of the window
self.log_window.minsize(200, 150)
# Put a scrollable text region in it
self.log_text_widget = ScrolledLabel(self.log_window)
self.log_text_widget.grid(column=0, row=0, sticky='news')
# Add the log text
self.log_text_widget.insert(self.log)
# Scroll all the way down to the end
self.log_text_widget.see("end")
# Make the scrollable text stretch with the window
self.log_window.rowconfigure(0, weight=1)
self.log_window.columnconfigure(0, weight=1)
class InsertWidget(ScriptWidget):
'''Like Script Widget, but with just an insert button.
'''
def __init__(self, parent):
super(InsertWidget, self).__init__(parent,
script_path=None,
state=None,
success=None)
def add_widgets(self):
'''
Add the graphical elements of the widget
'''
if self.next_script_state() in ['ready', 'queued', None]:
b = ImageButton(
self,
image='insert.gif',
command=(lambda: self.parent.insert(self.position)))
else:
b = ImageButton(self, image='half_blank.gif')
b.config(state=tk.DISABLED)
b.grid(row=0, column=0, sticky='swe', padx=(5, 0))
class ImageButton(ttk.Button):
'''Wrapper around the ttk.Button class
which automates the importation of the
button's picture.
'''
def __init__(self, *args, image=None, **kwargs):
# Import image
image = PhotoImage(file=path.join(graphics_directory, image))
# Make two times smaller
image = image.subsample(2, 2)
super(ImageButton, self).__init__(*args, image=image, **kwargs)
# This is necessary, otherwise the picture doesn't appear somehow
self.image = image
class ImageLabel(ttk.Label):
"""docstring for ImageButton"""
def __init__(self, *args, image=None, **kwargs):
# Import image
image = PhotoImage(file=path.join(graphics_directory, image))
# Make two times smaller
image = image.subsample(2, 2)
super(ImageLabel, self).__init__(*args, image=image, **kwargs)
# This is necessary, otherwise the picture doesn't appear somehow
self.image = image
class ToggleAutoscrollButton(tk.Radiobutton):
"""Button which turns auto scrolling on and off.
"""
def __init__(self, parent, text):
self.parent = parent
# The button is checked when this variable is set to True
self.state = tk.BooleanVar()
self.state.set(True)
# The auto-scrolling is activated in the parent widget
# when this variable is set to True
self.parent.follow = True
super(ToggleAutoscrollButton, self).__init__(parent,
text=text,
variable=self.state,
value=True,
command=self.click)
def click(self):
'''
Called upon clicking the button
'''
if self.state.get():
# If autoscrolling is on
self.config(value=False)
self.parent.follow = False
else:
# If autoscrolling is off
self.config(value=True)
self.state.set(True)
self.parent.follow = True
class ScrolledLabel(scrolledtext.ScrolledText):
"""wrapper around scrolledtext, to make
the text read-only
"""
def __init__(self, *args, **kwargs):
super(ScrolledLabel, self).__init__(*args, **kwargs)
self.configure(state='disabled')
def insert(self, text):
self.configure(state='normal')
super(ScrolledLabel, self).insert(tk.INSERT, text)
self.configure(state='disabled')
def clear(self):
self.configure(state='normal')
self.delete("1.0", "end")
self.configure(state='disabled')
def reader(f, buffer):
'''Utility function running in a thread
which transfers any lines from the
pipe `f` into the list `buffer`
'''
while True:
line = f.readline()
if line:
buffer.append(line)
else:
break
if __name__ == '__main__':
GuiWindow()
| 8,293
| 0
| 351
|
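gui.py streams a child script's output by filling a shared list from a reader thread and draining it periodically, so the Tk mainloop never blocks (see start_script_process, monitor_script_process and reader above). A minimal non-GUI sketch of that pattern, assuming a child.py script exists in the current directory; the names proc and line_buffer and the drain loop are illustrative, not the module's API:

import subprocess
import time
from threading import Thread

def reader(pipe, buffer):
    # Transfer lines from the pipe into the shared list until EOF.
    for line in iter(pipe.readline, b""):
        buffer.append(line)

# -u forces unbuffered stdout/stderr so output arrives in real time.
proc = subprocess.Popen(["python", "-u", "child.py"],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
line_buffer = []
Thread(target=reader, args=(proc.stdout, line_buffer), daemon=True).start()

# Periodic drain; in gui.py this step runs via Canvas.after() instead.
while proc.poll() is None:
    while line_buffer:
        print(line_buffer.pop(0).decode("utf-8"), end="")
    time.sleep(0.1)
# Flush anything left over after the child exits.
while line_buffer:
    print(line_buffer.pop(0).decode("utf-8"), end="")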
e083ad3a3c94ca1b1fe9170e1f7a73ca987df66a
| 250
|
py
|
Python
|
Thesis_WebCrawler/coursera/scrape_coursera.py
|
cdhekne/My_Thesis
|
7f60dc48ea7de68c66a702816271ce94832b8fa0
|
[
"Apache-2.0"
] | null | null | null |
Thesis_WebCrawler/coursera/scrape_coursera.py
|
cdhekne/My_Thesis
|
7f60dc48ea7de68c66a702816271ce94832b8fa0
|
[
"Apache-2.0"
] | 1
|
2016-10-25T23:21:17.000Z
|
2016-10-25T23:21:17.000Z
|
Thesis_WebCrawler/coursera/scrape_coursera.py
|
GradThesis/Thesis
|
7f60dc48ea7de68c66a702816271ce94832b8fa0
|
[
"Apache-2.0"
] | null | null | null |
import coursera_requests as c
import sys
try:
# c.getCourseraCategories()
c.getCourseraCourses()
# c.getCourseraInstructors()
# c.getCourseraSessions()
# c.getCourseraUniversities()
except:
print ("Unexpected error:", sys.exc_info()[0])
raise
| 20.833333
| 47
| 0.756
|
import coursera_requests as c
import sys
try:
# c.getCourseraCategories()
c.getCourseraCourses()
# c.getCourseraInstructors()
# c.getCourseraSessions()
# c.getCourseraUniversities()
except:
print ("Unexpected error:", sys.exc_info()[0])
raise
| 0
| 0
| 0
|
97daa73929b125d81963d3faf813e0c563052bb2
| 599
|
py
|
Python
|
make_log_table.py
|
kevinko/rabin
|
ad80bb12a455c25ded958f0ad86564d9465175cf
|
[
"BSD-3-Clause"
] | 17
|
2015-04-11T16:54:30.000Z
|
2021-11-12T05:15:04.000Z
|
make_log_table.py
|
kevinko/rabin
|
ad80bb12a455c25ded958f0ad86564d9465175cf
|
[
"BSD-3-Clause"
] | 3
|
2015-07-24T04:46:42.000Z
|
2019-11-27T11:40:31.000Z
|
make_log_table.py
|
kevinko/rabin
|
ad80bb12a455c25ded958f0ad86564d9465175cf
|
[
"BSD-3-Clause"
] | 6
|
2015-06-24T14:50:29.000Z
|
2019-05-29T07:08:46.000Z
|
#!/usr/bin/python
# Copyright 2012, Kevin Ko <kevin@faveset.com>. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generates an 8-bit log base 2 table.
# Print out an inverse 8-bit log table
print "-1, 0, 1, 1," # log(i) for i in 0, 1, 2, 3
print "2, 2, 2, 2," # ... 4, 5, 6, 7
print "3, 3, 3, 3, 3, 3, 3, 3," # ... 8-15
lt(4) # 16-31
# 32-63
[lt(5) for i in xrange(2)]
# 64-127
[lt(6) for i in xrange(4)]
# 128-255
[lt(7) for i in xrange(8)]
| 23.038462
| 69
| 0.589316
|
#!/usr/bin/python
# Copyright 2012, Kevin Ko <kevin@faveset.com>. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generates an 8-bit log base 2 table.
def lt(n):
print (",".join([str(n) for i in xrange(16)]) + ",")
# Print out an inverse 8-bit log table
print "-1, 0, 1, 1," # log(i) for i in 0, 1, 2, 3
print "2, 2, 2, 2," # ... 4, 5, 6, 7
print "3, 3, 3, 3, 3, 3, 3, 3," # ... 8-15
lt(4) # 16-31
# 32-63
[lt(5) for i in xrange(2)]
# 64-127
[lt(6) for i in xrange(4)]
# 128-255
[lt(7) for i in xrange(8)]
| 44
| 0
| 23
|
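make_log_table.py prints the inverse 8-bit log table as Python 2 print statements; a Python 3 sketch that computes the same floor(log2) values directly (the script groups its first few rows differently, but the entries match):

# Build the 8-bit floor(log2) table; -1 is the sentinel for log(0).
log_table = [-1] + [i.bit_length() - 1 for i in range(1, 256)]

# Emit 16 entries per line, like the rows the script prints via lt().
for start in range(0, 256, 16):
    print(", ".join(str(v) for v in log_table[start:start + 16]) + ",")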
7ae29eddfd970a4827e122bd3cce94f398f44f32
| 46
|
py
|
Python
|
LibSerial4/__init__.py
|
thiagolemedasilva/LibSerial4
|
29b155ae4096f5e9e4bd18a2dd33018347298e1e
|
[
"MIT"
] | null | null | null |
LibSerial4/__init__.py
|
thiagolemedasilva/LibSerial4
|
29b155ae4096f5e9e4bd18a2dd33018347298e1e
|
[
"MIT"
] | null | null | null |
LibSerial4/__init__.py
|
thiagolemedasilva/LibSerial4
|
29b155ae4096f5e9e4bd18a2dd33018347298e1e
|
[
"MIT"
] | null | null | null |
from LibSerial4.LibSerial4 import Serial, Uart
| 46
| 46
| 0.869565
|
from LibSerial4.LibSerial4 import Serial, Uart
| 0
| 0
| 0
|
3369a1c91cb44fe5318448852ed32ffe37ae2507
| 3,825
|
py
|
Python
|
authors/apps/articles/migrations/0001_initial.py
|
andela/ah-backend-stark
|
c38810e221f95567262034b860ee0512cf15f102
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/articles/migrations/0001_initial.py
|
andela/ah-backend-stark
|
c38810e221f95567262034b860ee0512cf15f102
|
[
"BSD-3-Clause"
] | 29
|
2018-09-25T13:53:06.000Z
|
2021-06-10T20:51:58.000Z
|
authors/apps/articles/migrations/0001_initial.py
|
andela/ah-backend-stark
|
c38810e221f95567262034b860ee0512cf15f102
|
[
"BSD-3-Clause"
] | 2
|
2019-08-02T12:23:24.000Z
|
2019-11-05T12:22:23.000Z
|
# Generated by Django 2.1.2 on 2018-11-22 17:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 47.222222
| 145
| 0.585359
|
# Generated by Django 2.1.2 on 2018-11-22 17:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('profiles', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('slug', models.SlugField(blank=True, max_length=255, unique=True)),
('description', models.CharField(max_length=500)),
('body', models.TextField()),
('tagList', models.CharField(blank=True, max_length=2000)),
('image', models.URLField(blank=True)),
('createdAt', models.DateTimeField(auto_now_add=True)),
('updatedAt', models.DateTimeField(auto_now_add=True)),
('rating', models.FloatField(default=0)),
('ratingsCount', models.IntegerField(default=0)),
('read_time', models.CharField(max_length=100)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['createdAt'],
},
),
migrations.CreateModel(
name='ArticlesRead',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles.Profile')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article')),
('parent_comment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='articles.Comment')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['timestamp'],
},
),
migrations.CreateModel(
name='Favourite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Likes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action', models.BooleanField()),
('action_at', models.DateTimeField(auto_now_add=True)),
('action_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article')),
],
),
]
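For context, the Article model implied by this migration would look roughly like the sketch below; the fields are read directly off the CreateModel operation, while everything else (managers, __str__, the other models) is omitted, so treat it as an illustration rather than the project's actual models.py:

from django.conf import settings
from django.db import models

class Article(models.Model):
    title = models.CharField(max_length=255)
    slug = models.SlugField(max_length=255, unique=True, blank=True)
    description = models.CharField(max_length=500)
    body = models.TextField()
    tagList = models.CharField(max_length=2000, blank=True)
    image = models.URLField(blank=True)
    createdAt = models.DateTimeField(auto_now_add=True)
    updatedAt = models.DateTimeField(auto_now_add=True)
    rating = models.FloatField(default=0)
    ratingsCount = models.IntegerField(default=0)
    read_time = models.CharField(max_length=100)
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

    class Meta:
        ordering = ['createdAt']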
| 0
| 3,645
| 23
|
84de6959d1cf90895494230c52ee66aae1877921
| 95
|
py
|
Python
|
Chapter 3/stateless.py
|
PacktPublishing/Machine-Learning-Model-Serving-Patterns-and-Best-Practices
|
390ae5868e023f0e417f5dca23eab69e848c5f91
|
[
"MIT"
] | null | null | null |
Chapter 3/stateless.py
|
PacktPublishing/Machine-Learning-Model-Serving-Patterns-and-Best-Practices
|
390ae5868e023f0e417f5dca23eab69e848c5f91
|
[
"MIT"
] | null | null | null |
Chapter 3/stateless.py
|
PacktPublishing/Machine-Learning-Model-Serving-Patterns-and-Best-Practices
|
390ae5868e023f0e417f5dca23eab69e848c5f91
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
print(fun(5)) # Always returns 25
| 15.833333
| 37
| 0.6
|
def fun(x):
return x * x
if __name__ == "__main__":
print(fun(5)) # Always returns 25
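To make the stateless/stateful contrast explicit, here is a hypothetical stateful counterpart: identical inputs stop producing identical outputs once hidden state leaks into the result, which is exactly what the pattern above avoids.

class StatefulSquarer:
    """Not stateless: a running call counter changes the result."""
    def __init__(self):
        self.calls = 0

    def fun(self, x):
        self.calls += 1
        return x * x + self.calls  # depends on hidden state

s = StatefulSquarer()
print(s.fun(5))  # 26
print(s.fun(5))  # 27, same input but a different output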
| 7
| 0
| 22
|
5013fdff381c0cc0e6850f21d14cefe9667b2bba
| 908
|
py
|
Python
|
plot.py
|
Aravind-Suresh/non-uniform-betweenness-centrality
|
4473ea157d93322a8680c1cfc18bfa69d00bfc56
|
[
"MIT"
] | null | null | null |
plot.py
|
Aravind-Suresh/non-uniform-betweenness-centrality
|
4473ea157d93322a8680c1cfc18bfa69d00bfc56
|
[
"MIT"
] | null | null | null |
plot.py
|
Aravind-Suresh/non-uniform-betweenness-centrality
|
4473ea157d93322a8680c1cfc18bfa69d00bfc56
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import sys
"""
Utility script to plot Execution time Vs Number of vertices based on the log-files obtained by running tests/*.sh
"""
lines = open(sys.argv[1]).readlines()[1::2]
x = map(lambda xx: 5*eval(xx.strip().split('\t')[1].split(' ')[1]), lines[::2])
# x = map(lambda xx: 400*eval(xx.strip().split(' ')[1].split('\t')[0]), lines[::2])
y = map(lambda xx: eval(xx.split(' ')[2]), lines[1::2])
t = {}
c = {}
for xx in x:
c[xx] = 0
t[xx] = 0
for i in range(len(y)):
xx = x[i]
t[xx] += y[i]
c[xx] += 1
y_plot = []
for i in range(len(y)):
xx = x[i]
y_plot.append(t[xx]*0.0001*25/c[xx])
# y_plot.append(t[xx]*14.711*0.6*0.5*10*16/c[xx])
print x
print y_plot
# print t[700]*10.0/c[700]
# print y_plot
plt.xlabel("Edges | Number of vertices = 5000")
# plt.xlabel("Vertices")
plt.ylabel("Execution time ( s )")
plt.plot(x,y_plot,'ro')
plt.show()
| 25.942857
| 113
| 0.598018
|
import matplotlib.pyplot as plt
import sys
"""
Utility script to plot Execution time Vs Number of vertices based on the log-files obtained by running tests/*.sh
"""
lines = open(sys.argv[1]).readlines()[1::2]
x = map(lambda xx: 5*eval(xx.strip().split('\t')[1].split(' ')[1]), lines[::2])
# x = map(lambda xx: 400*eval(xx.strip().split(' ')[1].split('\t')[0]), lines[::2])
y = map(lambda xx: eval(xx.split(' ')[2]), lines[1::2])
t = {}
c = {}
for xx in x:
c[xx] = 0
t[xx] = 0
for i in range(len(y)):
xx = x[i]
t[xx] += y[i]
c[xx] += 1
y_plot = []
for i in range(len(y)):
xx = x[i]
y_plot.append(t[xx]*0.0001*25/c[xx])
# y_plot.append(t[xx]*14.711*0.6*0.5*10*16/c[xx])
print x
print y_plot
# print t[700]*10.0/c[700]
# print y_plot
plt.xlabel("Edges | Number of vertices = 5000")
# plt.xlabel("Vertices")
plt.ylabel("Execution time ( s )")
plt.plot(x,y_plot,'ro')
plt.show()
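The script above leans on Python 2 semantics (list-returning map, print statements). A sketch of the same group-by-x averaging written for Python 3, with made-up data standing in for the parsed log file:

from collections import defaultdict
from statistics import mean

x = [25, 25, 50, 50, 50]        # hypothetical vertex/edge counts per run
y = [1.2, 1.4, 3.0, 3.4, 3.2]   # hypothetical raw timings
groups = defaultdict(list)
for xx, yy in zip(x, y):
    groups[xx].append(yy)
y_plot = [mean(groups[xx]) for xx in x]  # mean execution time for each run's x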
| 0
| 0
| 0
|
ec87cbc371454a77008029d3d80e1d5a2d9cde1b
| 1,236
|
py
|
Python
|
microcosm_metrics/main.py
|
globality-corp/microcosm-metrics
|
ca63c7c7ef67c637c6f20cd37eac301f07e32c6d
|
[
"Apache-2.0"
] | 1
|
2021-05-27T20:17:51.000Z
|
2021-05-27T20:17:51.000Z
|
microcosm_metrics/main.py
|
globality-corp/microcosm-metrics
|
ca63c7c7ef67c637c6f20cd37eac301f07e32c6d
|
[
"Apache-2.0"
] | 1
|
2018-10-09T13:16:17.000Z
|
2018-10-09T13:16:17.000Z
|
microcosm_metrics/main.py
|
globality-corp/microcosm-metrics
|
ca63c7c7ef67c637c6f20cd37eac301f07e32c6d
|
[
"Apache-2.0"
] | 1
|
2019-03-17T03:46:28.000Z
|
2019-03-17T03:46:28.000Z
|
"""
Test CLI for metric integration.
"""
from argparse import ArgumentParser
from getpass import getuser
from time import sleep
from microcosm.api import create_object_graph
from microcosm.loaders import load_from_dict
from microcosm_metrics.naming import name_for
def publish():
"""
Publish a metric (for testing).
"""
args = parse_args()
statsd = create_statsd_client(args)
if args.action == "increment":
statsd.increment(name_for(getuser(), args.action))
elif args.action == "histogram":
statsd.histogram(name_for(getuser(), args.action), 1.0)
try:
# wait a little to allow the delivery of the metric before we exit
sleep(1.0)
except KeyboardInterrupt:
pass
| 23.320755
| 92
| 0.674757
|
"""
Test CLI for metric integration.
"""
from argparse import ArgumentParser
from getpass import getuser
from time import sleep
from microcosm.api import create_object_graph
from microcosm.loaders import load_from_dict
from microcosm_metrics.naming import name_for
def parse_args():
parser = ArgumentParser()
parser.add_argument("--host", default="localhost")
parser.add_argument("--action", choices=["increment", "histogram"], default="increment")
return parser.parse_args()
def create_statsd_client(args):
loader = load_from_dict(dict(
metrics=dict(
host=args.host,
),
))
graph = create_object_graph("example", loader=loader)
graph.use("metrics")
graph.lock()
return graph.metrics
def publish():
"""
Publish a metric (for testing).
"""
args = parse_args()
statsd = create_statsd_client(args)
if args.action == "increment":
statsd.increment(name_for(getuser(), args.action))
elif args.action == "histogram":
statsd.histogram(name_for(getuser(), args.action), 1.0)
try:
# wait a little to allow the delivery of the metric before we exit
sleep(1.0)
except KeyboardInterrupt:
pass
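A hedged usage sketch: publish() takes its options from sys.argv, so driving it from another script amounts to faking the command line. This assumes a statsd-compatible agent is actually listening on the chosen host; otherwise the metric is silently dropped.

import sys

sys.argv = ["publish", "--host", "localhost", "--action", "histogram"]
publish()  # emits a histogram metric named from the current user and action, then sleeps ~1s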
| 443
| 0
| 46
|
e8dab8438c4953bc3ee00db7c8f6087572852097
| 2,854
|
py
|
Python
|
test_metrics_maes.py
|
maximdanilchenko/fusionBasedRecSys
|
42b47a6cb2691967aad8c65932b932f8ee5e231d
|
[
"MIT"
] | null | null | null |
test_metrics_maes.py
|
maximdanilchenko/fusionBasedRecSys
|
42b47a6cb2691967aad8c65932b932f8ee5e231d
|
[
"MIT"
] | null | null | null |
test_metrics_maes.py
|
maximdanilchenko/fusionBasedRecSys
|
42b47a6cb2691967aad8c65932b932f8ee5e231d
|
[
"MIT"
] | null | null | null |
from metrics import *
from itembased_recommender_system import *
import shelve
import matplotlib.pyplot as plt
genData('base','u2.base')
genData('test','u2.test')
print("data ready")
base = transform(shelve.open('base'))
test = transform(shelve.open('test'))
##base = {'max':{'odin doma':3,'labirint straha':5,'detektiv':2,'komnata':4},
## 'dima':{'odin doma':5,'labirint straha':1,'detektiv':5},
## 'alex':{'odin doma':5,'pila':2,'komnata':3,'grabim bank':3,'labirint straha':1,'detektiv':4,'dom s privideniamy':3},
## 'den':{'odin doma':2,'grabim bank':3,'labirint straha':5,'dom s privideniamy':5},
## 'kirill':{'grabim bank':3,'labirint straha':4,'detektiv':1,'dom s privideniamy':5},
## 'olga':{'odin doma':3,'pila':4,'detektiv':4,'komnata':1,'dom s privideniamy':3},
## 'lera':{'odin doma':4,'pila':3,'grabim bank':4,'labirint straha':1},
## 'anna':{'pila':4,'grabim bank':2,'labirint straha':5,'komnata':4,'detektiv':4,'dom s privideniamy':4}}
##
##test = {'max':{'pila':4,'dom s privideniamy':3},
## 'dima':{'pila':2,'dom s privideniamy':1},
## 'kirill':{'odin doma':3,'pila':4},
## 'olga':{'grabim bank':4,'labirint straha':1}}
print("opened with size %d"%len(base))
tr = transform(base)
print("transformed")
ns = [30,100,200,300,500,0]
##ns = [1,2,3,4,5,6]
cls = ['b','g','r','c','m','y']
markers = ['.',',','o','v','^','<','>','p','*','+']
cls_n = 0
for n in ns:
metrix = [JMSD,PCC,CPCC,SPCC,Jaccard,MSD,COS,ACOS]
maes = {}
for sim in metrix:
if (n != 0):
itMtr = itemMatrix(tr,n,sim)
else:
itMtr = itemMatrix(tr,n,sim,False)
orig_recs = {}
test_recs = {}
for user in test:
orig_recs[user] = {}
test_recs[user] = {}
for item in test[user]:
rec = recommendOne(base,tr,itMtr,item,user)
if (rec != 200):
orig_recs[user][item] = rec
test_recs[user][item] = test[user][item]
mae = MAE(test_recs,orig_recs)
print("Mae for %s is %f"%(sim.__name__,mae))
maes[sim.__name__] = mae
print(maes)
labels = sorted(maes)
x = [0,1,2,3,4,5,6,7]
y = [maes[i] for i in labels]
plt.plot(x,y,color = cls[cls_n],marker = markers[cls_n],linewidth=1.5,label = 'k = '+str(n))
## plt.plot(x,y,'ko')
cls_n += 1
plt.title('maes, user-based')
plt.axis([-1,8,0.7,1.1])
plt.ylabel('MAE')
plt.xlabel('metrics')
plt.xticks(x,labels,rotation='vertical')
plt.subplots_adjust(bottom=0.15)
plt.legend()
##for i,j in zip(x,y):
## plt.annotate(str(round(j,4)),xy=(i-0.5,j+0.01))
plt.show()
##plt.savefig('E:/Diploma/results/maes-item-based_n'+str(n)+'.png')
| 38.567568
| 131
| 0.546952
|
from metrics import *
from itembased_recommender_system import *
import shelve
import matplotlib.pyplot as plt
genData('base','u2.base')
genData('test','u2.test')
print("data ready")
base = transform(shelve.open('base'))
test = transform(shelve.open('test'))
##base = {'max':{'odin doma':3,'labirint straha':5,'detektiv':2,'komnata':4},
## 'dima':{'odin doma':5,'labirint straha':1,'detektiv':5},
## 'alex':{'odin doma':5,'pila':2,'komnata':3,'grabim bank':3,'labirint straha':1,'detektiv':4,'dom s privideniamy':3},
## 'den':{'odin doma':2,'grabim bank':3,'labirint straha':5,'dom s privideniamy':5},
## 'kirill':{'grabim bank':3,'labirint straha':4,'detektiv':1,'dom s privideniamy':5},
## 'olga':{'odin doma':3,'pila':4,'detektiv':4,'komnata':1,'dom s privideniamy':3},
## 'lera':{'odin doma':4,'pila':3,'grabim bank':4,'labirint straha':1},
## 'anna':{'pila':4,'grabim bank':2,'labirint straha':5,'komnata':4,'detektiv':4,'dom s privideniamy':4}}
##
##test = {'max':{'pila':4,'dom s privideniamy':3},
## 'dima':{'pila':2,'dom s privideniamy':1},
## 'kirill':{'odin doma':3,'pila':4},
## 'olga':{'grabim bank':4,'labirint straha':1}}
print("opened with size %d"%len(base))
tr = transform(base)
print("transformed")
ns = [30,100,200,300,500,0]
##ns = [1,2,3,4,5,6]
cls = ['b','g','r','c','m','y']
markers = ['.',',','o','v','^','<','>','p','*','+']
cls_n = 0
for n in ns:
metrix = [JMSD,PCC,CPCC,SPCC,Jaccard,MSD,COS,ACOS]
maes = {}
for sim in metrix:
if (n != 0):
itMtr = itemMatrix(tr,n,sim)
else:
itMtr = itemMatrix(tr,n,sim,False)
orig_recs = {}
test_recs = {}
for user in test:
orig_recs[user] = {}
test_recs[user] = {}
for item in test[user]:
rec = recommendOne(base,tr,itMtr,item,user)
if (rec != 200):
orig_recs[user][item] = rec
test_recs[user][item] = test[user][item]
mae = MAE(test_recs,orig_recs)
print("Mae for %s is %f"%(sim.__name__,mae))
maes[sim.__name__] = mae
print(maes)
labels = sorted(maes)
x = [0,1,2,3,4,5,6,7]
y = [maes[i] for i in labels]
plt.plot(x,y,color = cls[cls_n],marker = markers[cls_n],linewidth=1.5,label = 'k = '+str(n))
## plt.plot(x,y,'ko')
cls_n += 1
plt.title('maes, user-based')
plt.axis([-1,8,0.7,1.1])
plt.ylabel('MAE')
plt.xlabel('metrics')
plt.xticks(x,labels,rotation='vertical')
plt.subplots_adjust(bottom=0.15)
plt.legend()
##for i,j in zip(x,y):
## plt.annotate(str(round(j,4)),xy=(i-0.5,j+0.01))
plt.show()
##plt.savefig('E:/Diploma/results/maes-item-based_n'+str(n)+'.png')
| 0
| 0
| 0
|
a2bcc15bc5cd3222f1d3285db315bac3a27b1e83
| 1,289
|
py
|
Python
|
qasm/circuits/combined_constraints_v2.py
|
JamesGopsill/ICED21-Quantum-Design
|
bbb3d60639f0dbb81aa18165647eb4a0769d1a26
|
[
"MIT"
] | null | null | null |
qasm/circuits/combined_constraints_v2.py
|
JamesGopsill/ICED21-Quantum-Design
|
bbb3d60639f0dbb81aa18165647eb4a0769d1a26
|
[
"MIT"
] | null | null | null |
qasm/circuits/combined_constraints_v2.py
|
JamesGopsill/ICED21-Quantum-Design
|
bbb3d60639f0dbb81aa18165647eb4a0769d1a26
|
[
"MIT"
] | null | null | null |
from qiskit.circuit.quantumcircuit import QuantumCircuit, QuantumRegister, ClassicalRegister
| 18.955882
| 92
| 0.6827
|
from qiskit.circuit.quantumcircuit import QuantumCircuit, QuantumRegister, ClassicalRegister
def combined_constraints_v2() -> QuantumCircuit:
# Initialise the quantum register
qreg = QuantumRegister(10, "q")
creg = ClassicalRegister(4, "c")
# Initialise the circuit
circuit = QuantumCircuit(qreg, creg)
circuit.h(qreg[0])
circuit.h(qreg[1])
circuit.h(qreg[2])
circuit.h(qreg[3])
circuit.barrier()
circuit.ccx(qreg[0], qreg[1], qreg[4])
circuit.cx(qreg[4], qreg[0])
circuit.barrier()
circuit.ccx(qreg[2], qreg[3], qreg[5])
circuit.cx(qreg[5], qreg[2])
circuit.barrier()
circuit.ccx(qreg[0], qreg[2], qreg[6])
circuit.x(qreg[0])
circuit.x(qreg[2])
circuit.barrier()
circuit.ccx(qreg[0], qreg[2], qreg[6])
circuit.x(qreg[0])
circuit.x(qreg[2])
circuit.barrier()
circuit.ccx(qreg[1], qreg[3], qreg[7])
circuit.x(qreg[1])
circuit.x(qreg[3])
circuit.barrier()
circuit.ccx(qreg[1], qreg[3], qreg[7])
circuit.x(qreg[1])
circuit.x(qreg[3])
circuit.barrier()
circuit.ccx(qreg[6], qreg[7], qreg[8])
circuit.cx(qreg[8], qreg[3])
circuit.barrier()
circuit.ccx(qreg[2], qreg[3], qreg[9])
circuit.cx(qreg[9], qreg[2])
circuit.barrier()
circuit.measure(0, 0)
circuit.measure(1, 1)
circuit.measure(2, 2)
circuit.measure(3, 3)
return circuit
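A usage sketch for the circuit factory above, assuming a pre-1.0 Qiskit release where Aer and execute are importable from the top-level package (newer releases would go through qiskit_aer.AerSimulator and transpile instead):

from qiskit import Aer, execute

circuit = combined_constraints_v2()
backend = Aer.get_backend("qasm_simulator")
counts = execute(circuit, backend, shots=2048).result().get_counts()
print(counts)  # histogram of 4-bit strings read from the classical register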
| 1,174
| 0
| 23
|
1167c2e8ebd9cd5d4995f31fedeb5bf1dfb82d45
| 2,492
|
py
|
Python
|
meiduo_mall/meiduo_mall/apps/oauth/utils.py
|
bapewing/MeiduoMall
|
2b1634de2b7bc9eec80716770894824d668ec314
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/oauth/utils.py
|
bapewing/MeiduoMall
|
2b1634de2b7bc9eec80716770894824d668ec314
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/oauth/utils.py
|
bapewing/MeiduoMall
|
2b1634de2b7bc9eec80716770894824d668ec314
|
[
"MIT"
] | null | null | null |
import json
import logging
from urllib.parse import urlencode, parse_qs
from urllib.request import urlopen
from django.conf import settings
from oauth.exceptions import QQAPIException
logger = logging.getLogger('django')
class OauthQQ(object):
"""
    QQ authentication helper class
"""
def get_auth_url(self):
"""
        Build the QQ authorization URL (with query string)
:return:
"""
parameters = {
'response_type': 'code',
'client_id': self.app_id,
'redirect_uri': self.redirect_uri,
'state': self.state,
'scope': 'get_user_info',
}
return 'https://graph.qq.com/oauth2.0/authorize?' + urlencode(parameters)
def get_access_token(self, code):
"""
        Get the QQ access_token
        :param code: the code passed back in the redirect URL
:return: access_token
"""
parameters = {
'grant_type': 'authorization_code',
'client_id': self.app_id,
'client_secret': self.app_key,
'code': code,
'redirect_uri': self.redirect_uri
}
url = 'https://graph.qq.com/oauth2.0/token?' + urlencode(parameters)
# access_token=FE04************************CCE2&expires_in=7776000&refresh_token=88E4************************BE14
try:
response = urlopen(url).read().decode()
response_dict = parse_qs(response)
access_token = response_dict.get("access_token")[0]
except Exception as e:
logger.error(e)
raise QQAPIException('获取access_token异常')
return access_token
def get_openid(self, access_token):
"""
        Get the QQ user's openid
        :param access_token: the access_token obtained from the server
:return: openid
"""
url = 'https://graph.qq.com/oauth2.0/me?access_token=' + access_token
# callback( {"client_id":"YOUR_APPID","openid":"YOUR_OPENID"} );
try:
reponse = urlopen(url).read().decode()
            # TODO: this string slicing could be handled differently
reponse_dict = json.loads(reponse[10:-4])
except Exception as e:
logger.error(e)
raise QQAPIException('获取openid异常')
return reponse_dict.get('openid')
| 30.390244
| 121
| 0.58748
|
import json
import logging
from urllib.parse import urlencode, parse_qs
from urllib.request import urlopen
from django.conf import settings
from oauth.exceptions import QQAPIException
logger = logging.getLogger('django')
class OauthQQ(object):
"""
    QQ authentication helper class
"""
def __init__(self, app_id=None, app_key=None, redirect_uri=None, state=None):
self.app_id = app_id or settings.QQ_APP_ID
self.app_key = app_key or settings.QQ_APP_KEY
self.redirect_uri = redirect_uri or settings.QQ_REDIRECT_URI
self.state = state or settings.QQ_STATE
def get_auth_url(self):
"""
        Build the QQ authorization URL (with query string)
:return:
"""
parameters = {
'response_type': 'code',
'client_id': self.app_id,
'redirect_uri': self.redirect_uri,
'state': self.state,
'scope': 'get_user_info',
}
return 'https://graph.qq.com/oauth2.0/authorize?' + urlencode(parameters)
def get_access_token(self, code):
"""
        Get the QQ access_token
        :param code: the code passed back in the redirect URL
:return: access_token
"""
parameters = {
'grant_type': 'authorization_code',
'client_id': self.app_id,
'client_secret': self.app_key,
'code': code,
'redirect_uri': self.redirect_uri
}
url = 'https://graph.qq.com/oauth2.0/token?' + urlencode(parameters)
# access_token=FE04************************CCE2&expires_in=7776000&refresh_token=88E4************************BE14
try:
response = urlopen(url).read().decode()
response_dict = parse_qs(response)
access_token = response_dict.get("access_token")[0]
except Exception as e:
logger.error(e)
raise QQAPIException('获取access_token异常')
return access_token
def get_openid(self, access_token):
"""
        Get the QQ user's openid
        :param access_token: the access_token obtained from the server
:return: openid
"""
url = 'https://graph.qq.com/oauth2.0/me?access_token=' + access_token
# callback( {"client_id":"YOUR_APPID","openid":"YOUR_OPENID"} );
try:
reponse = urlopen(url).read().decode()
# TODO:可以换种方式操作字符串
reponse_dict = json.loads(reponse[10:-4])
except Exception as e:
logger.error(e)
raise QQAPIException('获取openid异常')
return reponse_dict.get('openid')
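A hedged sketch of how the class above is typically driven; the view wiring and the way `code` is extracted from the callback request are assumptions, only the three OauthQQ calls come from the class itself:

# Step 1: build the authorization URL and redirect the browser to it.
oauth = OauthQQ(state='/')        # app_id/app_key/redirect_uri fall back to settings
login_url = oauth.get_auth_url()

# Step 2: QQ redirects back to redirect_uri with ?code=...; exchange and resolve it.
def handle_qq_callback(code):
    oauth = OauthQQ(state='/')
    access_token = oauth.get_access_token(code)
    openid = oauth.get_openid(access_token)
    return openid                 # key for looking up or creating the local user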
| 278
| 0
| 27
|
bbc176cb39896e1eb962622b41c7feb57f5208ed
| 17,306
|
py
|
Python
|
src/state_base.py
|
akolishchak/doom-net-pytorch
|
96bad5b15c9c5267d494cd5791481801cd6d2107
|
[
"MIT"
] | 143
|
2017-01-30T01:43:58.000Z
|
2021-11-15T07:53:22.000Z
|
src/state_base.py
|
akolishchak/doom-net-pytorch
|
96bad5b15c9c5267d494cd5791481801cd6d2107
|
[
"MIT"
] | 7
|
2017-12-28T02:42:08.000Z
|
2020-05-23T23:12:33.000Z
|
src/state_base.py
|
akolishchak/doom-net-pytorch
|
96bad5b15c9c5267d494cd5791481801cd6d2107
|
[
"MIT"
] | 27
|
2017-02-03T09:20:10.000Z
|
2020-07-19T21:35:28.000Z
|
#
# aac_state_base.py, doom-net
#
# Created by Andrey Kolishchak on 01/21/17.
#
import os
import datetime
import glob
from multiprocessing.pool import ThreadPool
from threading import Thread
import time
import h5py
import bisect
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
from device import device
from model import Model
from state_model import StateModel
from state_controller import AdvantageActorCriticController
import vizdoom
| 45.067708
| 229
| 0.570322
|
#
# aac_state_base.py, doom-net
#
# Created by Andrey Kolishchak on 01/21/17.
#
import os
import datetime
import glob
from multiprocessing.pool import ThreadPool
from threading import Thread
import time
import h5py
import bisect
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
from device import device
from model import Model
from state_model import StateModel
from state_controller import AdvantageActorCriticController
import vizdoom
class StateBase:
def __init__(self, args):
super().__init__()
def run_train(self, args):
for iter in range(100):
self.generate_data(args)
self.train_state_model(args)
self.train_controller(args)
def generate_data(self, args):
print("Generate data...")
def worker(id, args):
state_model = Model.create(StateModel, args, args.state_model)
state_model.eval()
controller = Model.create(AdvantageActorCriticController, args, args.checkpoint_file)
controller.eval()
new_controller = not os.path.isfile(args.checkpoint_file)
game = args.instance_class(args.vizdoom_config, args.wad_path, args.skiprate, actions=args.action_set, id=id)
state = args.instance_class.NormalizedState(screen=None, depth=None, labels=None, variables=None)
state.screen = torch.Tensor(1, *args.screen_size)
state.variables = torch.Tensor(1, args.variable_num)
action_onehot = torch.zeros(1, args.button_num, device=device)
cells = StateModel.get_cells(1)
episode_num = 2
max_step = 1000
for episode in range(episode_num):
step_state = game.get_state_normalized()
episode_screens = []
episode_variables = []
episode_actions = []
episode_vars = []
action = 0
for step in range(max_step):
# convert state to torch tensors
state.screen[0, :] = torch.from_numpy(step_state.screen)
state.variables[0, :] = torch.from_numpy(step_state.variables)
# compute an action
if not new_controller:
with torch.set_grad_enabled(False):
observation = state_model.features(state.screen.to(device), state.variables.to(device))
action = controller.forward(observation, cells[-2])
cells, pred = state_model(observation, action_onehot.zero_().scatter_(-1, action, 1), cells)
else:
action = torch.randint(0, args.button_num, (1, 1), dtype=torch.long, device=device)
action_onehot.zero_().scatter_(-1, action, 1)
episode_screens.append(step_state.screen)
episode_variables.append(step_state.variables)
episode_actions.append(action_onehot.cpu().numpy()[0])
# render
step_state, _, finished, vars = game.step_normalized(action[0, 0])
episode_vars.append(vars)
if finished:
print("episode return: {}".format(game.get_episode_return()))
cells = state_model.set_nonterminal(cells, torch.zeros(1, 1))
break
#
# save episodes data to file
#
filename = os.path.join(args.h5_path,
'{:%Y-%m-%d %H-%M-%S}-{}-{}.hd5'.format(datetime.datetime.now(), id, episode))
print(filename)
file = h5py.File(filename, 'w')
file.create_dataset('screens', data=episode_screens, dtype='float32', compression='gzip')
file.create_dataset('variables', data=episode_variables, dtype='float32', compression='gzip')
file.create_dataset('actions', data=episode_actions, dtype='float32', compression='gzip')
file.create_dataset('vars', data=episode_vars, dtype='float32', compression='gzip')
game.new_episode()
threads = []
for i in range(5):
thread = Thread(target=worker, args=(i, args))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def train_state_model(self, args):
print("Train state model...")
state_model = Model.create(StateModel, args, args.state_model)
state_model.train()
def data_generator(args):
batch_size = args.batch_size
episode_size = args.episode_size
screens = []
variables = []
actions = []
vars = []
for filename in glob.glob(os.path.join(args.h5_path, '*.hd5')):
file = h5py.File(filename, 'r')
screens.append(file['screens'])
variables.append(file['variables'])
actions.append(file['actions'])
vars.append(file['vars'])
#
episodes_num = len(screens)
step_screens = np.ndarray(shape=(batch_size, *screens[0].shape[1:]), dtype=np.float32)
step_variables = np.ndarray(shape=(batch_size, *variables[0].shape[1:]), dtype=np.float32)
step_actions = np.ndarray(shape=(batch_size, *actions[0].shape[1:]), dtype=np.float32)
step_vars = np.ndarray(shape=(batch_size, *vars[0].shape[1:]), dtype=np.int)
step_nonterminals = np.ones(shape=(batch_size, 1), dtype=np.float32)
# select episodes for the initial batch
batch_episodes = np.random.randint(episodes_num, size=batch_size)
batch_episodes_length = np.array([len(actions[episode]) for episode in batch_episodes])
batch_episodes_step = np.zeros(batch_size, dtype=np.int)
iter_num = batch_episodes_length.mean().astype(np.int)*episodes_num//batch_size
for iter in range(iter_num):
for i in range(batch_size):
episode = batch_episodes[i]
step = batch_episodes_step[i]
length = batch_episodes_length[i]
step_screens[i, :] = screens[episode][step]
step_variables[i, :] = variables[episode][step]
step_actions[i, :] = actions[episode][step]
step_vars[i, :] = vars[episode][step]+1
batch_episodes_step[i] += 1
if batch_episodes_step[i] >= length:
step_nonterminals[i] = 0.0
# reached terminal state, select a new episode
episode = np.random.randint(episodes_num)
batch_episodes[i] = episode
batch_episodes_step[i] = 0
else:
if step_variables[i, -1] == 0:
step_nonterminals[i] = 1.0
else:
step_nonterminals[i] = 0.0
yield torch.from_numpy(step_screens), \
torch.from_numpy(step_variables), \
torch.from_numpy(step_actions), \
torch.from_numpy(step_vars), \
torch.from_numpy(step_nonterminals)
training_data_loader = data_generator(args)
optimizer = optim.AdamW(state_model.parameters(), lr=5e-4, weight_decay=1e-4, amsgrad=True)
cells = StateModel.get_cells(args.batch_size)
epoch_num = 1
for epoch in range(epoch_num):
mean_loss = 0
mean_accuracy = 0
updates = 0
batch_time = time.time()
for batch, (screens, variables, actions, vars, nonterminals) in enumerate(training_data_loader):
screens, variables, actions, vars, nonterminals = \
screens.to(device), variables.to(device), actions.to(device), vars.to(device), nonterminals.to(device)
observation = state_model.features(screens, variables)
cells, pred = state_model(observation, actions, cells)
cells = state_model.set_nonterminal(cells, nonterminals)
loss = F.nll_loss(pred, vars)
mean_loss += loss.item()
updates += 1
_, pred_vars = pred.max(1)
mean_accuracy += (pred_vars == vars).float().mean()
if batch % args.episode_size == args.episode_size - 1:
loss.backward()
grads = []
weights = []
for p in state_model.parameters():
if p.grad is not None:
grads.append(p.grad.data.view(-1))
weights.append(p.data.view(-1))
grads = torch.cat(grads, 0)
grads_norm = grads.norm()
weights = torch.cat(weights, 0)
weights_norm = weights.norm()
assert grads_norm == grads_norm
optimizer.step()
optimizer.zero_grad()
cells = state_model.reset(cells)
mean_loss /= updates
mean_accuracy /= updates
print("episode loss = {:f}, accuracy = {:f}, grads_norm = {:f}, weights_norm = {:f} train_time = {:.3f}".format(mean_loss, mean_accuracy, grads_norm, weights_norm, time.time() - batch_time))
mean_loss = 0
mean_accuracy = 0
updates = 0
batch_time = time.time()
if batch >= 5000:
break
torch.save(state_model.state_dict(), args.state_model)
def train_controller(self, args):
print("Controller training...")
controller = Model.create(AdvantageActorCriticController, args) #, args.load)
controller.train()
optimizer = optim.AdamW(controller.parameters(), lr=5e-4, amsgrad=True)
#if args.load is not None and os.path.isfile(args.load + '_optimizer.pth'):
# optimizer_dict = torch.load(args.load+'_optimizer.pth')
# optimizer.load_state_dict(optimizer_dict)
assert args.state_model is not None
state_model = Model.create(StateModel, args, args.state_model)
state_model.eval()
optimizer.zero_grad()
state = args.instance_class.NormalizedState(screen=None, depth=None, labels=None, variables=None)
state.screen = torch.Tensor(args.batch_size, *args.screen_size)
state.variables = torch.Tensor(args.batch_size, args.variable_num)
vars = torch.Tensor(args.batch_size, args.variable_num).long()
reward = torch.Tensor(args.batch_size, 1)
nonterminal = torch.Tensor(args.batch_size, 1)
action_onehot = torch.zeros(args.batch_size, len(args.action_set), device=device)
cells = StateModel.get_cells(args.batch_size)
games = []
for i in range(args.batch_size):
games.append(args.instance_class(args.vizdoom_config, args.wad_path, args.skiprate, actions=args.action_set, id=i))
pool = ThreadPool()
def get_state(game):
id = game.get_id()
normalized_state = game.get_state_normalized()
state.screen[id, :] = torch.from_numpy(normalized_state.screen)
state.variables[id, :] = torch.from_numpy(normalized_state.variables)
pool.map(get_state, games)
# start training
for episode in range(args.episode_num):
batch_time = time.time()
rewards = []
nonterminals = []
episode_return = 0
episode_accuracy = 0
for step in range(args.episode_size):
# get action
with torch.set_grad_enabled(False):
observation = state_model.features(state.screen.to(device), state.variables.to(device))
action = controller.forward(observation, cells[-2])
with torch.set_grad_enabled(False):
cells, pred = state_model(observation, action_onehot.zero_().scatter_(-1, action, 1), cells)
action = action.cpu()
#print(action.squeeze())
# step and get new state
def step_game(game):
id = game.get_id()
normalized_state, step_reward, finished, step_vars = game.step_normalized(action[id, 0])
state.screen[id, :] = torch.from_numpy(normalized_state.screen)
state.variables[id, :] = torch.from_numpy(normalized_state.variables)
reward[id, 0] = step_reward
vars[id] = torch.from_numpy(step_vars+1)
if finished:
#episode_return[id] = float(game.get_episode_return())
# cut rewards from future actions
nonterminal[id] = 0
else:
nonterminal[id] = 1
pool.map(step_game, games)
#rewards.append(reward.clone())
# mse as reward for exploration policy
_, pred_vars = pred.max(1)
episode_accuracy += (pred_vars == vars.to(device)).float().mean()
exploration_reward = F.nll_loss(pred, vars.to(device), reduce=False).mean(dim=-1)
exploration_reward = exploration_reward[:, None].cpu()*0.1
episode_return += exploration_reward.mean()
rewards.append(exploration_reward)
noterminal_copy = nonterminal.clone()
nonterminals.append(noterminal_copy)
cells = state_model.set_nonterminal(cells, noterminal_copy)
# update model
controller.backward(rewards, nonterminals)
grads = []
weights = []
for p in controller.parameters():
if p.grad is not None:
grads.append(p.grad.view(-1))
weights.append(p.view(-1))
grads = torch.cat(grads, 0)
weights = torch.cat(weights, 0)
grads_norm = grads.norm()
weights_norm = weights.norm()
assert grads_norm == grads_norm
optimizer.step()
optimizer.zero_grad()
episode_accuracy /= args.episode_size
episode_return /= args.episode_size
if episode % 1 == 0:
print("{}: mean_return = {:f}, mean_accuracy= {:f}, grads_norm = {:f}, weights_norm = {:f}, batch_time = {:.3f}".format(episode, episode_return, episode_accuracy, grads_norm, weights_norm, time.time()-batch_time))
if episode % args.checkpoint_rate == 0:
torch.save(controller.state_dict(), args.checkpoint_file)
#torch.save(optimizer.state_dict(), args.checkpoint_file+'_optimizer.pth')
# terminate games
pool.map(lambda game: game.release(), games)
torch.save(controller.state_dict(), args.checkpoint_file)
#torch.save(optimizer.state_dict(), args.checkpoint_file+'_optimizer.pth')
def run_test(self, args):
print("testing...")
controller = Model.create(AdvantageActorCriticController, args, args.load)
controller.eval()
assert args.state_model is not None
state_model = Model.create(StateModel, args, args.state_model)
state_model.eval()
game = args.instance_class(
args.vizdoom_config, args.wad_path, args.skiprate, visible=True, mode=vizdoom.Mode.ASYNC_PLAYER, actions=args.action_set)
step_state = game.get_state_normalized()
state = args.instance_class.NormalizedState(screen=None, depth=None, labels=None, variables=None)
state.screen = torch.Tensor(1, *args.screen_size)
state.variables = torch.Tensor(1, args.variable_num)
action_onehot = torch.zeros(1, len(args.action_set), device=device)
cells = StateModel.get_cells(1)
while True:
# convert state to torch tensors
state.screen[0, :] = torch.from_numpy(step_state.screen)
state.variables[0, :] = torch.from_numpy(step_state.variables)
# compute an action
with torch.set_grad_enabled(False):
observation = state_model.features(state.screen.to(device), state.variables.to(device))
action = controller.forward(observation, cells[-2])
cells, pred = state_model(observation, action_onehot.zero_().scatter_(-1, action, 1), cells)
action = action.cpu()
print(action)
# render
step_state, _, finished, _ = game.step_normalized(action[0, 0])
if finished:
print("episode return: {}".format(game.get_episode_return()))
                cells = state_model.set_nonterminal(cells, torch.zeros(1, 1))
| 16,596
| -5
| 184
|
d85c96f9a4ab5c97f33e2f69851fd5509bf3d8bc
| 1,027
|
py
|
Python
|
dataset/TransNAS-Bench-101/models/utils/utils.py
|
mindspore-ai/contrib
|
85dccac7a2ba6e962092ecd51aefd962d7f2aeac
|
[
"Apache-2.0"
] | 2
|
2021-11-10T06:16:55.000Z
|
2022-02-22T11:30:04.000Z
|
dataset/TransNAS-Bench-101/models/utils/utils.py
|
mindspore-ai/contrib
|
85dccac7a2ba6e962092ecd51aefd962d7f2aeac
|
[
"Apache-2.0"
] | null | null | null |
dataset/TransNAS-Bench-101/models/utils/utils.py
|
mindspore-ai/contrib
|
85dccac7a2ba6e962092ecd51aefd962d7f2aeac
|
[
"Apache-2.0"
] | 1
|
2022-03-22T06:03:15.000Z
|
2022-03-22T06:03:15.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""utils"""
import sys
from pathlib import Path
lib_dir = (Path(__file__).parent / '..').resolve()
if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
############################
# operations for all tasks #
############################
| 30.205882
| 78
| 0.627069
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""utils"""
import sys
from pathlib import Path
lib_dir = (Path(__file__).parent / '..').resolve()
if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
############################
# operations for all tasks #
############################
def merge_list(lists):
merged = []
for li in lists:
merged += li
return merged
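A one-line usage sketch: merge_list flattens exactly one level of nesting.

assert merge_list([[1, 2], [3], []]) == [1, 2, 3]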
| 77
| 0
| 23
|
a155bdc515a1f2882b5a75befd2139acb09c7731
| 1,410
|
py
|
Python
|
tests/classes/test_channel_group_registry.py
|
vkottler/vtelem
|
3908f2445249ddb5f2057fe3a7f46c204b677bbb
|
[
"MIT"
] | 3
|
2021-02-03T01:14:44.000Z
|
2022-02-27T00:14:13.000Z
|
tests/classes/test_channel_group_registry.py
|
vkottler/vtelem
|
3908f2445249ddb5f2057fe3a7f46c204b677bbb
|
[
"MIT"
] | 27
|
2021-05-04T21:19:41.000Z
|
2022-02-27T00:49:18.000Z
|
tests/classes/test_channel_group_registry.py
|
vkottler/vtelem
|
3908f2445249ddb5f2057fe3a7f46c204b677bbb
|
[
"MIT"
] | null | null | null |
"""
vtelem - Test the channel-group registry's correctness.
"""
# module under test
from vtelem.enums.primitive import Primitive
from vtelem.channel.group_registry import ChannelGroupRegistry
from vtelem.telemetry.environment import TelemetryEnvironment
# internal
from . import EnumA
def test_group_registry_basic():
"""Test simple functionality of a group registry."""
env = TelemetryEnvironment(2 ** 8)
assert env.add_from_enum(EnumA) >= 0
reg = ChannelGroupRegistry(env)
groups = [
reg.create_group("a"),
reg.create_group("b"),
reg.create_group("c"),
]
# add channels to each group
for group in groups:
reg.add_channel(group, "a", Primitive.UINT32, 1.0)
reg.add_channel(group, "b", Primitive.UINT32, 1.0)
reg.add_channel(group, "c", Primitive.UINT32, 1.0)
reg.add_enum_channel(group, "test_enum", "enum_a", 1.0)
# write channels in each group
for group in groups:
with reg.group(group) as data:
data["a"] = 1
data["b"] = 2
data["c"] = 3
data["test_enum"] = "b"
# read channels in each group to make sure correct values were written
for group in groups:
with reg.group(group) as data:
assert data["a"] == 1
assert data["b"] == 2
assert data["c"] == 3
assert data["test_enum"] == "b"
| 29.375
| 74
| 0.61844
|
"""
vtelem - Test the channel-group registry's correctness.
"""
# module under test
from vtelem.enums.primitive import Primitive
from vtelem.channel.group_registry import ChannelGroupRegistry
from vtelem.telemetry.environment import TelemetryEnvironment
# internal
from . import EnumA
def test_group_registry_basic():
"""Test simple functionality of a group registry."""
env = TelemetryEnvironment(2 ** 8)
assert env.add_from_enum(EnumA) >= 0
reg = ChannelGroupRegistry(env)
groups = [
reg.create_group("a"),
reg.create_group("b"),
reg.create_group("c"),
]
# add channels to each group
for group in groups:
reg.add_channel(group, "a", Primitive.UINT32, 1.0)
reg.add_channel(group, "b", Primitive.UINT32, 1.0)
reg.add_channel(group, "c", Primitive.UINT32, 1.0)
reg.add_enum_channel(group, "test_enum", "enum_a", 1.0)
# write channels in each group
for group in groups:
with reg.group(group) as data:
data["a"] = 1
data["b"] = 2
data["c"] = 3
data["test_enum"] = "b"
# read channels in each group to make sure correct values were written
for group in groups:
with reg.group(group) as data:
assert data["a"] == 1
assert data["b"] == 2
assert data["c"] == 3
assert data["test_enum"] == "b"
| 0
| 0
| 0
|
6eaccc68cf3c4a05bd6471980fcbe35ac2f815a9
| 775
|
py
|
Python
|
extra_scripts/plot_naive_expansion_length.py
|
wdecoster/exrede
|
3e1f81d55365c360ee0d2d620c5239b5f3488825
|
[
"MIT"
] | null | null | null |
extra_scripts/plot_naive_expansion_length.py
|
wdecoster/exrede
|
3e1f81d55365c360ee0d2d620c5239b5f3488825
|
[
"MIT"
] | null | null | null |
extra_scripts/plot_naive_expansion_length.py
|
wdecoster/exrede
|
3e1f81d55365c360ee0d2d620c5239b5f3488825
|
[
"MIT"
] | null | null | null |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
res = [line.strip().split('\t') for line in open("sizes.txt").readlines()]
a = []
for line in res:
sizes = [int(i) for i in line[1].split(',')]
strands = [j for j in line[2].split(',')]
for si, st in zip(sizes, strands):
a.append((line[0], si, st))
df = pd.DataFrame(a, columns=['name', 'length', 'strand1'])
df.loc[df.strand1 == "True", 'strand'] = '-'
df.loc[df.strand1 == "False", 'strand'] = '+'
df.replace(dict(d6843='Patient1',
d5945='Patient2'),
regex=True,
inplace=True
)
plt.close("all")
sns.swarmplot(x="name", y="length", data=df, hue="strand")
plt.legend(loc='upper left', title="strand", frameon=False)
plt.show()
| 27.678571
| 74
| 0.590968
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
res = [line.strip().split('\t') for line in open("sizes.txt").readlines()]
a = []
for line in res:
sizes = [int(i) for i in line[1].split(',')]
strands = [j for j in line[2].split(',')]
for si, st in zip(sizes, strands):
a.append((line[0], si, st))
df = pd.DataFrame(a, columns=['name', 'length', 'strand1'])
df.loc[df.strand1 == "True", 'strand'] = '-'
df.loc[df.strand1 == "False", 'strand'] = '+'
df.replace(dict(d6843='Patient1',
d5945='Patient2'),
regex=True,
inplace=True
)
plt.close("all")
sns.swarmplot(x="name", y="length", data=df, hue="strand")
plt.legend(loc='upper left', title="strand", frameon=False)
plt.show()
| 0
| 0
| 0
|
582bb6d45d5243b53f6b225ab85affb63b8d114d
| 11,102
|
py
|
Python
|
src/pyetllib/etllib/tools/streamtools.py
|
slouchart/pyetllib
|
133df36a1628f413cd60a86e4c7eac2738844d17
|
[
"MIT"
] | 2
|
2020-04-01T10:08:02.000Z
|
2021-03-07T15:18:14.000Z
|
src/pyetllib/etllib/tools/streamtools.py
|
slouchart/pyetllib
|
133df36a1628f413cd60a86e4c7eac2738844d17
|
[
"MIT"
] | null | null | null |
src/pyetllib/etllib/tools/streamtools.py
|
slouchart/pyetllib
|
133df36a1628f413cd60a86e4c7eac2738844d17
|
[
"MIT"
] | 1
|
2020-10-13T13:23:02.000Z
|
2020-10-13T13:23:02.000Z
|
from itertools import starmap, filterfalse, zip_longest
from itertools import chain
import functools
from functools import reduce as reduce_
import operator
from collections import namedtuple
import toolz
from toolz import pipe as pipe_, compose as compose_
from ._iterators import _iterators_controller, _controlled_iterator
"""
designing a lookup scheme.
accepts any Iterable
for each item in this iterable
- computes a key value from the item data using a provided callable
- checks if a provided lookup map contains this key
if yes:
- outputs this item optionally enriched with the lookup data using
another provided callable
if no:
if rejects are enabled:
- outputs this item in a separate iterator
"""
@stream_converter.dispatch(tuple, tuple)
@stream_converter.dispatch(dict, dict)
@stream_converter.dispatch(namedtuple, namedtuple)
@stream_converter.dispatch(tuple, dict, key_type=str)
@stream_converter.dispatch(tuple, dict, key_type=int)
@stream_converter.dispatch(tuple, namedtuple)
@stream_converter.dispatch(dict, tuple)
@stream_converter.dispatch(dict, namedtuple)
@stream_converter.dispatch(namedtuple, dict)
@stream_converter.dispatch(namedtuple, tuple)
def xargs(g, funcs, as_iterable=False):
"""returns a function that accepts a tuple as an arguments and then
maps each element of this tuple to one of the funcs generating another
tuple in the process. Finally, the function g is called with the tuple
elements as arguments.
If the tuple does not contain enough elements to map all the funcs,
the last element is repeated to provide an argument to the remaining funcs
"""
return inner
| 26.245863
| 96
| 0.580796
|
from itertools import starmap, filterfalse, zip_longest
from itertools import chain
import functools
from functools import reduce as reduce_
import operator
from collections import namedtuple
import toolz
from toolz import pipe as pipe_, compose as compose_
from ._iterators import _iterators_controller, _controlled_iterator
def aggregate(aggregator, groupings):
if aggregator is None:
aggregator = lambda x: x # noqa: E731
for k, g in dict(groupings).items():
yield k, aggregator(g)
def call_next(iterable):
it = iter(iterable)
def inner():
try:
return next(it)
except StopIteration:
return None
return inner
def call_next_starred(*items):
return call_next(iter(items))
def compose(*funcs):
return compose_(*funcs)
def filtertruefalse(predicate, iterable):
src_1, src_2 = replicate(iterable)
return filter(predicate, src_1), filterfalse(predicate, src_2)
def groupby(key, iterable):
return toolz.groupby(key, iterable)
def join(*iterables, fill_value=None):
iterators = [iter(it) for it in iterables]
stopped_iterators = {it: False for it in iterators}
while True:
result = []
for it in iterators:
try:
item = next(it)
except StopIteration:
item = fill_value
stopped_iterators[it] = True
result.append(item)
if all(stopped_iterators.values()):
return
else:
yield tuple(result)
"""
designing a lookup scheme.
accepts any Iterable
for each item in this iterable
- computes a key value from the item data using a provided callable
- checks if a provided lookup map contains this key
if yes:
- outputs this item optionally enriched with the lookup data using
another provided callable
if no:
if rejects are enabled:
- outputs this item in a separate iterator
"""
def lookup(iterable, key=lambda x: x, lookup_map=None,
merge=False, enable_rejects=False):
if lookup_map is None:
lookup_map = {}
if merge and callable(merge):
func_merge = merge
elif merge:
def func_merge(a, b):
return a, b
else:
def func_merge(a, _):
return a
def lookup_(it):
def map_merge(e):
return func_merge(e, lookup_map[key(e)])
return map(
map_merge,
filter(
lambda x: key(x) in lookup_map,
it
)
)
if enable_rejects:
src1, src2 = replicate(iterable)
return lookup_(iter(src1)), \
filterfalse(
lambda x: key(x) in lookup_map,
iter(src2)
)
else:
return lookup_(iter(iterable))
def mcompose(*funcs):
def _composer(f, g):
def inner(*args, **kwargs):
return f(*g(*args, **kwargs))
return inner
return reduce_(
_composer,
funcs
)
class pipable(object):
def __init__(self, callable_):
self._callable = callable_
assert callable(self._callable)
def __call__(self, *args, **kwargs):
result = self._callable(*args, **kwargs)
if callable(result):
return pipable(result)
else:
return result
def __or__(self, other):
return pipable(pipeline(self, other))
def pipeline(*funcs):
return compose_(*reversed(funcs))
def pipe_data_through(data, *steps):
return pipe_(data, *steps)
def reduce(function, iterable, initial=None):
return functools.reduce(
function, iterable, initial
)
class replicate(_iterators_controller):
def create_controlled_iterators(self, n=2):
return tuple(
_controlled_iterator(self) for _ in range(n)
)
def dispatch_item(self, item, requester):
for it in self._iterators:
it.send(item)
def select(predicates, iterable, strict=False):
if predicates is None or len(predicates) == 0:
clauses = (
lambda x: bool(x),
lambda x: not bool(x)
) # works either if strict is True
else:
def or_(f1, f2):
return xargs(
operator.or_, (f1, f2)
)
def and_not_(f1, f2):
return xargs(
operator.and_,
(
f1,
compose_(
operator.not_,
f2
)
)
)
last_clause = lambda a: False # noqa: E731
if strict:
clauses = list()
for predicate in predicates:
last_clause = and_not_(
predicate,
last_clause
)
clauses.append(last_clause)
else:
clauses = list(predicates)
else_ = compose_(
operator.not_,
functools.reduce(
or_,
predicates,
last_clause
)
)
clauses = tuple(clauses) + (else_, )
iterators = replicate(
iter(iterable),
len(clauses)
)
return tuple(
starmap(
filter,
zip(
clauses,
iterators
)
)
)
class split(_iterators_controller):
def create_controlled_iterators(self, expected_length=-1):
if expected_length > 0:
nb_splitter = expected_length
else:
try:
first = next(self._it)
nb_splitter = len(self._splitter_func(first))
self._it = chain([first], self._it) # reset the main iterator
except StopIteration:
nb_splitter = 0
if nb_splitter:
return tuple(
_controlled_iterator(self)
for _ in range(nb_splitter)
)
else:
return tuple()
def __init__(self, func, *args, expected_length=-1, **kwargs):
self._splitter_func = func
assert callable(self._splitter_func)
super().__init__(*args, expected_length=expected_length, **kwargs)
def dispatch_item(self, item, requester):
try:
split_item = self._splitter_func(item)
except Exception:
raise RuntimeError("Exception in the splitting function")
for index, t in enumerate(split_item):
try:
self._iterators[index].send(t)
except IndexError:
raise ValueError(
"Encountered a tuple with length "
"exceeding the number of output "
"iterators: " +
f"{len(split_item)} > "
f"{len(self._iterators)}"
)
class stream_converter:
_dispatcher = dict()
def __init__(self, from_, to_, *args, **kwargs):
key_type = kwargs.pop('key_type', None)
if (from_, to_, key_type) in stream_converter._dispatcher:
meth_factory = stream_converter._dispatcher[
(from_, to_, key_type)
] # a kind of goofy name isn't it?
self._method = meth_factory(*args, **kwargs)
# be insured no meth has been cooked here, only methODs :)
else:
raise KeyError(f"Type association "
f"'({from_.__name__}, {to_.__name__})' "
f"unsupported by stream_converter")
def __call__(self, data):
return self._method(data)
def __or__(self, other): # chain converters using |
assert callable(other)
return pipable(pipeline(self, other))
@classmethod
def dispatch(cls, from_, to_, key_type=None):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
assert (from_, to_, key_type) not in cls._dispatcher, \
f"A dispatch entry already exists for " \
f"({from_.__name__}, {to_.__name__}, " \
f"{key_type.__name__ if key_type is not None else str(key_type)})" # noqa: E501
cls._dispatcher[(from_, to_, key_type)] = wrapper
return wrapper
return decorator
@stream_converter.dispatch(tuple, tuple)
@stream_converter.dispatch(dict, dict)
@stream_converter.dispatch(namedtuple, namedtuple)
def _identity():
return toolz.identity
@stream_converter.dispatch(tuple, dict, key_type=str)
def _convert_tuple_to_dict_with_keys(keys):
if keys and len(keys):
return lambda d: dict(zip(keys, d))
else:
raise ValueError("Converter requires non-empty 'keys' argument")
@stream_converter.dispatch(tuple, dict, key_type=int)
def _convert_tuple_to_dict():
return lambda t: dict(enumerate(t))
@stream_converter.dispatch(tuple, namedtuple)
def _convert_tuple_to_named_tuple(keys, typename='DataStructure'):
if keys and len(keys):
return namedtuple(typename, keys)._make
else:
raise ValueError("Converter requires non-empty 'keys' argument")
@stream_converter.dispatch(dict, tuple)
def _convert_dict_to_tuple():
return lambda d: tuple(d.values())
@stream_converter.dispatch(dict, namedtuple)
def _convert_dict_to_namedtuple(typename='DataStructure'):
return lambda d: namedtuple(typename, d.keys())._make(d.values())
@stream_converter.dispatch(namedtuple, dict)
def _convert_namedtuple_to_dict():
return lambda nt: dict(nt._asdict())
@stream_converter.dispatch(namedtuple, tuple)
def _convert_namedtuple_to_tuple():
return lambda nt: tuple(nt)
def stream_generator(keys, funcs, nb_items):
key_func = {}
for k, f in zip_longest(keys, funcs, fillvalue=lambda: None):
if callable(f):
key_func[k] = f
else:
key_func[k] = lambda: f # noqa: E731
def make_data():
return {
k_: f_() for k_, f_ in key_func.items()
}
if nb_items >= 0:
for _ in range(nb_items):
yield make_data()
elif nb_items < 0:
while True:
yield make_data()
def xargs(g, funcs, as_iterable=False):
"""returns a function that accepts a tuple as an arguments and then
maps each element of this tuple to one of the funcs generating another
tuple in the process. Finally, the function g is called with the tuple
elements as arguments.
If the tuple does not contain enough elements to map all the funcs,
the last element is repeated to provide an argument to the remaining funcs
"""
def inner(*args):
evaluated_funcs = tuple(
starmap(
lambda f, arg: f(arg),
zip_longest(funcs, args, fillvalue=args[-1])
)
)
return g(evaluated_funcs) if as_iterable else g(*evaluated_funcs)
return inner
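To ground the lookup-scheme notes and the xargs docstring above, a short usage sketch; the sample records and lookup map are made up for illustration:

import operator

people = [{"id": 1, "name": "Ann"}, {"id": 2, "name": "Bob"}, {"id": 5, "name": "Eve"}]
countries = {1: "FR", 2: "US"}

found, rejects = lookup(people, key=lambda p: p["id"],
                        lookup_map=countries, merge=True, enable_rejects=True)
print(list(found))    # matched rows paired with their lookup data: (row, 'FR'), (row, 'US')
print(list(rejects))  # rows whose key is missing from the lookup map

# xargs maps each positional argument through its own function, reusing the last
# argument when there are more functions than arguments.
f = xargs(operator.add, (abs, lambda v: v * 10))
print(f(-3, 2))   # abs(-3) + 2*10  -> 23
print(f(-3))      # single argument reused: abs(-3) + (-3)*10 -> -27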
| 8,379
| 186
| 831
|
5d4a5045147d1f57d04c1ce247f1b2f1b13ede17
| 2,309
|
py
|
Python
|
tests/mocks.py
|
DJRavinszkha/pykeen
|
d79fe39f83bc2831137f22be6421b37568694cf4
|
[
"MIT"
] | null | null | null |
tests/mocks.py
|
DJRavinszkha/pykeen
|
d79fe39f83bc2831137f22be6421b37568694cf4
|
[
"MIT"
] | null | null | null |
tests/mocks.py
|
DJRavinszkha/pykeen
|
d79fe39f83bc2831137f22be6421b37568694cf4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Mocks for tests."""
from typing import Optional, Tuple
import torch
from torch import nn
from pykeen.models import EntityRelationEmbeddingModel, Model
from pykeen.nn.emb import EmbeddingSpecification, RepresentationModule
from pykeen.triples import TriplesFactory
__all__ = [
'CustomRepresentations',
'MockModel',
]
class CustomRepresentations(RepresentationModule):
"""A custom representation module with minimal implementation."""
class MockModel(EntityRelationEmbeddingModel):
"""A mock model returning fake scores."""
def _generate_fake_scores(self, batch: torch.LongTensor) -> torch.FloatTensor:
"""Generate fake scores s[b, i] = i of size (batch_size, num_entities)."""
batch_size = batch.shape[0]
batch_scores = self.scores.view(1, -1).repeat(batch_size, 1)
assert batch_scores.shape == (batch_size, self.num_entities)
return batch_scores
| 37.241935
| 100
| 0.705067
|
# -*- coding: utf-8 -*-
"""Mocks for tests."""
from typing import Optional, Tuple
import torch
from torch import nn
from pykeen.models import EntityRelationEmbeddingModel, Model
from pykeen.nn.emb import EmbeddingSpecification, RepresentationModule
from pykeen.triples import TriplesFactory
__all__ = [
'CustomRepresentations',
'MockModel',
]
class CustomRepresentations(RepresentationModule):
"""A custom representation module with minimal implementation."""
def __init__(self, num_entities: int, shape: Tuple[int, ...] = (2,)):
super().__init__(max_id=num_entities, shape=shape)
self.x = nn.Parameter(torch.rand(*shape))
def forward(self, indices: Optional[torch.LongTensor] = None) -> torch.FloatTensor: # noqa:D102
n = self.max_id if indices is None else indices.shape[0]
return self.x.unsqueeze(dim=0).repeat(n, *(1 for _ in self.shape))
class MockModel(EntityRelationEmbeddingModel):
"""A mock model returning fake scores."""
def __init__(self, triples_factory: TriplesFactory):
super().__init__(
triples_factory=triples_factory,
entity_representations=EmbeddingSpecification(embedding_dim=50),
relation_representations=EmbeddingSpecification(embedding_dim=50),
)
num_entities = self.num_entities
self.scores = torch.arange(num_entities, dtype=torch.float)
def _generate_fake_scores(self, batch: torch.LongTensor) -> torch.FloatTensor:
"""Generate fake scores s[b, i] = i of size (batch_size, num_entities)."""
batch_size = batch.shape[0]
batch_scores = self.scores.view(1, -1).repeat(batch_size, 1)
assert batch_scores.shape == (batch_size, self.num_entities)
return batch_scores
def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
return self._generate_fake_scores(batch=hrt_batch)
def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
return self._generate_fake_scores(batch=hr_batch)
def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
return self._generate_fake_scores(batch=rt_batch)
def reset_parameters_(self) -> Model: # noqa: D102
pass # Not needed for unittest
| 1,177
| 0
| 189
|
82a0f8ff772d456c1a957e6548993cb6ed8fd932
| 3,968
|
py
|
Python
|
colander_validators/__init__.py
|
ixmatus/colander-validators
|
68b43e5d6596383ff604da5f2237d185bd5780c2
|
[
"BSD-3-Clause"
] | 2
|
2015-06-01T15:17:23.000Z
|
2015-06-01T18:40:31.000Z
|
colander_validators/__init__.py
|
ixmatus/colander-validators
|
68b43e5d6596383ff604da5f2237d185bd5780c2
|
[
"BSD-3-Clause"
] | null | null | null |
colander_validators/__init__.py
|
ixmatus/colander-validators
|
68b43e5d6596383ff604da5f2237d185bd5780c2
|
[
"BSD-3-Clause"
] | null | null | null |
import re
def email(value):
"""Validate an email address
>>> email("barney@purpledino.com")
True
>>> email("barneydino.com")
'An email address must contain a single @'
"""
usernameRE = re.compile(r"^[^ \t\n\r@<>()]+$", re.I)
domainRE = re.compile(r'''
^(?:[a-z0-9][a-z0-9\-]{0,62}\.)+ # (sub)domain - alpha followed by 62max chars (63 total)
[a-z]{2,}$ # TLD
''', re.I | re.VERBOSE)
messages = dict(
empty='Please enter an email address',
noAt='An email address must contain a single @',
        badUsername='The username portion of the email address is invalid'
                    ' (the portion before the @: {username!s})',
        socketError='An error occurred when trying to connect to the server:'
                    ' {error!s}',
        badDomain='The domain portion of the email address is invalid'
                  ' (the portion after the @: {domain!s})',
        domainDoesNotExist='The domain of the email address does not exist'
                           ' (the portion after the @: {domain!s})')
if not value:
return messages['empty']
value = value.strip()
splitted = value.split('@', 1)
try:
        username, domain = splitted
except ValueError:
return messages['noAt']
if not usernameRE.search(username):
return messages['badUsername'].format(username=username)
if not domainRE.search(domain):
return messages['badDomain'].format(domain=domain)
return True
def url(value):
"""Validate a URL completely
>>> url("ixmat.us")
True
>>> url("ixmat")
'You must provide a full domain name (like ixmat.com)'
"""
messages = dict(
noScheme='You must start your URL with http://, https://, etc',
badURL='That is not a valid URL',
httpError='An error occurred when trying to access the URL: {error!s}',
        socketError='An error occurred when trying to connect to the server: {error!s}',
notFound='The server responded that the page could not be found',
status='The server responded with a bad status code ({status!s})',
noTLD='You must provide a full domain name (like {domain!s}.com)')
url_re = re.compile(r'''
^(http|https)://
(?:[%:\w]*@)? # authenticator
(?: # ip or domain
(?P<ip>(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))|
(?P<domain>[a-z0-9][a-z0-9\-]{,62}\.)* # subdomain
(?P<tld>[a-z]{2,63}|xn--[a-z0-9\-]{2,59}) # top level domain
)
(?::[0-9]{1,5})? # port
# files/delims/etc
(?P<path>/[a-z0-9\-\._~:/\?#\[\]@!%\$&\'\(\)\*\+,;=]*)?
$
''', re.I | re.VERBOSE)
scheme_re = re.compile(r'^[a-zA-Z]+:')
value = value.strip()
if not scheme_re.search(value):
value = "http://" + value
value = encode_idna(value)
match = scheme_re.search(value)
if not match:
return messages['noScheme']
value = match.group(0).lower() + value[len(match.group(0)):]
match = url_re.search(value)
if not match:
return messages['badURL']
if not match.group('domain'):
return messages['noTLD'].format(domain=match.group('tld'))
return True
| 32.260163
| 106
| 0.521925
|
import re
def email(value):
"""Validate an email address
>>> email("barney@purpledino.com")
True
>>> email("barneydino.com")
'An email address must contain a single @'
"""
usernameRE = re.compile(r"^[^ \t\n\r@<>()]+$", re.I)
domainRE = re.compile(r'''
^(?:[a-z0-9][a-z0-9\-]{0,62}\.)+ # (sub)domain - alpha followed by 62max chars (63 total)
[a-z]{2,}$ # TLD
''', re.I | re.VERBOSE)
messages = dict(
empty='Please enter an email address',
noAt='An email address must contain a single @',
        badUsername='The username portion of the email address is invalid'
                    ' (the portion before the @: {username!s})',
        socketError='An error occurred when trying to connect to the server:'
                    ' {error!s}',
        badDomain='The domain portion of the email address is invalid'
                  ' (the portion after the @: {domain!s})',
        domainDoesNotExist='The domain of the email address does not exist'
                           ' (the portion after the @: {domain!s})')
if not value:
return messages['empty']
value = value.strip()
splitted = value.split('@', 1)
try:
        username, domain = splitted
except ValueError:
return messages['noAt']
if not usernameRE.search(username):
return messages['badUsername'].format(username=username)
if not domainRE.search(domain):
return messages['badDomain'].format(domain=domain)
return True
def url(value):
"""Validate a URL completely
>>> url("ixmat.us")
True
>>> url("ixmat")
'You must provide a full domain name (like ixmat.com)'
"""
messages = dict(
noScheme='You must start your URL with http://, https://, etc',
badURL='That is not a valid URL',
httpError='An error occurred when trying to access the URL: {error!s}',
        socketError='An error occurred when trying to connect to the server: {error!s}',
notFound='The server responded that the page could not be found',
status='The server responded with a bad status code ({status!s})',
noTLD='You must provide a full domain name (like {domain!s}.com)')
url_re = re.compile(r'''
^(http|https)://
(?:[%:\w]*@)? # authenticator
(?: # ip or domain
(?P<ip>(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))|
(?P<domain>[a-z0-9][a-z0-9\-]{,62}\.)* # subdomain
(?P<tld>[a-z]{2,63}|xn--[a-z0-9\-]{2,59}) # top level domain
)
(?::[0-9]{1,5})? # port
# files/delims/etc
(?P<path>/[a-z0-9\-\._~:/\?#\[\]@!%\$&\'\(\)\*\+,;=]*)?
$
''', re.I | re.VERBOSE)
scheme_re = re.compile(r'^[a-zA-Z]+:')
value = value.strip()
if not scheme_re.search(value):
value = "http://" + value
value = encode_idna(value)
match = scheme_re.search(value)
if not match:
return messages['noScheme']
value = match.group(0).lower() + value[len(match.group(0)):]
match = url_re.search(value)
if not match:
return messages['badURL']
if not match.group('domain'):
return messages['noTLD'].format(domain=match.group('tld'))
return True
def encode_idna(value):
from urllib.parse import urlparse, urlunparse
scheme, netloc, path, params, query, fragment = urlparse(value)
try:
netloc = netloc.encode('idna')
netloc = netloc.decode('ascii')
return str(urlunparse((scheme,
netloc,
path,
params,
query,
fragment)))
except UnicodeError:
return value
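# Illustrative usage (not part of the original module): both validators return
# True on success and a human-readable error string on failure, so callers should
# compare the result with True rather than relying on truthiness.
def _example_usage():
    assert email("barney@purpledino.com") is True
    assert isinstance(email("barneydino.com"), str)   # returns an error message
    assert url("http://ixmat.us") is True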
| 534
| 0
| 23
|
4db4f3928bb1714fb5828395c74a12d60da6ce54
| 7,963
|
py
|
Python
|
TwitchApi.py
|
borgej/BrexBot
|
2413bd4ec978ed40b8e02b1452dcfac9451ee2c5
|
[
"Apache-2.0"
] | null | null | null |
TwitchApi.py
|
borgej/BrexBot
|
2413bd4ec978ed40b8e02b1452dcfac9451ee2c5
|
[
"Apache-2.0"
] | null | null | null |
TwitchApi.py
|
borgej/BrexBot
|
2413bd4ec978ed40b8e02b1452dcfac9451ee2c5
|
[
"Apache-2.0"
] | null | null | null |
import json
import urllib.request
import requests
import Config
import logging
logging.basicConfig(level=logging.DEBUG)
# Class to access the new Twitch 'HELIX' API
# The class receives its authentication from a dictionary (HEADERS) in the Config.py file.
# The "Bearer" token must have the required scopes to perform successful API calls.
# Function to make a JSON request to the Twitch API
# Get the ID of a given username
# Gather all available data for a specified user
# Get first 100 moderators of a channel. "Pagination" must be used for more than 100 results.
# Request requires a valid Bearer (Helix Oauth) token.
# Check if a specified user is a moderator in the channel
# Check if a specified user is following the channel
# Check if a viewer is subscribed to the channel
# Creates a clip from the live stream. Test this while the stream is live, as
# when offline it shows a previously created clip.
# Check if a user is banned from the channel
# Get followed channel since date
# The following functions use the Twitch V5 API and require a separate token (OAuth)
# The below will retrieve current "Chatters" in a channel.
# THESE ARE NOT A TWITCH API FUNCTIONS - UNDOCUMENTED
# This has a delayed refresh time (currently unknown).
# Note: Due to some viewers/bots being connected anon to the channel
# this will only show chatters and not all viewers.
| 42.811828
| 131
| 0.613337
|
import json
import urllib.request
import requests
import Config
import logging
logging.basicConfig(level=logging.DEBUG)
# Class to access the new Twitch 'HELIX' API
# The class receives its authentication from a dictionary (HEADERS) in the Config.py file.
# The "Bearer" token must have the required scopes to perform successful API calls.
class TwitchApi:
def __init__(self, channel_name=Config.CHANNEL_NAME):
self.channel_id = self.get_user_id(channel_name)
# Function to make a JSON request to the Twitch API
def json_data(self, url, user=None):
try:
req = urllib.request.Request(url, headers=Config.HEADERS)
resp = urllib.request.urlopen(req)
twitch_data = json.loads(json.dumps(json.loads(resp.read())))
return twitch_data
except Exception as e:
logging.error("Error parsing JSON data.", e)
# Get the ID of a given username
def get_user_id(self, username=Config.CHANNEL_NAME):
try:
url = 'https://api.twitch.tv/helix/users?login=' + username
user_data = self.json_data(url, username)
user_id = user_data['data'][0]['id']
return user_id
except Exception as e:
logging.error("Unable to retrieve the user ID for " + username, e)
# Gather all available data for a specified user
def get_user_data(self, username=Config.CHANNEL_NAME):
try:
url = 'https://api.twitch.tv/helix/users?login=' + username
user_data = self.json_data(url, username)
user = user_data['data'][0]
return user
except Exception as e:
logging.error("Unable to retrieve the data for " + username, e)
def get_channel_id(self, channel_name=Config.CHANNEL_NAME):
try:
return self.channel_id
except Exception as e:
logging.error("Could not retrieve channel ID for " + channel_name, e)
# Get first 100 moderators of a channel. "Pagination" must be used for more than 100 results.
# Request requires a valid Bearer (Helix Oauth) token.
def get_moderators(self):
try:
url = 'https://api.twitch.tv/helix/moderation/moderators?broadcaster_id=' + self.channel_id
moderator_data = self.json_data(url)
moderator_names = []
for index, item in enumerate(moderator_data['data']):
moderator_names.append(item['user_name'])
return moderator_names
except Exception as e:
logging.error("Could not retrieve Moderator list for the channel", e)
def get_followers(self):
try:
url = 'https://api.twitch.tv/helix/users/follows?to_id=' + self.channel_id
follower_data = self.json_data(url)
follower_names = []
for index, item in enumerate(follower_data['data']):
follower_names.append(item['from_name'])
return follower_names
except Exception as e:
logging.error("Could not retrieve Follower list for the channel", e)
# Check if a specified user is a moderator in the channel
def is_moderator(self, viewer):
try:
viewer_id = self.get_user_id(viewer)
url = 'https://api.twitch.tv/helix/moderation/moderators?broadcaster_id=' + self.channel_id + '&user_id=' + viewer_id
moderator_data = self.json_data(url)
if moderator_data['data'] is None:
return False
else:
return True
except Exception as e:
logging.error("Unable to determine if " + viewer + " is a moderator.", e)
# Check if a specified user is following the channel
def is_follower(self, viewer):
try:
viewer_id = self.get_user_id(viewer)
url = 'https://api.twitch.tv/helix/users/follows?to_id=' + self.channel_id + '&from_id=' + viewer_id
follow_data = self.json_data(url)
if follow_data['total'] == 0:
return False
else:
return True
except Exception as e:
logging.error("Unable to determine if " + viewer + " is following the channel.", e)
    # Check if a viewer is subscribed to the channel
def is_subscriber(self, viewer):
try:
viewer_id = self.get_user_id(viewer)
url = 'https://api.twitch.tv/helix/subscriptions?broadcaster_id=' + self.channel_id + '&user_id=' + viewer_id + '&tier'
sub_data = self.json_data(url)
if not sub_data['data']:
return False
else:
return True
except Exception as e:
            logging.error("Unable to determine if " + viewer + " is subscribed to the channel.", e)
    # Creates a clip from the live stream. Test this while the stream is live, as
    # when offline it shows a previously created clip.
def create_clip(self):
try:
url = 'https://api.twitch.tv/helix/clips?broadcaster_id=' + self.channel_id
clip_data = self.json_data(url)
return clip_data
except Exception as e:
logging.error("Couldn't create clip.", e)
# Check if a user is banned from the channel
def is_banned(self, viewer):
try:
viewer_id = self.get_user_id(viewer)
url = 'https://api.twitch.tv/helix/moderation/banned?broadcaster_id=' + self.channel_id + '&user_id=' + viewer_id
banned_data = self.json_data(url)
if banned_data['data'] is None:
return False
else:
return True
except Exception as e:
logging.error("Unable to check if " + viewer + " is banned from the channel.", e)
# Get followed channel since date
def follower_since(self, viewer):
try:
viewer_id = self.get_user_id(viewer)
url = 'https://api.twitch.tv/helix/users/follows?to_id=' + self.channel_id + '&from_id=' + viewer_id
follow_data = self.json_data(url)
if follow_data['total'] == 0:
return None
else:
return follow_data['data'][0]['followed_at']
except Exception as e:
logging.error("Unable to determine if " + viewer + " is following the channel.", e)
# The following functions use the Twitch V5 API and require a separate token (OAuth)
def update_channel(self, title, game):
try:
url = 'https://api.twitch.tv/kraken/channels/' + self.get_channel_id() + '?api_version=5'
headers = Config.V5HEADERS
data = {'channel[status]': title, 'channel[game]': game, 'channel[channel_feed_enabled]': 'true'}
response = requests.put(url=url, headers=headers, params=data)
return response
except Exception as e:
logging.error('Unable to perform V5 API call', e)
return None
# The below will retrieve current "Chatters" in a channel.
# THESE ARE NOT A TWITCH API FUNCTIONS - UNDOCUMENTED
# This has a delayed refresh time (currently unknown).
# Note: Due to some viewers/bots being connected anon to the channel
# this will only show chatters and not all viewers.
def get_chatter_data(self, channel):
try:
url = 'https://tmi.twitch.tv/group/user/' + channel + '/chatters'
chatter_data = self.json_data(url)
return chatter_data
except Exception as e:
logging.error('Unable to retrieve chatter data. ', e)
return None
def all_chatter_names(self, channel):
try:
chatter_data = self.get_chatter_data(channel)['chatters']
chatters = [item for sublist in chatter_data.values() for item in sublist]
return chatters
except Exception as e:
logging.error('Unable to retrieve chatter names. ', e)
return None
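# Illustrative usage (not part of the original module): a minimal sketch assuming
# Config.HEADERS holds a valid Helix Bearer token with the required scopes and
# Config.CHANNEL_NAME names an existing channel; each call performs a live request.
def _example_usage():
    api = TwitchApi()                                   # resolves the channel ID once
    if api.is_follower('some_viewer'):                  # 'some_viewer' is a placeholder name
        print('some_viewer follows the channel')
    print(api.all_chatter_names(Config.CHANNEL_NAME))   # undocumented chatters endpoint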
| 6,102
| -5
| 441
|
c41ff4afe695367041a744827d6210a2ecd34a31
| 829
|
py
|
Python
|
src/var.py
|
JayHeng/pzh-knockout-round
|
c34111da8d69f748e1bc3f7a658426ad59f79af7
|
[
"BSD-3-Clause"
] | null | null | null |
src/var.py
|
JayHeng/pzh-knockout-round
|
c34111da8d69f748e1bc3f7a658426ad59f79af7
|
[
"BSD-3-Clause"
] | null | null | null |
src/var.py
|
JayHeng/pzh-knockout-round
|
c34111da8d69f748e1bc3f7a658426ad59f79af7
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
g_playerDict = {'player1':u"nobody",
'player2':u"nobody",
'player3':u"nobody",
'player4':u"nobody",
'player5':u"nobody",
'player6':u"nobody",
'player7':u"nobody",
'player8':u"nobody",
'player9':u"nobody",
'player10':u"nobody",
'player11':u"nobody",
'player12':u"nobody",
'player13':u"nobody",
'player14':u"nobody",
'player15':u"nobody",
'player16':u"nobody",
}
| 26.741935
| 37
| 0.463209
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
g_playerDict = {'player1':u"nobody",
'player2':u"nobody",
'player3':u"nobody",
'player4':u"nobody",
'player5':u"nobody",
'player6':u"nobody",
'player7':u"nobody",
'player8':u"nobody",
'player9':u"nobody",
'player10':u"nobody",
'player11':u"nobody",
'player12':u"nobody",
'player13':u"nobody",
'player14':u"nobody",
'player15':u"nobody",
'player16':u"nobody",
}
def getPlayer():
global g_playerDict
return g_playerDict
def setPlayer( *args ):
global g_playerDict
g_playerDict = args[0]
| 96
| 0
| 46
|
5c5ab3597807a949bef2e7b63629871211bc1d50
| 7,620
|
py
|
Python
|
web/forms/account.py
|
HongDaMa/sass_project
|
d971a4307a4057ecb0bea72c34e6c2112cfc2497
|
[
"Apache-2.0"
] | 1
|
2021-03-01T10:50:05.000Z
|
2021-03-01T10:50:05.000Z
|
web/forms/account.py
|
HongDaMa/sass_project
|
d971a4307a4057ecb0bea72c34e6c2112cfc2497
|
[
"Apache-2.0"
] | 5
|
2021-06-08T21:20:53.000Z
|
2022-03-12T00:24:58.000Z
|
web/forms/account.py
|
HongDaMa/sass_project
|
d971a4307a4057ecb0bea72c34e6c2112cfc2497
|
[
"Apache-2.0"
] | null | null | null |
#!E:\py_virtual_env\saas_project\Scripts\python.exe
# -*- coding: utf-8 -*-
from django import forms
from web import models
from django.core.validators import RegexValidator,ValidationError
from django.core.exceptions import ValidationError
from django.conf import settings
from django_redis import get_redis_connection
from utils import encrypt
from utils.tencent.sms import send_sms_single
import random
| 31.229508
| 89
| 0.599213
|
#!E:\py_virtual_env\saas_project\Scripts\python.exe
# -*- coding: utf-8 -*-
from django import forms
from web import models
from django.core.validators import RegexValidator,ValidationError
from django.core.exceptions import ValidationError
from django.conf import settings
from django_redis import get_redis_connection
from utils import encrypt
from utils.tencent.sms import send_sms_single
import random
class BootStrapForm(object):
BootStrapForm_exclude = []
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
for name,field in self.fields.items():
if name in self.BootStrapForm_exclude:
continue
field.widget.attrs['class'] = 'form-control'
field.widget.attrs['placeholder'] = '请输入%s'%(field.label)
class RegisterModelForm(BootStrapForm,forms.ModelForm):
mobile_phone = forms.CharField(
label='手机号',
required = True,
validators=[RegexValidator(r"^(1[3|4|5|6|7|8|9])\d{9}$")],
error_messages={'required':'手机号不能为空。'}
)
email = forms.EmailField(
label='邮箱',
required=True,
error_messages={'required':'邮箱不能为空。'}
)
password = forms.CharField(
label='密码',
required = True,
widget=forms.PasswordInput(),
error_messages={'required':'密码不能为空。'}
)
confirm_password = forms.CharField(
label='重复密码',
required = True,
widget=forms.PasswordInput(),
error_messages={'required':'请输入重复密码。'}
)
code = forms.CharField(
label='验证码',
required = True,
error_messages={'required':'验证码不能为空。'}
)
class Meta:
model = models.UserInfo
fields = ['username','email','password','confirm_password','mobile_phone','code']
    def clean_username(self):
        """
        Validation hook for username.
        :return: the username
        """
username = self.cleaned_data.get('username')
exists = models.UserInfo.objects.filter(username= username).exists()
if exists:
raise ValidationError('用户名已存在,请重新输入!')
else:
return username
    def clean_email(self):
        """
        Validation hook for email.
        :return: the email address
        """
email = self.cleaned_data.get('email')
exists = models.UserInfo.objects.filter(email=email).exists()
if exists:
raise ValidationError('邮箱已存在,请重新输入!')
else:
return email
def clean_password(self):
password = self.cleaned_data.get('password')
        # Hash the password and return it
return encrypt.md5(password)
    def clean_confirm_password(self):
        """
        Validation hook for confirm_password.
        :return: the repeated password
        """
password = self.cleaned_data.get('password')
confirm_password = self.cleaned_data.get('confirm_password')
if password != encrypt.md5(confirm_password):
raise ValidationError('两次输入密码不一致')
else:
return confirm_password
    def clean_code(self):
        """
        Validation hook for code.
        :return: the correct verification code
        """
mobile_phone = self.cleaned_data.get('mobile_phone')
code = self.cleaned_data.get('code')
conn = get_redis_connection("default")
        # Read the verification code previously stored in redis
        value = conn.get(mobile_phone)
        # Decode the returned bytes into a string
if value != None:
value = str(value,encoding='utf-8')
print(value)
if value == None:
raise ValidationError('验证码不存在或者已过期!')
elif value != None and value != code:
raise ValidationError('验证码错误,请重新输入!')
else:
return code
class LoginForm(BootStrapForm,forms.Form):
username = forms.CharField(label='邮箱或者手机号', required=True)
password = forms.CharField(
label='密码',
required=True,
widget=forms.PasswordInput(),
error_messages={'required': '密码不能为空。'}
)
code = forms.CharField(
label='验证码',
required=True,
error_messages={'required': '验证码不能为空。'}
)
    # Override __init__ so the request object is passed in
def __init__(self,request,*args,**kwargs):
super().__init__(*args,**kwargs)
self.request = request
def clean_password(self):
password = self.cleaned_data['password']
        # Hash the password and return it
return encrypt.md5(password)
    def clean_code(self):
        """
        Validation hook for code.
        :return: the verification code
        """
code = self.cleaned_data['code']
image_code = self.request.session.get('image_code')
if not image_code:
raise ValidationError('验证码不存在,或者已过期')
elif code.upper() != image_code.upper():
raise ValidationError('验证码错误')
else:
return code
class Login_SmsForm(BootStrapForm,forms.Form):
mobile_phone = forms.CharField(
label='手机号',
required=True,
validators=[RegexValidator(r"^(1[3|4|5|6|7|8|9])\d{9}$")],
error_messages={'required': '手机号不能为空'}
)
code = forms.CharField(
label='验证码',
required=True,
error_messages={'required': '验证码不能为空。'}
)
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
for name,field in self.fields.items():
field.widget.attrs['class'] = 'form-control'
field.widget.attrs['placeholder'] = '请输入%s'%(field.label)
def clean_mobile_phone(self):
mobile_phone = self.cleaned_data.get('mobile_phone')
exists = models.UserInfo.objects.filter(mobile_phone=mobile_phone).exists()
if not exists:
raise ValidationError('手机号尚未注册,请先注册!')
else:
return mobile_phone
def clean_code(self):
code = self.cleaned_data.get('code')
mobile_phone = self.cleaned_data.get('mobile_phone')
if not mobile_phone:
return code
conn = get_redis_connection('default')
redis_code = conn.get(mobile_phone)
if not redis_code:
raise ValidationError('验证码失效或未发送,请重新发送')
redis_str_code = redis_code.decode('utf-8')
if code != redis_str_code:
raise ValidationError('验证码错误,请重新输入')
return code
class SendsmsForm(forms.Form):
mobile_phone = forms.CharField(
label='手机号',
required=True,
validators=[RegexValidator(r"^(1[3|4|5|6|7|8|9])\d{9}$")],
error_messages={'required': '手机号不能为空'}
)
def __init__(self,request,*args,**kwargs):
super().__init__(*args,**kwargs)
self.request = request
    def clean_mobile_phone(self):
        """Validation hook for the mobile phone number."""
mobile_phone = self.cleaned_data['mobile_phone']
        # Check that the requested SMS template is valid
        tpl = self.request.GET.get('tpl')
        # Check whether the phone number already exists in the database
template_id = settings.TENCET_SMS_TEMPLATES.get(tpl)
print(template_id)
if not template_id:
raise ValidationError('短信模板错误')
exists = models.UserInfo.objects.filter(mobile_phone=mobile_phone).exists()
if tpl == 'login':
if not exists:
raise ValidationError('手机号不存在')
else:
            # Check whether the phone number already exists in the database
if exists:
raise ValidationError('手机号已存在')
code = random.randrange(1000,9999)
        # Send the SMS verification code via the Tencent SDK
sms = send_sms_single(mobile_phone,template_id,[code,])
if sms['result'] != 0:
raise ValidationError("短信发送失败,{}".format(sms['errmsg']))
        # Get a connection from the redis connection pool
        conn = get_redis_connection("default")
        # Write the verification code into redis
conn.set(mobile_phone, code, ex=180)
value = conn.get(mobile_phone)
print(value)
return mobile_phone
| 1,797
| 6,108
| 115
|
52a139c5e62ffa00b24af6c10f5d0ac8fb2a1335
| 431
|
py
|
Python
|
CodingBat/Python/Logic-1/cigar_party.py
|
Togohogo1/pg
|
ee3c36acde47769c66ee13a227762ee677591375
|
[
"MIT"
] | null | null | null |
CodingBat/Python/Logic-1/cigar_party.py
|
Togohogo1/pg
|
ee3c36acde47769c66ee13a227762ee677591375
|
[
"MIT"
] | 1
|
2021-10-14T18:26:56.000Z
|
2021-10-14T18:26:56.000Z
|
CodingBat/Python/Logic-1/cigar_party.py
|
Togohogo1/pg
|
ee3c36acde47769c66ee13a227762ee677591375
|
[
"MIT"
] | 1
|
2021-08-06T03:39:55.000Z
|
2021-08-06T03:39:55.000Z
|
'''
When squirrels get together for a party, they like to have cigars. A squirrel
party is successful when the number of cigars is between 40 and 60,
inclusive. Unless it is the weekend, in which case there is no upper bound on
the number of cigars. Return True if the party with the given values is
successful, or False otherwise.
'''
| 39.181818
| 77
| 0.758701
|
'''
When squirrels get together for a party, they like to have cigars. A squirrel
party is successful when the number of cigars is between 40 and 60,
inclusive. Unless it is the weekend, in which case there is no upper bound on
the number of cigars. Return True if the party with the given values is
successful, or False otherwise.
'''
def cigar_party(cigars, is_weekend):
return cigars >= 40 and (cigars <= 60 or is_weekend)
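# Illustrative checks (not part of the original solution), derived from the problem
# statement above: 40-60 cigars succeed on a weekday, and the upper bound is lifted
# on weekends.
assert cigar_party(30, False) is False
assert cigar_party(50, False) is True
assert cigar_party(70, True) is True
assert cigar_party(39, True) is False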
| 72
| 0
| 23
|
537517161f8f98490b06de9ff4e5fad7cffa2079
| 7,138
|
py
|
Python
|
release/stubs.min/System/Windows/Forms/__init___parts/NotifyIcon.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/NotifyIcon.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/NotifyIcon.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class NotifyIcon(Component, IComponent, IDisposable):
"""
Specifies a component that creates an icon in the notification area. This class cannot be inherited.
NotifyIcon()
NotifyIcon(container: IContainer)
"""
def Dispose(self):
""" Dispose(self: NotifyIcon,disposing: bool) """
pass
def GetService(self, *args):
"""
GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the System.ComponentModel.Component or
by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or
null if the System.ComponentModel.Component does not provide the specified service.
"""
pass
def MemberwiseClone(self, *args):
"""
MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
false is usually appropriate. true to copy the current System.MarshalByRefObject object's
identity to its clone,which will cause remoting client calls to be routed to the remote server
object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def ShowBalloonTip(self, timeout, tipTitle=None, tipText=None, tipIcon=None):
"""
ShowBalloonTip(self: NotifyIcon,timeout: int,tipTitle: str,tipText: str,tipIcon: ToolTipIcon)
Displays a balloon tip with the specified title,text,and icon in the taskbar for the specified
time period.
timeout: The time period,in milliseconds,the balloon tip should display.
tipTitle: The title to display on the balloon tip.
tipText: The text to display on the balloon tip.
tipIcon: One of the System.Windows.Forms.ToolTipIcon values.
ShowBalloonTip(self: NotifyIcon,timeout: int)
Displays a balloon tip in the taskbar for the specified time period.
timeout: The time period,in milliseconds,the balloon tip should display.
"""
pass
def __enter__(self, *args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self, *args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, container=None):
"""
__new__(cls: type)
__new__(cls: type,container: IContainer)
"""
pass
BalloonTipIcon = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the icon to display on the balloon tip associated with the System.Windows.Forms.NotifyIcon.
Get: BalloonTipIcon(self: NotifyIcon) -> ToolTipIcon
Set: BalloonTipIcon(self: NotifyIcon)=value
"""
BalloonTipText = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the text to display on the balloon tip associated with the System.Windows.Forms.NotifyIcon.
Get: BalloonTipText(self: NotifyIcon) -> str
Set: BalloonTipText(self: NotifyIcon)=value
"""
BalloonTipTitle = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the title of the balloon tip displayed on the System.Windows.Forms.NotifyIcon.
Get: BalloonTipTitle(self: NotifyIcon) -> str
Set: BalloonTipTitle(self: NotifyIcon)=value
"""
CanRaiseEvents = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether the component can raise an event.
"""
ContextMenu = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the shortcut menu for the icon.
Get: ContextMenu(self: NotifyIcon) -> ContextMenu
Set: ContextMenu(self: NotifyIcon)=value
"""
ContextMenuStrip = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the shortcut menu associated with the System.Windows.Forms.NotifyIcon.
Get: ContextMenuStrip(self: NotifyIcon) -> ContextMenuStrip
Set: ContextMenuStrip(self: NotifyIcon)=value
"""
DesignMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
Events = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""
Icon = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the current icon.
Get: Icon(self: NotifyIcon) -> Icon
Set: Icon(self: NotifyIcon)=value
"""
Tag = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets an object that contains data about the System.Windows.Forms.NotifyIcon.
Get: Tag(self: NotifyIcon) -> object
Set: Tag(self: NotifyIcon)=value
"""
Text = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the ToolTip text displayed when the mouse pointer rests on a notification area icon.
Get: Text(self: NotifyIcon) -> str
Set: Text(self: NotifyIcon)=value
"""
Visible = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the icon is visible in the notification area of the taskbar.
Get: Visible(self: NotifyIcon) -> bool
Set: Visible(self: NotifyIcon)=value
"""
BalloonTipClicked = None
BalloonTipClosed = None
BalloonTipShown = None
Click = None
DoubleClick = None
MouseClick = None
MouseDoubleClick = None
MouseDown = None
MouseMove = None
MouseUp = None
| 23.95302
| 221
| 0.666293
|
class NotifyIcon(Component, IComponent, IDisposable):
"""
Specifies a component that creates an icon in the notification area. This class cannot be inherited.
NotifyIcon()
NotifyIcon(container: IContainer)
"""
def Dispose(self):
""" Dispose(self: NotifyIcon,disposing: bool) """
pass
def GetService(self, *args):
"""
GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the System.ComponentModel.Component or
by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or
null if the System.ComponentModel.Component does not provide the specified service.
"""
pass
def MemberwiseClone(self, *args):
"""
MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
false is usually appropriate. true to copy the current System.MarshalByRefObject object's
identity to its clone,which will cause remoting client calls to be routed to the remote server
object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def ShowBalloonTip(self, timeout, tipTitle=None, tipText=None, tipIcon=None):
"""
ShowBalloonTip(self: NotifyIcon,timeout: int,tipTitle: str,tipText: str,tipIcon: ToolTipIcon)
Displays a balloon tip with the specified title,text,and icon in the taskbar for the specified
time period.
timeout: The time period,in milliseconds,the balloon tip should display.
tipTitle: The title to display on the balloon tip.
tipText: The text to display on the balloon tip.
tipIcon: One of the System.Windows.Forms.ToolTipIcon values.
ShowBalloonTip(self: NotifyIcon,timeout: int)
Displays a balloon tip in the taskbar for the specified time period.
timeout: The time period,in milliseconds,the balloon tip should display.
"""
pass
def __enter__(self, *args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self, *args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, container=None):
"""
__new__(cls: type)
__new__(cls: type,container: IContainer)
"""
pass
def __str__(self, *args):
pass
BalloonTipIcon = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the icon to display on the balloon tip associated with the System.Windows.Forms.NotifyIcon.
Get: BalloonTipIcon(self: NotifyIcon) -> ToolTipIcon
Set: BalloonTipIcon(self: NotifyIcon)=value
"""
BalloonTipText = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the text to display on the balloon tip associated with the System.Windows.Forms.NotifyIcon.
Get: BalloonTipText(self: NotifyIcon) -> str
Set: BalloonTipText(self: NotifyIcon)=value
"""
BalloonTipTitle = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the title of the balloon tip displayed on the System.Windows.Forms.NotifyIcon.
Get: BalloonTipTitle(self: NotifyIcon) -> str
Set: BalloonTipTitle(self: NotifyIcon)=value
"""
CanRaiseEvents = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether the component can raise an event.
"""
ContextMenu = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the shortcut menu for the icon.
Get: ContextMenu(self: NotifyIcon) -> ContextMenu
Set: ContextMenu(self: NotifyIcon)=value
"""
ContextMenuStrip = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the shortcut menu associated with the System.Windows.Forms.NotifyIcon.
Get: ContextMenuStrip(self: NotifyIcon) -> ContextMenuStrip
Set: ContextMenuStrip(self: NotifyIcon)=value
"""
DesignMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
Events = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""
Icon = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the current icon.
Get: Icon(self: NotifyIcon) -> Icon
Set: Icon(self: NotifyIcon)=value
"""
Tag = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets an object that contains data about the System.Windows.Forms.NotifyIcon.
Get: Tag(self: NotifyIcon) -> object
Set: Tag(self: NotifyIcon)=value
"""
Text = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the ToolTip text displayed when the mouse pointer rests on a notification area icon.
Get: Text(self: NotifyIcon) -> str
Set: Text(self: NotifyIcon)=value
"""
Visible = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the icon is visible in the notification area of the taskbar.
Get: Visible(self: NotifyIcon) -> bool
Set: Visible(self: NotifyIcon)=value
"""
BalloonTipClicked = None
BalloonTipClosed = None
BalloonTipShown = None
Click = None
DoubleClick = None
MouseClick = None
MouseDoubleClick = None
MouseDown = None
MouseMove = None
MouseUp = None
| 18
| 0
| 29
|
6219e63f4cbb5f076b00f07b3a8aca7479ab646f
| 1,052
|
py
|
Python
|
Python/DataCampChallenge-main/problem.py
|
BechirTr/Some_Projects
|
ac0d3975f5969903a0fcf67cf72f7d9cae42d272
|
[
"Apache-2.0"
] | null | null | null |
Python/DataCampChallenge-main/problem.py
|
BechirTr/Some_Projects
|
ac0d3975f5969903a0fcf67cf72f7d9cae42d272
|
[
"Apache-2.0"
] | null | null | null |
Python/DataCampChallenge-main/problem.py
|
BechirTr/Some_Projects
|
ac0d3975f5969903a0fcf67cf72f7d9cae42d272
|
[
"Apache-2.0"
] | 1
|
2021-02-02T17:09:04.000Z
|
2021-02-02T17:09:04.000Z
|
import os
import pandas as pd
import rampwf as rw
from sklearn.model_selection import ShuffleSplit
import numpy as np
problem_title = 'Salary prediction'
_target_column_name = 'SalaryUSD'
# A value which will be used to create wrapper objects for y_pred
Predictions = rw.prediction_types.make_regression()
# An object implementing the workflow
workflow = rw.workflows.Estimator()
score_types = [
rw.score_types.RMSE(name='rmse', precision=3),
]
# READ DATA
| 23.909091
| 75
| 0.694867
|
import os
import pandas as pd
import rampwf as rw
from sklearn.model_selection import ShuffleSplit
import numpy as np
problem_title = 'Salary prediction'
_target_column_name = 'SalaryUSD'
# A value which will be used to create wrapper objects for y_pred
Predictions = rw.prediction_types.make_regression()
# An object implementing the workflow
workflow = rw.workflows.Estimator()
score_types = [
rw.score_types.RMSE(name='rmse', precision=3),
]
def get_cv(X, y):
cv = ShuffleSplit(n_splits=3, test_size=0.2, random_state=57)
return cv.split(X, y)
# READ DATA
def _read_data(path, df_filename):
df = pd.read_csv(os.path.join(path, 'data', df_filename), index_col=0)
y = df[_target_column_name]
X = df.drop(_target_column_name, axis=1)
return X, np.log(1+y.values)
def get_train_data(path='.'):
df_filename = 'train.csv'
return _read_data(path, df_filename)
def get_test_data(path='.'):
df_filename = 'test.csv'
return _read_data(path, df_filename)
| 458
| 0
| 100
|
f9f1f120411b015f12b61e2e79f925200ac0f016
| 6,008
|
py
|
Python
|
kochira/services/core/logger.py
|
nattofriends/kochira
|
8c41f9936ed588a8acc365a2829f06521388e55b
|
[
"MS-PL"
] | null | null | null |
kochira/services/core/logger.py
|
nattofriends/kochira
|
8c41f9936ed588a8acc365a2829f06521388e55b
|
[
"MS-PL"
] | 1
|
2019-05-13T22:02:18.000Z
|
2019-05-13T22:02:18.000Z
|
kochira/services/core/logger.py
|
nattofriends/kochira
|
8c41f9936ed588a8acc365a2829f06521388e55b
|
[
"MS-PL"
] | 1
|
2019-05-13T21:22:02.000Z
|
2019-05-13T21:22:02.000Z
|
"""
IRC message logger.
Enables logging of messages to flat files.
"""
import threading
from datetime import datetime
from kochira import config
from kochira.service import Service, Config
from pathlib import Path
service = Service(__name__, __doc__)
@service.config
@service.setup
@service.shutdown
@service.hook("sighup")
@service.hook("own_message", priority=10000)
@service.hook("own_notice", priority=10000)
@service.hook("invite", priority=10000)
@service.hook("join", priority=10000)
@service.hook("kill", priority=10000)
@service.hook("kick", priority=10000)
@service.hook("mode_change", priority=10000)
@service.hook("channel_message", priority=10000)
@service.hook("private_message", priority=10000)
@service.hook("nick_change", priority=10000)
@service.hook("channel_notice", priority=10000)
@service.hook("private_notice", priority=10000)
@service.hook("part", priority=10000)
@service.hook("topic_change", priority=10000)
@service.hook("quit", priority=10000)
@service.hook("ctcp_action", priority=10000)
| 27.18552
| 85
| 0.651298
|
"""
IRC message logger.
Enables logging of messages to flat files.
"""
import threading
from datetime import datetime
from kochira import config
from kochira.service import Service, Config
from pathlib import Path
service = Service(__name__, __doc__)
@service.config
class Config(Config):
log_dir = config.Field(doc="Path to the log directory.", default="logs")
def _is_log_open(ctx, channel):
return (ctx.client.name, channel) in ctx.storage.handles
def _get_file_handle(ctx, channel):
k = (ctx.client.name, channel)
if k not in ctx.storage.handles:
client_name_path = ctx.storage.path / ctx.client.name
if not client_name_path.exists():
client_name_path.mkdir(parents=True)
path = client_name_path / (channel + ".log")
f = path.open("ab")
service.logger.debug("Opened handle for: %s", path)
ctx.storage.handles[k] = f
return ctx.storage.handles[k]
def _hostmask_for(client, nickname):
user = client.users.get(nickname, {})
username = user.get("username") or ""
hostname = user.get("hostname") or ""
if not username and not hostname:
return ""
return "{username}@{hostname}".format(username=username,
hostname=hostname)
def log(ctx, channel, what):
now = datetime.utcnow()
f = _get_file_handle(ctx, channel)
with ctx.storage.lock:
f.write(("{now} {what}\n".format(
now=now.isoformat(),
what=what
)).encode("utf-8"))
f.flush()
def log_message(ctx, target, origin, message, format):
sigil = " "
if target in ctx.client.channels:
for sigil2, mode in ctx.client._nickname_prefixes.items():
if origin in ctx.client.channels[target]["modes"].get(mode, []):
sigil = sigil2
log(ctx, target, format.format(sigil=sigil, origin=origin,
message=message))
def log_global(ctx, origin, what):
for channel, info in ctx.client.channels.items():
if origin in info["users"]:
log(ctx, channel, what)
if _is_log_open(ctx, origin):
log(ctx, origin, what)
def close_all_handles(storage):
with storage.lock:
for f in storage.handles.values():
f.close()
storage.handles = {}
service.logger.debug("Log handles closed")
@service.setup
def setup_logger(ctx):
ctx.storage.handles = {}
ctx.storage.path = Path(ctx.config.log_dir)
ctx.storage.lock = threading.Lock()
@service.shutdown
def shutdown_logger(ctx):
close_all_handles(ctx.storage)
@service.hook("sighup")
def flush_log_handles(ctx):
close_all_handles(ctx.storage)
@service.hook("own_message", priority=10000)
def on_own_message(ctx, target, message):
on_channel_message(ctx, target, ctx.client.nickname, message)
@service.hook("own_notice", priority=10000)
def on_own_notice(ctx, target, message):
on_channel_notice(ctx, target, ctx.client.nickname, message)
@service.hook("invite", priority=10000)
def on_invite(ctx, target, origin):
log(ctx, origin, "-!- {origin} [{hostmask}] is inviting you to {channel}".format(
origin=origin,
hostmask=_hostmask_for(ctx.client, origin),
channel=target))
@service.hook("join", priority=10000)
def on_join(ctx, target, origin):
log(ctx, target, "--> {origin} [{hostmask}] joined".format(
origin=origin,
hostmask=_hostmask_for(ctx.client, origin)))
@service.hook("kill", priority=10000)
def on_kill(ctx, target, by, message=None):
log_global(ctx, target, "<== {target} was killed by {by}: {message}".format(
target=target,
by=by,
message=message or ""))
@service.hook("kick", priority=10000)
def on_kick(ctx, channel, target, by, message=None):
log(ctx, channel, "<-- {target} was kicked by {by}: {message}".format(
target=target,
by=by,
message=message or ""))
@service.hook("mode_change", priority=10000)
def on_mode_change(ctx, channel, modes, by):
log(ctx, channel, "-!- {by} set modes: {modes}".format(
by=by,
modes=" ".join(modes)))
@service.hook("channel_message", priority=10000)
def on_channel_message(ctx, target, origin, message):
log_message(ctx, target, origin, message, "<{sigil}{origin}> {message}")
@service.hook("private_message", priority=10000)
def on_private_message(ctx, origin, message):
on_channel_message(ctx, origin, origin, message)
@service.hook("nick_change", priority=10000)
def on_nick_change(ctx, old, new):
what = "-!- {old} is now known as {new}".format(old=old, new=new)
log_global(ctx, new, what)
log_global(ctx, old, what)
@service.hook("channel_notice", priority=10000)
def on_channel_notice(ctx, target, origin, message):
log_message(ctx, target, origin, message, "-{sigil}{origin}- {message}")
@service.hook("private_notice", priority=10000)
def on_private_notice(ctx, origin, message):
on_channel_notice(ctx, origin, origin, message)
@service.hook("part", priority=10000)
def on_part(ctx, target, origin, message=None):
log(ctx, target, "<-- {origin} parted: {message}".format(
origin=origin,
message=message or ""))
@service.hook("topic_change", priority=10000)
def on_topic_change(ctx, target, message, by):
log(ctx, target, "-!- {by} changed the topic: {message}".format(
by=by,
message=message))
@service.hook("quit", priority=10000)
def on_quit(ctx, origin, message=None):
log_global(ctx, origin, "<== {origin} [{hostmask}] quit: {message}".format(
origin=origin,
hostmask=_hostmask_for(ctx.client, origin),
message=message or ""))
@service.hook("ctcp_action", priority=10000)
def on_ctcp_action(ctx, origin, target, message):
if target == ctx.client.nickname:
target = origin
log_message(ctx, target, origin, message, " * {sigil}{origin} {message}")
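# Descriptive note (not part of the original service): _get_file_handle opens one
# append-mode file per (client, channel) pair at <log_dir>/<client name>/<channel>.log,
# and log() prefixes each entry with an ISO-8601 UTC timestamp, e.g.
#   2019-05-13T21:22:02.123456 <@nickname> hello world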
| 4,261
| 77
| 601
|
c1339e18a7f733a5c9225797b9a9145e7b651fbb
| 4,560
|
py
|
Python
|
tests/test_classifier_api.py
|
pmatigakis/classifier
|
b37c89df947f26a8001aa7a51dad86af1921ed19
|
[
"MIT"
] | 1
|
2020-01-07T00:05:09.000Z
|
2020-01-07T00:05:09.000Z
|
tests/test_classifier_api.py
|
pmatigakis/classifier
|
b37c89df947f26a8001aa7a51dad86af1921ed19
|
[
"MIT"
] | 2
|
2019-12-20T19:02:37.000Z
|
2019-12-20T19:02:38.000Z
|
tests/test_classifier_api.py
|
pmatigakis/classifier
|
b37c89df947f26a8001aa7a51dad86af1921ed19
|
[
"MIT"
] | 1
|
2017-05-24T19:56:08.000Z
|
2017-05-24T19:56:08.000Z
|
import json
from unittest import main
from unittest.mock import patch
from common import ClassifierTestCaseWithMockClassifiers
from classifier import __VERSION__
from classifier.ml import Classifier
if __name__ == "__main__":
main()
| 25.333333
| 75
| 0.557237
|
import json
from unittest import main
from unittest.mock import patch
from common import ClassifierTestCaseWithMockClassifiers
from classifier import __VERSION__
from classifier.ml import Classifier
class ClassificationEndpointTests(ClassifierTestCaseWithMockClassifiers):
def test_probability_classifier(self):
client = self.app.test_client()
data = {
"data": [[5.4, 3.0, 4.5, 1.5]]
}
headers = {
"Content-Type": "application/json",
}
response = client.post(
"/api/v1/predict/iris_probabilities",
data=json.dumps(data),
headers=headers
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode())
self.assertIn("results", data)
self.assertEqual(len(data["results"]), 1)
self.assertEqual(len(data["results"][0]), 3)
self.assertAlmostEqual(
data["results"][0]["Iris-setosa"], 0.01031, 5)
self.assertAlmostEqual(
data["results"][0]["Iris-versicolor"], 0.36503, 5)
self.assertAlmostEqual(
data["results"][0]["Iris-virginica"], 0.62465, 5)
def test_classifier(self):
client = self.app.test_client()
data = {
"data": [[5.4, 3.0, 4.5, 1.5]]
}
headers = {
"Content-Type": "application/json",
}
response = client.post(
"/api/v1/predict/iris", data=json.dumps(data), headers=headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode())
self.assertDictEqual(
data,
{
"results": ["Iris-virginica"]
}
)
def test_classifier_does_not_exist(self):
client = self.app.test_client()
data = {
"data": [[5.4, 3.0, 4.5, 1.5]]
}
headers = {
"Content-Type": "application/json",
}
response = client.post(
"/api/v1/predict/fail_test",
data=json.dumps(data),
headers=headers
)
self.assertEqual(response.status_code, 404)
data = json.loads(response.data.decode())
self.assertDictEqual(
data,
{
"classifier": "fail_test",
"error": "unknown classifier"
}
)
@patch.object(Classifier, "classify")
def test_error_while_running_classification(self, classify_mock):
classify_mock.side_effect = Exception
client = self.app.test_client()
data = {
"data": [[5.4, 3.0, 4.5, 1.5]]
}
headers = {
"Content-Type": "application/json",
}
response = client.post(
"/api/v1/predict/iris", data=json.dumps(data), headers=headers)
self.assertEqual(response.status_code, 500)
data = json.loads(response.data.decode())
self.assertDictEqual(
data,
{
"error": "failed to classify object",
"classifier": "iris"
}
)
class ClassifiersResourceTests(ClassifierTestCaseWithMockClassifiers):
def test_get_available_classifiers(self):
client = self.app.test_client()
response = client.get("/api/v1/classifiers")
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode())
self.assertCountEqual(
data,
[
'iris',
'iris_probabilities'
]
)
class HealthResourceTests(ClassifierTestCaseWithMockClassifiers):
def test_get_health(self):
client = self.app.test_client()
response = client.get("/service/health")
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode())
self.assertDictEqual(data, {"result": "ok"})
class InformationResourceTests(ClassifierTestCaseWithMockClassifiers):
def test_get_information(self):
client = self.app.test_client()
response = client.get("/service/information")
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode())
self.assertDictEqual(
data,
{
"host": "127.0.0.1",
"port": 8022,
"service": "classifier",
"version": __VERSION__
}
)
if __name__ == "__main__":
main()
| 3,803
| 343
| 170
|
537e3e050876d7c9d2b8f2ecca7ee3f33f63f223
| 7,246
|
py
|
Python
|
utils.py
|
lobachevzky/on-policy-curiosity
|
1d1e9c05b04d313d15e698fb8a05c3116085381f
|
[
"MIT"
] | 1
|
2021-11-22T09:43:13.000Z
|
2021-11-22T09:43:13.000Z
|
utils.py
|
lobachevzky/on-policy-curiosity
|
1d1e9c05b04d313d15e698fb8a05c3116085381f
|
[
"MIT"
] | 2
|
2019-12-17T04:05:25.000Z
|
2020-03-18T18:38:31.000Z
|
utils.py
|
lobachevzky/on-policy-curiosity
|
1d1e9c05b04d313d15e698fb8a05c3116085381f
|
[
"MIT"
] | 1
|
2020-05-29T15:14:36.000Z
|
2020-05-29T15:14:36.000Z
|
# third party
import argparse
import csv
import random
import re
import subprocess
from dataclasses import fields, is_dataclass
from functools import reduce
from io import StringIO
from typing import List, Optional
import numpy as np
import torch
import torch.jit
import torch.nn as nn
from gym import spaces
import gym
# Necessary for my KFAC implementation.
# https://github.com/openai/baselines/blob/master/baselines/common/tf_util.py#L87
@torch.jit.script
RESET = "\033[0m"
def hierarchical_parse_args(parser: argparse.ArgumentParser, include_positional=False):
"""
:return:
{
group1: {**kwargs}
group2: {**kwargs}
...
**kwargs
}
"""
args = parser.parse_args()
positional = list(get_positionals(parser._action_groups))
nonpositional = dict(get_nonpositionals(parser._action_groups))
optional = nonpositional.pop("optional arguments")
nonpositional = {**nonpositional, **optional}
if include_positional:
return positional, nonpositional
return nonpositional
| 26.542125
| 87
| 0.635109
|
# third party
import argparse
import csv
import random
import re
import subprocess
from dataclasses import fields, is_dataclass
from functools import reduce
from io import StringIO
from typing import List, Optional
import numpy as np
import torch
import torch.jit
import torch.nn as nn
from gym import spaces
import gym
def round(x, dec):
return torch.round(x * 10 ** dec) / 10 ** dec
def grad(x, y):
return torch.autograd.grad(
x.mean(), y.parameters() if isinstance(y, nn.Module) else y, retain_graph=True
)
def get_render_func(venv):
if hasattr(venv, "envs"):
return venv.envs[0].render
elif hasattr(venv, "venv"):
return get_render_func(venv.venv)
elif hasattr(venv, "env"):
return get_render_func(venv.env)
return None
# Necessary for my KFAC implementation.
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1))
def forward(self, x):
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
# https://github.com/openai/baselines/blob/master/baselines/common/tf_util.py#L87
def init_normc_(weight, gain=1):
weight.normal_(0, 1)
weight *= gain / torch.sqrt(weight.pow(2).sum(1, keepdim=True))
def set_index(array, idxs, value):
idxs = np.array(idxs)
if idxs.size > 0:
array[tuple(idxs.T)] = value
def get_index(array, idxs):
idxs = np.array(idxs)
if idxs.size == 0:
return np.array([], array.dtype)
return array[tuple(idxs.T)]
def get_n_gpu():
nvidia_smi = subprocess.check_output(
"nvidia-smi --format=csv --query-gpu=memory.free".split(),
universal_newlines=True,
)
return len(list(csv.reader(StringIO(nvidia_smi)))) - 1
def get_random_gpu():
return random.randrange(0, get_n_gpu())
def get_freer_gpu():
nvidia_smi = subprocess.check_output(
"nvidia-smi --format=csv --query-gpu=memory.free".split(),
universal_newlines=True,
)
free_memory = [
float(x[0].split()[0])
for i, x in enumerate(csv.reader(StringIO(nvidia_smi)))
if i > 0
]
return int(np.argmax(free_memory))
def init_(network: nn.Module, non_linearity: nn.Module = nn.ReLU):
if non_linearity is None:
return init(network, init_normc_, lambda x: nn.init.constant_(x, 0))
# return init(network, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0))
return init(
network,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain(non_linearity.__name__.lower()),
)
def broadcast3d(inputs, shape):
return inputs.view(*inputs.shape, 1, 1).expand(*inputs.shape, *shape)
def interp(x1, x2, c):
return c * x2 + (1 - c) * x1
@torch.jit.script
def log_prob(i, probs):
return torch.log(torch.gather(probs, -1, i))
def trace(module_fn, in_size):
return torch.jit.trace(module_fn(in_size), example_inputs=torch.rand(1, in_size))
RESET = "\033[0m"
def k_scalar_pairs(*args, **kwargs):
for k, v in dict(*args, **kwargs).items():
mean = np.mean(v)
if not np.isnan(mean):
yield k, mean
def set_seeds(cuda, cuda_deterministic, seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
cuda &= torch.cuda.is_available()
if cuda and cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads(1)
def hierarchical_parse_args(parser: argparse.ArgumentParser, include_positional=False):
"""
:return:
{
group1: {**kwargs}
group2: {**kwargs}
...
**kwargs
}
"""
args = parser.parse_args()
def key_value_pairs(group):
for action in group._group_actions:
if action.dest != "help":
yield action.dest, getattr(args, action.dest, None)
def get_positionals(groups):
for group in groups:
if group.title == "positional arguments":
for k, v in key_value_pairs(group):
yield v
def get_nonpositionals(groups: List[argparse._ArgumentGroup]):
for group in groups:
if group.title != "positional arguments":
children = key_value_pairs(group)
descendants = get_nonpositionals(group._action_groups)
yield group.title, {**dict(children), **dict(descendants)}
positional = list(get_positionals(parser._action_groups))
nonpositional = dict(get_nonpositionals(parser._action_groups))
optional = nonpositional.pop("optional arguments")
nonpositional = {**nonpositional, **optional}
if include_positional:
return positional, nonpositional
return nonpositional
def get_device(name):
    match = re.search(r"\d+$", name)
if match:
device_num = int(match.group()) % get_n_gpu()
else:
device_num = get_random_gpu()
return torch.device("cuda", device_num)
def astuple(obj):
def gen():
for f in fields(obj):
yield astuple(getattr(obj, f.name))
if is_dataclass(obj):
return tuple(gen())
return obj
def asdict(obj):
def gen():
for f in fields(obj):
yield f.name, asdict(getattr(obj, f.name))
if hasattr(obj, "_asdict"):
# noinspection PyProtectedMember
return obj._asdict()
if is_dataclass(obj):
return dict(gen())
return obj
class Discrete(spaces.Discrete):
def __init__(self, low: int, high: int):
self.low = low
self.high = high
super().__init__(1 + high - low)
def sample(self) -> int:
return self.low + super().sample()
def contains(self, x) -> bool:
return super().contains(x - self.low)
def __repr__(self) -> str:
return f"Discrete({self.low}, {self.high})"
def __eq__(self, other) -> bool:
return (
isinstance(other, Discrete)
and self.low == other.low
and self.high == other.high
)
def get_max_shape(*xs) -> np.ndarray:
def compare_shape(max_so_far: Optional[np.ndarray], opener: np.ndarray):
new = np.array(opener.shape)
return new if max_so_far is None else np.maximum(new, max_so_far)
return reduce(compare_shape, map(np.array, xs), None)
def space_shape(space: gym.Space):
if isinstance(space, gym.spaces.Box):
return space.low.shape
if isinstance(space, gym.spaces.Dict):
return {k: space_shape(v) for k, v in space.spaces.items()}
if isinstance(space, gym.spaces.Tuple):
return tuple(space_shape(s) for s in space.spaces)
if isinstance(space, gym.spaces.MultiDiscrete):
return space.nvec.shape
if isinstance(space, gym.spaces.Discrete):
return (1,)
if isinstance(space, gym.spaces.MultiBinary):
return (space.n,)
raise NotImplementedError
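# Hedged usage sketch (illustrative only): space_shape mirrors the structure of
# composite gym spaces, so a Dict space maps to a dict of per-key shapes.
def _example_space_shape():
    space = gym.spaces.Dict(
        obs=gym.spaces.Box(low=0.0, high=1.0, shape=(3, 4)),
        action=gym.spaces.Discrete(7),
    )
    # Expected result: {"obs": (3, 4), "action": (1,)}
    return space_shape(space)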
| 5,332
| 15
| 817
|
3e3b84b9bfcb2d82c9de1b00bbb595e034cc5661
| 28,845
|
py
|
Python
|
flatsat/comms/user_segment/radio/flowgraphs/unit_test/test_flatsat_rx.py
|
cromulencellc/hackasat-final-2021
|
d01a1b5d7947b3e41ae2da3ec63d5f43278a5eac
|
[
"MIT"
] | 4
|
2022-02-25T05:45:27.000Z
|
2022-03-10T01:05:27.000Z
|
flatsat/comms/user_segment/radio/flowgraphs/unit_test/test_flatsat_rx.py
|
cromulencellc/hackasat-final-2021
|
d01a1b5d7947b3e41ae2da3ec63d5f43278a5eac
|
[
"MIT"
] | null | null | null |
flatsat/comms/user_segment/radio/flowgraphs/unit_test/test_flatsat_rx.py
|
cromulencellc/hackasat-final-2021
|
d01a1b5d7947b3e41ae2da3ec63d5f43278a5eac
|
[
"MIT"
] | 2
|
2022-03-02T02:14:16.000Z
|
2022-03-05T07:36:18.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
# GNU Radio Python Flow Graph
# Title: Flatsat RX Test
# Author: dev
# GNU Radio version: 3.9.3.0
from distutils.version import StrictVersion
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print("Warning: failed to XInitThreads()")
import os
import sys
sys.path.append(os.environ.get('GRC_HIER_PATH', os.path.expanduser('~/.grc_gnuradio')))
from PyQt5 import Qt
from gnuradio import eng_notation
from gnuradio import qtgui
from gnuradio.filter import firdes
import sip
from flatsat_rx import flatsat_rx # grc-generated hier_block
from gnuradio import analog
from gnuradio import blocks
import pmt
from gnuradio import digital
from gnuradio import filter
from gnuradio import gr
from gnuradio.fft import window
import signal
from argparse import ArgumentParser
from gnuradio.eng_arg import eng_float, intx
from gnuradio import uhd
import time
from gnuradio.filter import pfb
from gnuradio.qtgui import Range, RangeWidget
from PyQt5 import QtCore
from usersegment_tx import usersegment_tx # grc-generated hier_block
import satellites.hier
import test_flatsat_rx_epy_block_0_0_0_0_0_0 as epy_block_0_0_0_0_0_0 # embedded python block
from gnuradio import qtgui
if __name__ == '__main__':
main()
| 42.988077
| 154
| 0.668123
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
# GNU Radio Python Flow Graph
# Title: Flatsat RX Test
# Author: dev
# GNU Radio version: 3.9.3.0
from distutils.version import StrictVersion
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print("Warning: failed to XInitThreads()")
import os
import sys
sys.path.append(os.environ.get('GRC_HIER_PATH', os.path.expanduser('~/.grc_gnuradio')))
from PyQt5 import Qt
from gnuradio import eng_notation
from gnuradio import qtgui
from gnuradio.filter import firdes
import sip
from flatsat_rx import flatsat_rx # grc-generated hier_block
from gnuradio import analog
from gnuradio import blocks
import pmt
from gnuradio import digital
from gnuradio import filter
from gnuradio import gr
from gnuradio.fft import window
import signal
from argparse import ArgumentParser
from gnuradio.eng_arg import eng_float, intx
from gnuradio import uhd
import time
from gnuradio.filter import pfb
from gnuradio.qtgui import Range, RangeWidget
from PyQt5 import QtCore
from usersegment_tx import usersegment_tx # grc-generated hier_block
import satellites.hier
import test_flatsat_rx_epy_block_0_0_0_0_0_0 as epy_block_0_0_0_0_0_0 # embedded python block
from gnuradio import qtgui
class test_flatsat_rx(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Flatsat RX Test", catch_exceptions=True)
Qt.QWidget.__init__(self)
self.setWindowTitle("Flatsat RX Test")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "test_flatsat_rx")
try:
if StrictVersion(Qt.qVersion()) < StrictVersion("5.0.0"):
self.restoreGeometry(self.settings.value("geometry").toByteArray())
else:
self.restoreGeometry(self.settings.value("geometry"))
except:
pass
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 1e6
self.tx_samp_rate = tx_samp_rate = samp_rate
self.num_chan = num_chan = 18
self.freq = freq = 901e6
self.tx_samp_rate_0 = tx_samp_rate_0 = samp_rate
self.tx_gain = tx_gain = 70
self.tx_freq = tx_freq = freq
self.timing_bw = timing_bw = .045
self.strobe_delay = strobe_delay = 1000
self.squelch = squelch = -70
self.sps = sps = 4
self.rx_switch = rx_switch = 0
self.rx_samp_rate_0 = rx_samp_rate_0 = samp_rate
self.rx_samp_rate = rx_samp_rate = samp_rate
self.rx_gain = rx_gain = 0
self.rx_freq = rx_freq = freq
self.lpf_taps = lpf_taps = firdes.low_pass(1.0, samp_rate, samp_rate/num_chan*0.8,5e3, window.WIN_HAMMING, 6.76)
self.eb = eb = .22
self.deviation = deviation = 1.5
self.damping = damping = 1.0
self.constel = constel = digital.constellation_bpsk().base()
self.constel.gen_soft_dec_lut(8)
self.chan_samp_rate = chan_samp_rate = tx_samp_rate/(num_chan)
self.chan_cutoff = chan_cutoff = samp_rate/num_chan*0.8
##################################################
# Blocks
##################################################
self._tx_gain_tool_bar = Qt.QToolBar(self)
self._tx_gain_tool_bar.addWidget(Qt.QLabel('TX Gain' + ": "))
self._tx_gain_line_edit = Qt.QLineEdit(str(self.tx_gain))
self._tx_gain_tool_bar.addWidget(self._tx_gain_line_edit)
self._tx_gain_line_edit.returnPressed.connect(
lambda: self.set_tx_gain(int(str(self._tx_gain_line_edit.text()))))
self.top_grid_layout.addWidget(self._tx_gain_tool_bar, 10, 0, 1, 1)
for r in range(10, 11):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 1):
self.top_grid_layout.setColumnStretch(c, 1)
self._timing_bw_range = Range(0.001, 0.1, 0.001, .045, 200)
self._timing_bw_win = RangeWidget(self._timing_bw_range, self.set_timing_bw, 'Timing BW', "counter_slider", float, QtCore.Qt.Horizontal)
self.top_layout.addWidget(self._timing_bw_win)
self.tab1 = Qt.QTabWidget()
self.tab1_widget_0 = Qt.QWidget()
self.tab1_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tab1_widget_0)
self.tab1_grid_layout_0 = Qt.QGridLayout()
self.tab1_layout_0.addLayout(self.tab1_grid_layout_0)
self.tab1.addTab(self.tab1_widget_0, 'TX')
self.tab1_widget_1 = Qt.QWidget()
self.tab1_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tab1_widget_1)
self.tab1_grid_layout_1 = Qt.QGridLayout()
self.tab1_layout_1.addLayout(self.tab1_grid_layout_1)
self.tab1.addTab(self.tab1_widget_1, 'RX')
self.top_grid_layout.addWidget(self.tab1, 0, 0, 6, 6)
for r in range(0, 6):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 6):
self.top_grid_layout.setColumnStretch(c, 1)
self.tab0 = Qt.QTabWidget()
self.tab0_widget_0 = Qt.QWidget()
self.tab0_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tab0_widget_0)
self.tab0_grid_layout_0 = Qt.QGridLayout()
self.tab0_layout_0.addLayout(self.tab0_grid_layout_0)
self.tab0.addTab(self.tab0_widget_0, 'AGC')
self.tab0_widget_1 = Qt.QWidget()
self.tab0_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tab0_widget_1)
self.tab0_grid_layout_1 = Qt.QGridLayout()
self.tab0_layout_1.addLayout(self.tab0_grid_layout_1)
self.tab0.addTab(self.tab0_widget_1, 'Correlation')
self.tab0_widget_2 = Qt.QWidget()
self.tab0_layout_2 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tab0_widget_2)
self.tab0_grid_layout_2 = Qt.QGridLayout()
self.tab0_layout_2.addLayout(self.tab0_grid_layout_2)
self.tab0.addTab(self.tab0_widget_2, 'Timing')
self.tab0_widget_3 = Qt.QWidget()
self.tab0_layout_3 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tab0_widget_3)
self.tab0_grid_layout_3 = Qt.QGridLayout()
self.tab0_layout_3.addLayout(self.tab0_grid_layout_3)
self.tab0.addTab(self.tab0_widget_3, 'Equalizer')
self.tab0_widget_4 = Qt.QWidget()
self.tab0_layout_4 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tab0_widget_4)
self.tab0_grid_layout_4 = Qt.QGridLayout()
self.tab0_layout_4.addLayout(self.tab0_grid_layout_4)
self.tab0.addTab(self.tab0_widget_4, 'Costas')
self.tab0_widget_5 = Qt.QWidget()
self.tab0_layout_5 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tab0_widget_5)
self.tab0_grid_layout_5 = Qt.QGridLayout()
self.tab0_layout_5.addLayout(self.tab0_grid_layout_5)
self.tab0.addTab(self.tab0_widget_5, 'Tab 5')
self.top_grid_layout.addWidget(self.tab0, 0, 6, 6, 6)
for r in range(0, 6):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(6, 12):
self.top_grid_layout.setColumnStretch(c, 1)
self._strobe_delay_range = Range(1, 1000, 1, 1000, 200)
self._strobe_delay_win = RangeWidget(self._strobe_delay_range, self.set_strobe_delay, 'Strobe Delay', "counter_slider", int, QtCore.Qt.Horizontal)
self.top_layout.addWidget(self._strobe_delay_win)
self._squelch_range = Range(-100, 0, 1, -70, 200)
self._squelch_win = RangeWidget(self._squelch_range, self.set_squelch, 'Power Squelch', "counter_slider", float, QtCore.Qt.Horizontal)
self.top_layout.addWidget(self._squelch_win)
self._rx_switch_range = Range(0, 1, 1, 0, 200)
self._rx_switch_win = RangeWidget(self._rx_switch_range, self.set_rx_switch, 'RX On/Off', "dial", float, QtCore.Qt.Horizontal)
self.top_grid_layout.addWidget(self._rx_switch_win, 14, 0, 1, 1)
for r in range(14, 15):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 1):
self.top_grid_layout.setColumnStretch(c, 1)
self._rx_gain_tool_bar = Qt.QToolBar(self)
self._rx_gain_tool_bar.addWidget(Qt.QLabel('RX Gain' + ": "))
self._rx_gain_line_edit = Qt.QLineEdit(str(self.rx_gain))
self._rx_gain_tool_bar.addWidget(self._rx_gain_line_edit)
self._rx_gain_line_edit.returnPressed.connect(
lambda: self.set_rx_gain(int(str(self._rx_gain_line_edit.text()))))
self.top_grid_layout.addWidget(self._rx_gain_tool_bar, 11, 0, 1, 1)
for r in range(11, 12):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 1):
self.top_grid_layout.setColumnStretch(c, 1)
self._deviation_range = Range(0.1, 2, 0.1, 1.5, 200)
self._deviation_win = RangeWidget(self._deviation_range, self.set_deviation, 'Deviation', "counter_slider", float, QtCore.Qt.Horizontal)
self.top_layout.addWidget(self._deviation_win)
self._damping_range = Range(0.1, 2, 0.1, 1.0, 200)
self._damping_win = RangeWidget(self._damping_range, self.set_damping, 'Damping', "counter_slider", float, QtCore.Qt.Horizontal)
self.top_layout.addWidget(self._damping_win)
self.usersegment_tx_0 = usersegment_tx()
self.usersegment_tx_0.set_max_output_buffer(4096)
self.uhd_usrp_source_0 = uhd.usrp_source(
",".join(("", "")),
uhd.stream_args(
cpu_format="fc32",
args='',
channels=list(range(0,1)),
),
)
self.uhd_usrp_source_0.set_subdev_spec('A:B', 0)
self.uhd_usrp_source_0.set_samp_rate(rx_samp_rate)
self.uhd_usrp_source_0.set_time_now(uhd.time_spec(time.time()), uhd.ALL_MBOARDS)
self.uhd_usrp_source_0.set_center_freq(rx_freq, 0)
self.uhd_usrp_source_0.set_antenna('RX2', 0)
self.uhd_usrp_source_0.set_bandwidth(rx_samp_rate, 0)
self.uhd_usrp_source_0.set_gain(rx_gain, 0)
self.uhd_usrp_sink_0 = uhd.usrp_sink(
",".join(("", "")),
uhd.stream_args(
cpu_format="fc32",
args='',
channels=list(range(0,1)),
),
'',
)
self.uhd_usrp_sink_0.set_samp_rate(tx_samp_rate)
self.uhd_usrp_sink_0.set_time_now(uhd.time_spec(time.time()), uhd.ALL_MBOARDS)
self.uhd_usrp_sink_0.set_center_freq(tx_freq, 0)
self.uhd_usrp_sink_0.set_antenna('TX/RX', 0)
self.uhd_usrp_sink_0.set_gain(tx_gain, 0)
self.satellites_rms_agc_0 = satellites.hier.rms_agc(alpha=1e-2, reference=1.0)
self.qtgui_sink_x_1 = qtgui.sink_c(
1024, #fftsize
window.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
'TX', #name
True, #plotfreq
True, #plotwaterfall
True, #plottime
True, #plotconst
None # parent
)
self.qtgui_sink_x_1.set_update_time(1.0/10)
self._qtgui_sink_x_1_win = sip.wrapinstance(self.qtgui_sink_x_1.qwidget(), Qt.QWidget)
self.qtgui_sink_x_1.enable_rf_freq(False)
self.tab1_layout_0.addWidget(self._qtgui_sink_x_1_win)
self.qtgui_sink_x_0_0_0_0_1 = qtgui.sink_c(
1024*4, #fftsize
window.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
chan_samp_rate, #bw
'Costas', #name
True, #plotfreq
False, #plotwaterfall
True, #plottime
True, #plotconst
None # parent
)
self.qtgui_sink_x_0_0_0_0_1.set_update_time(1.0/10)
self._qtgui_sink_x_0_0_0_0_1_win = sip.wrapinstance(self.qtgui_sink_x_0_0_0_0_1.qwidget(), Qt.QWidget)
self.qtgui_sink_x_0_0_0_0_1.enable_rf_freq(False)
self.tab0_layout_4.addWidget(self._qtgui_sink_x_0_0_0_0_1_win)
self.qtgui_sink_x_0_0_0_0 = qtgui.sink_c(
1024*4, #fftsize
window.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
chan_samp_rate, #bw
'Equalizer', #name
True, #plotfreq
False, #plotwaterfall
True, #plottime
True, #plotconst
None # parent
)
self.qtgui_sink_x_0_0_0_0.set_update_time(1.0/10)
self._qtgui_sink_x_0_0_0_0_win = sip.wrapinstance(self.qtgui_sink_x_0_0_0_0.qwidget(), Qt.QWidget)
self.qtgui_sink_x_0_0_0_0.enable_rf_freq(False)
self.tab0_layout_3.addWidget(self._qtgui_sink_x_0_0_0_0_win)
self.qtgui_sink_x_0_0_0 = qtgui.sink_c(
1024*4, #fftsize
window.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
chan_samp_rate, #bw
'Timing', #name
True, #plotfreq
False, #plotwaterfall
True, #plottime
True, #plotconst
None # parent
)
self.qtgui_sink_x_0_0_0.set_update_time(1.0/10)
self._qtgui_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_sink_x_0_0_0.qwidget(), Qt.QWidget)
self.qtgui_sink_x_0_0_0.enable_rf_freq(False)
self.tab0_layout_2.addWidget(self._qtgui_sink_x_0_0_0_win)
self.qtgui_sink_x_0_0 = qtgui.sink_c(
1024*4, #fftsize
window.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
chan_samp_rate, #bw
'AGC', #name
True, #plotfreq
False, #plotwaterfall
True, #plottime
True, #plotconst
None # parent
)
self.qtgui_sink_x_0_0.set_update_time(1.0/10)
self._qtgui_sink_x_0_0_win = sip.wrapinstance(self.qtgui_sink_x_0_0.qwidget(), Qt.QWidget)
self.qtgui_sink_x_0_0.enable_rf_freq(False)
self.tab0_layout_0.addWidget(self._qtgui_sink_x_0_0_win)
self.qtgui_sink_x_0 = qtgui.sink_c(
1024*32, #fftsize
window.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
'Wideband RX', #name
True, #plotfreq
True, #plotwaterfall
True, #plottime
True, #plotconst
None # parent
)
self.qtgui_sink_x_0.set_update_time(1.0/10)
self._qtgui_sink_x_0_win = sip.wrapinstance(self.qtgui_sink_x_0.qwidget(), Qt.QWidget)
self.qtgui_sink_x_0.enable_rf_freq(False)
self.tab1_layout_1.addWidget(self._qtgui_sink_x_0_win)
self.pfb_synthesizer_ccf_0 = filter.pfb_synthesizer_ccf(
num_chan,
lpf_taps,
False)
self.pfb_synthesizer_ccf_0.set_channel_map([0,10,12,14,16,1,3,5,7,11,13,15,17,2,4,6,8,9])
self.pfb_synthesizer_ccf_0.declare_sample_delay(0)
self.pfb_channelizer_ccf_0 = pfb.channelizer_ccf(
num_chan,
lpf_taps,
1,
100)
self.pfb_channelizer_ccf_0.set_channel_map([0,10,12,14,16,1,3,5,7,11,13,15,17,2,4,6,8,9])
self.pfb_channelizer_ccf_0.declare_sample_delay(0)
self.flatsat_rx_0 = flatsat_rx(
damping=damping,
deviation=deviation,
timing_bw=timing_bw,
)
self.epy_block_0_0_0_0_0_0 = epy_block_0_0_0_0_0_0.blk(enabled=rx_switch)
self.blocks_socket_pdu_0_0_2_2 = blocks.socket_pdu('UDP_SERVER', '', '3008', 10000, False)
self.blocks_socket_pdu_0_0_2_1 = blocks.socket_pdu('UDP_SERVER', '', '3007', 10000, False)
self.blocks_socket_pdu_0_0_2_0 = blocks.socket_pdu('UDP_SERVER', '', '3006', 10000, False)
self.blocks_socket_pdu_0_0_2 = blocks.socket_pdu('UDP_SERVER', '', '3005', 10000, False)
self.blocks_socket_pdu_0_0_1 = blocks.socket_pdu('UDP_SERVER', '', '3004', 10000, False)
self.blocks_socket_pdu_0_0_0 = blocks.socket_pdu('UDP_SERVER', '', '3003', 10000, False)
self.blocks_socket_pdu_0_0 = blocks.socket_pdu('UDP_SERVER', '', '3002', 10000, False)
self.blocks_socket_pdu_0 = blocks.socket_pdu('UDP_SERVER', '', '3001', 10000, False)
self.blocks_random_pdu_0 = blocks.random_pdu(100, 100, 0xFF, 2)
self.blocks_null_source_0_0_0 = blocks.null_source(gr.sizeof_gr_complex*1)
self.blocks_null_source_0_0 = blocks.null_source(gr.sizeof_gr_complex*1)
self.blocks_null_source_0 = blocks.null_source(gr.sizeof_gr_complex*1)
self.blocks_null_sink_0_0 = blocks.null_sink(gr.sizeof_gr_complex*1)
self.blocks_null_sink_0 = blocks.null_sink(gr.sizeof_gr_complex*1)
self.blocks_multiply_const_vxx_0 = blocks.multiply_const_cc(0.5)
self.blocks_message_strobe_0_0 = blocks.message_strobe(pmt.intern("TEST"), strobe_delay)
self.blocks_message_debug_0_0 = blocks.message_debug(True)
self.analog_pwr_squelch_xx_0 = analog.pwr_squelch_cc(squelch, 1e-4, 0, True)
##################################################
# Connections
##################################################
self.msg_connect((self.blocks_message_strobe_0_0, 'strobe'), (self.blocks_random_pdu_0, 'generate'))
self.msg_connect((self.blocks_random_pdu_0, 'pdus'), (self.epy_block_0_0_0_0_0_0, 'tx_in'))
self.msg_connect((self.blocks_random_pdu_0, 'pdus'), (self.usersegment_tx_0, 'pdu'))
self.msg_connect((self.blocks_socket_pdu_0, 'pdus'), (self.usersegment_tx_0, 'pdu'))
self.msg_connect((self.flatsat_rx_0, 'pdu'), (self.blocks_message_debug_0_0, 'print_pdu'))
self.msg_connect((self.flatsat_rx_0, 'pdu'), (self.epy_block_0_0_0_0_0_0, 'rx_in'))
self.connect((self.analog_pwr_squelch_xx_0, 0), (self.satellites_rms_agc_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.uhd_usrp_sink_0, 0))
self.connect((self.blocks_null_source_0, 0), (self.pfb_synthesizer_ccf_0, 0))
self.connect((self.blocks_null_source_0_0, 11), (self.pfb_synthesizer_ccf_0, 16))
self.connect((self.blocks_null_source_0_0, 3), (self.pfb_synthesizer_ccf_0, 8))
self.connect((self.blocks_null_source_0_0, 2), (self.pfb_synthesizer_ccf_0, 7))
self.connect((self.blocks_null_source_0_0, 0), (self.pfb_synthesizer_ccf_0, 5))
self.connect((self.blocks_null_source_0_0, 12), (self.pfb_synthesizer_ccf_0, 17))
self.connect((self.blocks_null_source_0_0, 8), (self.pfb_synthesizer_ccf_0, 13))
self.connect((self.blocks_null_source_0_0, 10), (self.pfb_synthesizer_ccf_0, 15))
self.connect((self.blocks_null_source_0_0, 4), (self.pfb_synthesizer_ccf_0, 9))
self.connect((self.blocks_null_source_0_0, 7), (self.pfb_synthesizer_ccf_0, 12))
self.connect((self.blocks_null_source_0_0, 9), (self.pfb_synthesizer_ccf_0, 14))
self.connect((self.blocks_null_source_0_0, 5), (self.pfb_synthesizer_ccf_0, 10))
self.connect((self.blocks_null_source_0_0, 6), (self.pfb_synthesizer_ccf_0, 11))
self.connect((self.blocks_null_source_0_0, 1), (self.pfb_synthesizer_ccf_0, 6))
self.connect((self.blocks_null_source_0_0_0, 1), (self.pfb_synthesizer_ccf_0, 3))
self.connect((self.blocks_null_source_0_0_0, 2), (self.pfb_synthesizer_ccf_0, 4))
self.connect((self.blocks_null_source_0_0_0, 0), (self.pfb_synthesizer_ccf_0, 2))
self.connect((self.flatsat_rx_0, 0), (self.qtgui_sink_x_0_0_0, 0))
self.connect((self.flatsat_rx_0, 1), (self.qtgui_sink_x_0_0_0_0, 0))
self.connect((self.flatsat_rx_0, 2), (self.qtgui_sink_x_0_0_0_0_1, 0))
self.connect((self.pfb_channelizer_ccf_0, 1), (self.analog_pwr_squelch_xx_0, 0))
self.connect((self.pfb_channelizer_ccf_0, 0), (self.blocks_null_sink_0, 0))
self.connect((self.pfb_channelizer_ccf_0, 9), (self.blocks_null_sink_0_0, 7))
self.connect((self.pfb_channelizer_ccf_0, 12), (self.blocks_null_sink_0_0, 10))
self.connect((self.pfb_channelizer_ccf_0, 3), (self.blocks_null_sink_0_0, 1))
self.connect((self.pfb_channelizer_ccf_0, 4), (self.blocks_null_sink_0_0, 2))
self.connect((self.pfb_channelizer_ccf_0, 16), (self.blocks_null_sink_0_0, 14))
self.connect((self.pfb_channelizer_ccf_0, 14), (self.blocks_null_sink_0_0, 12))
self.connect((self.pfb_channelizer_ccf_0, 7), (self.blocks_null_sink_0_0, 5))
self.connect((self.pfb_channelizer_ccf_0, 5), (self.blocks_null_sink_0_0, 3))
self.connect((self.pfb_channelizer_ccf_0, 2), (self.blocks_null_sink_0_0, 0))
self.connect((self.pfb_channelizer_ccf_0, 11), (self.blocks_null_sink_0_0, 9))
self.connect((self.pfb_channelizer_ccf_0, 17), (self.blocks_null_sink_0_0, 15))
self.connect((self.pfb_channelizer_ccf_0, 15), (self.blocks_null_sink_0_0, 13))
self.connect((self.pfb_channelizer_ccf_0, 8), (self.blocks_null_sink_0_0, 6))
self.connect((self.pfb_channelizer_ccf_0, 10), (self.blocks_null_sink_0_0, 8))
self.connect((self.pfb_channelizer_ccf_0, 6), (self.blocks_null_sink_0_0, 4))
self.connect((self.pfb_channelizer_ccf_0, 13), (self.blocks_null_sink_0_0, 11))
self.connect((self.pfb_synthesizer_ccf_0, 0), (self.blocks_multiply_const_vxx_0, 0))
self.connect((self.satellites_rms_agc_0, 0), (self.flatsat_rx_0, 0))
self.connect((self.satellites_rms_agc_0, 0), (self.qtgui_sink_x_0_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.pfb_channelizer_ccf_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.qtgui_sink_x_0, 0))
self.connect((self.usersegment_tx_0, 0), (self.pfb_synthesizer_ccf_0, 1))
self.connect((self.usersegment_tx_0, 0), (self.qtgui_sink_x_1, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "test_flatsat_rx")
self.settings.setValue("geometry", self.saveGeometry())
self.stop()
self.wait()
event.accept()
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.set_chan_cutoff(self.samp_rate/self.num_chan*0.8)
self.set_lpf_taps(firdes.low_pass(1.0, self.samp_rate, self.samp_rate/self.num_chan*0.8, 5e3, window.WIN_HAMMING, 6.76))
self.set_rx_samp_rate(self.samp_rate)
self.set_rx_samp_rate_0(self.samp_rate)
self.set_tx_samp_rate(self.samp_rate)
self.set_tx_samp_rate_0(self.samp_rate)
self.qtgui_sink_x_0.set_frequency_range(0, self.samp_rate)
self.qtgui_sink_x_1.set_frequency_range(0, self.samp_rate)
def get_tx_samp_rate(self):
return self.tx_samp_rate
def set_tx_samp_rate(self, tx_samp_rate):
self.tx_samp_rate = tx_samp_rate
self.set_chan_samp_rate(self.tx_samp_rate/(self.num_chan))
self.uhd_usrp_sink_0.set_samp_rate(self.tx_samp_rate)
def get_num_chan(self):
return self.num_chan
def set_num_chan(self, num_chan):
self.num_chan = num_chan
self.set_chan_cutoff(self.samp_rate/self.num_chan*0.8)
self.set_chan_samp_rate(self.tx_samp_rate/(self.num_chan))
self.set_lpf_taps(firdes.low_pass(1.0, self.samp_rate, self.samp_rate/self.num_chan*0.8, 5e3, window.WIN_HAMMING, 6.76))
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
self.set_rx_freq(self.freq)
self.set_tx_freq(self.freq)
def get_tx_samp_rate_0(self):
return self.tx_samp_rate_0
def set_tx_samp_rate_0(self, tx_samp_rate_0):
self.tx_samp_rate_0 = tx_samp_rate_0
def get_tx_gain(self):
return self.tx_gain
def set_tx_gain(self, tx_gain):
self.tx_gain = tx_gain
Qt.QMetaObject.invokeMethod(self._tx_gain_line_edit, "setText", Qt.Q_ARG("QString", str(self.tx_gain)))
self.uhd_usrp_sink_0.set_gain(self.tx_gain, 0)
def get_tx_freq(self):
return self.tx_freq
def set_tx_freq(self, tx_freq):
self.tx_freq = tx_freq
self.uhd_usrp_sink_0.set_center_freq(self.tx_freq, 0)
def get_timing_bw(self):
return self.timing_bw
def set_timing_bw(self, timing_bw):
self.timing_bw = timing_bw
self.flatsat_rx_0.set_timing_bw(self.timing_bw)
def get_strobe_delay(self):
return self.strobe_delay
def set_strobe_delay(self, strobe_delay):
self.strobe_delay = strobe_delay
self.blocks_message_strobe_0_0.set_period(self.strobe_delay)
def get_squelch(self):
return self.squelch
def set_squelch(self, squelch):
self.squelch = squelch
self.analog_pwr_squelch_xx_0.set_threshold(self.squelch)
def get_sps(self):
return self.sps
def set_sps(self, sps):
self.sps = sps
def get_rx_switch(self):
return self.rx_switch
def set_rx_switch(self, rx_switch):
self.rx_switch = rx_switch
self.epy_block_0_0_0_0_0_0.enabled = self.rx_switch
def get_rx_samp_rate_0(self):
return self.rx_samp_rate_0
def set_rx_samp_rate_0(self, rx_samp_rate_0):
self.rx_samp_rate_0 = rx_samp_rate_0
def get_rx_samp_rate(self):
return self.rx_samp_rate
def set_rx_samp_rate(self, rx_samp_rate):
self.rx_samp_rate = rx_samp_rate
self.uhd_usrp_source_0.set_samp_rate(self.rx_samp_rate)
self.uhd_usrp_source_0.set_bandwidth(self.rx_samp_rate, 0)
def get_rx_gain(self):
return self.rx_gain
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
Qt.QMetaObject.invokeMethod(self._rx_gain_line_edit, "setText", Qt.Q_ARG("QString", str(self.rx_gain)))
self.uhd_usrp_source_0.set_gain(self.rx_gain, 0)
def get_rx_freq(self):
return self.rx_freq
def set_rx_freq(self, rx_freq):
self.rx_freq = rx_freq
self.uhd_usrp_source_0.set_center_freq(self.rx_freq, 0)
def get_lpf_taps(self):
return self.lpf_taps
def set_lpf_taps(self, lpf_taps):
self.lpf_taps = lpf_taps
self.pfb_channelizer_ccf_0.set_taps(self.lpf_taps)
self.pfb_synthesizer_ccf_0.set_taps(self.lpf_taps)
def get_eb(self):
return self.eb
def set_eb(self, eb):
self.eb = eb
def get_deviation(self):
return self.deviation
def set_deviation(self, deviation):
self.deviation = deviation
self.flatsat_rx_0.set_deviation(self.deviation)
def get_damping(self):
return self.damping
def set_damping(self, damping):
self.damping = damping
self.flatsat_rx_0.set_damping(self.damping)
def get_constel(self):
return self.constel
def set_constel(self, constel):
self.constel = constel
def get_chan_samp_rate(self):
return self.chan_samp_rate
def set_chan_samp_rate(self, chan_samp_rate):
self.chan_samp_rate = chan_samp_rate
self.qtgui_sink_x_0_0.set_frequency_range(0, self.chan_samp_rate)
self.qtgui_sink_x_0_0_0.set_frequency_range(0, self.chan_samp_rate)
self.qtgui_sink_x_0_0_0_0.set_frequency_range(0, self.chan_samp_rate)
self.qtgui_sink_x_0_0_0_0_1.set_frequency_range(0, self.chan_samp_rate)
def get_chan_cutoff(self):
return self.chan_cutoff
def set_chan_cutoff(self, chan_cutoff):
self.chan_cutoff = chan_cutoff
def main(top_block_cls=test_flatsat_rx, options=None):
if StrictVersion("4.5.0") <= StrictVersion(Qt.qVersion()) < StrictVersion("5.0.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def sig_handler(sig=None, frame=None):
tb.stop()
tb.wait()
Qt.QApplication.quit()
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
timer = Qt.QTimer()
timer.start(500)
timer.timeout.connect(lambda: None)
qapp.exec_()
if __name__ == '__main__':
main()
| 25,997
| 27
| 1,342
|
650df8b817d43a116b8c59edc813e4e70e09997b
| 715
|
py
|
Python
|
py/sc.py
|
Utsho/Car-Number-Plate-Reading
|
5c8c92bb320d9b19a0d54015d80a75dfc4cbf60a
|
[
"CNRI-Python"
] | 1
|
2019-08-24T04:06:35.000Z
|
2019-08-24T04:06:35.000Z
|
py/sc.py
|
Utsho/Car-Number-Plate-Reading
|
5c8c92bb320d9b19a0d54015d80a75dfc4cbf60a
|
[
"CNRI-Python"
] | null | null | null |
py/sc.py
|
Utsho/Car-Number-Plate-Reading
|
5c8c92bb320d9b19a0d54015d80a75dfc4cbf60a
|
[
"CNRI-Python"
] | null | null | null |
from random import randint
import os
from PIL import Image
path='final'
classnames=os.listdir(path)
images=[]
f1=open('input_index.txt','w')
for temp in classnames:
images.append(Image.open('final/'+temp))
f1.write(temp+"\n")
f1.close()
k=len(images)
im2=[]
for i in range(k):
img=images[i]
im2.append(img)
h=0
for i in range(k):
h=h+40*im2[i].size[1]/im2[i].size[0]+5
result = Image.new('L', (40,h),'white')
lp=0
for i in range(k):
img=im2[i]
img=img.convert('L')
print 'before ',
print img.size
h=40*img.size[1]/img.size[0]
if h<1:
h=1
img=img.resize((40,h),Image.ANTIALIAS)
print 'after ',
print img.size
result.paste(img, box=(0, lp))
lp=lp+5+img.size[1]
result.save('res.png')
| 16.627907
| 41
| 0.653147
|
from random import randint
import os
from PIL import Image
path='final'
classnames=os.listdir(path)
images=[]
f1=open('input_index.txt','w')
for temp in classnames:
images.append(Image.open('final/'+temp))
f1.write(temp+"\n")
f1.close()
k=len(images)
im2=[]
for i in range(k):
img=images[i]
im2.append(img)
h=0
for i in range(k):
h=h+40*im2[i].size[1]/im2[i].size[0]+5
result = Image.new('L', (40,h),'white')
lp=0
for i in range(k):
img=im2[i]
img=img.convert('L')
print 'before ',
print img.size
h=40*img.size[1]/img.size[0]
if h<1:
h=1
img=img.resize((40,h),Image.ANTIALIAS)
print 'after ',
print img.size
result.paste(img, box=(0, lp))
lp=lp+5+img.size[1]
result.save('res.png')
| 0
| 0
| 0
|
e855e1e160879d94bc5165d60b4668e5bdedecc3
| 10,836
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_elementsw_network_interfaces.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/ansible/modules/storage/netapp/na_elementsw_network_interfaces.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 9
|
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
ansible/ansible/modules/storage/netapp/na_elementsw_network_interfaces.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 3
|
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
'''
Element Software Node Network Interfaces - Bond 1G and 10G configuration
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_network_interfaces
short_description: NetApp Element Software Configure Node Network Interfaces
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Configure Element SW Node Network Interfaces for Bond 1G and 10G IP address.
options:
method:
description:
- Type of Method used to configure the interface.
- method depends on other settings such as the use of a static IP address, which will change the method to static.
- loopback - Used to define the IPv4 loopback interface.
- manual - Used to define interfaces for which no configuration is done by default.
- dhcp - May be used to obtain an IP address via DHCP.
- static - Used to define Ethernet interfaces with statically allocated IPv4 addresses.
choices: ['loopback', 'manual', 'dhcp', 'static']
required: true
ip_address_1g:
description:
- IP address for the 1G network.
required: true
ip_address_10g:
description:
- IP address for the 10G network.
required: true
subnet_1g:
description:
- 1GbE Subnet Mask.
required: true
subnet_10g:
description:
- 10GbE Subnet Mask.
required: true
gateway_address_1g:
description:
- Router network address to send packets out of the local network.
required: true
gateway_address_10g:
description:
- Router network address to send packets out of the local network.
required: true
mtu_1g:
description:
- Maximum Transmission Unit for 1GbE, Largest packet size that a network protocol can transmit.
- Must be greater than or equal to 1500 bytes.
default: '1500'
mtu_10g:
description:
- Maximum Transmission Unit for 10GbE, Largest packet size that a network protocol can transmit.
- Must be greater than or equal to 1500 bytes.
default: '1500'
dns_nameservers:
description:
- List of addresses for domain name servers.
dns_search_domains:
description:
- List of DNS search domains.
bond_mode_1g:
description:
- Bond mode for 1GbE configuration.
choices: ['ActivePassive', 'ALB', 'LACP']
default: 'ActivePassive'
bond_mode_10g:
description:
- Bond mode for 10GbE configuration.
choices: ['ActivePassive', 'ALB', 'LACP']
default: 'ActivePassive'
lacp_1g:
description:
- Link Aggregation Control Protocol useful only if LACP is selected as the Bond Mode.
- Slow - Packets are transmitted at 30 second intervals.
- Fast - Packets are transmitted in 1 second intervals.
choices: ['Fast', 'Slow']
default: 'Slow'
lacp_10g:
description:
- Link Aggregation Control Protocol useful only if LACP is selected as the Bond Mode.
- Slow - Packets are transmitted at 30 second intervals.
- Fast - Packets are transmitted in 1 second intervals.
choices: ['Fast', 'Slow']
default: 'Slow'
virtual_network_tag:
description:
- This is the primary network tag. All nodes in a cluster have the same VLAN tag.
'''
EXAMPLES = """
- name: Set Node network interfaces configuration for Bond 1G and 10G properties
tags:
- elementsw_network_interfaces
na_elementsw_network_interfaces:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
method: static
ip_address_1g: 10.226.109.68
ip_address_10g: 10.226.201.72
subnet_1g: 255.255.255.0
subnet_10g: 255.255.255.0
gateway_address_1g: 10.193.139.1
gateway_address_10g: 10.193.140.1
mtu_1g: 1500
mtu_10g: 9000
bond_mode_1g: ActivePassive
bond_mode_10g: LACP
lacp_10g: Fast
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
try:
from solidfire.models import Network, NetworkConfig
HAS_SF_SDK = True
except Exception:
HAS_SF_SDK = False
class ElementSWNetworkInterfaces(object):
"""
Element Software Network Interfaces - Bond 1G and 10G Network configuration
"""
def set_network_config(self):
"""
set network configuration
"""
try:
self.sfe.set_network_config(network=self.network_object)
except Exception as exception_object:
self.module.fail_json(msg='Error network setting for node %s' % (to_native(exception_object)),
exception=traceback.format_exc())
def get_network_params_object(self):
"""
Get Element SW Network object
:description: get Network object
:return: NetworkConfig object
:rtype: object(NetworkConfig object)
"""
try:
bond_1g_network = NetworkConfig(method=self.method,
address=self.ip_address_1g,
netmask=self.subnet_1g,
gateway=self.gateway_address_1g,
mtu=self.mtu_1g,
dns_nameservers=self.dns_nameservers,
dns_search=self.dns_search_domains,
bond_mode=self.bond_mode_1g,
bond_lacp_rate=self.lacp_1g,
virtual_network_tag=self.virtual_network_tag)
bond_10g_network = NetworkConfig(method=self.method,
address=self.ip_address_10g,
netmask=self.subnet_10g,
gateway=self.gateway_address_10g,
mtu=self.mtu_10g,
dns_nameservers=self.dns_nameservers,
dns_search=self.dns_search_domains,
bond_mode=self.bond_mode_10g,
bond_lacp_rate=self.lacp_10g,
virtual_network_tag=self.virtual_network_tag)
network_object = Network(bond1_g=bond_1g_network,
bond10_g=bond_10g_network)
return network_object
except Exception as e:
self.module.fail_json(msg='Error with setting up network object for node 1G and 10G configuration : %s' % to_native(e),
exception=to_native(e))
def apply(self):
"""
Check connection and initialize node with cluster ownership
"""
changed = False
result_message = None
self.network_object = self.get_network_params_object()
if self.network_object is not None:
self.set_network_config()
changed = True
else:
result_message = "Skipping changes, No change requested"
self.module.exit_json(changed=changed, msg=result_message)
def main():
"""
Main function
"""
elementsw_network_interfaces = ElementSWNetworkInterfaces()
elementsw_network_interfaces.apply()
if __name__ == '__main__':
main()
| 36.362416
| 131
| 0.612126
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
'''
Element Software Node Network Interfaces - Bond 1G and 10G configuration
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_network_interfaces
short_description: NetApp Element Software Configure Node Network Interfaces
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Configure Element SW Node Network Interfaces for Bond 1G and 10G IP address.
options:
method:
description:
- Type of Method used to configure the interface.
- method depends on other settings such as the use of a static IP address, which will change the method to static.
- loopback - Used to define the IPv4 loopback interface.
- manual - Used to define interfaces for which no configuration is done by default.
- dhcp - May be used to obtain an IP address via DHCP.
- static - Used to define Ethernet interfaces with statically allocated IPv4 addresses.
choices: ['loopback', 'manual', 'dhcp', 'static']
required: true
ip_address_1g:
description:
- IP address for the 1G network.
required: true
ip_address_10g:
description:
- IP address for the 10G network.
required: true
subnet_1g:
description:
- 1GbE Subnet Mask.
required: true
subnet_10g:
description:
- 10GbE Subnet Mask.
required: true
gateway_address_1g:
description:
- Router network address to send packets out of the local network.
required: true
gateway_address_10g:
description:
- Router network address to send packets out of the local network.
required: true
mtu_1g:
description:
- Maximum Transmission Unit for 1GbE, Largest packet size that a network protocol can transmit.
- Must be greater than or equal to 1500 bytes.
default: '1500'
mtu_10g:
description:
- Maximum Transmission Unit for 10GbE, Largest packet size that a network protocol can transmit.
- Must be greater than or equal to 1500 bytes.
default: '1500'
dns_nameservers:
description:
- List of addresses for domain name servers.
dns_search_domains:
description:
- List of DNS search domains.
bond_mode_1g:
description:
- Bond mode for 1GbE configuration.
choices: ['ActivePassive', 'ALB', 'LACP']
default: 'ActivePassive'
bond_mode_10g:
description:
- Bond mode for 10GbE configuration.
choices: ['ActivePassive', 'ALB', 'LACP']
default: 'ActivePassive'
lacp_1g:
description:
- Link Aggregation Control Protocol useful only if LACP is selected as the Bond Mode.
- Slow - Packets are transmitted at 30 second intervals.
- Fast - Packets are transmitted in 1 second intervals.
choices: ['Fast', 'Slow']
default: 'Slow'
lacp_10g:
description:
- Link Aggregation Control Protocol useful only if LACP is selected as the Bond Mode.
- Slow - Packets are transmitted at 30 second intervals.
- Fast - Packets are transmitted in 1 second intervals.
choices: ['Fast', 'Slow']
default: 'Slow'
virtual_network_tag:
description:
- This is the primary network tag. All nodes in a cluster have the same VLAN tag.
'''
EXAMPLES = """
- name: Set Node network interfaces configuration for Bond 1G and 10G properties
tags:
- elementsw_network_interfaces
na_elementsw_network_interfaces:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
method: static
ip_address_1g: 10.226.109.68
ip_address_10g: 10.226.201.72
subnet_1g: 255.255.255.0
subnet_10g: 255.255.255.0
gateway_address_1g: 10.193.139.1
gateway_address_10g: 10.193.140.1
mtu_1g: 1500
mtu_10g: 9000
bond_mode_1g: ActivePassive
bond_mode_10g: LACP
lacp_10g: Fast
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
try:
from solidfire.models import Network, NetworkConfig
HAS_SF_SDK = True
except Exception:
HAS_SF_SDK = False
class ElementSWNetworkInterfaces(object):
"""
Element Software Network Interfaces - Bond 1G and 10G Network configuration
"""
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(
method=dict(type='str', required=True, choices=['loopback', 'manual', 'dhcp', 'static']),
ip_address_1g=dict(type='str', required=True),
ip_address_10g=dict(type='str', required=True),
subnet_1g=dict(type='str', required=True),
subnet_10g=dict(type='str', required=True),
gateway_address_1g=dict(type='str', required=True),
gateway_address_10g=dict(type='str', required=True),
mtu_1g=dict(type='str', default='1500'),
mtu_10g=dict(type='str', default='1500'),
dns_nameservers=dict(type='list'),
dns_search_domains=dict(type='list'),
bond_mode_1g=dict(type='str', default='ActivePassive', choices=['ActivePassive', 'ALB', 'LACP']),
bond_mode_10g=dict(type='str', default='ActivePassive', choices=['ActivePassive', 'ALB', 'LACP']),
lacp_1g=dict(type='str', default='Slow', choices=['Fast', 'Slow']),
lacp_10g=dict(type='str', default='Slow', choices=['Fast', 'Slow']),
virtual_network_tag=dict(type='str'),
)
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True,
)
input_params = self.module.params
self.method = input_params['method']
self.ip_address_1g = input_params['ip_address_1g']
self.ip_address_10g = input_params['ip_address_10g']
self.subnet_1g = input_params['subnet_1g']
self.subnet_10g = input_params['subnet_10g']
self.gateway_address_1g = input_params['gateway_address_1g']
self.gateway_address_10g = input_params['gateway_address_10g']
self.mtu_1g = input_params['mtu_1g']
self.mtu_10g = input_params['mtu_10g']
self.dns_nameservers = input_params['dns_nameservers']
self.dns_search_domains = input_params['dns_search_domains']
self.bond_mode_1g = input_params['bond_mode_1g']
self.bond_mode_10g = input_params['bond_mode_10g']
self.lacp_1g = input_params['lacp_1g']
self.lacp_10g = input_params['lacp_10g']
self.virtual_network_tag = input_params['virtual_network_tag']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module, port=442)
def set_network_config(self):
"""
set network configuration
"""
try:
self.sfe.set_network_config(network=self.network_object)
except Exception as exception_object:
self.module.fail_json(msg='Error network setting for node %s' % (to_native(exception_object)),
exception=traceback.format_exc())
def get_network_params_object(self):
"""
Get Element SW Network object
:description: get Network object
:return: NetworkConfig object
:rtype: object(NetworkConfig object)
"""
try:
bond_1g_network = NetworkConfig(method=self.method,
address=self.ip_address_1g,
netmask=self.subnet_1g,
gateway=self.gateway_address_1g,
mtu=self.mtu_1g,
dns_nameservers=self.dns_nameservers,
dns_search=self.dns_search_domains,
bond_mode=self.bond_mode_1g,
bond_lacp_rate=self.lacp_1g,
virtual_network_tag=self.virtual_network_tag)
bond_10g_network = NetworkConfig(method=self.method,
address=self.ip_address_10g,
netmask=self.subnet_10g,
gateway=self.gateway_address_10g,
mtu=self.mtu_10g,
dns_nameservers=self.dns_nameservers,
dns_search=self.dns_search_domains,
bond_mode=self.bond_mode_10g,
bond_lacp_rate=self.lacp_10g,
virtual_network_tag=self.virtual_network_tag)
network_object = Network(bond1_g=bond_1g_network,
bond10_g=bond_10g_network)
return network_object
except Exception as e:
self.module.fail_json(msg='Error with setting up network object for node 1G and 10G configuration : %s' % to_native(e),
exception=to_native(e))
def apply(self):
"""
Check connection and initialize node with cluster ownership
"""
changed = False
result_message = None
self.network_object = self.get_network_params_object()
if self.network_object is not None:
self.set_network_config()
changed = True
else:
result_message = "Skipping changes, No change requested"
self.module.exit_json(changed=changed, msg=result_message)
def main():
"""
Main function
"""
elementsw_network_interfaces = ElementSWNetworkInterfaces()
elementsw_network_interfaces.apply()
if __name__ == '__main__':
main()
| 2,521
| 0
| 27
|
327abded97602fd35ed2f1723e9822ff6005624c
| 6,589
|
py
|
Python
|
LoRaSensorMonitoringServer.py
|
TomNaiser/LoRa-Sensor-Monitoring
|
090edce9a14147b9eb62a2e065e02e248c41be84
|
[
"MIT"
] | null | null | null |
LoRaSensorMonitoringServer.py
|
TomNaiser/LoRa-Sensor-Monitoring
|
090edce9a14147b9eb62a2e065e02e248c41be84
|
[
"MIT"
] | null | null | null |
LoRaSensorMonitoringServer.py
|
TomNaiser/LoRa-Sensor-Monitoring
|
090edce9a14147b9eb62a2e065e02e248c41be84
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 10:14:42 2020
@author: Thomas
"""
#!/usr/bin/env python
#Lora Base Server
#Reads Data from the ESP32 Lora Receiver via serial Port and generates a CSV file for each LoRa sensor client
import time
import serial
import re
import datetime
import os.path
myDataFileName="/home/pi/BienenWaageMessdaten"
ser = serial.Serial(
port='/dev/ttyUSB0',
#port='COM15',
baudrate = 115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
measurements=[]
currentSensorData=sensorData(sensorID=[-1])
datasetReady=False
while 1:
x=ser.readline()
#print(x)
parsedInput=str(x).split('\\t',)
SensorIDVal=parseSerialInput("Sensor ID",parsedInput[0])
if SensorIDVal is not None:
currentSensorData=sensorData(sensorID=SensorIDVal)
if "Sensor ID" in parsedInput[0]:
d = datetime.datetime.now()
timeNow=time.mktime(d.timetuple()) #get posix time
currentSensorData.receiveTime=convertDateTime(d)
currentSensorData.receiveTimePosix=timeNow
for paramString in parsedInput:
SensorIDVal=parseSerialInput("Sensor ID",paramString)
if SensorIDVal is not None:
currentSensorData.sensorID=SensorIDVal
weightVal=parseSerialInput("Weight",paramString)
if weightVal is not None:
currentSensorData.weight=weightVal
voltageVal=parseSerialInput("Voltage",paramString)
if voltageVal is not None:
currentSensorData.voltage=voltageVal
rssidVal=parseSerialInput("RSSID",paramString)
if rssidVal is not None:
currentSensorData.RSSID=rssidVal
swarmAlarmVal=parseSerialInput("Swarm Alarm",paramString)
if swarmAlarmVal is not None:
currentSensorData.swarmAlarm=swarmAlarmVal
else:
continue
outputDataLine=getOutputDataLine(currentSensorData)
print(outputDataLine)
appendLineToFile(myDataFileName+'Sensor'+str(int(currentSensorData.sensorID[0]))+'.txt',outputDataLine)
# =============================================================================
# SensorIDVal=parseSerialInput("Sensor ID",str(x))
#
# if SensorIDVal is not None:
# datasetReady=True
# #timeNow=str(datetime.datetime.now())
#
# d = datetime.datetime.now()
#
# timeNow=time.mktime(d.timetuple())
# #timeNow=convertDateTime(datetime.datetime.now())
# print("Create new SensorDataSet with sensorID: ",SensorIDVal)
# newSensorData=sensorData(sensorID=SensorIDVal,receiveTime=timeNow)
# print("Created new SensorDataSet with sensorID: ",SensorIDVal)
# #append currentSensorData after the first Dataset has been read completely
#            #That's when SensorIDVal is called for the second time
# print ("CurrentSensorData: ",currentSensorData.sensorID)
# if currentSensorData.sensorID[0]>-1: # if the currentSensorData is not completely new...
# measurements.append(currentSensorData)
# outputDataLine=getOutputDataLine(currentSensorData)
# csd=currentSensorData.sensorID
# print ("Output: ",outputDataLine)
# appendLineToFile(myDataFileName+'Sensor'+str(int(SensorIDVal[0]))+'.txt',outputDataLine)
#
#
#
# currentSensorData=newSensorData
#
# if datasetReady:
# weightVal=parseSerialInput("Weight",str(x))
# if weightVal is not None:
# currentSensorData.weight=weightVal
#
# voltageVal=parseSerialInput("Voltage",str(x))
# if voltageVal is not None:
# currentSensorData.voltage=voltageVal
#
# rssidVal=parseSerialInput("RSSID",str(x))
# if rssidVal is not None:
# currentSensorData.RSSID=rssidVal
#
# swarmAlarmVal=parseSerialInput("Swarm Alarm",str(x))
# if swarmAlarmVal is not None:
# currentSensorData.swarmAlarm=swarmAlarmVal
#
# =============================================================================
| 36.403315
| 141
| 0.595386
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 10:14:42 2020
@author: Thomas
"""
#!/usr/bin/env python
#Lora Base Server
#Reads Data from the ESP32 Lora Receiver via serial Port and generates a CSV file for each LoRa sensor client
import time
import serial
import re
import datetime
import os.path
class sensorData(object):
def __init__(self, sensorID=[0],weight=[0],voltage=[0],RSSID=[0],swarmAlarm=[0],receiveTimePosix=[0],receiveTime=[0] ):
self.sensorID = sensorID
self.weight = weight
self.voltage=voltage
self.RSSID=RSSID
self.swarmAlarm=swarmAlarm
self.receiveTimePosix=receiveTimePosix
self.receiveTime=receiveTime
def appendLineToFile(filename,content):
    # Write the column header once when the file is first created,
    # then append the data line in either case.
    if os.path.isfile(filename)==False:
        with open(filename,"a") as myfile:
            myfile.write("Posix-Zeit\tSensorID\tGewicht\tSpannung\tRSSID\tZeit\n")
    with open(filename, "a") as myfile:
        myfile.write(content)
def getOutputDataLine(mySensorData):
outputStr=""
outputStr=outputStr+str(mySensorData.receiveTimePosix)+"\t"
outputStr=outputStr+str(int(mySensorData.sensorID[0]))+"\t"
outputStr=outputStr+str(mySensorData.weight[0])+"\t"
outputStr=outputStr+str(mySensorData.voltage[0])+"\t"
outputStr=outputStr+str(int(mySensorData.RSSID[0]))+"\t"
outputStr=outputStr+str(mySensorData.receiveTime)+"\n" #put datetime to the last column, otherwise kst2 plot seems to confuse the columns
return outputStr
def parseSerialInput(IDString,serialInputString):
value=None
if IDString in serialInputString:
value=[float(s) for s in re.findall(r'-?\d+\.?\d*', serialInputString)]
return value
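# Hedged illustration (not part of the original script): parseSerialInput pulls
# every number out of a line that contains the given ID string, e.g.
#   parseSerialInput("Weight", "Weight: 12.5 kg")  ->  [12.5]
#   parseSerialInput("Weight", "Voltage: 3.7")     ->  None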
def convertDateTime(dt):
timestring=""
timestring=timestring+str(dt.year)+'-'
timestring=timestring+str(dt.month).zfill(2)+'-'
timestring=timestring+str(dt.day).zfill(2)+" "
timestring=timestring+str(dt.hour).zfill(2)+":"
timestring=timestring+str(dt.minute).zfill(2)+":"
timestring=timestring+str(dt.second).zfill(2)
print (timestring)
return timestring
myDataFileName="/home/pi/BienenWaageMessdaten"
ser = serial.Serial(
port='/dev/ttyUSB0',
#port='COM15',
baudrate = 115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
measurements=[]
currentSensorData=sensorData(sensorID=[-1])
datasetReady=False
while 1:
x=ser.readline()
#print(x)
parsedInput=str(x).split('\\t',)
SensorIDVal=parseSerialInput("Sensor ID",parsedInput[0])
if SensorIDVal is not None:
currentSensorData=sensorData(sensorID=SensorIDVal)
if "Sensor ID" in parsedInput[0]:
d = datetime.datetime.now()
timeNow=time.mktime(d.timetuple()) #get posix time
currentSensorData.receiveTime=convertDateTime(d)
currentSensorData.receiveTimePosix=timeNow
for paramString in parsedInput:
SensorIDVal=parseSerialInput("Sensor ID",paramString)
if SensorIDVal is not None:
currentSensorData.sensorID=SensorIDVal
weightVal=parseSerialInput("Weight",paramString)
if weightVal is not None:
currentSensorData.weight=weightVal
voltageVal=parseSerialInput("Voltage",paramString)
if voltageVal is not None:
currentSensorData.voltage=voltageVal
rssidVal=parseSerialInput("RSSID",paramString)
if rssidVal is not None:
currentSensorData.RSSID=rssidVal
swarmAlarmVal=parseSerialInput("Swarm Alarm",paramString)
if swarmAlarmVal is not None:
currentSensorData.swarmAlarm=swarmAlarmVal
else:
continue
outputDataLine=getOutputDataLine(currentSensorData)
print(outputDataLine)
appendLineToFile(myDataFileName+'Sensor'+str(int(currentSensorData.sensorID[0]))+'.txt',outputDataLine)
# =============================================================================
# SensorIDVal=parseSerialInput("Sensor ID",str(x))
#
# if SensorIDVal is not None:
# datasetReady=True
# #timeNow=str(datetime.datetime.now())
#
# d = datetime.datetime.now()
#
# timeNow=time.mktime(d.timetuple())
# #timeNow=convertDateTime(datetime.datetime.now())
# print("Create new SensorDataSet with sensorID: ",SensorIDVal)
# newSensorData=sensorData(sensorID=SensorIDVal,receiveTime=timeNow)
# print("Created new SensorDataSet with sensorID: ",SensorIDVal)
# #append currentSensorData after the first Dataset has been read completely
#            #That's when SensorIDVal is called for the second time
# print ("CurrentSensorData: ",currentSensorData.sensorID)
# if currentSensorData.sensorID[0]>-1: # if the currentSensorData is not completely new...
# measurements.append(currentSensorData)
# outputDataLine=getOutputDataLine(currentSensorData)
# csd=currentSensorData.sensorID
# print ("Output: ",outputDataLine)
# appendLineToFile(myDataFileName+'Sensor'+str(int(SensorIDVal[0]))+'.txt',outputDataLine)
#
#
#
# currentSensorData=newSensorData
#
# if datasetReady:
# weightVal=parseSerialInput("Weight",str(x))
# if weightVal is not None:
# currentSensorData.weight=weightVal
#
# voltageVal=parseSerialInput("Voltage",str(x))
# if voltageVal is not None:
# currentSensorData.voltage=voltageVal
#
# rssidVal=parseSerialInput("RSSID",str(x))
# if rssidVal is not None:
# currentSensorData.RSSID=rssidVal
#
# swarmAlarmVal=parseSerialInput("Swarm Alarm",str(x))
# if swarmAlarmVal is not None:
# currentSensorData.swarmAlarm=swarmAlarmVal
#
# =============================================================================
| 1,653
| 4
| 163
|
225465fb383bfcf90d72035fd4286c3072f0a432
| 750
|
py
|
Python
|
sapextractor/utils/tstct/extract_tstct.py
|
aarkue/sap-meta-explorer
|
613bf657bbaa72a3781a84664e5de7626516532f
|
[
"Apache-2.0"
] | 2
|
2021-02-10T08:09:35.000Z
|
2021-05-21T06:25:34.000Z
|
sapextractor/utils/tstct/extract_tstct.py
|
aarkue/sap-meta-explorer
|
613bf657bbaa72a3781a84664e5de7626516532f
|
[
"Apache-2.0"
] | null | null | null |
sapextractor/utils/tstct/extract_tstct.py
|
aarkue/sap-meta-explorer
|
613bf657bbaa72a3781a84664e5de7626516532f
|
[
"Apache-2.0"
] | 3
|
2021-11-22T13:27:00.000Z
|
2022-03-16T22:08:51.000Z
|
from copy import copy
| 27.777778
| 116
| 0.644
|
from copy import copy
class Shared:
transactions_dictio = {}
def apply(con, target_language="E"):
df = con.prepare_and_execute_query("TSTCT", ["SPRSL", "TCODE", "TTEXT"], " WHERE SPRSL = '"+target_language+"'")
df = df[df["SPRSL"] == target_language]
stream = df.to_dict('records')
dictio = {}
for el in stream:
dictio[el["TCODE"]] = el["TTEXT"]
return dictio
def apply_static(con, transactions=None):
if not Shared.transactions_dictio:
Shared.transactions_dictio = apply(con)
ret = copy(Shared.transactions_dictio)
if transactions is not None:
transactions = set(transactions).difference(set(ret.keys()))
for t in transactions:
ret[t] = str(t)
return ret
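# Hedged illustration (not part of the original module): apply_static caches the
# TCODE -> description mapping on first use; transaction codes without a text,
# e.g. a hypothetical "ZZ99", are mapped to themselves in the returned dict.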
| 635
| 21
| 69
|
a606e61515f9330200bf5c9be613695f8e5414ce
| 7,873
|
py
|
Python
|
ogs5py/tools/download.py
|
GeoStat-Framework/ogs5py
|
2bc4428c4c485d094e02c129ba5051745df58391
|
[
"MIT"
] | 12
|
2018-12-11T15:44:58.000Z
|
2022-03-30T19:04:42.000Z
|
ogs5py/tools/download.py
|
GeoStat-Framework/ogs5py
|
2bc4428c4c485d094e02c129ba5051745df58391
|
[
"MIT"
] | 4
|
2019-07-09T17:47:05.000Z
|
2021-12-27T07:34:07.000Z
|
ogs5py/tools/download.py
|
GeoStat-Framework/ogs5py
|
2bc4428c4c485d094e02c129ba5051745df58391
|
[
"MIT"
] | 5
|
2019-04-04T19:47:56.000Z
|
2021-04-28T21:56:39.000Z
|
# -*- coding: utf-8 -*-
"""
Downloader for ogs5.
.. currentmodule:: ogs5py.tools.download
Downloader
^^^^^^^^^^
A downloading routine to get the OSG5 executable.
.. autosummary::
download_ogs
add_exe
reset_download
OGS5PY_CONFIG
----
"""
import os
import shutil
import tarfile
import zipfile
from urllib.request import urlretrieve, urlopen
import tempfile
import platform
import lxml.html
# TemporaryDirectory is not available in Python 2
# from: https://gist.github.com/cpelley/10e2eeaf60dacc7956bb
TemporaryDirectory = getattr(
tempfile, "TemporaryDirectory", _TemporaryDirectory
)
# https://stackoverflow.com/a/34615446/6696397
def get_links(url, ext, build=None):
"""Get links from url ending with ext and containing build."""
sublinks = []
connection = urlopen(url)
dom = lxml.html.fromstring(connection.read())
for link in dom.xpath("//a/@href"):
if not link or not link.endswith(ext):
continue # skip unwanted
if build is None or "build_" + build + "/" in link:
sublinks.append(
url + link if not link.startswith("http") else link
)
return sublinks
RELEASE = "https://ogsstorage.blob.core.windows.net/binaries/ogs5/"
BUILD = "https://jenkins.opengeosys.org/job/ufz/job/ogs5/job/master/"
STABLE = BUILD + "lastStableBuild/"
SUCCESS = BUILD + "lastSuccessfulBuild/"
# https://stackoverflow.com/a/53222876/6696397
OGS5PY_CONFIG = os.path.join(
os.environ.get("APPDATA")
or os.environ.get("XDG_CONFIG_HOME")
or os.path.join(os.environ["HOME"], ".config"),
"ogs5py",
)
"""str: Standard config path for ogs5py."""
URLS = {
"5.7": {
"Linux": (
RELEASE + "ogs-5.7.0-Linux-2.6.32-573.8.1.el6.x86_64-x64.tar.gz"
),
"Windows": RELEASE + "ogs-5.7.0-Windows-6.1.7601-x64.zip",
"Darwin": RELEASE + "ogs-5.7.0-Darwin-15.2.0-x64.tar.gz",
},
"5.7.1": {
"Windows": (
"https://github.com/ufz/ogs5/releases/download/"
+ "5.7.1/ogs-5.7.1-Windows-x64.zip"
)
},
"5.8": {
"Linux": (
RELEASE + "ogs-5.8-Linux-2.6.32-754.3.5.el6.x86_64-x64.tar.gz"
),
"Windows": RELEASE + "ogs-5.8-Windows-x64.zip",
},
}
def download_ogs(
version="5.7", system=None, path=OGS5PY_CONFIG, name=None, build=None
):
"""
Download the OGS5 executable.
Parameters
----------
version : :class:`str`, optional
Version to download ("5.7", "5.8", "latest" or "stable").
Default: "5.7"
system : :class:`str`, optional
Target system (Linux, Windows, Darwin). Default: platform.system()
path : :class:`str`, optional
Destination path. Default: :any:`OGS5PY_CONFIG`
name : :class:`str`, optional
Destination file name. Default "ogs[.exe]"
build : :class:`str`, optional
If system is "Linux" and version is "latest" or "stable",
you can select a certain build from the ogs 5 builds:
* "BRNS": Biogeochemical Reaction Network Simulator
* "FEM": Finite Element Method
* "GEMS": Gibbs Energy Minimization Solver
* "IPQC": IPhreeqc
* "LIS": Library of Iterative Solvers
* "MKL": Intel Math Kernel Library
* "MPI": Message Passing Interface
* "PETSC": Portable, Extensible Toolkit for Scientific Computation
* "PETSC_GEMS": PETSC and GEMS
* "PQC": PHREEQC
* "SP": Sparse solver
Returns
-------
dest : :class:`str`
If an OGS5 executable was successfully downloaded, the file-path
is returned.
Notes
-----
There is only an executable on "Darwin" for version "5.7".
Taken from:
* https://www.opengeosys.org/ogs-5/
* https://jenkins.opengeosys.org/job/ufz/job/ogs5/job/master/
"""
URLS["latest"] = {
"Linux": get_links(SUCCESS, "tar.gz", build="FEM")[0],
"Windows": get_links(SUCCESS, "zip", build=None)[0],
}
URLS["stable"] = {
"Linux": get_links(STABLE, "tar.gz", build="FEM")[0],
"Windows": get_links(STABLE, "zip", build=None)[0],
}
system = platform.system() if system is None else system
path = os.path.abspath(path)
if not os.path.exists(path):
os.makedirs(path)
if version not in URLS:
raise ValueError(
"'{}': unknown version. Use: {}".format(version, list(URLS))
)
urls_version = URLS[version]
if system not in urls_version:
raise ValueError(
"'{}': unsupported system for version '{}'. Use: {}".format(
system, version, list(urls_version)
)
)
if system == "Linux" and build is not None:
if version not in ["stable", "latest"]:
raise ValueError(
"Use version 'stable' or 'latest' for specific build."
)
base_url = STABLE if version == "stable" else SUCCESS
links = get_links(base_url, ".tar.gz", build)
if len(links) != 1:
raise ValueError(
"Can't find unique version for build '{}'. Found: {}".format(
build, links
)
)
ogs_url = links[0]
elif build is None or build == "FEM":
ogs_url = urls_version[system]
else:
raise ValueError(
"system='{}', build='{}': Could not find matching exe.".format(
system, build
)
)
print("Downloading: ", ogs_url)
ext = ".tar.gz" if ogs_url.endswith(".tar.gz") else ".zip"
if name is None:
name = "ogs.exe" if system == "Windows" else "ogs"
dest = os.path.join(path, name)
with TemporaryDirectory() as tmpdirname:
data_filename = os.path.join(tmpdirname, "data" + ext)
urlretrieve(ogs_url, data_filename)
# extract the data
if ext == ".tar.gz":
z_file = tarfile.open(data_filename, "r:gz")
names = z_file.getnames()
else:
z_file = zipfile.ZipFile(data_filename)
names = z_file.namelist()
found = ""
for file in names:
if os.path.basename(file).startswith("ogs"):
found = file
break
if found:
z_file.extract(member=found, path=tmpdirname)
shutil.copy(os.path.join(tmpdirname, found), dest)
z_file.close()
return dest if found else None
def add_exe(ogs_exe, dest_name=None):
"""
Add an OGS5 exe to :any:`OGS5PY_CONFIG`.
Parameters
----------
ogs_exe : :class:`str`
Path to the ogs executable to be copied.
dest_name : :class:`str`, optional
Destination file name. Default: basename of ogs_exe
Returns
-------
dest : :class:`str`
If an OGS5 executable was successfully copied, the file-path
is returned.
"""
if platform.system() == "Windows" and ogs_exe[-4:] == ".lnk":
print("Don't use file links under windows...")
return None
if os.path.islink(ogs_exe):
ogs_exe = os.path.realpath(ogs_exe)
if os.path.exists(ogs_exe) and os.path.isfile(ogs_exe):
dest_name = (
os.path.basename(ogs_exe) if dest_name is None else dest_name
)
dest = os.path.join(OGS5PY_CONFIG, dest_name)
shutil.copy(ogs_exe, dest)
return dest
print("The given ogs_exe does not exist...")
return None
def reset_download():
"""Reset all downloads in :any:`OGS5PY_CONFIG`."""
shutil.rmtree(OGS5PY_CONFIG, ignore_errors=True)
| 30.87451
| 78
| 0.587578
|
# -*- coding: utf-8 -*-
"""
Downloader for ogs5.
.. currentmodule:: ogs5py.tools.download
Downloader
^^^^^^^^^^
A downloading routine to get the OGS5 executable.
.. autosummary::
download_ogs
add_exe
reset_download
OGS5PY_CONFIG
----
"""
import os
import shutil
import tarfile
import zipfile
from urllib.request import urlretrieve, urlopen
import tempfile
import platform
import lxml.html
# TemporaryDirectory not available in python2
# from: https://gist.github.com/cpelley/10e2eeaf60dacc7956bb
class _TemporaryDirectory(object):
def __enter__(self):
self.dir_name = tempfile.mkdtemp()
return self.dir_name
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.dir_name)
TemporaryDirectory = getattr(
tempfile, "TemporaryDirectory", _TemporaryDirectory
)
# https://stackoverflow.com/a/34615446/6696397
def get_links(url, ext, build=None):
"""Get links from url ending with ext and containing build."""
sublinks = []
connection = urlopen(url)
dom = lxml.html.fromstring(connection.read())
for link in dom.xpath("//a/@href"):
if not link or not link.endswith(ext):
continue # skip unwanted
if build is None or "build_" + build + "/" in link:
sublinks.append(
url + link if not link.startswith("http") else link
)
return sublinks
RELEASE = "https://ogsstorage.blob.core.windows.net/binaries/ogs5/"
BUILD = "https://jenkins.opengeosys.org/job/ufz/job/ogs5/job/master/"
STABLE = BUILD + "lastStableBuild/"
SUCCESS = BUILD + "lastSuccessfulBuild/"
# https://stackoverflow.com/a/53222876/6696397
OGS5PY_CONFIG = os.path.join(
os.environ.get("APPDATA")
or os.environ.get("XDG_CONFIG_HOME")
or os.path.join(os.environ["HOME"], ".config"),
"ogs5py",
)
"""str: Standard config path for ogs5py."""
URLS = {
"5.7": {
"Linux": (
RELEASE + "ogs-5.7.0-Linux-2.6.32-573.8.1.el6.x86_64-x64.tar.gz"
),
"Windows": RELEASE + "ogs-5.7.0-Windows-6.1.7601-x64.zip",
"Darwin": RELEASE + "ogs-5.7.0-Darwin-15.2.0-x64.tar.gz",
},
"5.7.1": {
"Windows": (
"https://github.com/ufz/ogs5/releases/download/"
+ "5.7.1/ogs-5.7.1-Windows-x64.zip"
)
},
"5.8": {
"Linux": (
RELEASE + "ogs-5.8-Linux-2.6.32-754.3.5.el6.x86_64-x64.tar.gz"
),
"Windows": RELEASE + "ogs-5.8-Windows-x64.zip",
},
}
def download_ogs(
version="5.7", system=None, path=OGS5PY_CONFIG, name=None, build=None
):
"""
Download the OGS5 executable.
Parameters
----------
version : :class:`str`, optional
Version to download ("5.7", "5.8", "latest" or "stable").
Default: "5.7"
system : :class:`str`, optional
Target system (Linux, Windows, Darwin). Default: platform.system()
path : :class:`str`, optional
Destination path. Default: :any:`OGS5PY_CONFIG`
name : :class:`str`, optional
Destination file name. Default "ogs[.exe]"
build : :class:`str`, optional
If system is "Linux" and version is "latest" or "stable",
you can select a certain build from the ogs 5 builds:
* "BRNS": Biogeochemical Reaction Network Simulator
* "FEM": Finite Element Method
* "GEMS": Gibbs Energy Minimization Solver
* "IPQC": IPhreeqc
* "LIS": Library of Iterative Solvers
* "MKL": Intel Math Kernel Library
* "MPI": Message Passing Interface
* "PETSC": Portable, Extensible Toolkit for Scientific Computation
* "PETSC_GEMS": PETSC and GEMS
* "PQC": PHREEQC
* "SP": Sparse solver
Returns
-------
dest : :class:`str`
If an OGS5 executable was successfully downloaded, the file-path
is returned.
Notes
-----
There is only an executable on "Darwin" for version "5.7".
Taken from:
* https://www.opengeosys.org/ogs-5/
* https://jenkins.opengeosys.org/job/ufz/job/ogs5/job/master/
"""
URLS["latest"] = {
"Linux": get_links(SUCCESS, "tar.gz", build="FEM")[0],
"Windows": get_links(SUCCESS, "zip", build=None)[0],
}
URLS["stable"] = {
"Linux": get_links(STABLE, "tar.gz", build="FEM")[0],
"Windows": get_links(STABLE, "zip", build=None)[0],
}
system = platform.system() if system is None else system
path = os.path.abspath(path)
if not os.path.exists(path):
os.makedirs(path)
if version not in URLS:
raise ValueError(
"'{}': unknown version. Use: {}".format(version, list(URLS))
)
urls_version = URLS[version]
if system not in urls_version:
raise ValueError(
"'{}': unsupported system for version '{}'. Use: {}".format(
system, version, list(urls_version)
)
)
if system == "Linux" and build is not None:
if version not in ["stable", "latest"]:
raise ValueError(
"Use version 'stable' or 'latest' for specific build."
)
base_url = STABLE if version == "stable" else SUCCESS
links = get_links(base_url, ".tar.gz", build)
if len(links) != 1:
raise ValueError(
"Can't find unique version for build '{}'. Found: {}".format(
build, links
)
)
ogs_url = links[0]
elif build is None or build == "FEM":
ogs_url = urls_version[system]
else:
raise ValueError(
"system='{}', build='{}': Could not find matching exe.".format(
system, build
)
)
print("Downloading: ", ogs_url)
ext = ".tar.gz" if ogs_url.endswith(".tar.gz") else ".zip"
if name is None:
name = "ogs.exe" if system == "Windows" else "ogs"
dest = os.path.join(path, name)
with TemporaryDirectory() as tmpdirname:
data_filename = os.path.join(tmpdirname, "data" + ext)
urlretrieve(ogs_url, data_filename)
# extract the data
if ext == ".tar.gz":
z_file = tarfile.open(data_filename, "r:gz")
names = z_file.getnames()
else:
z_file = zipfile.ZipFile(data_filename)
names = z_file.namelist()
found = ""
for file in names:
if os.path.basename(file).startswith("ogs"):
found = file
break
if found:
z_file.extract(member=found, path=tmpdirname)
shutil.copy(os.path.join(tmpdirname, found), dest)
z_file.close()
return dest if found else None
def add_exe(ogs_exe, dest_name=None):
"""
Add an OGS5 exe to :any:`OGS5PY_CONFIG`.
Parameters
----------
ogs_exe : :class:`str`
Path to the ogs executable to be copied.
dest_name : :class:`str`, optional
Destination file name. Default: basename of ogs_exe
Returns
-------
dest : :class:`str`
If an OGS5 executable was successfully copied, the file-path
is returned.
"""
if platform.system() == "Windows" and ogs_exe[-4:] == ".lnk":
print("Don't use file links under windows...")
return None
if os.path.islink(ogs_exe):
ogs_exe = os.path.realpath(ogs_exe)
if os.path.exists(ogs_exe) and os.path.isfile(ogs_exe):
dest_name = (
os.path.basename(ogs_exe) if dest_name is None else dest_name
)
dest = os.path.join(OGS5PY_CONFIG, dest_name)
shutil.copy(ogs_exe, dest)
return dest
print("The given ogs_exe does not exist...")
return None
def reset_download():
"""Reset all downloads in :any:`OGS5PY_CONFIG`."""
shutil.rmtree(OGS5PY_CONFIG, ignore_errors=True)
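# --- Editor's note: hedged usage sketch, not part of ogs5py itself. ---
# Assumes network access and that the release/Jenkins URLs above are still
# reachable; version "5.8" and the add_exe path are example values only.
if __name__ == "__main__":
    exe_path = download_ogs(version="5.8")  # fetch into OGS5PY_CONFIG
    if exe_path is not None:
        print("OGS5 executable stored at:", exe_path)
    else:
        print("No OGS5 executable found in the downloaded archive.")
    # Registering an executable that already exists on disk (path is hypothetical):
    # add_exe("/opt/ogs5/bin/ogs", dest_name="ogs")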
| 138
| 13
| 75
|
2fc9e9e0b73e020f3c72b32bf69a10544d35b251
| 9,397
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/ViewCropRegionShapeManager.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Autodesk/Revit/DB/__init___parts/ViewCropRegionShapeManager.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Autodesk/Revit/DB/__init___parts/ViewCropRegionShapeManager.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class ViewCropRegionShapeManager(object, IDisposable):
""" A class that provides access to settings related to the crop assigned to a view or a reference callout. """
def Dispose(self):
""" Dispose(self: ViewCropRegionShapeManager) """
pass
def GetAnnotationCropShape(self):
"""
GetAnnotationCropShape(self: ViewCropRegionShapeManager) -> CurveLoop
Gets the annotation crop box assigned to the view.
Returns: The annotation crop boundary.
"""
pass
def GetCropShape(self):
"""
GetCropShape(self: ViewCropRegionShapeManager) -> IList[CurveLoop]
Gets the crop boundaries that are currently active.
Returns: The crop boundaries.
"""
pass
def GetSplitRegionMaximum(self, regionIndex):
"""
GetSplitRegionMaximum(self: ViewCropRegionShapeManager,regionIndex: int) -> float
Returns the proportional location of the maximum boundary of the specified
split crop region.
regionIndex: Index of region to be split horizontally (numbering starts with 0).
Returns: A value from 0 to 1 representing the maximum location for the regions split
boundary.
This number represents the location as a ratio along the
non-split rectangular crop.
"""
pass
def GetSplitRegionMinimum(self, regionIndex):
"""
GetSplitRegionMinimum(self: ViewCropRegionShapeManager,regionIndex: int) -> float
Returns the proportional location of the minimum boundary of the specified
split crop region.
regionIndex: Index of region to be split horizontally (numbering starts with 0).
Returns: A value from 0 to 1 representing the minimum location for the regions split
boundary.
This number represents the location as a ratio along the
non-split rectangular crop.
"""
pass
def IsCropRegionShapeValid(self, boundary):
"""
IsCropRegionShapeValid(self: ViewCropRegionShapeManager,boundary: CurveLoop) -> bool
Verifies that boundary represents one closed curve loop without
self-intersections,
consisting of non-zero length straight lines in a plane
parallel to the view plane.
boundary: The crop boundary.
Returns: True if the passed crop boundary represents one closed curve loop without
self-intersections,
consisting of non-zero length straight lines in a plane
parallel to the view plane.
"""
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: ViewCropRegionShapeManager,disposing: bool) """
pass
def RemoveCropRegionShape(self):
"""
RemoveCropRegionShape(self: ViewCropRegionShapeManager)
Removes any non-rectangular boundary of the view's crop.
"""
pass
def RemoveSplit(self):
"""
RemoveSplit(self: ViewCropRegionShapeManager)
Removes any split applied to the view's crop.
"""
pass
def RemoveSplitRegion(self, regionIndex):
"""
RemoveSplitRegion(self: ViewCropRegionShapeManager,regionIndex: int)
Removes one region in split crop.
regionIndex: Index of region to be deleted (numbering starts with 0).
"""
pass
def SetCropShape(self, boundary):
"""
SetCropShape(self: ViewCropRegionShapeManager,boundary: CurveLoop)
Sets the boundary of the view's crop to the specified shape.
boundary: The crop boundary.
"""
pass
def SplitRegionHorizontally(self, regionIndex, leftPart, rightPart):
"""
SplitRegionHorizontally(self: ViewCropRegionShapeManager,regionIndex: int,leftPart: float,rightPart: float)
Splits horizontally one region in split crop.
regionIndex: Index of region to be split horizontally (numbering starts with 0).
leftPart: Relative portion of the original region to become the new left region (0 to 1).
rightPart: Relative portion of the original region to become the new right region (0 to 1).
"""
pass
def SplitRegionVertically(self, regionIndex, topPart, bottomPart):
"""
SplitRegionVertically(self: ViewCropRegionShapeManager,regionIndex: int,topPart: float,bottomPart: float)
Splits vertically one region in split crop.
regionIndex: Index of region to be split vertically (numbering starts with 0).
topPart: Relative portion of the original region to become the new top region (0 to 1).
bottomPart: Relative portion of the original region to become the new bottom region (0 to
1).
"""
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
BottomAnnotationCropOffset = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The offset from the bottom of the view crop that determines the location of the annotation crop bottom boundary.
Get: BottomAnnotationCropOffset(self: ViewCropRegionShapeManager) -> float
Set: BottomAnnotationCropOffset(self: ViewCropRegionShapeManager)=value
"""
CanBeSplit = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Verifies that the crop of the associated view is permitted to have multiple regions.
Get: CanBeSplit(self: ViewCropRegionShapeManager) -> bool
"""
CanHaveAnnotationCrop = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Verifies that the view is allowed to have an annotation crop.
Get: CanHaveAnnotationCrop(self: ViewCropRegionShapeManager) -> bool
"""
CanHaveShape = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Verifies that the crop of the associated view is permitted to have a non-rectangular shape.
Get: CanHaveShape(self: ViewCropRegionShapeManager) -> bool
"""
IsSplitHorizontally = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Whether or not the view's crop is split (and the split is horizontal).
Get: IsSplitHorizontally(self: ViewCropRegionShapeManager) -> bool
"""
IsSplitVertically = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Whether or not the view's crop is split (and the split is vertical).
Get: IsSplitVertically(self: ViewCropRegionShapeManager) -> bool
"""
IsValidObject = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: ViewCropRegionShapeManager) -> bool
"""
LeftAnnotationCropOffset = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The offset from the left of the view crop that determines the location of the annotation crop left boundary.
Get: LeftAnnotationCropOffset(self: ViewCropRegionShapeManager) -> float
Set: LeftAnnotationCropOffset(self: ViewCropRegionShapeManager)=value
"""
NumberOfSplitRegions = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The number of split crop regions (1 if the crop is not currently split).
Get: NumberOfSplitRegions(self: ViewCropRegionShapeManager) -> int
"""
RightAnnotationCropOffset = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The offset from the right of the view crop that determines the location of the annotation crop right boundary.
Get: RightAnnotationCropOffset(self: ViewCropRegionShapeManager) -> float
Set: RightAnnotationCropOffset(self: ViewCropRegionShapeManager)=value
"""
ShapeSet = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Whether or not the view crop has a non-rectangular shape set.
Get: ShapeSet(self: ViewCropRegionShapeManager) -> bool
"""
Split = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Whether or not the view crop is split.
Get: Split(self: ViewCropRegionShapeManager) -> bool
"""
TopAnnotationCropOffset = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The offset from the top of the view crop that determines the location of the annotation crop top boundary.
Get: TopAnnotationCropOffset(self: ViewCropRegionShapeManager) -> float
Set: TopAnnotationCropOffset(self: ViewCropRegionShapeManager)=value
"""
| 24.728947
| 221
| 0.670427
|
class ViewCropRegionShapeManager(object, IDisposable):
""" A class that provides access to settings related to the crop assigned to a view or a reference callout. """
def Dispose(self):
""" Dispose(self: ViewCropRegionShapeManager) """
pass
def GetAnnotationCropShape(self):
"""
GetAnnotationCropShape(self: ViewCropRegionShapeManager) -> CurveLoop
Gets the annotation crop box assigned to the view.
Returns: The annotation crop boundary.
"""
pass
def GetCropShape(self):
"""
GetCropShape(self: ViewCropRegionShapeManager) -> IList[CurveLoop]
Gets the crop boundaries that are currently active.
Returns: The crop boundaries.
"""
pass
def GetSplitRegionMaximum(self, regionIndex):
"""
GetSplitRegionMaximum(self: ViewCropRegionShapeManager,regionIndex: int) -> float
Returns the proportional location of the maximum boundary of the specified
split crop region.
regionIndex: Index of region to be split horizontally (numbering starts with 0).
Returns: A value from 0 to 1 representing the maximum location for the regions split
boundary.
This number represents the location as a ratio along the
non-split rectangular crop.
"""
pass
def GetSplitRegionMinimum(self, regionIndex):
"""
GetSplitRegionMinimum(self: ViewCropRegionShapeManager,regionIndex: int) -> float
Returns the proportional location of the minimum boundary of the specified
split crop region.
regionIndex: Index of region to be split horizontally (numbering starts with 0).
Returns: A value from 0 to 1 representing the minimum location for the regions split
boundary.
This number represents the location as a ratio along the
non-split rectangular crop.
"""
pass
def IsCropRegionShapeValid(self, boundary):
"""
IsCropRegionShapeValid(self: ViewCropRegionShapeManager,boundary: CurveLoop) -> bool
Verifies that boundary represents one closed curve loop without
self-intersections,
consisting of non-zero length straight lines in a plane
parallel to the view plane.
boundary: The crop boundary.
Returns: True if the passed crop boundary represents one closed curve loop without
self-intersections,
consisting of non-zero length straight lines in a plane
parallel to the view plane.
"""
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: ViewCropRegionShapeManager,disposing: bool) """
pass
def RemoveCropRegionShape(self):
"""
RemoveCropRegionShape(self: ViewCropRegionShapeManager)
Removes any non-rectangular boundary of the view's crop.
"""
pass
def RemoveSplit(self):
"""
RemoveSplit(self: ViewCropRegionShapeManager)
Removes any split applied to the view's crop.
"""
pass
def RemoveSplitRegion(self, regionIndex):
"""
RemoveSplitRegion(self: ViewCropRegionShapeManager,regionIndex: int)
Removes one region in split crop.
regionIndex: Index of region to be deleted (numbering starts with 0).
"""
pass
def SetCropShape(self, boundary):
"""
SetCropShape(self: ViewCropRegionShapeManager,boundary: CurveLoop)
Sets the boundary of the view's crop to the specified shape.
boundary: The crop boundary.
"""
pass
def SplitRegionHorizontally(self, regionIndex, leftPart, rightPart):
"""
SplitRegionHorizontally(self: ViewCropRegionShapeManager,regionIndex: int,leftPart: float,rightPart: float)
Splits horizontally one region in split crop.
regionIndex: Index of region to be split horizontally (numbering starts with 0).
leftPart: Relative portion of the original region to become the new left region (0 to 1).
rightPart: Relative portion of the original region to become the new right region (0 to 1).
"""
pass
def SplitRegionVertically(self, regionIndex, topPart, bottomPart):
"""
SplitRegionVertically(self: ViewCropRegionShapeManager,regionIndex: int,topPart: float,bottomPart: float)
Splits vertically one region in split crop.
regionIndex: Index of region to be split vertically (numbering starts with 0).
topPart: Relative portion of the original region to become the new top region (0 to 1).
bottomPart: Relative portion of the original region to become the new bottom region (0 to
1).
"""
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
BottomAnnotationCropOffset = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The offset from the bottom of the view crop that determines the location of the annotation crop bottom boundary.
Get: BottomAnnotationCropOffset(self: ViewCropRegionShapeManager) -> float
Set: BottomAnnotationCropOffset(self: ViewCropRegionShapeManager)=value
"""
CanBeSplit = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Verifies that the crop of the associated view is permitted to have multiple regions.
Get: CanBeSplit(self: ViewCropRegionShapeManager) -> bool
"""
CanHaveAnnotationCrop = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Verifies that the view is allowed to have an annotation crop.
Get: CanHaveAnnotationCrop(self: ViewCropRegionShapeManager) -> bool
"""
CanHaveShape = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Verifies that the crop of the associated view is permitted to have a non-rectangular shape.
Get: CanHaveShape(self: ViewCropRegionShapeManager) -> bool
"""
IsSplitHorizontally = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Whether or not the view's crop is split (and the split is horizontal).
Get: IsSplitHorizontally(self: ViewCropRegionShapeManager) -> bool
"""
IsSplitVertically = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Whether or not the view's crop is split (and the split is vertical).
Get: IsSplitVertically(self: ViewCropRegionShapeManager) -> bool
"""
IsValidObject = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: ViewCropRegionShapeManager) -> bool
"""
LeftAnnotationCropOffset = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The offset from the left of the view crop that determines the location of the annotation crop left boundary.
Get: LeftAnnotationCropOffset(self: ViewCropRegionShapeManager) -> float
Set: LeftAnnotationCropOffset(self: ViewCropRegionShapeManager)=value
"""
NumberOfSplitRegions = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The number of split crop regions (1 if the crop is not currently split).
Get: NumberOfSplitRegions(self: ViewCropRegionShapeManager) -> int
"""
RightAnnotationCropOffset = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The offset from the right of the view crop that determines the location of the annotation crop right boundary.
Get: RightAnnotationCropOffset(self: ViewCropRegionShapeManager) -> float
Set: RightAnnotationCropOffset(self: ViewCropRegionShapeManager)=value
"""
ShapeSet = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Whether or not the view crop has a non-rectangular shape set.
Get: ShapeSet(self: ViewCropRegionShapeManager) -> bool
"""
Split = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Whether or not the view crop is split.
Get: Split(self: ViewCropRegionShapeManager) -> bool
"""
TopAnnotationCropOffset = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The offset from the top of the view crop that determines the location of the annotation crop top boundary.
Get: TopAnnotationCropOffset(self: ViewCropRegionShapeManager) -> float
Set: TopAnnotationCropOffset(self: ViewCropRegionShapeManager)=value
"""
| 0
| 0
| 0
|
181263c57ee32e69218aebc26fb0790bf52e4b8e
| 1,788
|
py
|
Python
|
large-repo-master/test-3/nb14.py
|
victorsun123/numpy
|
1fb036758b4035ab2c132d26f420e5bc3ffc917e
|
[
"BSD-3-Clause"
] | null | null | null |
large-repo-master/test-3/nb14.py
|
victorsun123/numpy
|
1fb036758b4035ab2c132d26f420e5bc3ffc917e
|
[
"BSD-3-Clause"
] | null | null | null |
large-repo-master/test-3/nb14.py
|
victorsun123/numpy
|
1fb036758b4035ab2c132d26f420e5bc3ffc917e
|
[
"BSD-3-Clause"
] | null | null | null |
# Databricks notebook source
INSTGUYIKQEFVDAVPNQI
DFXKJDJFAIGTQDFPNZXIHFBFJBXNSCAQMMDSAPVIFVULKEPYGJRUNILQPXYXVGWBNVBHBWSUEGQTKHMFFROKHGJKPJ
OCRRFSYKRPOYFVBAHSCCPHGHUA
ORMCLMKXZAKDXRSGAXPHGQDBBOGQKFQYRLQQ
AKVWPNWJGXQTKSYPSLNOUXSWVYRQ
YVOFGDXZWZHFASZQCMZFKHUUBCFUILVBZPHAAMMPWQPOCWRGVAHLTM
NCUOQLVTIVWIMABZGYRFYFLMOIUDXEPZTMNSDTHDYWKAINMG
UJKXTIQSHVGBGMWYWHLVZJL
RDVCHBNRWRCSEUFZCIRJYEECZIIYXHARBS
OASOKPNORKQTXEWQTQCWFBCZJRAFHVHRFKMUSOWSDDJNLP
HQACLBDITTEMCSLKEUOLWVAFECHWDEWSMPJJLADNOORIOWFFQBXUHBP
KCOEBZYAQCFAFLRNZTJGGNXZLRUGEDYMKBMBAIDKKLFAZUCARVQCPUTQPKMKXWPVTSGRMOLJNYGNMYGTJASOUBCMX
YXOVQMGQLPZNAQSKHUIKOJHCGKJKLZJWJZKRNSNGQCRMCHEWNNHMSEWWGJZKHSWTOYXOGXSO
JSHVIDOARPSYHMPG
VDTAPXXSHWBWLVPOOLFJEQBVZRDYQWSBGPTSLSKPXVTUOZZFXXVIFEWQJJKTYLHCEPPOOEYB
YGYUMVULDSZBEWELXBTASPDVYUKZORKFQZMLRQKJCPYRRINVLAUSTTEGGPQJVBAMHAYIHFY
RXTBRRWTGLSRQPPYWZFNNXUABDGXDPYGYHEVNPLHWVOADDQJWQEWMONTUADOMV
YKOYLHAREBGQERUXDMJBSZHYPIIIRZTVMKXVWDCNOFHIAJZZSRJMMFQHCGCWYJYVBYQDBZLXLKMBOE
NDLBRNSZCHXOZCSDTBJEC
BURTEUHPAWQUUWDBQQWDIVWHGRBAKKBATTBNDLNRZTIYLCPPIPOMVGLMJVXZCNASMHYPWSHTDDOLOQLBQORYWLYZETQ
PUMXIXFBWKXVIFAB
FEWWASRUZCEZVRRPNWCJNASCAKNVHDSRAMAQGTGRHSFCJPXZIXDJWBEDXZRRKLUEECYMK
RQPBYDZGSMFQZXGVUMXLEZFHJFXAACTOQHIUQQMHLXFTZGBPPEYSAXXGDZIZOQCYBWIQYKEBTHBGMEKNWMGGR
RPOTNUMNVYNIM
ASFTCBFRGKQXMJXFHMXUHUVBIEEQURNIIPGZQBTZUCMRTF
FHFDAFRLXKVNJDUTSJKZODTSXTFENNSBEDLWCUICYSZTMQNQYAOJYRL
BRXHCYYPLPYENXXYYBQBSEGAKNAGFMIMHWOATNDTJSUWOFJEJMSBGMNGMKKHQLSJGNGKB
ZKUDOFKJSSQTWZXOOLTHLGBWWRTUWVAUZIIEGSHPFDYSORQDIXESGZLL
MDREQOCVTVBHHDOBQGIOHPWKTYYXCURTDKWANRYHZTXMWWKUEZXWNQFMXSEWWYGL
MWUOYEOJYZUBKZDSMYGCNAVFZVXCDOETRKTSMJDZMNQFQTFJZMUHVQCHDMCRZIFRKVCVC
ROQHBIPMNTEBHEBKXSFJEDNLKICRSCPULYUYEJZGDUCLDDFUXWWBAEFROFACUQKRDUJFVHRHVVMNLYVNWXVKI
HOYJXDQGQSPNBVISRWIJUZFMUZGLOKFWPZKXGJNYELDOKFXKCNIPKDSJVUFTLSL
| 45.846154
| 91
| 0.97651
|
# Databricks notebook source
INSTGUYIKQEFVDAVPNQI
DFXKJDJFAIGTQDFPNZXIHFBFJBXNSCAQMMDSAPVIFVULKEPYGJRUNILQPXYXVGWBNVBHBWSUEGQTKHMFFROKHGJKPJ
OCRRFSYKRPOYFVBAHSCCPHGHUA
ORMCLMKXZAKDXRSGAXPHGQDBBOGQKFQYRLQQ
AKVWPNWJGXQTKSYPSLNOUXSWVYRQ
YVOFGDXZWZHFASZQCMZFKHUUBCFUILVBZPHAAMMPWQPOCWRGVAHLTM
NCUOQLVTIVWIMABZGYRFYFLMOIUDXEPZTMNSDTHDYWKAINMG
UJKXTIQSHVGBGMWYWHLVZJL
RDVCHBNRWRCSEUFZCIRJYEECZIIYXHARBS
OASOKPNORKQTXEWQTQCWFBCZJRAFHVHRFKMUSOWSDDJNLP
HQACLBDITTEMCSLKEUOLWVAFECHWDEWSMPJJLADNOORIOWFFQBXUHBP
KCOEBZYAQCFAFLRNZTJGGNXZLRUGEDYMKBMBAIDKKLFAZUCARVQCPUTQPKMKXWPVTSGRMOLJNYGNMYGTJASOUBCMX
YXOVQMGQLPZNAQSKHUIKOJHCGKJKLZJWJZKRNSNGQCRMCHEWNNHMSEWWGJZKHSWTOYXOGXSO
JSHVIDOARPSYHMPG
VDTAPXXSHWBWLVPOOLFJEQBVZRDYQWSBGPTSLSKPXVTUOZZFXXVIFEWQJJKTYLHCEPPOOEYB
YGYUMVULDSZBEWELXBTASPDVYUKZORKFQZMLRQKJCPYRRINVLAUSTTEGGPQJVBAMHAYIHFY
RXTBRRWTGLSRQPPYWZFNNXUABDGXDPYGYHEVNPLHWVOADDQJWQEWMONTUADOMV
YKOYLHAREBGQERUXDMJBSZHYPIIIRZTVMKXVWDCNOFHIAJZZSRJMMFQHCGCWYJYVBYQDBZLXLKMBOE
NDLBRNSZCHXOZCSDTBJEC
BURTEUHPAWQUUWDBQQWDIVWHGRBAKKBATTBNDLNRZTIYLCPPIPOMVGLMJVXZCNASMHYPWSHTDDOLOQLBQORYWLYZETQ
PUMXIXFBWKXVIFAB
FEWWASRUZCEZVRRPNWCJNASCAKNVHDSRAMAQGTGRHSFCJPXZIXDJWBEDXZRRKLUEECYMK
RQPBYDZGSMFQZXGVUMXLEZFHJFXAACTOQHIUQQMHLXFTZGBPPEYSAXXGDZIZOQCYBWIQYKEBTHBGMEKNWMGGR
RPOTNUMNVYNIM
ASFTCBFRGKQXMJXFHMXUHUVBIEEQURNIIPGZQBTZUCMRTF
FHFDAFRLXKVNJDUTSJKZODTSXTFENNSBEDLWCUICYSZTMQNQYAOJYRL
BRXHCYYPLPYENXXYYBQBSEGAKNAGFMIMHWOATNDTJSUWOFJEJMSBGMNGMKKHQLSJGNGKB
ZKUDOFKJSSQTWZXOOLTHLGBWWRTUWVAUZIIEGSHPFDYSORQDIXESGZLL
MDREQOCVTVBHHDOBQGIOHPWKTYYXCURTDKWANRYHZTXMWWKUEZXWNQFMXSEWWYGL
MWUOYEOJYZUBKZDSMYGCNAVFZVXCDOETRKTSMJDZMNQFQTFJZMUHVQCHDMCRZIFRKVCVC
ROQHBIPMNTEBHEBKXSFJEDNLKICRSCPULYUYEJZGDUCLDDFUXWWBAEFROFACUQKRDUJFVHRHVVMNLYVNWXVKI
HOYJXDQGQSPNBVISRWIJUZFMUZGLOKFWPZKXGJNYELDOKFXKCNIPKDSJVUFTLSL
| 0
| 0
| 0
|
87008688f730d39f20393ab216ba561f7bc783bc
| 2,446
|
py
|
Python
|
hyperbox/networks/bnnas/bn_blocks.py
|
marsggbo/hyperbox
|
91dcd04ad30164bcb12209d818df18961fa3f347
|
[
"MIT"
] | 1
|
2022-01-17T00:34:14.000Z
|
2022-01-17T00:34:14.000Z
|
hyperbox/networks/bnnas/bn_blocks.py
|
marsggbo/hyperbox
|
91dcd04ad30164bcb12209d818df18961fa3f347
|
[
"MIT"
] | null | null | null |
hyperbox/networks/bnnas/bn_blocks.py
|
marsggbo/hyperbox
|
91dcd04ad30164bcb12209d818df18961fa3f347
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
__all__ = [
'blocks_dict',
'InvertedResidual',
'conv_1x1_bn',
'conv_bn'
]
blocks_dict = {
'k3r3':lambda inp, oup, stride : InvertedResidual(inp, oup, 3, 1, stride, 3),
'k3r6':lambda inp, oup, stride : InvertedResidual(inp, oup, 3, 1, stride, 6),
'k5r3':lambda inp, oup, stride : InvertedResidual(inp, oup, 5, 2, stride, 3),
'k5r6':lambda inp, oup, stride : InvertedResidual(inp, oup, 5, 2, stride, 6),
'k7r3':lambda inp, oup, stride : InvertedResidual(inp, oup, 7, 3, stride, 3),
'k7r6':lambda inp, oup, stride : InvertedResidual(inp, oup, 7, 3, stride, 6),
}
| 32.613333
| 129
| 0.554374
|
import torch
import torch.nn as nn
__all__ = [
'blocks_dict',
'InvertedResidual',
'conv_1x1_bn',
'conv_bn'
]
blocks_dict = {
'k3r3':lambda inp, oup, stride : InvertedResidual(inp, oup, 3, 1, stride, 3),
'k3r6':lambda inp, oup, stride : InvertedResidual(inp, oup, 3, 1, stride, 6),
'k5r3':lambda inp, oup, stride : InvertedResidual(inp, oup, 5, 2, stride, 3),
'k5r6':lambda inp, oup, stride : InvertedResidual(inp, oup, 5, 2, stride, 6),
'k7r3':lambda inp, oup, stride : InvertedResidual(inp, oup, 7, 3, stride, 3),
'k7r6':lambda inp, oup, stride : InvertedResidual(inp, oup, 7, 3, stride, 6),
}
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, ksize, padding, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
self.use_res_connect = self.stride == 1 and inp == oup
self.expand_ratio = expand_ratio
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(inp, inp, ksize, stride, padding, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(inp * expand_ratio, inp * expand_ratio, ksize, stride, padding, groups=inp * expand_ratio, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
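# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Builds one candidate block from blocks_dict and pushes a dummy batch
# through it; channel sizes and resolution are arbitrary example values.
if __name__ == "__main__":
    block = blocks_dict['k3r6'](inp=16, oup=32, stride=2)  # 3x3 kernel, expansion 6
    x = torch.randn(1, 16, 32, 32)
    y = block(x)
    print(y.shape)  # expected: torch.Size([1, 32, 16, 16])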
| 1,670
| 13
| 122
|
3c3d8da7b19f669775422aaf440d8177d4613006
| 5,682
|
py
|
Python
|
experiments/scripts/csv2hdf5_allinone.py
|
cvelten/DeepSurv
|
de9b365be6f994cb30eb0a50197930d24e48dee4
|
[
"MIT"
] | null | null | null |
experiments/scripts/csv2hdf5_allinone.py
|
cvelten/DeepSurv
|
de9b365be6f994cb30eb0a50197930d24e48dee4
|
[
"MIT"
] | null | null | null |
experiments/scripts/csv2hdf5_allinone.py
|
cvelten/DeepSurv
|
de9b365be6f994cb30eb0a50197930d24e48dee4
|
[
"MIT"
] | null | null | null |
import h5py
import numpy as np
import pandas as pd
import os
from argparse import ArgumentParser
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('ifile_os')
parser.add_argument('ifile_pfs')
# parser.add_argument('-e', '--event_col', default='OSEvent')
# parser.add_argument('-t', '--time_col', default='TTDy')
# parser.add_argument('--txcol', type=str, default='SBRT')
# parser.add_argument('--drop', help='drop columns', nargs='+', type=str)
# parser.add_argument('--droprows', help='drop rows where [cols] have value --droprowsval', nargs='+', type=str)
# parser.add_argument(
# '--droprowsval', help='value at which to drop the rows from --droprows, default 1', type=int, default=1)
# parser.add_argument('--droprows2', help='drop rows where [cols] have value --droprowsval2', nargs='+', type=str)
# parser.add_argument(
# '--droprowsval2', help='value at which to drop the rows from --droprows2, default 0', type=int, default=0)
args = parser.parse_args()
print(args)
df = pd.read_csv(args.ifile_os)
# print(df)
drop_sbrtVS = ['Treatment', 'RFA', 'SBRT_OR_RFA']
drop_rfaVS = ['Treatment', 'SBRT', 'SBRT_OR_RFA']
drop_sbrtORrfa = ['Treatment', 'SBRT', 'RFA']
#
# THIS IS FOR OS FIRST
frac = 0.5
ds = {
'SBRT_train': df[df.SBRT == 1].sample(frac=frac),
'RFA_train': df[df.RFA == 1].sample(frac=frac),
'NONE_train': df[df.SBRT_OR_RFA == 0].sample(frac=frac)
}
ds |= {
'SBRT_test': df.loc[df[df.SBRT == 1].index.symmetric_difference(ds['SBRT_train'].index)],
'RFA_test': df.loc[df[df.RFA == 1].index.symmetric_difference(ds['RFA_train'].index)],
'NONE_test': df.loc[df[df.SBRT_OR_RFA == 0].index.symmetric_difference(ds['NONE_train'].index)],
}
df_sbrtVSnone = {
'train': pd.concat([ds['SBRT_train'], ds['NONE_train']]).drop(columns=drop_sbrtVS),
'test': pd.concat([ds['SBRT_test'], ds['NONE_test']]).drop(columns=drop_sbrtVS)
}
df_rfaVSnone = {
'train': pd.concat([ds['RFA_train'], ds['NONE_train']]).drop(columns=drop_rfaVS),
'test': pd.concat([ds['RFA_test'], ds['NONE_test']]).drop(columns=drop_rfaVS)
}
df_sbrtVSrfa = {
'train': pd.concat([ds['SBRT_train'], ds['RFA_train']]).drop(columns=drop_sbrtVS),
'test': pd.concat([ds['SBRT_test'], ds['RFA_test']]).drop(columns=drop_sbrtVS)
}
df_sbrtORrfa = {
'train': pd.concat([ds['SBRT_train'], ds['RFA_train'], ds['NONE_train']]).drop(columns=drop_sbrtORrfa),
'test': pd.concat([ds['SBRT_test'], ds['RFA_test'], ds['NONE_test']]).drop(columns=drop_sbrtORrfa)
}
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_sbrtVSnone.hd5')
dataframes_to_hd5(df_sbrtVSnone, ofile_os, 'OSEvent', 'TTDy')
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_rfaVSnone.hd5')
dataframes_to_hd5(df_rfaVSnone, ofile_os, 'OSEvent', 'TTDy')
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_sbrtVSrfa.hd5')
dataframes_to_hd5(df_sbrtVSrfa, ofile_os, 'OSEvent', 'TTDy')
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_sbrtORrfa.hd5')
dataframes_to_hd5(df_sbrtORrfa, ofile_os, 'OSEvent', 'TTDy')
#
# USE INDICES FROM OS FOR PFS
df_PFS = pd.read_csv(args.ifile_pfs)
df_sbrtVSnone_pfs = {
'train': df_PFS.loc[df_sbrtVSnone['train'].index].drop(columns=drop_sbrtVS),
'test': df_PFS.loc[df_sbrtVSnone['test'].index].drop(columns=drop_sbrtVS)
}
df_rfaVSnone_pfs = {
'train': df_PFS.loc[df_rfaVSnone['train'].index].drop(columns=drop_rfaVS),
'test': df_PFS.loc[df_rfaVSnone['test'].index].drop(columns=drop_rfaVS)
}
df_sbrtVSrfa_pfs = {
'train': df_PFS.loc[df_sbrtVSrfa['train'].index].drop(columns=drop_sbrtVS),
'test': df_PFS.loc[df_sbrtVSrfa['test'].index].drop(columns=drop_sbrtVS)
}
df_sbrtORrfa_pfs = {
'train': df_PFS.loc[df_sbrtORrfa['train'].index].drop(columns=drop_sbrtORrfa),
'test': df_PFS.loc[df_sbrtORrfa['test'].index].drop(columns=drop_sbrtORrfa)
}
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_sbrtVSnone.hd5')
dataframes_to_hd5(df_sbrtVSnone_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_rfaVSnone.hd5')
dataframes_to_hd5(df_rfaVSnone_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_sbrtVSrfa.hd5')
dataframes_to_hd5(df_sbrtVSrfa_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_sbrtORrfa.hd5')
dataframes_to_hd5(df_sbrtORrfa_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
| 41.474453
| 118
| 0.656987
|
import h5py
import numpy as np
import pandas as pd
import os
from argparse import ArgumentParser
def dataframe_to_deepsurv_ds(df, event_col='Event', time_col='Time'):
# Extract the event and time columns as numpy arrays
e = df[event_col].values.astype(np.int32)
t = df[time_col].values.astype(np.float32)
# Extract the patient's covariates as a numpy array
x_df = df.drop([event_col, time_col], axis=1)
x = x_df.values.astype(np.float32)
# Return the DeepSurv dataset as a dict of numpy arrays
return {
'x': x,
'e': e,
't': t
}
def dataframes_to_hd5(df, ofile, event_col, time_col):
with h5py.File(ofile, 'w') as h:
for k in df:
ds = dataframe_to_deepsurv_ds(df[k], event_col, time_col)
group = h.create_group(k)
group.create_dataset('x', data=ds['x'])
group.create_dataset('e', data=ds['e'])
group.create_dataset('t', data=ds['t'])
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('ifile_os')
parser.add_argument('ifile_pfs')
# parser.add_argument('-e', '--event_col', default='OSEvent')
# parser.add_argument('-t', '--time_col', default='TTDy')
# parser.add_argument('--txcol', type=str, default='SBRT')
# parser.add_argument('--drop', help='drop columns', nargs='+', type=str)
# parser.add_argument('--droprows', help='drop rows where [cols] have value --droprowsval', nargs='+', type=str)
# parser.add_argument(
# '--droprowsval', help='value at which to drop the rows from --droprows, default 1', type=int, default=1)
# parser.add_argument('--droprows2', help='drop rows where [cols] have value --droprowsval2', nargs='+', type=str)
# parser.add_argument(
# '--droprowsval2', help='value at which to drop the rows from --droprows2, default 0', type=int, default=0)
args = parser.parse_args()
print(args)
df = pd.read_csv(args.ifile_os)
# print(df)
drop_sbrtVS = ['Treatment', 'RFA', 'SBRT_OR_RFA']
drop_rfaVS = ['Treatment', 'SBRT', 'SBRT_OR_RFA']
drop_sbrtORrfa = ['Treatment', 'SBRT', 'RFA']
#
# THIS IS FOR OS FIRST
frac = 0.5
ds = {
'SBRT_train': df[df.SBRT == 1].sample(frac=frac),
'RFA_train': df[df.RFA == 1].sample(frac=frac),
'NONE_train': df[df.SBRT_OR_RFA == 0].sample(frac=frac)
}
ds |= {
'SBRT_test': df.loc[df[df.SBRT == 1].index.symmetric_difference(ds['SBRT_train'].index)],
'RFA_test': df.loc[df[df.RFA == 1].index.symmetric_difference(ds['RFA_train'].index)],
'NONE_test': df.loc[df[df.SBRT_OR_RFA == 0].index.symmetric_difference(ds['NONE_train'].index)],
}
df_sbrtVSnone = {
'train': pd.concat([ds['SBRT_train'], ds['NONE_train']]).drop(columns=drop_sbrtVS),
'test': pd.concat([ds['SBRT_test'], ds['NONE_test']]).drop(columns=drop_sbrtVS)
}
df_rfaVSnone = {
'train': pd.concat([ds['RFA_train'], ds['NONE_train']]).drop(columns=drop_rfaVS),
'test': pd.concat([ds['RFA_test'], ds['NONE_test']]).drop(columns=drop_rfaVS)
}
df_sbrtVSrfa = {
'train': pd.concat([ds['SBRT_train'], ds['RFA_train']]).drop(columns=drop_sbrtVS),
'test': pd.concat([ds['SBRT_test'], ds['RFA_test']]).drop(columns=drop_sbrtVS)
}
df_sbrtORrfa = {
'train': pd.concat([ds['SBRT_train'], ds['RFA_train'], ds['NONE_train']]).drop(columns=drop_sbrtORrfa),
'test': pd.concat([ds['SBRT_test'], ds['RFA_test'], ds['NONE_test']]).drop(columns=drop_sbrtORrfa)
}
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_sbrtVSnone.hd5')
dataframes_to_hd5(df_sbrtVSnone, ofile_os, 'OSEvent', 'TTDy')
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_rfaVSnone.hd5')
dataframes_to_hd5(df_rfaVSnone, ofile_os, 'OSEvent', 'TTDy')
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_sbrtVSrfa.hd5')
dataframes_to_hd5(df_sbrtVSrfa, ofile_os, 'OSEvent', 'TTDy')
ofile_os = os.path.join(os.path.dirname(args.ifile_os), 'liver_os_sbrtORrfa.hd5')
dataframes_to_hd5(df_sbrtORrfa, ofile_os, 'OSEvent', 'TTDy')
#
# USE INDICES FROM OS FOR PFS
df_PFS = pd.read_csv(args.ifile_pfs)
df_sbrtVSnone_pfs = {
'train': df_PFS.loc[df_sbrtVSnone['train'].index].drop(columns=drop_sbrtVS),
'test': df_PFS.loc[df_sbrtVSnone['test'].index].drop(columns=drop_sbrtVS)
}
df_rfaVSnone_pfs = {
'train': df_PFS.loc[df_rfaVSnone['train'].index].drop(columns=drop_rfaVS),
'test': df_PFS.loc[df_rfaVSnone['test'].index].drop(columns=drop_rfaVS)
}
df_sbrtVSrfa_pfs = {
'train': df_PFS.loc[df_sbrtVSrfa['train'].index].drop(columns=drop_sbrtVS),
'test': df_PFS.loc[df_sbrtVSrfa['test'].index].drop(columns=drop_sbrtVS)
}
df_sbrtORrfa_pfs = {
'train': df_PFS.loc[df_sbrtORrfa['train'].index].drop(columns=drop_sbrtORrfa),
'test': df_PFS.loc[df_sbrtORrfa['test'].index].drop(columns=drop_sbrtORrfa)
}
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_sbrtVSnone.hd5')
dataframes_to_hd5(df_sbrtVSnone_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_rfaVSnone.hd5')
dataframes_to_hd5(df_rfaVSnone_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_sbrtVSrfa.hd5')
dataframes_to_hd5(df_sbrtVSrfa_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
ofile_pfs = os.path.join(os.path.dirname(args.ifile_os), 'liver_pfs_sbrtORrfa.hd5')
dataframes_to_hd5(df_sbrtORrfa_pfs, ofile_pfs, 'PFSEvent', 'TTPy')
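# --- Editor's note: hedged usage sketch, never invoked by the script. ---
# Shows the HDF5 layout produced by dataframes_to_hd5() for a tiny, made-up
# survival dataframe; the covariate column 'age' is a hypothetical example.
def _demo_dump():
    import tempfile
    df_demo = pd.DataFrame({
        'age': [61.0, 54.0, 70.0],
        'OSEvent': [1, 0, 1],
        'TTDy': [1.2, 3.4, 0.8],
    })
    splits = {'train': df_demo.iloc[:2], 'test': df_demo.iloc[2:]}
    ofile = os.path.join(tempfile.gettempdir(), 'demo_os.hd5')
    dataframes_to_hd5(splits, ofile, 'OSEvent', 'TTDy')
    with h5py.File(ofile, 'r') as h:
        print(sorted(h.keys()))       # ['test', 'train']
        print(h['train']['x'].shape)  # (2, 1) -> covariates only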
| 803
| 0
| 46
|
5802aafb5a9a958a3bfd48a3c740eee759d724b5
| 1,517
|
py
|
Python
|
francisco-topo/lit101.py
|
Cyphysecurity/ICS-SDN-1
|
c04d9e7bb7ad945166e969e071a2f82fb5bd18bf
|
[
"MIT"
] | 4
|
2019-12-17T08:59:57.000Z
|
2022-01-09T19:52:27.000Z
|
francisco-topo/lit101.py
|
Cyphysecurity/ICS-SDN-1
|
c04d9e7bb7ad945166e969e071a2f82fb5bd18bf
|
[
"MIT"
] | 3
|
2020-08-13T16:05:46.000Z
|
2021-10-17T07:49:33.000Z
|
francisco-topo/lit101.py
|
Cyphysecurity/ICS-SDN-1
|
c04d9e7bb7ad945166e969e071a2f82fb5bd18bf
|
[
"MIT"
] | 4
|
2017-06-14T23:41:50.000Z
|
2021-03-01T18:54:03.000Z
|
from minicps.devices import PLC
from utils import *
import random
import logging
import time
import socket
import json
import select
import signal
import sys
SENSOR_ADDR = IP['lit101']
LIT101 = ('LIT101', 1)
LIT102 = ('LIT102', 1)
if __name__ == '__main__':
lit101 = Lit101(name='lit101',state=STATE,protocol=LIT101_PROTOCOL,memory=GENERIC_DATA,disk=GENERIC_DATA)
| 27.089286
| 109
| 0.633487
|
from minicps.devices import PLC
from utils import *
import random
import logging
import time
import socket
import json
import select
import signal
import sys
SENSOR_ADDR = IP['lit101']
LIT101 = ('LIT101', 1)
LIT102 = ('LIT102', 1)
class Lit101(PLC):
def sigint_handler(self, sig, frame):
print "I received a SIGINT!"
sys.exit(0)
def pre_loop(self, sleep=0.1):
signal.signal(signal.SIGINT, self.sigint_handler)
signal.signal(signal.SIGTERM, self.sigint_handler)
logging.basicConfig(filename=LOG_LIT101_FILE, level=logging.DEBUG)
def send_message(self, ipaddr, port, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ipaddr, port))
msg_dict = dict.fromkeys(['Type', 'Variable'])
msg_dict['Type'] = "Report"
msg_dict['Variable'] = message
message = json.dumps(str(msg_dict))
try:
ready_to_read, ready_to_write, in_error = select.select([sock, ], [sock, ], [], 5)
except:
print "Socket error"
return
if(ready_to_write > 0):
sock.send(message)
sock.close()
def main_loop(self):
count = 0
while True:
self.level = float(self.get(LIT101))
self.send_message(IP['plc101'], 8754,self.level)
time.sleep(0.0005)
if __name__ == '__main__':
lit101 = Lit101(name='lit101',state=STATE,protocol=LIT101_PROTOCOL,memory=GENERIC_DATA,disk=GENERIC_DATA)
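# --- Editor's note: hedged illustration, not part of the original file. ---
# send_message() serialises each report as json.dumps(str(msg_dict)), i.e. a
# JSON string whose value is the Python repr of the dict. A receiver on
# plc101:8754 would therefore have to undo both layers, roughly:
#
# raw = conn.recv(1024)              # hypothetical receiving socket
# payload = json.loads(raw)          # -> "{'Type': 'Report', 'Variable': ...}"
# report = ast.literal_eval(payload) # needs `import ast`
# level = report['Variable']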
| 1,015
| -3
| 130
|
25d702540d9307d9ec2607e46d357ccf5efcd861
| 268
|
py
|
Python
|
workbench/logbook/migrations/0015_remove_loggedcost_project.py
|
yoshson/workbench
|
701558cac3357cd82e4dc99f0fefed12ee81ddc5
|
[
"MIT"
] | 15
|
2020-09-02T22:17:34.000Z
|
2022-02-01T20:09:10.000Z
|
workbench/logbook/migrations/0015_remove_loggedcost_project.py
|
yoshson/workbench
|
701558cac3357cd82e4dc99f0fefed12ee81ddc5
|
[
"MIT"
] | 18
|
2020-01-08T15:28:26.000Z
|
2022-02-28T02:46:41.000Z
|
workbench/logbook/migrations/0015_remove_loggedcost_project.py
|
yoshson/workbench
|
701558cac3357cd82e4dc99f0fefed12ee81ddc5
|
[
"MIT"
] | 8
|
2020-09-29T08:00:24.000Z
|
2022-01-16T11:58:19.000Z
|
# Generated by Django 2.2.5 on 2019-09-15 10:58
from django.db import migrations
| 24.363636
| 82
| 0.742537
|
# Generated by Django 2.2.5 on 2019-09-15 10:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("logbook", "0014_auto_20190915_1250")]
operations = [migrations.RemoveField(model_name="loggedcost", name="project")]
| 0
| 162
| 23
|
e2a9aa8f84f161a41350b5252f7f5caf4c6451b5
| 10,445
|
py
|
Python
|
test.py
|
Michal-Hubert/backup_dropbox
|
6c9c622ce389e789f0eb7aaad16d1ac0b2f688c5
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
Michal-Hubert/backup_dropbox
|
6c9c622ce389e789f0eb7aaad16d1ac0b2f688c5
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
Michal-Hubert/backup_dropbox
|
6c9c622ce389e789f0eb7aaad16d1ac0b2f688c5
|
[
"Apache-2.0"
] | null | null | null |
'''
End to end testing
'''
import shutil
import os
from ConfigParser import SafeConfigParser
import textwrap
import hashlib
import pexpect
import sys
import leveldb
import backup
import tempfile
from backup import dump_database
from myutils import path_leaf
TEST_CONFIG_NAME = "config"
TEST_DROPBOX_NAME = "dropbox"
TEST_TO_BACKUP_DIR = "to_backup"
TEST_DATABASE_DIR = "db"
TEST_PATH = os.path.dirname(os.path.abspath(__file__)) + '/test/'
#Path wehre db will be recovered
DB_RECOVERY_PATH = os.path.dirname(os.path.abspath(__file__)) + '/foo/'
PASSWORD = "alamakota"
BACKUP_TOOL = "python ./ds.py "
def reset_dropbox_dir(config):
'''
Remove the dropbox directory
'''
dropbox_dir = config.get('DropBox', 'drop_box_dir')
shutil.rmtree(dropbox_dir)
os.makedirs(dropbox_dir)
def reset_db(config):
'''
Remove the database
'''
db_dir = config.get('DB', 'db_path')
shutil.rmtree(db_dir)
os.makedirs(db_dir)
def reset_to_backup_dir(config):
'''
Remove the directory to be backed up
'''
to_backup_dir = config.get('Backup', 'to_backup')
shutil.rmtree(to_backup_dir)
os.makedirs(to_backup_dir)
def create_test_config():
'''
Creates config file for testing purposes
'''
aa = """\
[Credentials]
password=alamakota
[DropBox]
drop_box_dir = {drop_box_dir}
[DB]
#Database MUST not be in backed up folder as it's being changed
#during backup process. Program backs up the database itself by
#gzipping, encrypting and putting it in the dropbox folder as "dbmapping"
db_path = {db_path}
[Backup]
to_backup = {to_backup}
to_exclude =
"""
context = {
"drop_box_dir":TEST_PATH+TEST_DROPBOX_NAME+"/",
"db_path":TEST_PATH+TEST_DATABASE_DIR+"/",
"to_backup":TEST_PATH+TEST_TO_BACKUP_DIR+"/",
"passord":PASSWORD
}
if not os.path.exists(TEST_PATH):
os.makedirs(TEST_PATH)
with open(TEST_PATH+'/'+TEST_CONFIG_NAME, 'w') as cfgfile:
cfgfile.write(textwrap.dedent(aa.format(**context)))
if not os.path.exists(context["drop_box_dir"]):
os.makedirs(context["drop_box_dir"])
if not os.path.exists(context["db_path"]):
os.makedirs(context["db_path"])
if not os.path.exists(context["to_backup"]):
os.makedirs(context["to_backup"])
if not os.path.exists(DB_RECOVERY_PATH):
os.makedirs(DB_RECOVERY_PATH)
def clear(config):
'''
Clears all created files, directories and exits
'''
db_dir = config.get('DB', 'db_path')
if os.path.exists(db_dir):
shutil.rmtree(db_dir)
dropbox_dir = config.get('DropBox', 'drop_box_dir')
if os.path.exists(dropbox_dir):
shutil.rmtree(dropbox_dir)
to_backup_dir = config.get('Backup', 'to_backup')
if os.path.exists(to_backup_dir):
shutil.rmtree(to_backup_dir)
os.remove(TEST_PATH+TEST_CONFIG_NAME)
def clear_all():
'''
Remove all in one shot
'''
if os.path.exists(TEST_PATH):
shutil.rmtree(TEST_PATH)
if os.path.exists(DB_RECOVERY_PATH):
shutil.rmtree(DB_RECOVERY_PATH)
def test1():
'''
Initially dropbox folder is empty and database is empty.
Check if file is backed up and restored correctly
'''
print '-'*50
print "Test 1"
clear_all()
create_test_config()
config = SafeConfigParser()
config.read(TEST_PATH+TEST_CONFIG_NAME)
file_to_backup = config.get('Backup', 'to_backup')+'/some_file'
with open(file_to_backup, 'w') as some_file:
some_file.write('Very first line')
checksum_before = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Restore
os.remove(file_to_backup)
child = pexpect.spawn(BACKUP_TOOL + "restore -d" + TEST_PATH+
TEST_DROPBOX_NAME + " -r" + DB_RECOVERY_PATH)
child.expect('Password: ')
child.sendline(PASSWORD)
print child.read()
checksum_after = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
if checksum_before != checksum_after:
print "Test 1 failed!"
sys.exit(1)
else:
print "Test 1 ok"
def test2():
'''
Initially dropbox folder is empty and database is empty.
Check if modified file is backed up and restored correctly.
'''
print '-'*50
print "Test 2"
clear_all()
create_test_config()
config = SafeConfigParser()
config.read(TEST_PATH+TEST_CONFIG_NAME)
file_to_backup = config.get('Backup', 'to_backup')+'/some_file'
with open(file_to_backup, 'w') as some_file:
some_file.write('Very first line')
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Modification
with open(file_to_backup, 'a') as some_file:
some_file.write('The second line')
checksum_before = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Restore
os.remove(file_to_backup)
child = pexpect.spawn(BACKUP_TOOL + "restore -d " + TEST_PATH+
TEST_DROPBOX_NAME + " -r " + DB_RECOVERY_PATH)
child.expect('Password: ')
child.sendline(PASSWORD)
print child.read()
checksum_after = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
if checksum_before != checksum_after:
print "Test 2 failed!"
sys.exit(1)
db = leveldb.LevelDB(config.get('DB', 'db_path'))
dump_file = os.path.join(tempfile.gettempdir(), '') + 'foo'
entries = dump_database(db, dump_file)
if entries != 1:
print "Test 2 failed!"
sys.exit(1)
print "Test 2 ok"
def test3():
'''
Initially dropbox folder is empty and database is empty.
Check if deleted files in the source folder are deleted from dropbox as well
'''
print '-'*50
print "Test 3"
clear_all()
create_test_config()
config = SafeConfigParser()
config.read(TEST_PATH+TEST_CONFIG_NAME)
file_to_backup = config.get('Backup', 'to_backup')+'/some_file'
with open(file_to_backup, 'w') as some_file:
some_file.write('Very first line')
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Delete
os.remove(file_to_backup)
#Backup again
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Check if file doesn't exist
for subdir, dirs, files in os.walk(config.get('DropBox','drop_box_dir')):
for f in files:
#Meta entry is not in database
if path_leaf(f) == backup.META_DB_NAME + ".xyz":
continue
else:
print "Test 3 failed - deleted file exists in dropbox folder"
sys.exit(1)
#Database shall not contain any entry
entries = 0
db = leveldb.LevelDB(config.get('DB', 'db_path'))
dump_file = os.path.join(tempfile.gettempdir(), '') + 'foo'
entries = dump_database(db, dump_file)
if entries != 0:
print "Test 3 failed - deleted file has entry in database"
sys.exit(1)
print "Test 3 ok"
def test4():
'''
Initially dropbox folder is empty and database is empty.
Check if deleted source folder (which contains files) causes deletion
of these files from the dropbox folder.
'''
print '-'*50
print "Test 4"
clear_all()
create_test_config()
config = SafeConfigParser()
config.read(TEST_PATH+TEST_CONFIG_NAME)
directory_to_backup = config.get('Backup', 'to_backup')+'/some_dir'
os.makedirs(directory_to_backup)
file_to_backup = config.get('Backup', 'to_backup')+'/some_dir/some_file1'
with open(file_to_backup, 'w') as some_file:
some_file.write('Very first line')
file_to_backup = config.get('Backup', 'to_backup')+'/some_dir/some_file2'
with open(file_to_backup, 'w') as some_file:
some_file.write('The other very first line')
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Delete
shutil.rmtree(directory_to_backup)
#Backup again
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Check if file doesn't exist
for subdir, dirs, files in os.walk(config.get('DropBox','drop_box_dir')):
for f in files:
#Meta entry is not in database
if path_leaf(f) == backup.META_DB_NAME + ".xyz":
continue
else:
print "Test 4 failed - deleted file exists in dropbox folder"
sys.exit(1)
#Database shall not contain any entry
entries = 0
db = leveldb.LevelDB(config.get('DB', 'db_path'))
dump_file = os.path.join(tempfile.gettempdir(), '') + 'foo'
entries = dump_database(db, dump_file)
if entries != 0:
print "Test 4 failed - deleted file has entry in database"
sys.exit(1)
print "Test 4 ok"
def test5():
'''
Initially dropbox folder is empty and database is empty.
Check if a file together with its directory is backed up and restored correctly.
It's very much like test1, but here the file is in a subdirectory of
the backed up directory.
'''
print '-'*50
print "Test 5"
clear_all()
create_test_config()
config = SafeConfigParser()
config.read(TEST_PATH+TEST_CONFIG_NAME)
subdir = config.get('Backup', 'to_backup')+'/dir/'
os.makedirs(subdir)
file_to_backup = subdir + 'some_file'
with open(file_to_backup, 'w') as some_file:
some_file.write('Very first line')
checksum_before = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Restore
os.remove(file_to_backup)
child = pexpect.spawn(BACKUP_TOOL + "restore -d" + TEST_PATH+
TEST_DROPBOX_NAME + " -r" + DB_RECOVERY_PATH)
child.expect('Password: ')
child.sendline(PASSWORD)
print child.read()
checksum_after = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
if checksum_before != checksum_after:
print "Test 5 failed!"
sys.exit(1)
else:
print "Test 5 ok"
def run_tests():
'''
Run test cases
'''
test1()
test2()
test3()
test4()
test5()
clear_all()
if __name__ == "__main__":
#TEST_PATH = os.path.dirname(os.path.abspath(__file__)) + '/test/'
run_tests()
| 29.094708
| 80
| 0.650742
|
'''
End to end testing
'''
import shutil
import os
from ConfigParser import SafeConfigParser
import textwrap
import hashlib
import pexpect
import sys
import leveldb
import backup
import tempfile
from backup import dump_database
from myutils import path_leaf
TEST_CONFIG_NAME = "config"
TEST_DROPBOX_NAME = "dropbox"
TEST_TO_BACKUP_DIR = "to_backup"
TEST_DATABASE_DIR = "db"
TEST_PATH = os.path.dirname(os.path.abspath(__file__)) + '/test/'
#Path where db will be recovered
DB_RECOVERY_PATH = os.path.dirname(os.path.abspath(__file__)) + '/foo/'
PASSWORD = "alamakota"
BACKUP_TOOL = "python ./ds.py "
def reset_dropbox_dir(config):
'''
    Remove the dropbox directory
'''
dropbox_dir = config.get('DropBox', 'drop_box_dir')
shutil.rmtree(dropbox_dir)
os.makedirs(dropbox_dir)
def reset_db(config):
'''
    Remove the database
'''
db_dir = config.get('DB', 'db_path')
shutil.rmtree(db_dir)
os.makedirs(db_dir)
def reset_to_backup_dir(config):
'''
    Remove the directory to be backed up
'''
to_backup_dir = config.get('Backup', 'to_backup')
shutil.rmtree(to_backup_dir)
os.makedirs(to_backup_dir)
def create_test_config():
'''
Creates config file for testing purposes
'''
aa = """\
[Credentials]
password=alamakota
[DropBox]
drop_box_dir = {drop_box_dir}
[DB]
    #Database MUST not be in the backed-up folder as it's being changed
    #during the backup process. The program backs up the database itself by
    #gzipping, encrypting and putting it in the dropbox folder as "dbmapping"
db_path = {db_path}
[Backup]
to_backup = {to_backup}
to_exclude =
"""
context = {
"drop_box_dir":TEST_PATH+TEST_DROPBOX_NAME+"/",
"db_path":TEST_PATH+TEST_DATABASE_DIR+"/",
"to_backup":TEST_PATH+TEST_TO_BACKUP_DIR+"/",
"passord":PASSWORD
}
if not os.path.exists(TEST_PATH):
os.makedirs(TEST_PATH)
with open(TEST_PATH+'/'+TEST_CONFIG_NAME, 'w') as cfgfile:
cfgfile.write(textwrap.dedent(aa.format(**context)))
if not os.path.exists(context["drop_box_dir"]):
os.makedirs(context["drop_box_dir"])
if not os.path.exists(context["db_path"]):
os.makedirs(context["db_path"])
if not os.path.exists(context["to_backup"]):
os.makedirs(context["to_backup"])
if not os.path.exists(DB_RECOVERY_PATH):
os.makedirs(DB_RECOVERY_PATH)
def clear(config):
'''
    Clears all created files and directories
'''
db_dir = config.get('DB', 'db_path')
if os.path.exists(db_dir):
shutil.rmtree(db_dir)
dropbox_dir = config.get('DropBox', 'drop_box_dir')
if os.path.exists(dropbox_dir):
shutil.rmtree(dropbox_dir)
to_backup_dir = config.get('Backup', 'to_backup')
if os.path.exists(to_backup_dir):
shutil.rmtree(to_backup_dir)
os.remove(TEST_PATH+TEST_CONFIG_NAME)
def clear_all():
'''
Remove all in one shot
'''
if os.path.exists(TEST_PATH):
shutil.rmtree(TEST_PATH)
if os.path.exists(DB_RECOVERY_PATH):
shutil.rmtree(DB_RECOVERY_PATH)
def test1():
'''
    Initially dropbox folder is empty and database is empty.
Check if file is backed up and restored correctly
'''
print '-'*50
print "Test 1"
clear_all()
create_test_config()
config = SafeConfigParser()
config.read(TEST_PATH+TEST_CONFIG_NAME)
file_to_backup = config.get('Backup', 'to_backup')+'/some_file'
with open(file_to_backup, 'w') as some_file:
some_file.write('Very first line')
checksum_before = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Restore
os.remove(file_to_backup)
child = pexpect.spawn(BACKUP_TOOL + "restore -d" + TEST_PATH+
TEST_DROPBOX_NAME + " -r" + DB_RECOVERY_PATH)
child.expect('Password: ')
child.sendline(PASSWORD)
print child.read()
checksum_after = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
if checksum_before != checksum_after:
print "Test 1 failed!"
sys.exit(1)
else:
print "Test 1 ok"
def test2():
'''
    Initially dropbox folder is empty and database is empty.
Check if modified file is backed up and restored correctly.
'''
print '-'*50
print "Test 2"
clear_all()
create_test_config()
config = SafeConfigParser()
config.read(TEST_PATH+TEST_CONFIG_NAME)
file_to_backup = config.get('Backup', 'to_backup')+'/some_file'
with open(file_to_backup, 'w') as some_file:
some_file.write('Very first line')
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Modification
with open(file_to_backup, 'a') as some_file:
some_file.write('The second line')
checksum_before = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Restore
os.remove(file_to_backup)
child = pexpect.spawn(BACKUP_TOOL + "restore -d " + TEST_PATH+
TEST_DROPBOX_NAME + " -r " + DB_RECOVERY_PATH)
child.expect('Password: ')
child.sendline(PASSWORD)
print child.read()
checksum_after = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
if checksum_before != checksum_after:
print "Test 2 failed!"
sys.exit(1)
db = leveldb.LevelDB(config.get('DB', 'db_path'))
dump_file = os.path.join(tempfile.gettempdir(), '') + 'foo'
entries = dump_database(db, dump_file)
if entries != 1:
print "Test 2 failed!"
sys.exit(1)
print "Test 2 ok"
def test3():
'''
    Initially dropbox folder is empty and database is empty.
    Check if files deleted in the source folder are deleted from dropbox as well
'''
print '-'*50
print "Test 3"
clear_all()
create_test_config()
config = SafeConfigParser()
config.read(TEST_PATH+TEST_CONFIG_NAME)
file_to_backup = config.get('Backup', 'to_backup')+'/some_file'
with open(file_to_backup, 'w') as some_file:
some_file.write('Very first line')
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Delete
os.remove(file_to_backup)
#Backup again
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Check if file doesn't exist
for subdir, dirs, files in os.walk(config.get('DropBox','drop_box_dir')):
for f in files:
#Meta entry is not in database
if path_leaf(f) == backup.META_DB_NAME + ".xyz":
continue
else:
print "Test 3 failed - deleted file exists in dropbox folder"
sys.exit(1)
#Database shall not contain any entry
entries = 0
db = leveldb.LevelDB(config.get('DB', 'db_path'))
dump_file = os.path.join(tempfile.gettempdir(), '') + 'foo'
entries = dump_database(db, dump_file)
if entries != 0:
print "Test 3 failed - deleted file has entry in database"
sys.exit(1)
print "Test 3 ok"
def test4():
'''
    Initially dropbox folder is empty and database is empty.
    Check if deleting a source folder (which contains files) also causes deletion
    of those files from the dropbox folder.
'''
print '-'*50
print "Test 4"
clear_all()
create_test_config()
config = SafeConfigParser()
config.read(TEST_PATH+TEST_CONFIG_NAME)
directory_to_backup = config.get('Backup', 'to_backup')+'/some_dir'
os.makedirs(directory_to_backup)
file_to_backup = config.get('Backup', 'to_backup')+'/some_dir/some_file1'
with open(file_to_backup, 'w') as some_file:
some_file.write('Very first line')
file_to_backup = config.get('Backup', 'to_backup')+'/some_dir/some_file2'
with open(file_to_backup, 'w') as some_file:
some_file.write('The other very first line')
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Delete
shutil.rmtree(directory_to_backup)
#Backup again
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Check if file doesn't exist
for subdir, dirs, files in os.walk(config.get('DropBox','drop_box_dir')):
for f in files:
#Meta entry is not in database
if path_leaf(f) == backup.META_DB_NAME + ".xyz":
continue
else:
print "Test 4 failed - deleted file exists in dropbox folder"
sys.exit(1)
#Database shall not contain any entry
entries = 0
db = leveldb.LevelDB(config.get('DB', 'db_path'))
dump_file = os.path.join(tempfile.gettempdir(), '') + 'foo'
entries = dump_database(db, dump_file)
if entries != 0:
print "Test 4 failed - deleted file has entry in database"
sys.exit(1)
print "Test 4 ok"
def test5():
'''
    Initially dropbox folder is empty and database is empty.
    Check if a file and its directory are backed up and restored correctly.
    It's very much like test1, but here the file is in a subdirectory of the
    backed-up directory
'''
print '-'*50
print "Test 5"
clear_all()
create_test_config()
config = SafeConfigParser()
config.read(TEST_PATH+TEST_CONFIG_NAME)
subdir = config.get('Backup', 'to_backup')+'/dir/'
os.makedirs(subdir)
file_to_backup = subdir + 'some_file'
with open(file_to_backup, 'w') as some_file:
some_file.write('Very first line')
checksum_before = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
#Backup
os.system(BACKUP_TOOL + "backup -c" + TEST_PATH + "/" + TEST_CONFIG_NAME)
#Restore
os.remove(file_to_backup)
child = pexpect.spawn(BACKUP_TOOL + "restore -d" + TEST_PATH+
TEST_DROPBOX_NAME + " -r" + DB_RECOVERY_PATH)
child.expect('Password: ')
child.sendline(PASSWORD)
print child.read()
checksum_after = hashlib.md5(open(file_to_backup, 'rb').read()).hexdigest()
if checksum_before != checksum_after:
print "Test 5 failed!"
sys.exit(1)
else:
print "Test 5 ok"
def run_tests():
'''
Run test cases
'''
test1()
test2()
test3()
test4()
test5()
clear_all()
if __name__ == "__main__":
#TEST_PATH = os.path.dirname(os.path.abspath(__file__)) + '/test/'
run_tests()
| 0
| 0
| 0
|
8dd0fb258cc91c20a8c501666b3b19c5e498c313
| 5,170
|
py
|
Python
|
lookup.py
|
vlee489/Turnip-Bot
|
2571846607d6ca57171325211c5eb6572013c767
|
[
"MIT"
] | 5
|
2020-04-19T22:47:28.000Z
|
2020-06-01T04:37:12.000Z
|
lookup.py
|
vlee489/Turnip-Bot
|
2571846607d6ca57171325211c5eb6572013c767
|
[
"MIT"
] | 7
|
2020-04-21T23:25:20.000Z
|
2021-04-20T07:50:11.000Z
|
lookup.py
|
vlee489/Turnip-Bot
|
2571846607d6ca57171325211c5eb6572013c767
|
[
"MIT"
] | null | null | null |
"""
This cog deals with all the lookup commands
"""
import asyncio
from discord.ext import commands, tasks
import discord
from lookupAPI import nookipediaAPI, localLookup
import os
from dotenv import load_dotenv
load_dotenv(".env")
| 44.956522
| 105
| 0.547002
|
"""
This cog deals with all the lookup commands
"""
import asyncio
from discord.ext import commands, tasks
import discord
from lookupAPI import nookipediaAPI, localLookup
import os
from dotenv import load_dotenv
load_dotenv(".env")
class Lookup(commands.Cog):
nookAPI = nookipediaAPI.NookipediaAPI(os.environ.get("nookipedia_API_key"))
localAPI = localLookup.LocalLookup()
def __init__(self, bot):
self.bot = bot
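    # Background task: refresh the local lookup data and clear outdated Nookipedia cache entries every 6 hours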
@tasks.loop(hours=6)
async def updateLists(self):
await self.localAPI.updateData()
await self.nookAPI.clearOutdatedCache()
@commands.command(name='villager',
help="Get an overview of a villager and their trites.\n"
"<villager>: The villager you want to search for",
aliases=['Villager', 'Villagers', 'villagers'])
async def villagerOverview(self, ctx, villager):
with ctx.typing():
villager = villager.title()
response = await self.nookAPI.getVillager(villager)
if response is None:
await ctx.send("Couldn't find villager\n"
"If the villager's name is in 2 part, use \" to enclose the name.\n"
"E.G. \"Agent S\"")
return
embeded = discord.Embed.from_dict(response.response)
await ctx.send(embed=embeded)
@commands.command(name='critter',
help="Get an overview of a critter and their trites.\n"
"<critter>: The critter you want to search for",
aliases=['bug', 'fish', 'Critter', 'Bug', 'Fish', 'critters', 'Critters'])
async def critterOverview(self, ctx, critter):
with ctx.typing():
critter = critter.title()
response = await self.nookAPI.getCritter(critter)
if response is None:
await ctx.send("Couldn't find critter\n"
"If the critter's name is in 2 part, use \" to enclose the name.\n"
"E.G. \"Banded Dragonfly\"")
return
embeded = discord.Embed.from_dict(response.response)
await ctx.send(embed=embeded)
@commands.command(name='fossil',
help="Get an overview of a fossil and their trites.\n"
"<fossil>: The fossil you want to search for",
aliases=['fossils', 'Fossil', 'Fossils'])
async def fossilOverview(self, ctx, fossil):
with ctx.typing():
fossil = fossil.title()
response = await self.nookAPI.getFossil(fossil)
if response is None:
await ctx.send("Couldn't find fossil\n"
"If the fossil's name is in 2 part, use \" to enclose the name.\n"
"E.G. \"Tyrannosaurus Rex\"")
return
embeded = discord.Embed.from_dict(response.response)
await ctx.send(embed=embeded)
@commands.command(name='eventsToday',
help="Get an overview of all the events on today in AC\n",
aliases=['eventstoday'])
async def todayOverview(self, ctx):
with ctx.typing():
events = await self.nookAPI.getToday()
if events is None:
await ctx.send("Unable to get events today >.<")
return
if len(events['events']) < 1: # We Check that there are events today first
await ctx.send("No events are on today in Animal Crossing\n")
return
embedded = discord.Embed(title='Events Today', description=events['message'], color=0xCF70D3)
embedded.set_author(name="Turnip Bot",
url="https://github.com/vlee489/Turnip-Bot/",
icon_url="https://cdn.vlee.me.uk/TurnipBot/icon.png")
for x in range(len(events['events'])): # For each event we add a field and the event info
embedded.add_field(name="Events {}:".format(x + 1), value=events['events'][x],
inline=False)
embedded.set_footer(text="Info from nookipedia.com",
icon_url="https://cdn.vlee.me.uk/TurnipBot/Nookipedia.png")
await ctx.send(embed=embedded)
@commands.command(name='DIY',
help="Get an overview of a DIY recipe\n"
"<diy>: The item you want to lookup",
aliases=['diy'])
async def DIYOverview(self, ctx, diy):
with ctx.typing():
response = await self.localAPI.getDiy(diy)
if response is None:
await ctx.send("Couldn't find DIY Recipe\n"
"If the recipe's name is in 2 part, use \" to enclose the name.\n"
"E.G. \"gold helmet\"")
return
embedded = discord.Embed.from_dict(response.response)
await ctx.send(embed=embedded)
def setup(bot):
bot.add_cog(Lookup(bot))
| 3,375
| 1,519
| 46
|
eda16405c98e51c8b837071049470bbd9e53cff0
| 1,068
|
py
|
Python
|
tests/extractors/test_azlyrics.py
|
marian-code/LyricsFinder
|
6a0c7fd3cf3fec6cc410b5d6d8a0e6ff9f48144a
|
[
"MIT"
] | 3
|
2019-03-06T18:20:36.000Z
|
2019-12-05T06:23:16.000Z
|
tests/extractors/test_azlyrics.py
|
marian-code/LyricsFinder
|
6a0c7fd3cf3fec6cc410b5d6d8a0e6ff9f48144a
|
[
"MIT"
] | 11
|
2018-05-04T10:49:39.000Z
|
2018-10-05T20:36:15.000Z
|
tests/extractors/test_azlyrics.py
|
GieselaDev/LyricsFinder
|
21afa8a6daf194ab0a77a4c0f5834c17eaca49f8
|
[
"MIT"
] | 2
|
2019-03-05T14:08:25.000Z
|
2019-08-29T08:02:38.000Z
|
import hashlib
import os
import pytest
from aiohttp import ClientSession
from lyricsfinder.extractors.azlyrics import AZLyrics
from lyricsfinder.utils import Request
| 38.142857
| 132
| 0.730337
|
import hashlib
import os
import pytest
from aiohttp import ClientSession
from lyricsfinder.extractors.azlyrics import AZLyrics
from lyricsfinder.utils import Request
class TestAZLyrics:
@pytest.mark.asyncio
async def test_can_handle(self):
async with ClientSession() as session:
assert await AZLyrics.can_handle(Request(session, "https://www.azlyrics.com/lyrics/edsheeran/theateam.html")) is True
@pytest.mark.skipif(os.environ.get("TRAVIS") == "true", reason="AZLyrics doesn't respond to Travis' servers. Don't ask me why!")
@pytest.mark.asyncio
async def test_extraction(self):
async with ClientSession() as session:
lyrics = await AZLyrics.extract_lyrics(Request(session, "https://www.azlyrics.com/lyrics/edsheeran/theateam.html"))
lyrics_hash = hashlib.sha256(lyrics.lyrics.encode("utf-8")).hexdigest()
assert lyrics_hash == "e77a63fb93b1d0f373b859963532e41a2dbf2d68d290bf3f919b93b174fe26e3"
assert lyrics.title == "The A Team"
assert lyrics.artist == "Ed Sheeran"
| 642
| 234
| 23
|
40ea3f70f9ff9d6bc077279cbd34054808da8844
| 2,000
|
py
|
Python
|
polling_stations/settings/constants/councils.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/settings/constants/councils.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/settings/constants/councils.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | null | null | null |
# settings for councils scraper
BOUNDARIES_URL = "https://ons-cache.s3.amazonaws.com/Local_Authority_Districts_April_2019_Boundaries_UK_BFE.geojson"
EC_COUNCIL_CONTACT_DETAILS_API_URL = (
"https://electoralcommission.org.uk/api/v1/data/local-authorities.json"
)
OLD_TO_NEW_MAP = {}
NEW_COUNCILS = []
WELSH_COUNCIL_NAMES = {
"AGY": "Cyngor Sir Ynys Môn", # Isle of Anglesey County Council
"BGE": "Cyngor Bwrdeistref Sirol Pen-y-bont ar Ogwr", # Bridgend County Borough Council
"BGW": "Cyngor Bwrdeistref Sirol Blaenau Gwent", # Blaenau Gwent County Borough Council
"CAY": "Cyngor Bwrdeistref Sirol Caerffili", # Caerphilly County Borough Council
"CGN": "Cyngor Sir Ceredigion", # Ceredigion County Council
"CMN": "Cyngor Sir Gaerfyrddin", # Carmarthenshire County Council
"CRF": "Cyngor Caerdydd", # Cardiff Council
"CWY": "Cyngor Bwrdeistref Sirol Conwy", # Conwy County Borough Council
"DEN": "Cyngor Sir Ddinbych", # Denbighshire County Council
"FLN": "Cyngor Sir y Fflint", # Flintshire County Council
"GWN": "Cyngor Sir Gwynedd", # Gwynedd Council
"MON": "Cyngor Sir Fynwy", # Monmouthshire County Council
"MTY": "Cyngor Bwrdeistref Sirol Merthyr Tudful", # Merthyr Tydfil County Borough Council
"NTL": "Cyngor Bwrdeistref Sirol Castell-nedd Port Talbot", # Neath Port Talbot County Borough Council
"NWP": "Cyngor Dinas Casnewydd", # Newport City Council
"PEM": "Cyngor Sir Penfro", # Pembrokeshire County Council
"POW": "Cyngor Sir Powys", # Powys County Council
"RCT": "Cyngor Bwrdeistref Sirol Rhondda Cynon Taf", # Rhondda Cynon Taf County Borough Council
"SWA": "Cyngor Sir a Dinas Abertawe", # City and County of Swansea
"TOF": "Cyngor Bwrdeistref Sirol Torfaen", # Torfaen County Borough Council
"VGL": "Cyngor Bwrdeistref Sirol Bro Morgannwg", # The Vale of Glamorgan County Borough Council
"WRX": "Cyngor Bwrdeistref Sirol Wrecsam", # Wrexham County Borough Council
}
| 54.054054
| 116
| 0.722
|
# settings for councils scraper
BOUNDARIES_URL = "https://ons-cache.s3.amazonaws.com/Local_Authority_Districts_April_2019_Boundaries_UK_BFE.geojson"
EC_COUNCIL_CONTACT_DETAILS_API_URL = (
"https://electoralcommission.org.uk/api/v1/data/local-authorities.json"
)
OLD_TO_NEW_MAP = {}
NEW_COUNCILS = []
WELSH_COUNCIL_NAMES = {
"AGY": "Cyngor Sir Ynys Môn", # Isle of Anglesey County Council
"BGE": "Cyngor Bwrdeistref Sirol Pen-y-bont ar Ogwr", # Bridgend County Borough Council
"BGW": "Cyngor Bwrdeistref Sirol Blaenau Gwent", # Blaenau Gwent County Borough Council
"CAY": "Cyngor Bwrdeistref Sirol Caerffili", # Caerphilly County Borough Council
"CGN": "Cyngor Sir Ceredigion", # Ceredigion County Council
"CMN": "Cyngor Sir Gaerfyrddin", # Carmarthenshire County Council
"CRF": "Cyngor Caerdydd", # Cardiff Council
"CWY": "Cyngor Bwrdeistref Sirol Conwy", # Conwy County Borough Council
"DEN": "Cyngor Sir Ddinbych", # Denbighshire County Council
"FLN": "Cyngor Sir y Fflint", # Flintshire County Council
"GWN": "Cyngor Sir Gwynedd", # Gwynedd Council
"MON": "Cyngor Sir Fynwy", # Monmouthshire County Council
"MTY": "Cyngor Bwrdeistref Sirol Merthyr Tudful", # Merthyr Tydfil County Borough Council
"NTL": "Cyngor Bwrdeistref Sirol Castell-nedd Port Talbot", # Neath Port Talbot County Borough Council
"NWP": "Cyngor Dinas Casnewydd", # Newport City Council
"PEM": "Cyngor Sir Penfro", # Pembrokeshire County Council
"POW": "Cyngor Sir Powys", # Powys County Council
"RCT": "Cyngor Bwrdeistref Sirol Rhondda Cynon Taf", # Rhondda Cynon Taf County Borough Council
"SWA": "Cyngor Sir a Dinas Abertawe", # City and County of Swansea
"TOF": "Cyngor Bwrdeistref Sirol Torfaen", # Torfaen County Borough Council
"VGL": "Cyngor Bwrdeistref Sirol Bro Morgannwg", # The Vale of Glamorgan County Borough Council
"WRX": "Cyngor Bwrdeistref Sirol Wrecsam", # Wrexham County Borough Council
}
| 0
| 0
| 0
|
f7a75459af31c39231b2c99baeb705677f67cbf6
| 8,665
|
py
|
Python
|
.scripts/make_docs/event_tuples.py
|
daakru/X2WOTCCommunityHighlander
|
de544c30d98fd372b8798dd06adb6d3126fac66f
|
[
"MIT"
] | null | null | null |
.scripts/make_docs/event_tuples.py
|
daakru/X2WOTCCommunityHighlander
|
de544c30d98fd372b8798dd06adb6d3126fac66f
|
[
"MIT"
] | null | null | null |
.scripts/make_docs/event_tuples.py
|
daakru/X2WOTCCommunityHighlander
|
de544c30d98fd372b8798dd06adb6d3126fac66f
|
[
"MIT"
] | null | null | null |
from enum import Enum
from typing import Iterator, List, Optional, Tuple
import re
_PUNCTUATION = {
"(": _Token(_TokenType.LPAREN),
")": _Token(_TokenType.RPAREN),
"{": _Token(_TokenType.LBRACE),
"}": _Token(_TokenType.RBRACE),
"[": _Token(_TokenType.LBRACK),
"]": _Token(_TokenType.RBRACK),
",": _Token(_TokenType.COMMA),
":": _Token(_TokenType.COLON),
}
_KWS = {
"in": _Keyword.IN,
"out": _Keyword.OUT,
"inout": _Keyword.INOUT,
}
_STARTIDENTCHAR = re.compile(r"[A-Za-z]")
_IDENTCHAR = re.compile(r"[A-Za-z0-9\-_<>]")
class _peekable():
"Wrap iterator with lookahead to both peek and test exhausted"
# Code by Terry Jan Reedy, intentionally submitted to the python-ideas
# mailing list for inclusion into itertools.
# https://mail.python.org/pipermail//python-ideas/2013-February/019633.html
_NONE = object()
def _parse_type_sig(lex) -> (InOutness, str, str, Optional[str]):
"""
"inout bool bShow" -> (InOutness.INOUT, "bool", "bShow", None)
"in enum[EInventorySlot] Slot" -> (InOutness.IN, "enum", "Slot", "EInventorySlot")
"""
param_kind = _kw_to_inout(
_expect(lex, _TokenType.KW, "inoutness", "tuple param"))
tup_type = _expect(lex, _TokenType.IDENT, "type", "tuple param").ident
local_type = None
if _try_eat(lex, _TokenType.LBRACK):
local_type = _expect(lex, _TokenType.IDENT, "local type",
"tuple param").ident
_expect(lex, _TokenType.RBRACK, "inoutness", "tuple param")
name = _expect(lex, _TokenType.IDENT, "param name", "tuple param")
if name.ident.lower() == "self":
raise ParseError(f"{name} not a valid local variable name")
return param_kind, tup_type, name.ident, local_type
| 27.507937
| 89
| 0.560415
|
from enum import Enum
from typing import Iterator, List, Optional, Tuple
import re
class _TokenType(Enum):
LPAREN = 1
RPAREN = 2
LBRACE = 3
RBRACE = 4
LBRACK = 5
RBRACK = 6
KW = 7
IDENT = 8
COMMA = 9
COLON = 10
class _Keyword(Enum):
IN = 1
OUT = 2
INOUT = 3
class InOutness(Enum):
IN = 1
OUT = 2
INOUT = 3
def is_in(self) -> bool:
return self in [InOutness.IN, InOutness.INOUT]
def is_out(self) -> bool:
return self in [InOutness.OUT, InOutness.INOUT]
def __str__(self) -> str:
if self == InOutness.IN:
return "in"
elif self == InOutness.OUT:
return "out"
elif self == InOutness.INOUT:
return "inout"
else:
assert False, "unreachable"
class NewGameState(Enum):
YES = 1
NONE = 2
MAYBE = 3
def __str__(self) -> str:
if self == NewGameState.YES:
return "yes"
elif self == NewGameState.NONE:
return "none"
elif self == NewGameState.MAYBE:
return "maybe"
else:
assert False, "unreachable"
class EventSpec:
def __init__(self):
pass
class EventArg:
def __init__(self, ty):
self.type = ty
self.name = None
class _Token:
def __init__(self, type: _TokenType):
self.type = type
def __str__(self):
buf = str(self.type)
if self.type == _TokenType.KW:
buf += f" ({self.kw})"
elif self.type == _TokenType.IDENT:
buf += f" ({self.ident})"
return buf
_PUNCTUATION = {
"(": _Token(_TokenType.LPAREN),
")": _Token(_TokenType.RPAREN),
"{": _Token(_TokenType.LBRACE),
"}": _Token(_TokenType.RBRACE),
"[": _Token(_TokenType.LBRACK),
"]": _Token(_TokenType.RBRACK),
",": _Token(_TokenType.COMMA),
":": _Token(_TokenType.COLON),
}
_KWS = {
"in": _Keyword.IN,
"out": _Keyword.OUT,
"inout": _Keyword.INOUT,
}
_STARTIDENTCHAR = re.compile(r"[A-Za-z]")
_IDENTCHAR = re.compile(r"[A-Za-z0-9\-_<>]")
class ParseError(Exception):
def __init__(self, msg: str):
self.msg = msg
def _ident(pos: int, text: str, first: str) -> (int, str):
while pos < len(text) and _IDENTCHAR.match(text[pos]):
first += text[pos]
pos += 1
return (pos, first)
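# Tokenize an event spec string into punctuation, keyword and identifier tokens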
def _lex_event_spec(text: str) -> Iterator[_Token]:
pos = 0
while pos < len(text):
c = text[pos]
pos += 1
if c.isspace():
continue
if c in _PUNCTUATION:
yield _PUNCTUATION[c]
continue
if _STARTIDENTCHAR.match(c):
(pos, id) = _ident(pos, text, c)
if id in _KWS:
tok = _Token(_TokenType.KW)
tok.kw = _KWS[id]
else:
tok = _Token(_TokenType.IDENT)
tok.ident = id
yield tok
continue
raise ParseError(f"unknown start of token {c}")
def _expect(it, t: _TokenType, thing=None, ctx=None) -> _Token:
n = next(it)
ctx = f" while parsing {ctx}" if ctx is not None else ""
thing = f" ({thing})" if thing is not None else ""
if n is None or n.type != t:
err = " but code block ended" if n is None else f", found {str(n)}"
raise ParseError(f"expected {str(t)}{thing}{ctx}{err}")
return n
def _try_eat(it, t: _TokenType) -> _Token:
if it and it.peek.type == t:
return next(it)
return None
def _kw_to_inout(t: _Token) -> InOutness:
if t.kw == _Keyword.IN:
return InOutness.IN
elif t.kw == _Keyword.OUT:
return InOutness.OUT
elif t.kw == _Keyword.INOUT:
return InOutness.INOUT
class _peekable():
"Wrap iterator with lookahead to both peek and test exhausted"
# Code by Terry Jan Reedy, intentionally submitted to the python-ideas
# mailing list for inclusion into itertools.
# https://mail.python.org/pipermail//python-ideas/2013-February/019633.html
_NONE = object()
def __init__(self, iterable):
self._it = iter(iterable)
self._set_peek()
def __iter__(self):
return self
def __next__(self):
if self:
ret = self.peek
self._set_peek()
return ret
else:
raise StopIteration()
def _set_peek(self):
try:
self.peek = next(self._it)
except StopIteration:
self.peek = self._NONE
def __bool__(self):
return self.peek is not self._NONE
def _parse_type_sig(lex) -> (InOutness, str, str, Optional[str]):
"""
"inout bool bShow" -> (InOutness.INOUT, "bool", "bShow", None)
"in enum[EInventorySlot] Slot" -> (InOutness.IN, "enum", "Slot", "EInventorySlot")
"""
param_kind = _kw_to_inout(
_expect(lex, _TokenType.KW, "inoutness", "tuple param"))
tup_type = _expect(lex, _TokenType.IDENT, "type", "tuple param").ident
local_type = None
if _try_eat(lex, _TokenType.LBRACK):
local_type = _expect(lex, _TokenType.IDENT, "local type",
"tuple param").ident
_expect(lex, _TokenType.RBRACK, "inoutness", "tuple param")
name = _expect(lex, _TokenType.IDENT, "param name", "tuple param")
if name.ident.lower() == "self":
raise ParseError(f"{name} not a valid local variable name")
return param_kind, tup_type, name.ident, local_type
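# Parse a bracketed, comma-separated list of tuple parameter signatures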
def _parse_tuple_data(lex) -> List[Tuple]:
_expect(lex, _TokenType.LBRACK, "[", "tuple data")
tup = []
comma = False
while True:
if _try_eat(lex, _TokenType.RBRACK):
break
if comma:
_expect(lex, _TokenType.COMMA, ",", "tuple params")
comma = True
if _try_eat(lex, _TokenType.RBRACK):
break
tup.append(_parse_type_sig(lex))
return tup
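# Top-level parser: build an EventSpec (id, source, data, newgs) from a "key: value" spec string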
def parse_event_spec(text: str) -> EventSpec:
lex = _peekable(_lex_event_spec(text))
spec = EventSpec()
comma = False
prev_key = None
ctx = "event specification"
while True:
if not lex:
break
if comma:
spec_ctx = ctx + f" after parsing {prev_key}" if prev_key else ctx
_expect(lex, _TokenType.COMMA, ",", spec_ctx)
if not lex:
break
comma = True
key = _expect(lex, _TokenType.IDENT, "key", ctx).ident
prev_key = key
_expect(lex, _TokenType.COLON, ":", f"key {key}")
if hasattr(spec, key):
raise ParseError(f"error, duplicate key {key}")
elif key == "EventID":
name = _expect(lex, _TokenType.IDENT, "event name", key)
spec.id = name.ident
elif key == "EventSource":
type = _expect(lex, _TokenType.IDENT, "source type", key)
spec.source = EventArg(type.ident)
if _try_eat(lex, _TokenType.LPAREN):
name = _expect(lex, _TokenType.IDENT)
spec.source.name = name.ident
_expect(lex, _TokenType.RPAREN)
elif key == "EventData":
if lex and lex.peek.type == _TokenType.LBRACK:
tup = _parse_tuple_data(lex)
spec.data = EventArg("XComLWTuple")
spec.data.tuple = tup
else:
type = _expect(lex, _TokenType.IDENT, "data type", key)
spec.data = EventArg(type.ident)
if _try_eat(lex, _TokenType.LPAREN):
name = _expect(lex, _TokenType.IDENT, "local name", key)
spec.data.name = name.ident
_expect(lex, _TokenType.RPAREN, ")", key)
elif key == "NewGameState":
b = _expect(lex, _TokenType.IDENT,
"NewGameStateness (yes, none, maybe)", key).ident
if b == "yes":
spec.newgs = NewGameState.YES
elif b == "none" or b == "None":
spec.newgs = NewGameState.NONE
elif b == "maybe":
spec.newgs = NewGameState.MAYBE
else:
raise ParseError("expected yes, none, or maybe")
else:
raise ParseError(
"unexpected key (expected EventID, EventSource, EventData, NewGameState)"
)
if not hasattr(spec, 'id'):
raise ParseError("missing EventID")
if not hasattr(spec, 'data'):
raise ParseError("missing EventData")
if not hasattr(spec, 'source'):
raise ParseError("missing EventSource")
if not hasattr(spec, 'newgs'):
raise ParseError("missing NewGameState")
return spec
| 5,911
| 360
| 611
|
2224eec49cd0fe31d6ede8185df188a65ec015a2
| 5,380
|
py
|
Python
|
Assignments/hw4/build_kNN.py
|
spacemanidol/CLMS572
|
f0380de9912c984ec21607cdb3b1f190853c5ca8
|
[
"MIT"
] | null | null | null |
Assignments/hw4/build_kNN.py
|
spacemanidol/CLMS572
|
f0380de9912c984ec21607cdb3b1f190853c5ca8
|
[
"MIT"
] | null | null | null |
Assignments/hw4/build_kNN.py
|
spacemanidol/CLMS572
|
f0380de9912c984ec21607cdb3b1f190853c5ca8
|
[
"MIT"
] | 1
|
2020-12-26T01:28:41.000Z
|
2020-12-26T01:28:41.000Z
|
import sys
import numpy as np
import math
from collections import Counter
if __name__ == "__main__":
if len(sys.argv) != 6:
print("Usage:build_kNN.py <training_data> <test_data> <k_val> <similarity_func> <sys_output>")
exit(-1)
else:
trainingData, features = readData(sys.argv[1])
testData, _ = readData(sys.argv[2])
candidate, truth = knn(trainingData,testData, int(sys.argv[3]), int(sys.argv[4]),sys.argv[5])
writeMatrix(candidate, truth)
| 43.387097
| 193
| 0.573048
|
import sys
import numpy as np
import math
from collections import Counter
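# Read "label feature:count ..." lines; return labels with per-example feature dicts and the set of all feature names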
def readData(filename):
labels, features, all_features = [], [], []
with open(filename, 'r') as f:
for l in f:
l = l.strip().split(' ')
featureCount = {}
labels.append(l[0])
for feature in l[1:]:
word, count = feature.split(':')
all_features.append(word)
featureCount[word] = int(count)
features.append(featureCount)
all_features = set(all_features)
return np.array([labels, features]), all_features
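# Euclidean distance between two sparse feature dicts (missing features count as zero)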
def euclidean(a,b):
dist = 0
for feature in set(a).union(set(b)):
valueA, valueB = 0,0
if feature in a:
valueA = a[feature]
if feature in b:
valueB = b[feature]
dist += ((valueA - valueB)*(valueA - valueB))
return math.sqrt(dist)
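# Cosine similarity between two sparse feature dicts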
def cosine(a,b):
sumAB,normA,normB = 0,0,0
for feature in set(a).union(set(b)):
if feature in a and feature in b:
sumAB += a[feature]*b[feature]
if feature in a:
normA += (a[feature]*a[feature])
if feature in b:
normB += (b[feature]*b[feature])
return sumAB/ (math.sqrt(normA)*math.sqrt(normB))
def writeMatrix(candidates, truth):
lookup = {0:'training', 1:'test'}
for i in range(2):
print("Confusion matrix for the {} data:\nrow is the truth, column is the system output\n".format(lookup[i]))
        labels = set(truth[i]).union(set(candidates[i]))
d = len(labels)
        candidateLength = len(candidates[i])
m = np.zeros([d,d])
label2idx, idx2label = {}, {}
index, count = 0, 0
for label in labels:
label2idx[label] = index
idx2label[index] = label
index += 1
for j in range(candidateLength):
            m[label2idx[candidates[i][j]]][label2idx[truth[i][j]]] += 1
            if candidates[i][j] == truth[i][j]:
count += 1
out = ''
for j in range(d):
out += ' {}'.format(idx2label[j])
out += '\n'
for j in range(d):
out += idx2label[j]
for k in range(d):
out += ' {}'.format(str(int(m[j][k])))
out += '\n'
print(" {}\n {} accuracy={:.5f}\n".format(out, str(lookup[i].capitalize()), count/candidateLength))
def vote(neighbors, data):
votes, output = {}, ''
for neighbor in neighbors:
label = data[int(neighbor[1])]
if label not in votes:
votes[label] = 0
votes[label] += 1
total = sum(votes.values())
for label in sorted(votes.items(), key = lambda x:-x[1]):
output += ' {} {}'.format(label[0], votes[label[0]]/total)
return sorted(votes.items(), key = lambda x:-x[1])[0][0], output
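# Compute the distance/similarity from the query to every training vector and vote over the k nearest (i == -1 marks a test vector)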
def search(candidateVector, dimensions, data, i, similarityFunc, k, distance):
for j in range(dimensions):
neighborVector = data[1][j]
if similarityFunc == 1:
distance[j] = [euclidean(candidateVector,neighborVector),j]
elif similarityFunc == 2:
distance[j] = [cosine(candidateVector,neighborVector),j]
else:
            print('incorrect similarity function, please use cosine or euclidean (2 or 1)')
exit(-1)
if similarityFunc == 1:
if i != -1:
distance[i] = float("inf") #if training we set the distance to itself to infinity
neighbors = distance[distance[:,0].argsort(kind='mergesort')][:k]
else:
if i != -1:
            distance[i] = 0 #if training we set the distance to itself to zero
neighbors = distance[distance[:,0].argsort(kind='mergesort')][-k:]
best, output = vote(neighbors, data[0])
return best, output
def knn(trainingData, testData, k, similarityFunc, systemOutputFilename):
candidate, truth = [[],[]], [[],[]]
dimensions, testDimensions = len(trainingData[1]), len(testData[1])
distance = np.empty(shape=(dimensions,2))
with open(systemOutputFilename, 'w') as w:
w.write('%%%%% training data:\n')
for i in range(dimensions):
truth[0].append(trainingData[0][i])
best, to_write = search(trainingData[1][i], dimensions, trainingData, i, similarityFunc, k, distance) #for each vector we search for knn neighbors and get a result and output string
w.write('array:{} {} {}\n'.format(i, best, to_write))
candidate[0].append(best)
w.write('\n\n%%%%% test data:\n')
for i in range(testDimensions):
truth[1].append(testData[0][i])
best, to_write = search(testData[1][i], dimensions, trainingData, -1, similarityFunc, k, distance) #for each vector we search for knn neighbors and get a result and output string
w.write('array:{} {} {}\n'.format(i, best, to_write))
candidate[1].append(best)
return candidate, truth
if __name__ == "__main__":
if len(sys.argv) != 6:
print("Usage:build_kNN.py <training_data> <test_data> <k_val> <similarity_func> <sys_output>")
exit(-1)
else:
trainingData, features = readData(sys.argv[1])
testData, _ = readData(sys.argv[2])
candidate, truth = knn(trainingData,testData, int(sys.argv[3]), int(sys.argv[4]),sys.argv[5])
writeMatrix(candidate, truth)
| 4,725
| 0
| 159
|
db5a483b7179a52757b1e303fa5cebe303f5944b
| 1,622
|
py
|
Python
|
pizzeria/workshop/forms.py
|
Zachary-Jackson/Django-Pizzeria
|
5948b10e9e99e7f2e8ffdbe6c0efbb87000808a9
|
[
"BSD-2-Clause"
] | 1
|
2021-05-20T04:48:25.000Z
|
2021-05-20T04:48:25.000Z
|
pizzeria/workshop/forms.py
|
Zachary-Jackson/Django-Pizzeria
|
5948b10e9e99e7f2e8ffdbe6c0efbb87000808a9
|
[
"BSD-2-Clause"
] | null | null | null |
pizzeria/workshop/forms.py
|
Zachary-Jackson/Django-Pizzeria
|
5948b10e9e99e7f2e8ffdbe6c0efbb87000808a9
|
[
"BSD-2-Clause"
] | null | null | null |
from django import forms
from . import models
STATES = [
('AL', 'AL'), ('AK', 'AK'), ('AZ', 'AZ'), ('AR', 'AR'), ('CA', 'CA'),
('CO', 'CO'), ('CT', 'CT'), ('DC', 'DC'), ('DE', 'DE'), ('FL', 'FL'),
('GA', 'GA'), ('HI', 'HI'), ('ID', 'ID'), ('IL', 'IL'), ('IN', 'IN'),
('IA', 'IA'), ('KS', 'KS'), ('KY', 'KY'), ('LA', 'LA'), ('ME', 'ME'),
('MD', 'MD'), ('MA', 'MA'), ('MI', 'MI'), ('MN', 'MN'), ('MS', 'MS'),
('MO', 'MO'), ('MT', 'MT'), ('NE', 'NE'), ('NV', 'NV'), ('NH', 'NH'),
('NJ', 'NJ'), ('NM', 'NM'), ('NY', 'NY'), ('NC', 'NC'), ('ND', 'ND'),
('OH', 'OH'), ('OK', 'OK'), ('OR', 'OR'), ('PA', 'PA'), ('RI', 'RI'),
('SC', 'SC'), ('SD', 'SD'), ('TN', 'TN'), ('TX', 'TX'), ('UT', 'UT'),
('VT', 'VT'), ('VA', 'VA'), ('WA', 'WA'), ('WV', 'WV'), ('WI', 'WI'),
('WY', 'WY')
]
class IngredientForm(forms.ModelForm):
"""Form allowing a new Ingredient to be created"""
class Meta:
"""Defines the model the form uses and what fields to use"""
model = models.Ingredient
fields = ['name']
def clean_name(self):
"""Over-ride the value for name and title case it"""
name = self.cleaned_data['name']
return name.title()
class PizzaForm(forms.ModelForm):
"""Form allowing a new Pizza object to be created"""
class Meta:
"""Defines the model the form uses and what fields to use"""
model = models.Pizza
fields = ['city', 'state', 'crust', 'ingredients', 'name', 'summary']
# Over-ride the State field to be a state selector
widgets = {'state': forms.Select(choices=STATES)}
| 36.044444
| 77
| 0.459926
|
from django import forms
from . import models
STATES = [
('AL', 'AL'), ('AK', 'AK'), ('AZ', 'AZ'), ('AR', 'AR'), ('CA', 'CA'),
('CO', 'CO'), ('CT', 'CT'), ('DC', 'DC'), ('DE', 'DE'), ('FL', 'FL'),
('GA', 'GA'), ('HI', 'HI'), ('ID', 'ID'), ('IL', 'IL'), ('IN', 'IN'),
('IA', 'IA'), ('KS', 'KS'), ('KY', 'KY'), ('LA', 'LA'), ('ME', 'ME'),
('MD', 'MD'), ('MA', 'MA'), ('MI', 'MI'), ('MN', 'MN'), ('MS', 'MS'),
('MO', 'MO'), ('MT', 'MT'), ('NE', 'NE'), ('NV', 'NV'), ('NH', 'NH'),
('NJ', 'NJ'), ('NM', 'NM'), ('NY', 'NY'), ('NC', 'NC'), ('ND', 'ND'),
('OH', 'OH'), ('OK', 'OK'), ('OR', 'OR'), ('PA', 'PA'), ('RI', 'RI'),
('SC', 'SC'), ('SD', 'SD'), ('TN', 'TN'), ('TX', 'TX'), ('UT', 'UT'),
('VT', 'VT'), ('VA', 'VA'), ('WA', 'WA'), ('WV', 'WV'), ('WI', 'WI'),
('WY', 'WY')
]
class IngredientForm(forms.ModelForm):
"""Form allowing a new Ingredient to be created"""
class Meta:
"""Defines the model the form uses and what fields to use"""
model = models.Ingredient
fields = ['name']
def clean_name(self):
"""Over-ride the value for name and title case it"""
name = self.cleaned_data['name']
return name.title()
class PizzaForm(forms.ModelForm):
"""Form allowing a new Pizza object to be created"""
class Meta:
"""Defines the model the form uses and what fields to use"""
model = models.Pizza
fields = ['city', 'state', 'crust', 'ingredients', 'name', 'summary']
# Over-ride the State field to be a state selector
widgets = {'state': forms.Select(choices=STATES)}
| 0
| 0
| 0
|
69c717b6efcc64167d465109f16e86f9f2d5efd3
| 530
|
py
|
Python
|
server/src/tests/samples/classes2.py
|
jhutchings1/pyright
|
2b8593a58a2aecc95dac49cce92fc16678cd4e14
|
[
"MIT"
] | null | null | null |
server/src/tests/samples/classes2.py
|
jhutchings1/pyright
|
2b8593a58a2aecc95dac49cce92fc16678cd4e14
|
[
"MIT"
] | 1
|
2021-08-31T20:37:43.000Z
|
2021-08-31T20:37:43.000Z
|
server/src/tests/samples/classes2.py
|
jhutchings1/pyright
|
2b8593a58a2aecc95dac49cce92fc16678cd4e14
|
[
"MIT"
] | null | null | null |
# This sample tests the reportIncompatibleMethodOverride
# configuration option.
# This should generate an error if reportIncompatibleMethodOverride
# is enabled.
# This should generate an error if reportIncompatibleMethodOverride
# is enabled.
| 24.090909
| 71
| 0.684906
|
# This sample tests the reportIncompatibleMethodOverride
# configuration option.
class ParentClass():
def my_method1(self, a: int):
return 1
def my_method2(self, a: int, b: int):
return 1
class ChildClass(ParentClass):
# This should generate an error if reportIncompatibleMethodOverride
# is enabled.
def my_method1(self, a: str):
return 1
# This should generate an error if reportIncompatibleMethodOverride
# is enabled.
def my_method2(self, a: int):
return 1
| 108
| 8
| 151
|
fc04a536fd4df8abf991154f3648f8761ca61cd0
| 777
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/l2VPNFrameRelay_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/l2VPNFrameRelay_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/l2VPNFrameRelay_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
| 28.777778
| 92
| 0.680824
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class L2VPNFrameRelay(Base):
__slots__ = ()
_SDM_NAME = 'l2VPNFrameRelay'
_SDM_ATT_MAP = {
'HeaderControlWord': 'l2VPNFrameRelay.header.controlWord-1',
}
def __init__(self, parent, list_op=False):
super(L2VPNFrameRelay, self).__init__(parent, list_op)
@property
def HeaderControlWord(self):
"""
Display Name: FR IP CW
Default Value: 0x03cc
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderControlWord']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 152
| 521
| 23
|
adcf45cbcc17af82fe53acf28aef59d0c6beea07
| 1,105
|
py
|
Python
|
firstapp/rancher_api.py
|
mudong1991/DevOpsApi
|
8cc880c7c628f04492427fe73a1a684eadb94e84
|
[
"Apache-2.0"
] | 1
|
2017-12-22T04:12:10.000Z
|
2017-12-22T04:12:10.000Z
|
firstapp/rancher_api.py
|
mudong1991/DevOpsApi
|
8cc880c7c628f04492427fe73a1a684eadb94e84
|
[
"Apache-2.0"
] | 1
|
2020-01-08T01:49:03.000Z
|
2020-01-08T01:49:03.000Z
|
firstapp/rancher_api.py
|
mudong1991/DevOpsApi
|
8cc880c7c628f04492427fe73a1a684eadb94e84
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# file: rancher_api
# author: Mundy
# date: 2017/9/1 0001
"""
Rancher API calls
"""
import json
import requests
from django.conf import settings
# -------- requests settings --------
time_out = 8
headers = {
'content-type': 'application/json',
'Accept': 'application/json',
}
# Call the receiver_hooks API
| 23.020833
| 78
| 0.697738
|
# -*- coding:utf-8 -*-
# file: rancher_api
# author: Mundy
# date: 2017/9/1 0001
"""
Rancher API calls
"""
import json
import requests
from django.conf import settings
# -------- requests settings --------
time_out = 8
headers = {
'content-type': 'application/json',
'Accept': 'application/json',
}
def get_response_json(response):
return response.json()
# Call the receiver_hooks API
def request_receiver_hooks(request_url, data):
request_s = requests.Session()
request_s.headers.update(headers)
request_s.verify = False
response = request_s.post(request_url, json.dumps(data), timeout=time_out)
if response.status_code == 200:
return True
else:
return False
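# Query the Rancher API using access/secret key basic auth; returns the parsed JSON or '' on failure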
def get_rancher_data(request_url, access_key, secret_key):
request_session = requests.Session()
request_session.auth = (access_key, secret_key)
request_session.headers.update(headers)
request_session.verify = False
response = request_session.get(request_url, timeout=time_out)
if response.status_code == 200:
return get_response_json(response)
else:
return ''
| 715
| 0
| 68
|