| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | 1 to 10 items |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | 1 to 10 items |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | 1 to 10 items |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
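Each row below carries these fields for one source file. As a minimal, hedged sketch (assuming the split has been exported to a local Parquet file; the filename and threshold values are illustrative, not taken from this dump), the count/score columns can be used to filter rows:

```python
import pandas as pd

# Hypothetical export of this split; the filename is an assumption.
df = pd.read_parquet("train.parquet")

# Keep well-documented files that define at least one class.
selected = df[(df["score_documentation"] > 0.3) & (df["count_classes"] > 0)]
print(selected[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```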
83103a011e1bb5e482fa869c43bee2cdb39dd21a
| 5,830
|
py
|
Python
|
app/comic/eyra/tasks.py
|
EYRA-Benchmark/grand-challenge.org
|
8264c19fa1a30ffdb717d765e2aa2e6ceccaab17
|
[
"Apache-2.0"
] | 2
|
2019-06-28T09:23:55.000Z
|
2020-03-18T05:52:13.000Z
|
app/comic/eyra/tasks.py
|
EYRA-Benchmark/comic
|
8264c19fa1a30ffdb717d765e2aa2e6ceccaab17
|
[
"Apache-2.0"
] | 112
|
2019-08-12T15:13:27.000Z
|
2022-03-21T15:49:40.000Z
|
app/comic/eyra/tasks.py
|
EYRA-Benchmark/grand-challenge.org
|
8264c19fa1a30ffdb717d765e2aa2e6ceccaab17
|
[
"Apache-2.0"
] | 1
|
2020-03-19T14:19:57.000Z
|
2020-03-19T14:19:57.000Z
|
import json
from datetime import datetime
import time
from functools import reduce
import boto3
from celery import shared_task
from celery.bin.control import inspect
from django.conf import settings
from comic.container_exec.backends.k8s import K8sJob
from comic.eyra.models import Job, Submission, DataFile, JobInput
@shared_task
def run_job(job_pk):
"""Celery task for running a job.
Args:
job_pk: the primary key of the Job object that defines the algorithm run
"""
job = Job.objects.get(pk=job_pk)
if job.status != Job.PENDING:
raise Exception(f"Can't start job with status '{Job.STATUS_CHOICES[job.status][1]}'")
job.status = Job.STARTED
job.started = datetime.now()
job.save()
job.log = ''
try:
with K8sJob(job) as k8s_job:
k8s_job.run()
# keep probing until failure or success
while True:
s = k8s_job.status()
job.log = k8s_job.get_text_logs()
job.save()
if s.failed or s.succeeded:
break
time.sleep(5)
job.status = Job.SUCCESS if s.succeeded else Job.FAILURE
job.log = k8s_job.get_text_logs()
except Exception as e:
job.status = Job.FAILURE
job.log += '\n Error in job executor: \n' + str(e)
raise e
finally:
job.stopped = datetime.now()
job.save()
if job.status == Job.FAILURE:
raise Exception("Job failed")
def create_algorithm_job_for_submission(submission: Submission):
if submission.algorithm_job:
raise Exception('Job already exists for submission')
job_output = DataFile.objects.create(
name='algorithm job output',
)
job_output.file = f"data_files/{str(job_output.pk)}"
job_output.save()
submission.algorithm_job = Job.objects.create(
output=job_output,
submission=submission,
image=submission.image,
)
submission.save()
input_data_file = submission.benchmark.data_set.public_test_data_file
if submission.is_private:
input_data_file = submission.benchmark.data_set.private_test_data_file
job_input = JobInput.objects.create(
job=submission.algorithm_job,
name='test_data',
data_file=input_data_file,
)
def create_evaluation_job_for_submission(submission: Submission):
if submission.evaluation_job:
raise Exception('Job already exists for submission')
job_output = DataFile.objects.create(
name='evaluation job output',
)
job_output.file = f"data_files/{str(job_output.pk)}"
job_output.save()
submission.evaluation_job = Job.objects.create(
output=job_output,
submission=submission,
image=submission.benchmark.evaluation_image
)
submission.save()
job_algorithm_output_input = JobInput.objects.create(
job=submission.evaluation_job,
name='algorithm_output',
data_file=submission.algorithm_job.output,
)
ground_truth_data_file = submission.benchmark.data_set.public_ground_truth_data_file
if submission.is_private:
ground_truth_data_file = submission.benchmark.data_set.private_ground_truth_data_file
job_ground_truth_input = JobInput.objects.create(
job=submission.evaluation_job,
name='ground_truth',
data_file=ground_truth_data_file,
)
@shared_task
def run_submission(submission_pk):
submission: Submission = Submission.objects.get(pk=submission_pk)
create_algorithm_job_for_submission(submission)
create_evaluation_job_for_submission(submission)
if not submission.benchmark.should_evaluate:
submission.algorithm_job.status = Job.SUCCESS
submission.algorithm_job.log = 'Ran externally.'
submission.algorithm_job.save()
submission.evaluation_job.status = Job.SUCCESS
submission.evaluation_job.log = 'Ran externally.'
submission.evaluation_job.save()
submission.metrics = "Should be set externally."
return
try:
run_job(submission.algorithm_job.pk)
except Exception as e:
submission.evaluation_job.status = Job.FAILURE
submission.evaluation_job.log = 'Cannot evaluate, since the implementation job failed.'
submission.evaluation_job.save()
raise e
run_job(submission.evaluation_job.pk)
try:
eval_output = submission.evaluation_job.output.file.read().decode('ascii')
submission.metrics = json.loads(eval_output)['metrics']
except:
submission.metrics = "Error getting 'metrics' value from evaluation output."
submission.save()
@shared_task
def autoscale_gpu_node():
autoscaling_client = boto3.client(
'autoscaling',
region_name=settings.AWS_AUTOSCALING_REGION,
aws_access_key_id=settings.AWS_AUTOSCALING_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_AUTOSCALING_SECRET_ACCESS_KEY,
)
i = inspect()
active_tasks_per_node = [a[1] for a in list(i.active().items())]
scheduled_tasks_per_node = [a[1] for a in list(i.scheduled().items())]
reserved_tasks_per_node = [a[1] for a in list(i.reserved().items())]
tasks_per_node = active_tasks_per_node + scheduled_tasks_per_node + reserved_tasks_per_node
tasks = reduce(lambda x, y: x + y, tasks_per_node)
task_names = [task['name'] for task in tasks]
scale_to = 0
if run_submission.name in task_names:
scale_to = 1
print(f"Scaling to {str(scale_to)} GPU nodes.")
print(autoscaling_client.set_desired_capacity(
AutoScalingGroupName='terraform-eks-eyra-prod01-gpu',
DesiredCapacity=scale_to
))
@shared_task
def sleep_one_sec():
# used for testing basic tasks
time.sleep(1)
return 42
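# Illustrative usage note (not part of the original module, assuming a running
# Celery worker): run_job and run_submission are shared tasks, so callers would
# typically enqueue them asynchronously, e.g.
#   run_submission.delay(submission.pk)
#   sleep_one_sec.delay()   # round-trip test of the worker setup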
| 29.744898
| 95
| 0.686792
| 0
| 0
| 0
| 0
| 3,577
| 0.613551
| 0
| 0
| 817
| 0.140137
|
83105e3ab7b623c4391c6fa5b2af5b5f65241d9a
| 1,926
|
py
|
Python
|
doc/conf.py
|
djarpin/sagemaker-python-sdk
|
157d8670977243f7f77327175d40364c885482b3
|
[
"Apache-2.0"
] | 1
|
2018-01-19T22:24:38.000Z
|
2018-01-19T22:24:38.000Z
|
doc/conf.py
|
djarpin/sagemaker-python-sdk
|
157d8670977243f7f77327175d40364c885482b3
|
[
"Apache-2.0"
] | null | null | null |
doc/conf.py
|
djarpin/sagemaker-python-sdk
|
157d8670977243f7f77327175d40364c885482b3
|
[
"Apache-2.0"
] | 2
|
2019-08-06T05:48:25.000Z
|
2020-10-04T17:00:55.000Z
|
# -*- coding: utf-8 -*-
import os
import sys
from datetime import datetime
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
if name == "__version__":
return "1.4.0"
else:
return MagicMock()
MOCK_MODULES = ['tensorflow', 'tensorflow.core', 'tensorflow.core.framework', 'tensorflow.python',
'tensorflow.python.framework', 'tensorflow_serving', 'tensorflow_serving.apis']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
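# Note: the Mock/sys.modules trick above lets Sphinx autodoc import the package
# on builders (e.g. Read the Docs) where heavy optional dependencies such as
# TensorFlow are absent; attribute lookups on the mocked modules just return
# further MagicMock objects instead of raising ImportError.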
version = '1.0'
project = u'sagemaker'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.autosummary',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst' # The suffix of source filenames.
master_doc = 'index' # The master toctree document.
copyright = u'%s, Amazon' % datetime.now().year
# The full version, including alpha/beta/rc tags.
release = version
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
pygments_style = 'default'
autoclass_content = "both"
autodoc_default_flags = ['show-inheritance', 'members', 'undoc-members']
autodoc_member_order = 'bysource'
if 'READTHEDOCS' in os.environ:
html_theme = 'default'
else:
html_theme = 'haiku'
html_static_path = ['_static']
htmlhelp_basename = '%sdoc' % project
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# autosummary
autosummary_generate = True
| 30.09375
| 98
| 0.704569
| 177
| 0.0919
| 0
| 0
| 150
| 0.077882
| 0
| 0
| 1,056
| 0.548287
|
83109a1fa008110e9e6bc3419abde0778a40c3c3
| 1,081
|
py
|
Python
|
django_cassiopeia/views.py
|
galaddirie/django-cassiopeia
|
e3e75e6c815cfc96e3b7ef5991aa1265221a2122
|
[
"MIT"
] | 13
|
2020-07-08T17:23:18.000Z
|
2022-02-13T09:19:42.000Z
|
django_cassiopeia/views.py
|
galaddirie/django-cassiopeia
|
e3e75e6c815cfc96e3b7ef5991aa1265221a2122
|
[
"MIT"
] | 16
|
2020-07-19T22:14:20.000Z
|
2022-03-24T02:57:45.000Z
|
django_cassiopeia/views.py
|
galaddirie/django-cassiopeia
|
e3e75e6c815cfc96e3b7ef5991aa1265221a2122
|
[
"MIT"
] | 6
|
2020-07-21T01:37:54.000Z
|
2022-01-01T19:28:54.000Z
|
from django.shortcuts import render, HttpResponse
from django_cassiopeia import cassiopeia as cass
from time import sleep
import json
# Create your views here.
def test(request):
return render(request, "test/test.html")
def test_request(request, n):
context = {
"n" : n,
}
sleep(n/5)
try:
kalturi = cass.Summoner(name="Kalturi", region="NA")
senna = cass.Champion(id=235)
rune = cass.Rune(id=8112)
match = cass.Match(id=3481455783)
if n < 20:
print(match.creation)
elif n < 40:
print(kalturi.profile_icon.id)
elif n < 60:
print(kalturi.match_history_uri)
elif n < 80:
print(senna.ally_tips)
elif n < 100:
print(rune.name)
else:
history = cass.MatchHistory(summoner=kalturi, begin_index=0, end_index=100)
            print([match.id for match in history])
except:
raise RuntimeError("Failed at request "+str(n))
return HttpResponse(json.dumps(context), content_type="application/json")
| 30.027778
| 87
| 0.612396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 0.087882
|
8311183712fef6e93100cb2e804d36583b7c35d9
| 962
|
py
|
Python
|
sender.py
|
AndrVLDZ/telnet_DAW-master
|
4bce486fad0d4ae51ef695ace118df2af2b1c35f
|
[
"Apache-2.0"
] | null | null | null |
sender.py
|
AndrVLDZ/telnet_DAW-master
|
4bce486fad0d4ae51ef695ace118df2af2b1c35f
|
[
"Apache-2.0"
] | null | null | null |
sender.py
|
AndrVLDZ/telnet_DAW-master
|
4bce486fad0d4ae51ef695ace118df2af2b1c35f
|
[
"Apache-2.0"
] | null | null | null |
import telnetlib
def print_logo(logo=''):
LOGO_DAFAULT = """\033[93m
/\ /\\
/ \\'._ (\_/) _.'/ \\
/_.''._'--('.')--'_.''._\\
| \_ / `;=/ " \=;` \ _/ |
\/ `\__|`\___/`|__/` \/
` \(/|\)/ `
" ` "
DAW_Start_By_VLDZ
\033[0m
"""
if logo != '':
print(logo)
else:
print(LOGO_DAFAULT)
print_logo()
port = int(input('\n PORT:'))
ip_1 = str(input(' Host_1 IP: '))
node_1 = telnetlib.Telnet(ip_1, port)
ip_2 = str(input(' Host_2 IP: '))
node_2 = telnetlib.Telnet(ip_2, port)
while True:
symbol = str(input('==> '))
if symbol == 's':
node_1.write(b's\r\n')
node_2.write(b's\r\n')
elif symbol == 'n':
node_1.write(b'n\r\n')
node_2.write(b'n\r\n')
elif symbol == 'b':
node_1.write(b'b\r\n')
node_2.write(b'b\r\n')
else:
node_1.write(bytes(str.encode(symbol)))
node_2.write(bytes(str.encode(symbol)))
| 22.904762
| 47
| 0.477131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 332
| 0.345114
|
8311c27de6e1db041ba99f1046583892727db0c6
| 43
|
py
|
Python
|
oed/__init__.py
|
wgshen/OED
|
6928ba31396f2e7dd2bd3701f319e1dad3f91346
|
[
"MIT"
] | null | null | null |
oed/__init__.py
|
wgshen/OED
|
6928ba31396f2e7dd2bd3701f319e1dad3f91346
|
[
"MIT"
] | null | null | null |
oed/__init__.py
|
wgshen/OED
|
6928ba31396f2e7dd2bd3701f319e1dad3f91346
|
[
"MIT"
] | 1
|
2021-11-10T05:41:02.000Z
|
2021-11-10T05:41:02.000Z
|
from .oed import OED
__all__ = [
"OED"
]
| 7.166667
| 20
| 0.604651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0.116279
|
831472f4490aeaadae4cd1684594efc22e0edd62
| 14,400
|
py
|
Python
|
pyperformance/_manifest.py
|
cappadokes/pyperformance
|
60574dad9585eb5622631502296bb8eae143cdfc
|
[
"MIT"
] | null | null | null |
pyperformance/_manifest.py
|
cappadokes/pyperformance
|
60574dad9585eb5622631502296bb8eae143cdfc
|
[
"MIT"
] | 2
|
2022-03-09T11:14:07.000Z
|
2022-03-09T14:07:47.000Z
|
test/xml_etree/venv/cpython3.11-d52597b1179a-compat-f6a835d45d46-bm-xml_etree/lib/python3.11/site-packages/pyperformance/_manifest.py
|
sebawild/cpython
|
874ba1a9c948af33de2ad229df42e03dc516f0a8
|
[
"0BSD"
] | 1
|
2022-01-04T13:08:31.000Z
|
2022-01-04T13:08:31.000Z
|
__all__ = [
'BenchmarksManifest',
'load_manifest',
'parse_manifest',
]
from collections import namedtuple
import os.path
from . import __version__, DATA_DIR
from . import _benchmark, _utils
DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks')
DEFAULT_MANIFEST = os.path.join(DEFAULTS_DIR, 'MANIFEST')
BENCH_COLUMNS = ('name', 'metafile')
BENCH_HEADER = '\t'.join(BENCH_COLUMNS)
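# Illustrative manifest layout, inferred from the parsers below (an assumed
# example, not copied from a real MANIFEST): section names sit in square
# brackets, the [benchmarks] section is a tab-separated table whose header
# must equal BENCH_HEADER, and metafile may be a path or a <local> directive:
#
#   [benchmarks]
#   name<TAB>metafile
#   deltablue<TAB><local>
#
#   [group apps]
#   deltablue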
def load_manifest(filename, *, resolve=None):
if not filename:
filename = DEFAULT_MANIFEST
sections = _parse_manifest_file(filename)
return BenchmarksManifest._from_sections(sections, resolve, filename)
def parse_manifest(lines, *, resolve=None, filename=None):
if isinstance(lines, str):
lines = lines.splitlines()
else:
if not filename:
# Try getting the filename from a file.
filename = getattr(lines, 'name', None)
sections = _parse_manifest(lines, filename)
return BenchmarksManifest._from_sections(sections, resolve, filename)
def resolve_default_benchmark(bench):
if isinstance(bench, _benchmark.Benchmark):
spec = bench.spec
else:
spec = bench
bench = _benchmark.Benchmark(spec, '<bogus>')
bench.metafile = None
if not spec.version:
spec = spec._replace(version=__version__)
if not spec.origin:
spec = spec._replace(origin='<default>')
bench.spec = spec
if not bench.metafile:
metafile = os.path.join(DEFAULTS_DIR,
f'bm_{bench.name}',
'pyproject.toml')
bench.metafile = metafile
return bench
class BenchmarksManifest:
@classmethod
def _from_sections(cls, sections, resolve=None, filename=None):
self = cls(filename=filename)
self._add_sections(sections, resolve)
return self
def __init__(self, benchmarks=None, groups=None, filename=None):
self._raw_benchmarks = []
# XXX Support disabling all groups (except all and default)?
self._raw_groups = {}
self._raw_filename = filename
self._byname = {}
self._groups = None
self._tags = None
if benchmarks:
self._add_benchmarks(benchmarks)
if groups:
self._add_groups(groups)
def __repr__(self):
args = (f'{n}={getattr(self, "_raw_" + n)}'
for n in ('benchmarks', 'groups', 'filename'))
return f'{type(self).__name__}({", ".join(args)})'
@property
def benchmarks(self):
return list(self._byname.values())
@property
def groups(self):
names = self._custom_groups()
if not names:
names = set(self._get_tags())
return names | {'all', 'default'}
@property
def filename(self):
return self._raw_filename
def _add_sections(self, sections, resolve):
filename = self._raw_filename
_resolve = resolve
if resolve is None and filename == DEFAULT_MANIFEST:
_resolve = default_resolve = resolve_default_benchmark
sections_seen = {filename: set()}
lastfile = None
for filename, section, data in sections:
if filename != lastfile:
_resolve = resolve
if _resolve is None and filename == DEFAULT_MANIFEST:
_resolve = resolve_default_benchmark
lastfile = filename
if filename not in sections_seen:
sections_seen[filename] = {section}
elif section in sections_seen[filename]:
# For now each section can only show up once.
raise NotImplementedError((section, data))
else:
sections_seen[filename].add(section)
if section == 'includes':
pass
elif section == 'benchmarks':
entries = ((s, m, filename) for s, m in data)
self._add_benchmarks(entries, _resolve)
elif section == 'groups':
for name in data:
self._add_group(name, None)
elif section == 'group':
name, entries = data
self._add_group(name, entries)
else:
raise NotImplementedError((section, data))
def _add_benchmarks(self, entries, resolve):
for spec, metafile, filename in entries:
# XXX Ignore duplicates?
self._add_benchmark(spec, metafile, resolve, filename)
def _add_benchmark(self, spec, metafile, resolve, filename):
if spec.name in self._raw_groups:
raise ValueError(f'a group and a benchmark have the same name ({spec.name})')
if metafile:
if filename:
localdir = os.path.dirname(filename)
metafile = os.path.join(localdir, metafile)
bench = _benchmark.Benchmark(spec, metafile)
else:
metafile = None
bench = spec
self._raw_benchmarks.append((spec, metafile, filename))
if resolve is not None:
bench = resolve(bench)
self._byname[bench.name] = bench
self._groups = None # Force re-resolution.
self._tags = None # Force re-resolution.
def _add_group(self, name, entries):
if name in self._byname:
raise ValueError(f'a group and a benchmark have the same name ({name})')
if name == 'all':
# XXX Emit a warning?
return
if entries:
raw = self._raw_groups.get(name)
if raw is None:
raw = self._raw_groups[name] = list(entries) if entries else None
elif entries is not None:
raw.extend(entries)
elif name in self._raw_groups:
return
else:
self._raw_groups[name] = None
self._groups = None # Force re-resolution.
def _custom_groups(self):
return set(self._raw_groups) - {'all', 'default'}
def _get_tags(self):
if self._tags is None:
self._tags = _get_tags(self._byname.values())
self._tags.pop('all', None) # It is manifest-specific.
self._tags.pop('default', None) # It is manifest-specific.
return self._tags
def _resolve_groups(self):
if self._groups is not None:
return self._groups
raw = {}
for name, entries in self._raw_groups.items():
if entries and entries[0][0] == '-':
entries = list(entries)
entries.insert(0, ('+', '<all>'))
raw[name] = entries
self._groups = _resolve_groups(raw, self._byname)
return self._groups
def resolve_group(self, name, *, fail=True):
if name == 'all':
benchmarks = self._byname.values()
elif name == 'default':
if 'default' not in self._raw_groups:
benchmarks = self._byname.values()
else:
groups = self._resolve_groups()
benchmarks = groups.get(name)
elif not self._custom_groups():
benchmarks = self._get_tags().get(name)
if benchmarks is None and fail:
raise KeyError(name)
else:
groups = self._resolve_groups()
benchmarks = groups.get(name)
if not benchmarks:
if name in (set(self._raw_groups) - {'default'}):
benchmarks = self._get_tags().get(name, ())
elif fail:
raise KeyError(name)
yield from benchmarks or ()
def show(self, *, raw=True, resolved=True):
yield self.filename
yield 'groups:'
if raw:
yield f' {self._raw_groups}'
if resolved:
yield f' {self.groups}'
yield 'default:'
if resolved:
for i, bench in enumerate(self.resolve_group('default')):
yield f' {i:>2} {bench}'
if raw:
yield 'benchmarks (raw):'
for i, bench in enumerate(self._raw_benchmarks):
yield f' {i:>2} {bench}'
if resolved:
yield 'benchmarks:'
for i, bench in enumerate(self.benchmarks):
yield f' {i:>2} {bench}'
#######################################
# internal implementation
def _iter_sections(lines):
lines = (line.split('#')[0].strip()
for line in lines)
name = None
section = None
for line in lines:
if not line:
continue
if line.startswith('[') and line.endswith(']'):
if name:
yield name, section
name = line[1:-1].strip()
section = []
else:
if not name:
raise ValueError(f'expected new section, got {line!r}')
section.append(line)
if name:
yield name, section
else:
raise ValueError('invalid manifest file, no sections found')
def _parse_manifest_file(filename):
relroot = os.path.dirname(filename)
filename = _utils.resolve_file(filename, relroot)
with open(filename) as infile:
yield from _parse_manifest(infile, filename)
def _parse_manifest(lines, filename):
relroot = os.path.dirname(filename)
for section, seclines in _iter_sections(lines):
if section == 'includes':
yield filename, section, list(seclines)
for line in seclines:
if line == '<default>':
line = DEFAULT_MANIFEST
else:
line = _utils.resolve_file(line, relroot)
yield from _parse_manifest_file(line)
elif section == 'benchmarks':
yield filename, section, list(_parse_benchmarks_section(seclines))
elif section == 'groups':
yield filename, section, list(_parse_groups_section(seclines))
elif section.startswith('group '):
section, _, group = section.partition(' ')
entries = list(_parse_group_section(seclines))
yield filename, section, (group, entries)
else:
raise ValueError(f'unsupported section {section!r}')
def _parse_benchmarks_section(lines):
if not lines:
lines = ['<empty>']
lines = iter(lines)
if next(lines) != BENCH_HEADER:
raise ValueError('invalid manifest file, expected benchmarks table header')
version = origin = None
for line in lines:
try:
name, metafile = (None if l == '-' else l
for l in line.split('\t'))
except ValueError:
raise ValueError(f'bad benchmark line {line!r}')
spec = _benchmark.BenchmarkSpec(name or None, version, origin)
metafile = _parse_metafile(metafile, name)
yield spec, metafile
def _parse_metafile(metafile, name):
if not metafile:
return None
elif metafile.startswith('<') and metafile.endswith('>'):
directive, _, extra = metafile[1:-1].partition(':')
if directive == 'local':
if extra:
rootdir = f'bm_{extra}'
basename = f'bm_{name}.toml'
else:
rootdir = f'bm_{name}'
basename = 'pyproject.toml'
            # A relative path will be resolved against the manifest file.
return os.path.join(rootdir, basename)
else:
raise ValueError(f'unsupported metafile directive {metafile!r}')
else:
return os.path.abspath(metafile)
def _parse_groups_section(lines):
    for name in lines:
_utils.check_name(name)
yield name
def _parse_group_section(lines):
yielded = False
for line in lines:
if line.startswith('-'):
# Exclude a benchmark or group.
op = '-'
name = line[1:]
elif line.startswith('+'):
op = '+'
name = line[1:]
        else:
            op = '+'
            name = line
_benchmark.check_name(name)
yield op, name
yielded = True
def _get_tags(benchmarks):
# Fill in groups from benchmark tags.
tags = {}
for bench in benchmarks:
for tag in getattr(bench, 'tags', ()):
if tag in tags:
tags[tag].append(bench)
else:
tags[tag] = [bench]
return tags
def _resolve_groups(rawgroups, byname):
benchmarks = set(byname.values())
tags = None
groups = {
'all': list(benchmarks),
}
unresolved = {}
for groupname, entries in rawgroups.items():
if groupname == 'all':
continue
if not entries:
if groupname == 'default':
groups[groupname] = list(benchmarks)
else:
if tags is None:
tags = _get_tags(benchmarks)
groups[groupname] = tags.get(groupname, ())
continue
assert entries[0][0] == '+', (groupname, entries)
unresolved[groupname] = names = set()
for op, name in entries:
if op == '+':
if name == '<all>':
names.update(byname)
elif name in byname or name in rawgroups:
names.add(name)
elif op == '-':
if name == '<all>':
raise NotImplementedError((groupname, op, name))
elif name in byname or name in rawgroups:
if name in names:
names.remove(name)
else:
raise NotImplementedError((groupname, op, name))
while unresolved:
for groupname, names in list(unresolved.items()):
benchmarks = set()
for name in names:
if name in byname:
benchmarks.add(byname[name])
elif name in groups:
benchmarks.update(groups[name])
names.remove(name)
elif name == groupname:
names.remove(name)
break
else: # name in unresolved
names.remove(name)
                    names.update(unresolved[name])
break
else:
groups[groupname] = benchmarks
del unresolved[groupname]
return groups
| 33.103448
| 89
| 0.555556
| 6,665
| 0.462847
| 4,572
| 0.3175
| 504
| 0.035
| 0
| 0
| 1,633
| 0.113403
|
8314cb28873762113bd7dff276be8513d9a062b7
| 8,543
|
py
|
Python
|
pimux/function.py
|
pcpcpc1213/pimux
|
6ce9c3a59ac04064d46217bcdad531c7171163da
|
[
"MIT"
] | null | null | null |
pimux/function.py
|
pcpcpc1213/pimux
|
6ce9c3a59ac04064d46217bcdad531c7171163da
|
[
"MIT"
] | null | null | null |
pimux/function.py
|
pcpcpc1213/pimux
|
6ce9c3a59ac04064d46217bcdad531c7171163da
|
[
"MIT"
] | null | null | null |
from . import scrip as t
class misc():
'''
The class misc has miscellaneous methods
    of termux-api available.
Available methods are :
battery,
brightness,
vibrate,
contactlist,
torch,
downloadFile
'''
def __init__(self):
pass
def battery(self):
'''
        This method returns battery status info.
'''
self.batteryvalue=t.compute("termux-battery-status")
return self.batteryvalue["output"]
def brightness(self,Brightness):
'''
Set the brightness of your device.
It takes argument Brightness (int)
from 0 to 100.
'''
self.Brightness=Brightness
self.brightvalue=t.compute(f"termux-brightness {self.Brightness}")
return self.brightvalue["output"]
def vibrate(self,duration=1000):
'''
vibrates your phone.
Default duration is 1000ms.
'''
self.duration=duration
self.vibratevalue=t.compute(f"termux-vibrate -d {self.duration}")
return self.vibratevalue["output"]
def contactlist(self):
'''
        Dumps all contacts available on the phone.
'''
self.cvalue=t.compute("termux-contact-list")
return self.cvalue["output"]
def call(self,number):
'''
Calls a phone number.
'''
self.number=number
return t.compute(f"termux-telephony-call {self.number}")["output"]
def torch(self,switch=False):
'''
Toggles the torch on/off
Takes argument as:
True: turn on
False: turn off
'''
self.switch=switch
if self.switch == False:
self.torchvalue=t.compute("termux-torch off")
return self.torchvalue["output"]
else:
self.torchvalue=t.compute("termux-torch on")
return self.torchvalue["output"]
def downloadFile(self,description="From termux",title="Download",url=" "):
'''
This is the method for downloading anything
from the internet.
The arguments to be supplied are:
- description
- title
- url
'''
self.description=description
self.title=title
self.url=url
self.downloadF=t.compute(f"termux-download -t {self.title} {self.url}")
return self.downloadF["output"]
def fingerprint(self):
'''
This method uses the fingerprint
scanner for authentication. It
returns success or failure in JSON.
'''
        self.result=t.compute("termux-fingerprint")
        return self.result["output"]
class tts():
'''
This class is for getting tts-engine
info and for tts support.
There are two methods available:
ttsinfo
and
tts_speak
'''
def __init__(self):
pass
def ttsinfo(self):
'''
Gets tts-engines info as an output.
'''
self.ttsvalue=t.compute("termux-tts-engines")
return self.ttsvalue["output"]
def tts_speak(self,
eng="com.google.android.tts",
lang="eng",
regn="",
variant="",
pitch=1.0,
rate=1.0,
stream="",
text="Hello from termux"):
'''
This is a tts-engine api for conversion of text into speech.
It has arguments:
eng: engine
lang: language
pitch: pitch
        rate: rate
text: text to speak
#for now this feature isn't set
regn: region
variant: variant
stream: stream
for more info visit [termux wiki](https://wiki.termux.com/wiki/Termux-tts-speak)
'''
self.eng=eng
self.lang=lang
self.regn=regn
self.variant=variant
self.pitch=pitch
self.rate=rate
self.stream=stream
self.text=text
self.tvalue=t.compute(f"termux-tts-speak -e {self.eng} -l {self.lang} -p {self.pitch} -r {self.rate} {self.text}")
return self.tvalue["output"]
class camera:
'''
The class camera is for fetching
camera info or taking picture with the
camera on the android.
'''
def __init__(self):
pass
def camera(self):
'''
This method returns camera info
        of the android device. It takes no argument.
'''
self.value=t.compute("termux-camera-info")
return self.value["output"]
def takephoto(self,cid=0,saveas="newimg.jpeg"):
'''
This method is for taking picture from the
available camera on the device.
It takes two argument:
cid : camera id in int default(0)
saveas: output file name in str format default("newimg.jpeg")
'''
self.value=t.compute(f"termux-camera-photo -c {cid} {saveas}")
return self.value["output"]
class clipboard:
'''
Clipboard on android stores copied value
for short time until next copy.
This class has two methods for
setting or getting the value from
clipboard.
'''
def __init__(self):
pass
def clipboardGet(self):
'''
The clipboardGet method returns
value stored in the clipboard.
'''
self.value=t.compute("termux-clipboard-get")
return self.value["output"]
def clipboardSet(self,readval=" "):
'''
The clipboardSet method is to be used
when required to store value in the
clipboard.
BY DEFAULT if no argument given
it sets empty value in the
clipboard.
This method takes an argument of readval variable
which is a string.
'''
self.readval=readval
self.value=t.compute(f"termux-clipboard-set {self.readval}")
return self.value["output"]
class wifi:
'''
The class wifi has two methods:
- toggle method switches wifi on or off
- connectInfo method gets wifi connection
info to stdout.
'''
def __init__(self):
pass
def toggle(self,switch=False):
'''
This method has switch as one argument
whose default boolean value is false.
supply True for true(turn on)
and False for false(turn off)
'''
self.switch=switch
if self.switch == False:
self.value=t.compute("termux-wifi-enable false")
return self.value["output"]
else:
self.value=t.compute("termux-wifi-enable true")
return self.value["output"]
def connectInfo(self):
'''
This method returns wifi connection
information in stringified format.
'''
self.value=t.compute(f"termux-wifi-connectioninfo")
return self.value["output"]
class volume:
'''
The class volume has two methods:
- volumeInfo
- volumeControl
'''
def __init__(self):
pass
def volumeInfo(self):
'''
This method returns
all volume info and takes no argument.
'''
self.value=t.compute(f"termux-volume")
return self.value["output"]
def volumeControl(self,stream="ring",volume=5):
'''
This method sets the volume of the
stream.
It takes two arguments:
- stream : in str format
- volume : in int format
'''
self.value=t.compute(f"termux-volume {stream} {volume}")
return self.value["output"]
class notification:
'''
The class notification has two methods:
- notification
- removeNotification
'''
def __init__(self):
pass
def notification(self,title="title",content="content", *args, **kwargs):
'''
This method creates a notification. 'title' and
'content' are mandatory, and all other flags in
termux-notification are optional arguments
'''
cargs=[]
for i in args:
cargs.append(f"--{i}")
for i in kwargs:
cargs.append(f"-{i} {kwargs[i]}" if len(i) == 1 else f"--{i} {kwargs[i]}")
self.value=t.compute(f"temux-notification -t {title} -c {content} {' '.join(cargs)}")
def removeNotification(self,id):
'''
This method removes a notification by the id
supplied as an argument
'''
self.value=t.compute(f"termux-notification-remove {id}")
return self.value["output"]
| 26.780564
| 122
| 0.570057
| 8,492
| 0.99403
| 0
| 0
| 0
| 0
| 0
| 0
| 4,990
| 0.584104
|
83150604a0fb11e77945d0c0fcad08abbb284ce0
| 342
|
py
|
Python
|
download_from_link.py
|
bogdanf555/scripts
|
42b7b36c5891da6dcde8f7889bdf0798f91bef12
|
[
"MIT"
] | null | null | null |
download_from_link.py
|
bogdanf555/scripts
|
42b7b36c5891da6dcde8f7889bdf0798f91bef12
|
[
"MIT"
] | null | null | null |
download_from_link.py
|
bogdanf555/scripts
|
42b7b36c5891da6dcde8f7889bdf0798f91bef12
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import requests
import sys
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Error: you should pass 2 arguments: [link_to_download_from] [path_to_save_downloaded_file]")
exit(1)
url = sys.argv[1]
r = requests.get(url, allow_redirects=True)
open(sys.argv[2], 'wb').write(r.content)
| 24.428571
| 107
| 0.660819
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.362573
|
83153ac6624a05f5b11103f7bcc31634fc8bbca3
| 443
|
py
|
Python
|
vowelsubstring.py
|
boddulurisrisai/python-practice
|
bb9dfd8ea4d1fe3e4a3f7950ba63b0469e0bca28
|
[
"bzip2-1.0.6"
] | 1
|
2021-04-16T07:12:36.000Z
|
2021-04-16T07:12:36.000Z
|
vowelsubstring.py
|
boddulurisrisai/python-practice
|
bb9dfd8ea4d1fe3e4a3f7950ba63b0469e0bca28
|
[
"bzip2-1.0.6"
] | null | null | null |
vowelsubstring.py
|
boddulurisrisai/python-practice
|
bb9dfd8ea4d1fe3e4a3f7950ba63b0469e0bca28
|
[
"bzip2-1.0.6"
] | null | null | null |
import re
b=input('enter string')
r=[];max=-1;z=-1
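# Brute force: enumerate every substring b[i:j], keep those made up purely of
# vowels in r, then report the longest one found (z).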
for i in range(len(b)):
for j in range(i+1,len(b)+1):
c=b[i:j]
for k in c:
if k=='a' or k=='e' or k=='i' or k=='o' or k=='u':
flag=0
else:
flag=1
break
if flag==0:
r.append(c)
for i in r:
if len(i)>max:
max=len(i)
z=i
print(z)
| 21.095238
| 63
| 0.363431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.065463
|
8316bb71d181ce8ce3eff4b2a0a627c1843d8260
| 485
|
py
|
Python
|
syndata/__init__.py
|
Menelau/synthetic_datasets
|
86fd99042cff6a8bbdfa195fe6eee938a9c9d8f5
|
[
"MIT"
] | 6
|
2018-02-07T02:02:00.000Z
|
2020-01-22T10:33:01.000Z
|
syndata/__init__.py
|
Menelau/synthetic_datasets
|
86fd99042cff6a8bbdfa195fe6eee938a9c9d8f5
|
[
"MIT"
] | null | null | null |
syndata/__init__.py
|
Menelau/synthetic_datasets
|
86fd99042cff6a8bbdfa195fe6eee938a9c9d8f5
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Author: Rafael Menelau Oliveira e Cruz <rafaelmenelau@gmail.com>
#
# License: MIT
"""
The :mod:`syndata` module includes various utilities. They are divided into two parts:
syndata.synthetic_datasets - Provides functions to generate several 2D classification datasets.
syndata.plot_tools - Provides some routines to easily plot datasets and decision borders of a scikit-learn classifier.
"""
from .plot_tools import *
from .synthetic_datasets import *
| 28.529412
| 118
| 0.785567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 417
| 0.859794
|
831850a395edae115c39b123b0382e44942149bf
| 644
|
py
|
Python
|
profiles/migrations/0002_auto_20211214_0825.py
|
praekeltfoundation/ge-web
|
331d22554dfd6b6f6060b1fd7a110f38dd7ddece
|
[
"BSD-2-Clause"
] | 1
|
2022-03-09T15:11:52.000Z
|
2022-03-09T15:11:52.000Z
|
profiles/migrations/0002_auto_20211214_0825.py
|
praekeltfoundation/ge-web
|
331d22554dfd6b6f6060b1fd7a110f38dd7ddece
|
[
"BSD-2-Clause"
] | 14
|
2022-01-03T09:49:41.000Z
|
2022-03-31T12:53:31.000Z
|
profiles/migrations/0002_auto_20211214_0825.py
|
praekeltfoundation/ge-web
|
331d22554dfd6b6f6060b1fd7a110f38dd7ddece
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 3.1.14 on 2021-12-14 08:25
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0066_collection_management_permissions'),
('profiles', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profilesettings',
name='terms_and_conditions',
field=models.ForeignKey(blank=True, help_text='Choose a Terms and Conditions page', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.page'),
),
]
| 30.666667
| 194
| 0.673913
| 517
| 0.802795
| 0
| 0
| 0
| 0
| 0
| 0
| 221
| 0.343168
|
8318aea9b693ecf60895b29261a418a03e789bc8
| 4,290
|
py
|
Python
|
radmc-3d/version_0.41/examples/run_spher2d_1_nomirror/problem_setup.py
|
dlmatra/miao
|
71799811b21a4249754390a8ec00972723edab99
|
[
"MIT"
] | 1
|
2019-11-23T00:03:40.000Z
|
2019-11-23T00:03:40.000Z
|
radmc-3d/version_0.41/examples/run_spher2d_1_nomirror/problem_setup.py
|
dlmatra/miao
|
71799811b21a4249754390a8ec00972723edab99
|
[
"MIT"
] | 3
|
2021-05-26T12:54:50.000Z
|
2021-05-27T10:58:48.000Z
|
radmc-3d/version_0.41/examples/run_spher2d_1_nomirror/problem_setup.py
|
dlmatra/miao
|
71799811b21a4249754390a8ec00972723edab99
|
[
"MIT"
] | 1
|
2021-12-23T14:09:52.000Z
|
2021-12-23T14:09:52.000Z
|
#
# Import NumPy for array handling
#
import numpy as np
import math
#
# Import plotting libraries (start Python with ipython --matplotlib)
#
#from mpl_toolkits.mplot3d import axes3d
#from matplotlib import pyplot as plt
#
# Some natural constants
#
au = 1.49598e13 # Astronomical Unit [cm]
pc = 3.08572e18 # Parsec [cm]
ms = 1.98892e33 # Solar mass [g]
ts = 5.78e3 # Solar temperature [K]
ls = 3.8525e33 # Solar luminosity [erg/s]
rs = 6.96e10 # Solar radius [cm]
#
# Monte Carlo parameters
#
nphot = 100000
#
# Grid parameters
#
nx = 100
ny = 120
nz = 1
#
# Model parameters
#
rin = 5*au
rout = 100*au
zmaxr = 0.5e0
rho0 = 1e-16 * 10000
prho = -2.e0
hpr = 0.1e0
#
# Star parameters
#
mstar = ms
rstar = rs
tstar = ts
pstar = [0.,0.,0.]
#
# Make the coordinates
#
# Note: The way the xi grid is made is slightly non-standard, but is
# done this way to be consistent with problem_setup.pro (the IDL version)
#
xi = rin * (rout/rin)**(np.linspace(0.,nx,nx+1)/(nx-1.0))
yi = math.pi/2.0 - zmaxr*np.linspace(ny*0.5,-ny*0.5,ny+1)/(ny*0.5)
zi = np.array([0.,math.pi*2])
xc = 0.5e0 * ( xi[0:nx] + xi[1:nx+1] )
yc = 0.5e0 * ( yi[0:ny] + yi[1:ny+1] )
#
# Make the dust density model
#
rr,tt = np.meshgrid(xc,yc,indexing='ij')
zzr = math.pi/2.0 - tt
rhod = rho0 * (rr/au)**prho
rhod = rhod * np.exp(-0.50*(zzr/hpr)**2)
#
# Write the wavelength_micron.inp file
#
lam1 = 0.1e0
lam2 = 7.0e0
lam3 = 25.e0
lam4 = 1.0e4
n12 = 20
n23 = 100
n34 = 30
lam12 = np.logspace(np.log10(lam1),np.log10(lam2),n12,endpoint=False)
lam23 = np.logspace(np.log10(lam2),np.log10(lam3),n23,endpoint=False)
lam34 = np.logspace(np.log10(lam3),np.log10(lam4),n34,endpoint=True)
lam = np.concatenate([lam12,lam23,lam34])
nlam = lam.size
#
# Write the wavelength file
#
with open('wavelength_micron.inp','w+') as f:
f.write('%d\n'%(nlam))
for value in lam:
f.write('%13.6e\n'%(value))
#
#
# Write the stars.inp file
#
with open('stars.inp','w+') as f:
f.write('2\n')
f.write('1 %d\n\n'%(nlam))
f.write('%13.6e %13.6e %13.6e %13.6e %13.6e\n\n'%(rstar,mstar,pstar[0],pstar[1],pstar[2]))
for value in lam:
f.write('%13.6e\n'%(value))
f.write('\n%13.6e\n'%(-tstar))
#
# Write the grid file
#
with open('amr_grid.inp','w+') as f:
f.write('1\n') # iformat
f.write('0\n') # AMR grid style (0=regular grid, no AMR)
f.write('100\n') # Coordinate system
f.write('0\n') # gridinfo
f.write('1 1 0\n') # Include x,y,z coordinate
f.write('%d %d %d\n'%(nx,ny,nz)) # Size of grid
for value in xi:
f.write('%13.6e\n'%(value)) # X coordinates (cell walls)
for value in yi:
f.write('%13.6e\n'%(value)) # Y coordinates (cell walls)
for value in zi:
f.write('%13.6e\n'%(value)) # Z coordinates (cell walls)
#
# Write the density file
#
with open('dust_density.inp','w+') as f:
f.write('1\n') # Format number
f.write('%d\n'%(nx*ny*nz)) # Nr of cells
f.write('1\n') # Nr of dust species
data = rhod.ravel(order='F') # Create a 1-D view, fortran-style indexing
data.tofile(f, sep='\n', format="%13.6e")
f.write('\n')
#
# Dust opacity control file
#
with open('dustopac.inp','w+') as f:
f.write('2 Format number of this file\n')
f.write('1 Nr of dust species\n')
f.write('============================================================================\n')
f.write('1 Way in which this dust species is read\n')
f.write('0 0=Thermal grain\n')
f.write('silicate Extension of name of dustkappa_***.inp file\n')
f.write('----------------------------------------------------------------------------\n')
#
# Write the radmc3d.inp control file
#
with open('radmc3d.inp','w+') as f:
f.write('nphot = %d\n'%(nphot))
f.write('scattering_mode_max = 0\n') # Put this to 1 for isotropic scattering
| 30.642857
| 94
| 0.530536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,988
| 0.463403
|
83191aecc9d861bb7dfa42c1c5b079d943885a2f
| 5,508
|
py
|
Python
|
colorprinter/pycolor.py
|
edonyzpc/toolkitem
|
3a09ebf45eee8ecd9ff0e441392d5fc746b996e5
|
[
"MIT"
] | 3
|
2015-04-20T08:17:09.000Z
|
2020-07-07T15:22:06.000Z
|
colorprinter/pycolor.py
|
edonyzpc/toolkitem
|
3a09ebf45eee8ecd9ff0e441392d5fc746b996e5
|
[
"MIT"
] | 24
|
2015-11-14T14:54:59.000Z
|
2017-10-23T15:14:45.000Z
|
colorprinter/pycolor.py
|
edonyzpc/toolkitem
|
3a09ebf45eee8ecd9ff0e441392d5fc746b996e5
|
[
"MIT"
] | 1
|
2017-02-28T06:35:44.000Z
|
2017-02-28T06:35:44.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2017-03-19 21:24
#
# Filename: pycolor.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
#import numpy as np
from __future__ import print_function
class PyColor(object):
"""Colorful format string in Linux terminal stdout.
------
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGOUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
DISPLAY MODEL DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
8 non-visiable
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
def __init__(self, fmt=None):
self.magic = '\033['
self.color = fmt
self.__formats = {'red':(0, 31, 40), 'green':(0, 32, 40), 'cyan':(0, 36, 40),
'purple':(0, 35, 40), 'yellow':(0, 33, 40), 'white':(0, 37, 40),
'ured':(4, 31, 40), 'ugreen':(4, 32, 40), 'ucyan':(4, 36, 40),
'upurple':(4, 35, 40), 'uyellow':(4, 33, 40), 'uwhite':(4, 37, 40),
'fred':(5, 31, 40), 'fgreen':(5, 32, 40), 'fcyan':(5, 36, 40),
'fpurple':(5, 35, 40), 'fyellow':(5, 33, 40), 'fwhite':(5, 37, 40),
'nred':(5, 31, 40), 'ngreen':(5, 32, 40), 'ncyan':(5, 36, 40),
'npurple':(5, 35, 40), 'nyellow':(5, 33, 40), 'nwhite':(5, 37, 40)
}
if fmt in self.__formats.keys():
fmt_tmp = self.__formats[fmt]
# display model: [0, 1, 4, 5, 7, 8]
self._mod = str(fmt_tmp[0]) + ';'
# foreground color: [30, 31, 32, 33, 34, 35, 36, 37]
self._fg_color = str(fmt_tmp[1]) + ';'
# background color: [40m, 41m, 42m, 43m, 44m, 45m, 46m, 47m]
self._bg_color = str(fmt_tmp[2]) + 'm'
else:
self._mod = '0;'
self._fg_color = '37;'
self._bg_color = '40m'
# output format string
self._format = self.magic +\
self._mod +\
self._fg_color +\
self._bg_color
# reset the format
self.reset = '\033[0m'
def __call__(self, func):
"""decorator for colorful the printed string in terminal
"""
def wrapper(fmt_str):
"""convert printed string into formated colorful string
"""
func(self.colorstr(fmt_str))
return wrapper
@property
def format(self):
"""
Customized Python Print Color.
"""
return self.color
@format.setter
def format(self, color_str):
"""
New Color.
"""
self.color = color_str
self._format = self.magic +\
';'.join(list(map(str, self.__formats[color_str]))) +\
'm'
def disable(self):
"""
Disable Color Print.
"""
self.color = ''
self._format = ''
def __str2fmts(self, color_str):
"""
Convert description of format into format number
"""
self.format = color_str
def colorstr(self, string, color=None):
"""Contert string to colorful format string
"""
if color is None:
return self._format + string + self.reset
else:
self.__str2fmts(color)
return self._format + string + self.reset
def cprint(color, out_str):
"""Colorful print function instead of standard print
"""
@PyColor(color)
def printer(out_str):
"""inner function of standard print wrapper
"""
print(out_str)
printer(out_str)
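# Illustrative usage (not part of the original module): cprint builds a PyColor
# decorator for the requested format name and prints the wrapped string, e.g.
#   cprint('red', 'plain red on a black background')
#   cprint('ugreen', 'underlined green on a black background')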
| 35.766234
| 93
| 0.396696
| 3,940
| 0.715323
| 0
| 0
| 511
| 0.092774
| 0
| 0
| 3,250
| 0.590051
|
831a95d5b9d61001fca6140bef2832489872b9e3
| 1,684
|
py
|
Python
|
launch/velocity_smoother-composed-launch.py
|
doisyg/velocity_smoother
|
5ba998978e324fd0417ea75483d1f5559820459d
|
[
"BSD-3-Clause"
] | 8
|
2020-02-28T10:40:53.000Z
|
2022-01-15T06:42:11.000Z
|
launch/velocity_smoother-composed-launch.py
|
doisyg/velocity_smoother
|
5ba998978e324fd0417ea75483d1f5559820459d
|
[
"BSD-3-Clause"
] | 9
|
2020-01-20T16:32:14.000Z
|
2022-01-28T13:49:59.000Z
|
launch/velocity_smoother-composed-launch.py
|
doisyg/velocity_smoother
|
5ba998978e324fd0417ea75483d1f5559820459d
|
[
"BSD-3-Clause"
] | 3
|
2020-03-19T09:40:35.000Z
|
2022-01-11T01:47:41.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Open Source Robotics Foundation, Inc.
#
# Software License Agreement (BSD License 2.0)
# https://raw.githubusercontent.com/kobuki-base/velocity_smoother/license/LICENSE
"""Launch the velocity smoother as a composed node with default configuration."""
import os
import ament_index_python.packages
from launch import LaunchDescription
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
import yaml
def generate_launch_description():
share_dir = ament_index_python.packages.get_package_share_directory('velocity_smoother')
# Passing parameters to a composed node must be done via a dictionary of
# key -> value pairs. Here we read in the data from the configuration file
# and create a dictionary of it that the ComposableNode will accept.
params_file = os.path.join(share_dir, 'config', 'velocity_smoother_params.yaml')
with open(params_file, 'r') as f:
params = yaml.safe_load(f)['velocity_smoother']['ros__parameters']
container = ComposableNodeContainer(
node_name='velocity_smoother_container',
node_namespace='',
package='rclcpp_components',
node_executable='component_container',
composable_node_descriptions=[
ComposableNode(
package='velocity_smoother',
node_plugin='velocity_smoother::VelocitySmoother',
node_name='velocity_smoother',
parameters=[params]),
],
output='both',
)
return LaunchDescription([container])
| 35.829787
| 92
| 0.694774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 780
| 0.463183
|
831b642dcce9a13a8398668c6c09e24217cd6b3c
| 3,616
|
py
|
Python
|
lib/taskstats/controller.py
|
tijko/IO-Mon
|
4fb43c6c97b22f9a44eb34ef2221f1ed2abb062b
|
[
"MIT"
] | 1
|
2015-12-17T04:58:09.000Z
|
2015-12-17T04:58:09.000Z
|
lib/taskstats/controller.py
|
tijko/IO-Mon
|
4fb43c6c97b22f9a44eb34ef2221f1ed2abb062b
|
[
"MIT"
] | null | null | null |
lib/taskstats/controller.py
|
tijko/IO-Mon
|
4fb43c6c97b22f9a44eb34ef2221f1ed2abb062b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import struct
import socket
from netlink import *
NETLINK_ROUTE = 0
NETLINK_UNUSED = 1
NETLINK_USERSOCK = 2
NETLINK_FIREWALL = 3
NETLINK_SOCK_DIAG = 4
NETLINK_NFLOG = 5
NETLINK_XFRM = 6
NETLINK_SELINUX = 7
NETLINK_ISCSI = 8
NETLINK_AUDIT = 9
NETLINK_FIB_LOOKUP = 10
NETLINK_CONNECTOR = 11
NETLINK_NETFILTER = 12
NETLINK_IP6_FW = 13
NETLINK_DNRTMSG = 14
NETLINK_KOBJECT_UEVENT = 15
NETLINK_GENERIC = 16
NETLINK_SCSITRANSPORT = 18
NETLINK_ECRYPTFS = 19
NETLINK_RDMA = 20
NETLINK_CRYPTO = 21
NETLINK_INET_DIAG = NETLINK_SOCK_DIAG
class Connection(object):
'''
Base class that establishes a netlink connection with the kernel.
'''
def __init__(self, family):
self.family = family
self.conn = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, family)
self.conn.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 65536)
self.conn.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 65536)
self.conn.bind((0, 0))
def send(self, msg):
self.conn.send(msg)
def recv(self):
return self.conn.recv(65536)
# Genetlink Controller command and attribute values
CTRL_CMD_UNSPEC = 0
CTRL_CMD_NEWFAMILY = 1
CTRL_CMD_DELFAMILY = 2
CTRL_CMD_GETFAMILY = 3
CTRL_CMD_NEWOPS = 4
CTRL_CMD_DELOPS = 5
CTRL_CMD_GETOPS = 6
CTRL_CMD_NEWMCAST_GRP = 7
CTRL_CMD_DELCAST_GRP = 8
CTRL_CMD_GETMCAST_GRP = 9
__CTRL_CMD_MAX = 10
TASKSTATS_GENL_VERSION = 0x1
GENL_HDRLEN = struct.calcsize('BBxx')
class Genlmsg(object):
'''
Generic netlink message container, this class is to encapsulate the fields
of struct genlmsghdr.
struct genlmsghdr {
__u8 cmd;
__u8 version;
__u16 reserved;
};
the `.pack()` method returns a binary c-formatted string of the generic
netlink header and its associated payload.
@param cmd :: the generic netlink command.
@type cmd :: int
@param nlattr :: Nlattr object containing the attributes for the call.
@type nlattr :: Nlattr Class Object
@param version :: the generic netlink version of the interface (defaults to
taskstats)
@type version :: int
'''
def __init__(self, cmd, nlattr, version=TASKSTATS_GENL_VERSION):
self.cmd = cmd
self.version = version
self.nlattr = nlattr
self.payload = self.nlattr.pack()
self.genlen = GENL_HDRLEN + self.nlattr.nla_len
def pack(self):
genlhdr = struct.pack('BBxx', self.cmd, self.version)
return genlhdr + self.payload
class Controller(Connection):
'''
Controller class that establishes a generic netlink connection with
family of the supplied 'genl_name'.
'''
def __init__(self, genl_name):
super(Controller, self).__init__(NETLINK_GENERIC)
self.genl_name = genl_name
self.genlhdr = Genlmsg(CTRL_CMD_GETFAMILY, Nlattr(CTRL_ATTR_FAMILY_NAME,
self.genl_name))
self.attrs = dict()
self.pid = os.getpid()
self.fam_id = self.get_family_id
@property
def get_family_id(self):
nlmsg = Nlmsg(GENL_ID_CTRL, self.pid, self.genlhdr).pack()
self.send(nlmsg)
family_id_reply = self.recv()
parse_response(self, family_id_reply)
return struct.unpack('I', self.attrs[CTRL_ATTR_FAMILY_ID])[0]
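# Illustrative usage (assumed, not part of the original module): the kernel
# registers the taskstats interface under the generic netlink family name
# "TASKSTATS", so resolving its family id would look like
#   ctrl = Controller('TASKSTATS')
#   family_id = ctrl.fam_id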
| 28.472441
| 80
| 0.638274
| 2,438
| 0.674226
| 0
| 0
| 284
| 0.07854
| 0
| 0
| 978
| 0.270465
|
831cac4a9b399f71b7446e06e08d2d1e23c17328
| 1,335
|
py
|
Python
|
app/marketing/migrations/0002_membership.py
|
NDevox/website
|
76004e667f2295eddd79d500ba21f02a0480412f
|
[
"Apache-2.0"
] | null | null | null |
app/marketing/migrations/0002_membership.py
|
NDevox/website
|
76004e667f2295eddd79d500ba21f02a0480412f
|
[
"Apache-2.0"
] | null | null | null |
app/marketing/migrations/0002_membership.py
|
NDevox/website
|
76004e667f2295eddd79d500ba21f02a0480412f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-12 04:25
from __future__ import unicode_literals
from django.db import migrations, models
def forward(apps, schema_editor):
db_alias = schema_editor.connection.alias
Cron = apps.get_model('django_celery_beat', 'CrontabSchedule')
cron = Cron.objects.using(db_alias).create(minute='0', hour='0')
Task = apps.get_model('django_celery_beat', 'PeriodicTask')
Task.objects.using(db_alias).create(name='Capture slack membership counts',
task='marketing.tasks.capture_snapshot_of_user_count', # noqa
crontab=cron)
class Migration(migrations.Migration):
initial = True
dependencies = [
('marketing', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Membership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('member_count', models.IntegerField()),
('deleted_count', models.IntegerField()),
('bot_count', models.IntegerField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
),
migrations.RunPython(forward),
]
| 32.560976
| 114
| 0.605243
| 656
| 0.491386
| 0
| 0
| 0
| 0
| 0
| 0
| 331
| 0.24794
|
831cd9a75c39325f8b2e668fec868da457fe98e6
| 4,552
|
py
|
Python
|
Solutions/VMX2-VoicemailExpress/Code/vmx_transcriber.py
|
cbgandhi-code/amazon-connect-salesforce-scv
|
fc5da5445b01295e530b50aa774598e91087c57a
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
Solutions/VMX2-VoicemailExpress/Code/vmx_transcriber.py
|
cbgandhi-code/amazon-connect-salesforce-scv
|
fc5da5445b01295e530b50aa774598e91087c57a
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
Solutions/VMX2-VoicemailExpress/Code/vmx_transcriber.py
|
cbgandhi-code/amazon-connect-salesforce-scv
|
fc5da5445b01295e530b50aa774598e91087c57a
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
# Version: 2022.03.23
"""
**********************************************************************************************************************
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated *
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation *
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and *
* to permit persons to whom the Software is furnished to do so. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO *
* THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF *
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS *
* IN THE SOFTWARE. *
**********************************************************************************************************************
"""
import json
import boto3
import os
import logging
logger = logging.getLogger()
logger.setLevel(logging.getLevelName(os.getenv('lambda_logging_level', 'INFO')))
def lambda_handler(event, context):
logger.debug(event)
# Establish a loop counter
loop_counter = 0
# Process the incoming S3 event
for recording in event['Records']:
# Increment loop
loop_counter = loop_counter+1
# Grab incoming data elements from the S3 event
try:
recording_key = recording['s3']['object']['key']
recording_name = recording_key.replace('voicemail_recordings/','')
contact_id = recording_name.replace('.wav','')
recording_bucket = recording['s3']['bucket']['name']
except Exception as e:
logger.error(e)
logger.debug('Record {0} Result: Failed to extract data from event'.format(loop_counter))
continue
# Establish the S3 client and get the object tags
try:
s3_client = boto3.client('s3')
object_data = s3_client.get_object_tagging(
Bucket=recording_bucket,
Key=recording_key
)
object_tags = object_data['TagSet']
loaded_tags = {}
for i in object_tags:
loaded_tags.update({i['Key']:i['Value']})
except Exception as e:
logger.error(e)
logger.debug('Record {0} Result: Failed to extract tags from object'.format(loop_counter))
continue
# Build the Recording URL
try:
recording_url = 'https://{0}.s3-{1}.amazonaws.com/{2}'.format(recording_bucket, recording['awsRegion'], recording_key)
except Exception as e:
logger.error(e)
logger.debug('Record {0} Result: Failed to generate recording URL'.format(loop_counter))
continue
# Do the transcription
try:
            # Establish the client
transcribe_client = boto3.client('transcribe')
# Submit the transcription job
transcribe_response = transcribe_client.start_transcription_job(
TranscriptionJobName=contact_id,
LanguageCode=loaded_tags['vm_lang'],
MediaFormat='wav',
Media={
'MediaFileUri': recording_url
},
OutputBucketName=os.environ['s3_transcripts_bucket']
)
except Exception as e:
logger.error(e)
logger.debug('Record {0} Result: Transcription job failed'.format(loop_counter))
continue
logger.debug('Record {0} Result: Success!'.format(loop_counter))
return {
'status': 'complete',
'result': '{0} records processed'.format(loop_counter)
}
| 43.352381
| 130
| 0.536028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,502
| 0.549649
|
831d6ec37b4d0e0a6e4200545a3b9e01d0fe7f0e
| 306
|
py
|
Python
|
api/permissions.py
|
soltanoff/simple_file_server
|
4e825358341fae0564fc498e8374a3d3cdda199e
|
[
"MIT"
] | 2
|
2018-06-15T11:39:42.000Z
|
2019-08-14T20:55:15.000Z
|
api/permissions.py
|
soltanoff/simple_file_server
|
4e825358341fae0564fc498e8374a3d3cdda199e
|
[
"MIT"
] | 7
|
2018-12-04T07:35:24.000Z
|
2022-03-11T23:12:10.000Z
|
api/permissions.py
|
soltanoff/simple_file_server
|
4e825358341fae0564fc498e8374a3d3cdda199e
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
class IsStaffOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow admins to edit it.
"""
def has_object_permission(self, request, view, obj):
return request.method in permissions.SAFE_METHODS or request.user.is_staff
| 27.818182
| 82
| 0.751634
| 264
| 0.862745
| 0
| 0
| 0
| 0
| 0
| 0
| 66
| 0.215686
|
831dc5f3bb8ccadfd806896689571f12c96946bc
| 712
|
py
|
Python
|
capstone/rl/utils/linear_annealing.py
|
davidrobles/mlnd-capstone-code
|
19ca88aaa137665af147da9bbd0e510829a14cf1
|
[
"MIT"
] | 2
|
2017-04-13T18:31:39.000Z
|
2017-05-06T05:14:12.000Z
|
capstone/rl/utils/linear_annealing.py
|
davidrobles/mlnd-capstone-code
|
19ca88aaa137665af147da9bbd0e510829a14cf1
|
[
"MIT"
] | null | null | null |
capstone/rl/utils/linear_annealing.py
|
davidrobles/mlnd-capstone-code
|
19ca88aaa137665af147da9bbd0e510829a14cf1
|
[
"MIT"
] | null | null | null |
from .callbacks import Callback
class LinearAnnealing(Callback):
def __init__(self, obj, param, init, final, n_episodes):
self.doing = 'inc' if init < final else 'dec'
self.obj = obj
self.param = param
self.init = init
self.final = final
self.n_episodes = n_episodes
self.change_rate = (final - init) / n_episodes
def on_episode_end(self, episode, qf):
if ((self.doing == 'inc' and getattr(self.obj, self.param) < self.final) or
(self.doing == 'dec' and getattr(self.obj, self.param) > self.final)):
prev = getattr(self.obj, self.param)
setattr(self.obj, self.param, prev + self.change_rate)
| 35.6
| 83
| 0.606742
| 677
| 0.950843
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.02809
|
8320400ac8c357808906cc6070706d68af6624bc
| 6,466
|
py
|
Python
|
genTraining_recurr.py
|
lasinger/-3DVideos2Stereo
|
9608654ec37d157133c43531ac0002102e86dbab
|
[
"MIT"
] | 62
|
2020-01-15T10:27:46.000Z
|
2022-03-14T09:23:58.000Z
|
genTraining_recurr.py
|
lasinger/-3DVideos2Stereo
|
9608654ec37d157133c43531ac0002102e86dbab
|
[
"MIT"
] | 4
|
2020-03-10T08:13:59.000Z
|
2021-12-09T09:35:58.000Z
|
genTraining_recurr.py
|
lasinger/-3DVideos2Stereo
|
9608654ec37d157133c43531ac0002102e86dbab
|
[
"MIT"
] | 15
|
2020-01-17T02:06:54.000Z
|
2022-02-24T06:32:40.000Z
|
from __future__ import print_function
import numpy as np
import argparse
import glob
import os
import errno
import math
import cv2
from random import shuffle
from shutil import copyfile
parser = argparse.ArgumentParser(
description="create training/test/validation sets from video list"
)
parser.add_argument("--videoListPath", type=str, help="path to videos", required=True)
parser.add_argument(
"--fpsSingle", type=int, help="fps for single frame processing", default=2
)
parser.add_argument(
"--numRecurrent", type=int, help="how many recurent steps", default=3
)
parser.add_argument(
"--fpsRecurrent", type=int, help="fps for reccurent part", default=24
)
parser.add_argument(
"--chapterTiming",
type=str,
help="start and end timing list for all chapters",
default="timingChapters.txt",
)
parser.add_argument("--name", type=str, help="run name", default="training")
parser.add_argument("--blacklist", type=str, help="ignore video", default="-1")
parser.add_argument(
"--whitelist",
type=str,
help="specifies list of selected videos, if not set all videos are selected",
default="-1",
)
args = parser.parse_args()
def silentremove(filename):
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise # re-raise exception if a different error occurred
def processChapter_cutlist(
video,
chap,
origFramerate,
timing,
outputFileSingle,
cutList,
numRecurrent,
fpsRecurrent,
):
videoNameSplit = video.split("/")
videoName = videoNameSplit[-2]
imgPathRel = videoName + "/chapter" + str(chap) + "/"
modFrameFactorSingle = int(round(origFramerate / args.fpsSingle))
stepRecurrent = int(round(origFramerate / fpsRecurrent))
numRecurrent = (
numRecurrent + stepRecurrent * 2
) # extra frames in case of flow estimation
logFilename = video + "log" + str(chap) + ".txt"
with open(logFilename, "r") as fp:
with open(outputFileSingle, "a") as ofp_single:
prevIdx = -1
# iterate over log list
for cnt, line in enumerate(fp):
idx = line.find("pts_time:")
if idx == -1:
continue
pts_time = float(line[idx + 9 : idx + 9 + 7])
idx2 = line.find("n:")
frame_idx = int(line[idx2 + 2 : idx2 + 2 + 5]) + 1
                # use floor here to be on the safe side
if pts_time <= timing[0] or pts_time > math.floor(timing[1]):
continue
# ignore if at cut position
if pts_time in cutList:
continue
# sequence already processed
if frame_idx < prevIdx:
continue
largerElemCutList = [
x for x in cutList if x > pts_time and x < timing[1]
]
largerElemCutList.append(timing[1])
cutTimeNext = min(largerElemCutList)
smallerElemCutList = [
x for x in cutList if x < pts_time and x > timing[0]
]
smallerElemCutList.append(timing[0])
seqLength = (cutTimeNext - pts_time) * origFramerate
# for long sequences jump to some point later in the same sequence
jump = min(int(seqLength), origFramerate * 4)
prevIdx = frame_idx + int(jump)
                # ignore if sequence is too short
if seqLength < numRecurrent * stepRecurrent:
continue
imgFilename = {}
existing = True
for ri in range(0, numRecurrent * stepRecurrent):
frame_recurr = int(frame_idx + ri + 1)
frame_str = str(frame_recurr).zfill(8)
if ri % stepRecurrent != 0:
continue
ri_rec = int(ri / stepRecurrent)
imgFilename[ri_rec] = "out" + frame_str
if existing == False:
continue
for ri in range(stepRecurrent * 2, numRecurrent):
if (ri - stepRecurrent * 2) % modFrameFactorSingle == 0:
ofp_single.write(imgPathRel + imgFilename[ri] + "\n")
def processShotFile(video, shotFile):
numFrames = 0
cutList = []
with open(video + shotFile, "r") as fp:
for cnt, line in enumerate(fp):
# get cuts
idx = line.find("pkt_pts_time=")
if idx != -1:
numFrames = numFrames + 1
pts_time = float(line[idx + 13 : idx + 13 + 8])
cutList.append(pts_time)
return cutList
def main():
videoList = glob.glob(args.videoListPath + "*/")
origFramerate = 24
trainingSingleFile = (
args.videoListPath
+ args.name
+ "_"
+ str(args.fpsSingle)
+ "fpsSingle_"
+ str(args.fpsRecurrent)
+ "fps_"
+ str(args.numRecurrent)
+ "frames"
+ "_single.txt"
)
silentremove(trainingSingleFile)
for video in videoList:
print(video)
videoNameSplit = video.split("/")
videoName = videoNameSplit[-2]
if videoName in args.blacklist:
print(videoName + " on blacklist")
continue
if args.whitelist != "-1" and videoName not in args.whitelist:
print(videoName + " not on whitelist")
continue
print("processing " + videoName)
cutList = processShotFile(video, "shots.txt")
print(len(cutList))
timingList = []
with open(video + args.chapterTiming, "r") as fp:
timingListTmp = fp.read().splitlines()
for timingLine in timingListTmp:
timingList.append([float(x) for x in timingLine.split(",")])
chapterList = glob.glob(video + "log*.txt")
numChapters = len(chapterList)
validChapters = range(2, numChapters)
trainingSet = validChapters
for chap in trainingSet:
processChapter_cutlist(
video,
chap,
origFramerate,
timingList[chap - 1],
trainingSingleFile,
cutList,
args.numRecurrent,
args.fpsRecurrent,
)
main()
| 30.64455
| 86
| 0.558769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 957
| 0.148005
|
83207ebe69e3bf9bcd3f660b07c8f5bca9f8663b
| 2,038
|
py
|
Python
|
seeq/addons/clustering/__main__.py
|
seeq12/seeq-clustering
|
220793499d5f9669e7d9dde4820af0eee27f84dc
|
[
"Apache-2.0"
] | 3
|
2021-10-15T05:32:44.000Z
|
2021-12-14T16:33:24.000Z
|
seeq/addons/clustering/__main__.py
|
seeq12/seeq-clustering
|
220793499d5f9669e7d9dde4820af0eee27f84dc
|
[
"Apache-2.0"
] | 2
|
2021-11-19T17:46:06.000Z
|
2022-01-20T06:54:00.000Z
|
seeq/addons/clustering/__main__.py
|
seeq12/seeq-clustering
|
220793499d5f9669e7d9dde4820af0eee27f84dc
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import argparse
from ._install_addon import install_addon
def cli_interface():
""" Installs Seeq Add-on Tool """
parser = argparse.ArgumentParser(description='Install Clustering as a Seeq Add-on Tool')
parser.add_argument('--username', type=str, default=None,
help='Username or Access Key of Seeq admin user installing the tool(s) ')
parser.add_argument('--seeq_url', type=str,
help="Seeq hostname URL with the format https://my.seeq.com/ or https://my.seeq.com:34216")
parser.add_argument('--app_url', type=str,
help="URL of clustering app notebook with the format e.g. https://my.seeq.com/data-lab/CBA9A827-35A8-4944-8A74-EE7008DC3ED8/notebooks/hb/seeq/addons/clustering/App.ipynb")
parser.add_argument('--users', type=str, nargs='*', default=[],
help="List of the Seeq users to will have access to the Correlation Add-on Tool,"
" default: %(default)s")
parser.add_argument('--groups', type=str, nargs='*', default=['Everyone'],
help="List of the Seeq groups to will have access to the Correlation Add-on Tool, "
"default: %(default)s")
parser.add_argument('--password', type=str, default=None,
help="Password of Seeq user installing the tool. Must supply a password if not supplying an accesskey for username")
parser.add_argument('--sort_key', type=str, default=None,
help="A string, typically one character letter. The sort_key determines the order in which the Add-on Tools are displayed in the tool panel, "
"default: %(default)s")
return parser.parse_args()
if __name__ == '__main__':
args = cli_interface()
install_addon(
sort_key=args.sort_key,
permissions_group=args.groups,
permissions_users=args.users,
username=args.username,
password=args.password
)
| 49.707317
| 195
| 0.632483
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 964
| 0.473013
|
8321d10093f3ed3b6d58be76b8214f867e414822
| 939
|
py
|
Python
|
utils/customchecks.py
|
arielbeje/good-bot-name
|
de1429ea5b653fd8ee88d649452ebef7e7399e5b
|
[
"MIT"
] | 10
|
2018-04-08T00:02:18.000Z
|
2022-01-25T18:34:06.000Z
|
utils/customchecks.py
|
arielbeje/good-bot-name
|
de1429ea5b653fd8ee88d649452ebef7e7399e5b
|
[
"MIT"
] | 14
|
2018-01-26T16:55:09.000Z
|
2021-09-19T11:35:58.000Z
|
utils/customchecks.py
|
arielbeje/Good_Bot_Name
|
de1429ea5b653fd8ee88d649452ebef7e7399e5b
|
[
"MIT"
] | 14
|
2018-02-14T01:35:08.000Z
|
2021-03-30T12:18:03.000Z
|
"""
Code stolen from https://github.com/Rapptz/discord.py
"""
import functools
import discord
from discord.ext import commands
from . import sql
class NotAModError(commands.CheckFailure):
pass
class NoTokenError(Exception):
pass
def is_mod():
async def predicate(ctx):
ch = ctx.channel
permissions = ch.permissions_for(ctx.author)
if permissions.administrator:
return True
msg = ctx.message
if not msg.guild:
raise NotAModError()
return False
getter = functools.partial(discord.utils.get, msg.author.roles)
modroles = [int(result[0]) for result in await sql.fetch("SELECT roleid FROM modroles WHERE serverid=?", str(ctx.message.guild.id))]
if not any(getter(id=role) is not None for role in modroles):
raise NotAModError()
return False
return True
return commands.check(predicate)
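# A minimal usage sketch, assuming a hypothetical bot command; is_mod() returns
# a commands.check, so it stacks underneath the command decorator:
#
# @bot.command()
# @is_mod()
# async def purge(ctx, amount: int):
#     await ctx.channel.purge(limit=amount)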
| 24.076923
| 140
| 0.652822
| 90
| 0.095847
| 0
| 0
| 0
| 0
| 637
| 0.678381
| 107
| 0.113951
|
83226ea13035cf8a8cc076a6baf244dd22963a78
| 3,107
|
py
|
Python
|
tests/test_lambda_lapsed.py
|
BostonDSA/actionnetwork-activist-sync
|
f4b45ec85d59ac252c5572974381e96ec0107add
|
[
"MIT"
] | 1
|
2021-12-14T17:34:20.000Z
|
2021-12-14T17:34:20.000Z
|
tests/test_lambda_lapsed.py
|
BostonDSA/actionnetwork-activist-sync
|
f4b45ec85d59ac252c5572974381e96ec0107add
|
[
"MIT"
] | null | null | null |
tests/test_lambda_lapsed.py
|
BostonDSA/actionnetwork-activist-sync
|
f4b45ec85d59ac252c5572974381e96ec0107add
|
[
"MIT"
] | null | null | null |
import json
import importlib
import os
import unittest
from unittest.mock import Mock
from moto import mock_dynamodb2
import boto3
from lambda_local.context import Context
os.environ['ENVIRONMENT'] = 'TEST'
os.environ['LOG_LEVEL'] = 'CRITICAL'
os.environ['DSA_KEY'] = 'TESTKEY'
class TestLapsed(unittest.TestCase):
@mock_dynamodb2
def test_in_both_is_noop(self):
import lambda_lapsed
from actionnetwork_activist_sync.actionnetwork import ActionNetwork
from actionnetwork_activist_sync.state_model import State
State.create_table(billing_mode='PAY_PER_REQUEST')
j_karl = json.dumps({
'Email': 'kmarx@marxists.org',
'firstname': 'Karl',
'lastname': 'Marx'
})
self.create_karl_state(State, lambda_lapsed.cur_batch, State.PROCESSED)
self.create_karl_state(State, lambda_lapsed.prev_batch, State.PROCESSED)
mock_an = Mock(ActionNetwork)
lambda_lapsed.get_actionnetwork = lambda a: mock_an
result = lambda_lapsed.lambda_handler({}, Context(5))
self.assertEqual(result['removed'], 0)
self.assertEqual(result['cur_count'], 1)
self.assertEqual(result['prev_count'], 1)
@mock_dynamodb2
def test_not_in_cur_but_in_prev_gets_removed(self):
import lambda_lapsed
from actionnetwork_activist_sync.actionnetwork import ActionNetwork
from actionnetwork_activist_sync.state_model import State
# this lets us make sure the mock gets called
os.environ['DRY_RUN'] = '0'
importlib.reload(lambda_lapsed)
State.create_table(billing_mode='PAY_PER_REQUEST')
self.create_friedrich_state(State, lambda_lapsed.cur_batch, State.PROCESSED)
self.create_karl_state(State, lambda_lapsed.prev_batch, State.PROCESSED)
mock_an = Mock(ActionNetwork)
mock_an.remove_member_by_email = Mock()
lambda_lapsed.get_actionnetwork = lambda a: mock_an
result = lambda_lapsed.lambda_handler({}, Context(5))
mock_an.remove_member_by_email.assert_called_once_with(
'kmarx@marxists.org'
)
self.assertEqual(result['removed'], 1)
self.assertEqual(result['cur_count'], 1)
self.assertEqual(result['prev_count'], 1)
del os.environ['DRY_RUN']
def create_karl_state(self, State, batch, status):
state = State(
batch,
'kmarx@marxists.org',
raw=json.dumps({
'Email': 'kmarx@marxists.org',
'firstname': 'Karl',
'lastname': 'Marx'
}),
status=status
)
state.save()
return state
def create_friedrich_state(self, State, batch, status):
state = State(
batch,
'fengels@marxists.org',
raw=json.dumps({
'Email': 'fengles@marxists.org',
'firstname': 'Friedrich',
'lastname': 'Engels'
}),
status=status
)
state.save()
return state
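# These cases subclass unittest.TestCase, so either standard runner works, e.g.
#   python -m pytest tests/test_lambda_lapsed.py
# and moto's @mock_dynamodb2 keeps the DynamoDB table calls local to the run.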
| 31.07
| 84
| 0.633408
| 2,825
| 0.909237
| 0
| 0
| 2,013
| 0.647892
| 0
| 0
| 473
| 0.152237
|
832283ba27d3f56129d5cb0cef3c3b8a60934088
| 2,974
|
py
|
Python
|
tests/test_motif_finder.py
|
gaybro8777/RStudio-GitHub-Analysis
|
014195c90ca49f64d28c9fcd96d128301ff65157
|
[
"BSD-2-Clause"
] | 2
|
2020-09-13T11:55:13.000Z
|
2021-05-23T01:29:19.000Z
|
tests/test_motif_finder.py
|
gaybro8777/RStudio-GitHub-Analysis
|
014195c90ca49f64d28c9fcd96d128301ff65157
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_motif_finder.py
|
gaybro8777/RStudio-GitHub-Analysis
|
014195c90ca49f64d28c9fcd96d128301ff65157
|
[
"BSD-2-Clause"
] | 2
|
2020-10-17T20:18:37.000Z
|
2021-05-23T01:29:25.000Z
|
"""
This script tests the classes and functions from motif_finder.py.
Parameters
----------
None
Returns
-------
Assertion errors if tests fail
"""
import sys
import random
import pickle
import networkx as nx
from github_analysis.big_cloud_scratch import git_graph
from github_analysis.data_layer import getCommitsByProjectIds
from github_analysis.cluster import get_embedding_clusters
from github_analysis.motif_finder import *
clusters = get_embedding_clusters(random_state=0)
projects_cluster = getCommitsByProjectIds(clusters[0])
G = git_graph(projects_cluster)
mf = MotifFinder(G)
# Unit tests
def test_main_output_type():
pass
def test_sample_initial_node_output_type():
"""Check that MotifFinder.sample_initial_node outputs an integer."""
assert type(mf.sample_initial_node()) == int
def test_sample_initial_node_output():
"""Check that MotifFinder.sample_initial_node outputs a node in the given graph."""
assert mf.sample_initial_node() in G
def test_get_random_child_output_type():
"""Check that MotifFinder.get_random_child outputs an integer."""
assert type(mf.get_random_child(355738534)) == int
def test_get_random_child_no_children():
"""Check that MotifFinder.get_random_child outputs None if there are no children."""
assert mf.get_random_child(139371373) is None
def test_get_random_child_output():
"""Check that MotifFinder.get_random_child outputs a child of the node its been given."""
initial_node = mf.sample_initial_node()
child = mf.get_random_child(initial_node)
assert child in G.successors(initial_node)
def test_get_sample_motif_bad_input():
"""Check that MotifFinder.get_sample_motif raises an error when not given an integer for the k param."""
try:
mf.get_sample_motif('5')
except TypeError:
return True
raise TypeError
def test_get_sample_motif_output_type():
"""Check that MotifFinder.get_sample_motif outputs a networkx directed graph."""
assert type(mf.get_sample_motif(5)) == nx.classes.digraph.DiGraph
def test_get_sample_motif_output():
"""Check that MotifFinder.get_sample_motif outputs a networkx directed graph that is a subgraph of G."""
subgraph = mf.get_sample_motif(5)
for node in subgraph:
if node in G:
continue
else:
            raise ValueError("Subgraph doesn't contain same nodes as graph")
def test_get_motif_samples_bad_input():
"""Check that MotifFinder.get_motif_samples raises an error when not given an integer for the k and num_samples
param."""
try:
mf.get_motif_samples('5', '5')
except TypeError:
return True
raise TypeError
def test_get_motif_samples_output_type():
"""Check that MotifFinder.get_sample_motif outputs a dictionary."""
assert type(mf.get_motif_samples(5,5)) == dict
def test_get_motifs_by_cluster_output_type():
assert type(get_motifs_by_cluster(clusters)) == dict
# def test_get_motifs
| 28.596154
| 115
| 0.751849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,104
| 0.371217
|
83245e358084afd5d7f959c3a7aebfc9ab55bb73
| 1,107
|
py
|
Python
|
torrent.py
|
fishy/scripts
|
91abd0451cae916d885f4ff0fd2f69d335d37cf3
|
[
"BSD-3-Clause"
] | 4
|
2016-05-09T13:42:23.000Z
|
2021-11-29T15:16:11.000Z
|
torrent.py
|
fishy/scripts
|
91abd0451cae916d885f4ff0fd2f69d335d37cf3
|
[
"BSD-3-Clause"
] | null | null | null |
torrent.py
|
fishy/scripts
|
91abd0451cae916d885f4ff0fd2f69d335d37cf3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import sys
import os
from types import StringType
# get bencode package from http://github.com/fishy/scripts/downloads
from bencode.bencode import bencode, bdecode, BTFailure
try :
torrent = sys.argv[1]
except IndexError :
print "Usage: \"%s <torrent_file> [tracker_url]\" to show torrent info (without tracker_url), or to add tracker(s)" % sys.argv[0]
sys.exit()
size = os.stat(torrent).st_size
file = open(torrent, "rb")
data = file.read(size)
file.close()
info = bdecode(data)
if len(sys.argv) == 2 :
print info
sys.exit()
if 'announce-list' not in info :
list = [info['announce']]
for i in range(len(sys.argv)-2) :
tracker = sys.argv[i+2]
if tracker not in list :
list.append(tracker)
print list
info['announce-list'] = [list]
else :
list = info['announce-list'][0]
if type(list) == StringType :
list = [list]
for i in range(len(sys.argv)-2) :
tracker = sys.argv[i+2]
if tracker not in list :
list.append(tracker)
print list
info['announce-list'][0] = list
writedata = bencode(info)
file = open(torrent, "wb")
file.write(writedata)
file.close()
| 23.0625
| 130
| 0.68925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 276
| 0.249322
|
8324b2ef51cf900faa05fab3ea2e0b781034e744
| 4,786
|
py
|
Python
|
test/test_mdsspath.py
|
jpevans/mdssdiff
|
88573bdc89b00b023ce59c9b0fa19c6e6be760ce
|
[
"Apache-2.0"
] | 1
|
2019-11-05T00:34:20.000Z
|
2019-11-05T00:34:20.000Z
|
test/test_mdsspath.py
|
jpevans/mdssdiff
|
88573bdc89b00b023ce59c9b0fa19c6e6be760ce
|
[
"Apache-2.0"
] | 13
|
2017-03-08T03:37:43.000Z
|
2020-06-19T01:03:04.000Z
|
test/test_mdsspath.py
|
jpevans/mdssdiff
|
88573bdc89b00b023ce59c9b0fa19c6e6be760ce
|
[
"Apache-2.0"
] | 2
|
2020-09-14T12:04:43.000Z
|
2020-11-29T22:16:13.000Z
|
#!/usr/bin/env python
"""
Copyright 2015 ARC Centre of Excellence for Climate Systems Science
author: Aidan Heerdegen <aidan.heerdegen@anu.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import pytest
import sys
import os
import shutil
import shlex
import subprocess
import pdb #; pdb.set_trace()
from mdssdiff import mdsspath
from mdssdiff import mdssdiff
dirs = ["1","2","3"]
dirtree = os.path.join(*dirs)
paths = [ ["1","lala"], ["1","po"], ["1","2","Mickey"], ["1","2","Minny"], ["1","2","Pluto"], ["1","2","3","Ren"], ["1","2","3","Stimpy"] ]
remote = "remote"
dirtreeroot = dirs[0]
verbose=False
prefix='test_mdss'
dumbname = 'nowayisthereadirectorycalledthis'
# Test if we have a working mdss to connect to
try:
if 'DEBUGLOCAL' in os.environ:
raise ValueError('A very specific bad thing happened')
project=os.environ['PROJECT']
mdsspath.mdss_ls(".",project)
except:
# Monkey-patch to use local file commands if we don't
print("\n\n!!! No mdss: Monkey patching to use local commands !!!\n")
mdsspath._mdss_ls_cmd = 'ls -l --time-style=+"%Y-%m-%d %H:%M ___ "'
mdsspath._mdss_put_cmd = 'cp'
mdsspath._mdss_get_cmd = 'cp'
mdsspath._mdss_mkdir_cmd = 'mkdir'
mdsspath._mdss_rm_cmd = 'rm'
mdsspath._mdss_rmdir_cmd = 'rmdir'
project=''
def touch(fname, times=None):
# http://stackoverflow.com/a/1160227/4727812
with open(fname, 'a'):
os.utime(fname, times)
def mtime(fname):
return os.stat(fname).st_mtime
def runcmd(cmd):
subprocess.check_call(shlex.split(cmd),stderr=subprocess.STDOUT)
def setup_module(module):
if verbose: print ("setup_module module:%s" % module.__name__)
try:
shutil.rmtree(dirtreeroot)
except:
pass
os.makedirs(dirtree)
for p in paths:
touch(os.path.join(*p))
# Write 3 bytes into a local file
file = os.path.join(*paths[2])
fh = open(file,"wb")
fh.write(b"\x5F\x9D\x3E")
fh.close()
# shutil.copytree(dirtreeroot, os.path.join(remote,dirtreeroot))
# Make our top level directory
runcmd(" ".join([mdsspath._mdss_mkdir_cmd.format(project),prefix]))
# Copy files into it
runcmd(" ".join([mdsspath._mdss_put_cmd.format(project),'-r',dirs[0],prefix]))
def teardown_module(module):
if verbose: print ("teardown_module module:%s" % module.__name__)
shutil.rmtree(dirtreeroot)
runcmd(" ".join([mdsspath._mdss_rm_cmd.format(project),'-r',prefix]))
runcmd(" ".join([mdsspath._mdss_rmdir_cmd.format(project),dumbname]))
def test_integrity():
assert(os.path.isdir(dirs[0]))
assert(not mdsspath.isdir(dumbname,project))
mdsspath.mdss_mkdir(dumbname,project)
assert(mdsspath.isdir(dumbname,project))
assert(mdsspath.mdss_listdir(os.path.join(prefix,dirs[0]),project)[0:2] == (['2'], ['lala', 'po']))
assert(mdsspath.getsize(os.path.join(prefix,*paths[2]),project) == 3)
def test_localmtime():
"""
Test localmtime returns datetime object without seconds resolution
"""
dt = mdsspath.localmtime(os.path.join(*paths[2]))
assert(dt.second == 0)
def test_get():
# Testing slightly out of order, but it is very useful to use it here so I will
listinglocal = mdssdiff.getlisting(dirs[0],recursive=True)
for file in listinglocal:
# print(file)
assert(os.path.isfile(file))
# This will (indirectly) test mdsspath.walk
listingremote = mdssdiff.getlisting(os.path.join(prefix,dirs[0]),project,recursive=True)
# pdb.set_trace()
for file in listingremote:
# print(file)
assert(mdsspath.isfile(file,project))
assert(os.path.relpath(file,prefix) in listinglocal)
# check the modification times are the same (within a minute resolution)
assert(mdsspath.getmtime(file,project) == mdsspath.localmtime(os.path.relpath(file,prefix)))
missingfile = listinglocal.pop()
os.remove(missingfile)
mdsspath.remote_get(prefix, missingfile, project)
assert(os.path.isfile(missingfile))
def test_put():
newfile = os.path.join(dirtree,'newfile')
touch(newfile)
mdsspath.remote_put(prefix, newfile, project)
mdsspath.remote_get(prefix, newfile, project)
assert(os.path.isfile(newfile))
| 31.906667
| 139
| 0.688884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,766
| 0.368993
|
8324f3cc8eee905419a3c23f1df365cd7b4e9b24
| 30
|
py
|
Python
|
symbench_athens_client/tests/models/test_pipelines.py
|
valtron/symbench-athens-client
|
11482f5d385217898cfc5cb6ff9d76b19a3f7356
|
[
"Apache-2.0"
] | null | null | null |
symbench_athens_client/tests/models/test_pipelines.py
|
valtron/symbench-athens-client
|
11482f5d385217898cfc5cb6ff9d76b19a3f7356
|
[
"Apache-2.0"
] | 43
|
2021-08-19T20:16:43.000Z
|
2022-03-30T18:54:42.000Z
|
symbench_athens_client/tests/models/test_pipelines.py
|
valtron/symbench-athens-client
|
11482f5d385217898cfc5cb6ff9d76b19a3f7356
|
[
"Apache-2.0"
] | 2
|
2021-11-09T06:07:06.000Z
|
2022-01-13T17:04:29.000Z
|
class TestPipelines:
pass
| 10
| 20
| 0.733333
| 29
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
8325f8d80722ee18d5ca87486dae7d369fe6e6ee
| 1,192
|
py
|
Python
|
applications/trilinos_application/python_scripts/PressureMultiLevelSolver.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 2
|
2020-04-30T19:13:08.000Z
|
2021-04-14T19:40:47.000Z
|
applications/TrilinosApplication/python_scripts/PressureMultiLevelSolver.py
|
Jacklwln/Kratos
|
12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de
|
[
"BSD-4-Clause"
] | 1
|
2020-04-30T19:19:09.000Z
|
2020-05-02T14:22:36.000Z
|
applications/TrilinosApplication/python_scripts/PressureMultiLevelSolver.py
|
Jacklwln/Kratos
|
12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de
|
[
"BSD-4-Clause"
] | 1
|
2020-06-12T08:51:24.000Z
|
2020-06-12T08:51:24.000Z
|
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from KratosMultiphysics import *
from KratosMultiphysics.TrilinosApplication import *
def MultilevelLinearSolver(tolerance, max_iterations):
# settings for the iterative solver
aztec_parameters = ParameterList()
aztec_parameters.set("AZ_solver", "AZ_bicgstab")
aztec_parameters.set("AZ_output", "AZ_none")
#aztec_parameters.set("AZ_output", 10)
# settings of the ML solver
MLList = ParameterList()
default_settings = EpetraDefaultSetter()
default_settings.SetDefaults(MLList, "SA")
MLList.set("ML output", 10)
MLList.set("max levels", 3)
MLList.set("increasing or decreasing", "increasing")
MLList.set("aggregation: type", "MIS")
# MLList.set("coarse: type","Amesos-Superludist");
MLList.set("smoother: type", "Chebyshev")
MLList.set("smoother: sweeps", 3);
MLList.set("smoother: pre or post", "both");
MLList.set("ML output", 0);
# tolerance = 1e-4
nit_max = 1000
linear_solver = MultiLevelSolver(aztec_parameters, MLList, tolerance, nit_max);
return linear_solver
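# A minimal usage sketch; note that the max_iterations argument is currently
# unused inside the factory (nit_max is hard-coded to 1000):
#
# linear_solver = MultilevelLinearSolver(1e-6, 200)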
| 35.058824
| 134
| 0.723993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 455
| 0.381711
|
83263868a21483660a3b2d0dc61af080e81df193
| 3,960
|
py
|
Python
|
Hood/views.py
|
Gakur/NeiApp
|
2a9955a23877de10ed3436fd25d56208bca22887
|
[
"MIT"
] | null | null | null |
Hood/views.py
|
Gakur/NeiApp
|
2a9955a23877de10ed3436fd25d56208bca22887
|
[
"MIT"
] | null | null | null |
Hood/views.py
|
Gakur/NeiApp
|
2a9955a23877de10ed3436fd25d56208bca22887
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse ,HttpResponseRedirect, Http404
from django.urls import reverse
from django.contrib.auth.forms import UserCreationForm
from .models import *
from .forms import UserRegisterForm
from django.contrib import messages
from django.contrib.auth import authenticate, login , logout
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from . decorators import unauthenticated_user
from .forms import *
# Create your views here.
@login_required(login_url='/accounts/login/')
def EditProfile(request,username):
user = get_object_or_404(User, username=username)
profile = Profile.objects.get(user = user)
form = EditProfileForm(instance=profile)
if request.method == "POST":
form = EditProfileForm(request.POST, request.FILES, instance=profile)
if form.is_valid():
data = form.save(commit=False)
data.user = user
data.hood = profile.hood
data.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
else:
form = EditProfileForm(instance=profile)
legend = 'Edit Profile'
    return render(request, 'profile.html', {'legend': legend, 'form': form})
@login_required(login_url='/accounts/login/')
def create_profile(request):
title = "NHood"
current_user = request.user
title = "Create Profile"
if request.method == 'POST':
form = CreateProfileForm(request.POST, request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.user = current_user
profile.save()
return HttpResponseRedirect('/')
else:
form = CreateProfileForm()
    return render(request, 'create_profile.html', {"form": form, "title": title})
# ============ Home Page
@login_required(login_url='/accounts/login/')
def index(request):
return render(request, 'index.html')
# ============ View for list of neighbour hoods to display
@login_required(login_url='/accounts/login/')
def location(request):
neighbourhoods = Neighbourhood.objects.all()
return render(request, 'location.html', {'neighbourhoods':neighbourhoods} )
# =========== For Each neighbour hood
@login_required(login_url='/accounts/login/')
def estate(request, id):
neighbourhoods = Neighbourhood.objects.get(id =id)
location = Neighbourhood.objects.get(id =id)
context = {'location': location, 'neighbourhoods':neighbourhoods}
return render(request, 'area.html', context)
## ===Add new Business
@login_required(login_url='/accounts/login/')
def add_biz(request):
user = User.objects.filter(id = request.user.id).first()
profile = UserProfile.objects.filter(user = user).first()
if request.method == 'POST':
business_form = AddBusinessForm(request.POST)
if business_form.is_valid():
business = Business(name = request.POST['name'],owner = user,business_neighborhood = profile.neighborhood,email=request.POST['email'])
business.save()
return redirect('area.html')
else:
business_form = AddBusinessForm()
return render(request,'business/business.html',{'business_form':business_form})
def search(request):
try:
if 'business' in request.GET and request.GET['business']:
search_term = request.GET.get('business')
searched_business = Business.objects.get(name__icontains=search_term)
return render(request,'search.html',{'searched_business':searched_business})
except (ValueError,Business.DoesNotExist):
message = "Oops! We couldn't find the business you're looking for."
return render(request,'search.html',{'message':message})
    return render(request, 'search.html')
| 38.076923
| 146
| 0.693687
| 0
| 0
| 0
| 0
| 2,585
| 0.652778
| 0
| 0
| 735
| 0.185606
|
832672b5a45d6ed1bcae4c5d5f38bb3800726d8c
| 3,325
|
py
|
Python
|
ckanext-hdx_package/ckanext/hdx_package/tests/test_metadata_fields.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 58
|
2015-01-11T09:05:15.000Z
|
2022-03-17T23:44:07.000Z
|
ckanext-hdx_package/ckanext/hdx_package/tests/test_metadata_fields.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 1,467
|
2015-01-01T16:47:44.000Z
|
2022-02-28T16:51:20.000Z
|
ckanext-hdx_package/ckanext/hdx_package/tests/test_metadata_fields.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 17
|
2015-05-06T14:04:21.000Z
|
2021-11-11T19:58:16.000Z
|
'''
Created on May 16, 2014
@author: alexandru-m-g
'''
import json
import webtest
import logging
import ckan.plugins as p
import ckan.lib.create_test_data as ctd
import ckan.lib.search as search
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
import ckan.tests.legacy as legacy_tests
from ckan.config.middleware import make_app
from pylons import config
import ckanext.hdx_theme.tests.hdx_test_base as hdx_test_base
import ckanext.hdx_package.helpers.caching as caching
log = logging.getLogger(__name__)
class TestMetadataFields(hdx_test_base.HdxBaseTest):
def test_cannot_create_dataset_wo_source(self):
try:
p.load('hdx_package')
except Exception as e:
log.warn('Module already loaded')
log.info(str(e))
testsysadmin = model.User.by_name('testsysadmin')
result = legacy_tests.call_action_api(self.app, 'package_create', name='test-dataset',
private=False, package_creator='test-creator',
apikey=testsysadmin.apikey, status=409)
# result = tk.get_action('package_create')({'user':'testsysadmin'},{'name': 'test-dataset', 'private':False})
assert 'dataset_source' in result, 'The error needs to be related to the source'
assert 'Missing value' in result['dataset_source'], 'The problem needs to be that the source info is missing'
# def test_private_is_private(self):
# try:
# p.load('hdx_package')
# except Exception as e:
# log.warn('Module already loaded')
# log.info(str(e))
# tester = model.User.by_name('tester')
# tests.call_action_api(self.app, 'organization_create',
# name='test_org_2',
# apikey=tester.apikey)
# tests.call_action_api(self.app, 'package_create', name='test-dataset-private',
# private=True, owner_org='test_org_2',package_creator='test-creator', dataset_source='test',
# resources=[{'url':'text_upload_file.txt'}], apikey=tester.apikey, status=409)
# ds = tests.call_action_api(self.app, 'package_show', id='test-dataset-private', apikey=tester.apikey, status=409)
# r = requests.get(ds['resources'][0]['url'])
# assert r.text == 'Hello World'
def test_tags_autocomplete(self):
data_dict = {
'name': 'Topics',
'tags': [
{
'name': 'health'
}
]
}
logic.get_action('vocabulary_create')({'ignore_auth': True}, data_dict)
offset = '/api/2/util/tag/autocomplete?incomplete=a'
res = self.app.get(offset, )
assert res.status_code in [200,302]
r = json.loads(res.body)
assert len(r['ResultSet']['Result']) > 0
def _related_create(self, title, description, type, url, image_url):
usr = logic.get_action('get_site_user')({'model':model,'ignore_auth': True},{})
context = dict(model=model, user=usr['name'], session=model.Session)
data_dict = dict(title=title,description=description,
url=url,image_url=image_url,type=type)
return logic.get_action("related_create")( context, data_dict )
| 34.635417
| 123
| 0.62797
| 2,775
| 0.834586
| 0
| 0
| 0
| 0
| 0
| 0
| 1,478
| 0.444511
|
832770b6da611f24d004cf5564b612a2e18401f6
| 524
|
py
|
Python
|
inject.py
|
edouardpoitras/process_injection_example
|
0b22488a83a5516788411e4974090d1df2bd6494
|
[
"MIT"
] | 4
|
2021-05-01T06:56:14.000Z
|
2022-01-24T10:00:31.000Z
|
inject.py
|
edouardpoitras/process_injection_example
|
0b22488a83a5516788411e4974090d1df2bd6494
|
[
"MIT"
] | null | null | null |
inject.py
|
edouardpoitras/process_injection_example
|
0b22488a83a5516788411e4974090d1df2bd6494
|
[
"MIT"
] | 1
|
2021-04-30T16:52:11.000Z
|
2021-04-30T16:52:11.000Z
|
import sys
import psutil
from pyinjector import inject
if len(sys.argv) != 3:
print("Usage: python inject.py <process-name> <shared-library>")
exit()
_, process_name, shared_library = sys.argv
for process in psutil.process_iter():
if process.name() == process_name:
print(f"Found {process_name} - injecting {shared_library} into PID {process.pid}")
inject(process.pid, shared_library)
print("Injected successfully")
exit()
print(f"Unable to find process named {process_name}")
| 29.111111
| 90
| 0.696565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 201
| 0.383588
|
8327de2cbfa7508d6d7ec9cb75195ac2a23e5a16
| 3,235
|
py
|
Python
|
infer.py
|
yanivbl6/deep-griffinlim-iteration
|
b96165c0c11e00bff1e033f93aeca6fafe9833d3
|
[
"MIT"
] | null | null | null |
infer.py
|
yanivbl6/deep-griffinlim-iteration
|
b96165c0c11e00bff1e033f93aeca6fafe9833d3
|
[
"MIT"
] | null | null | null |
infer.py
|
yanivbl6/deep-griffinlim-iteration
|
b96165c0c11e00bff1e033f93aeca6fafe9833d3
|
[
"MIT"
] | 1
|
2020-10-12T15:31:27.000Z
|
2020-10-12T15:31:27.000Z
|
# noinspection PyUnresolvedReferences
##import matlab.engine
import os
import shutil
from argparse import ArgumentError, ArgumentParser
from torch.utils.data import DataLoader
from dataset import ComplexSpecDataset
from hparams1 import hp
from train import Trainer
from pathlib import Path
from os import listdir
parser = ArgumentParser()
parser.add_argument('-l','--list',action='store_true')
parser.add_argument('-n','--network',type=str)
parser.add_argument('-m','--mel2spec',type=str)
parser.add_argument('-d','--device',type=int, default=0)
parser.add_argument('--dest',type=str, default="../result/inference")
parser.add_argument('--network_results',type=str, default="../result/ngc_degli")
parser.add_argument('--mel2spec_results',type=str, default="../result/mel2spec")
parser.add_argument('-p','--perf', action='store_true')
parser.add_argument('-b','--batch_size', type=int, default=16)
args = parser.parse_args()
##import pdb; pdb.set_trace()
if args.list:
print('-'*30)
print("Available Networks:")
for f in listdir(args.network_results):
full_path = "%s/%s" % (args.network_results,f)
if not os.path.isdir(full_path):
continue
checkpoints = []
full_path_train = "%s/train" % full_path
if not os.path.exists(full_path_train):
continue
for e in listdir(full_path_train):
if e.__str__()[-2:] == "pt":
checkpoints.append(int(e.split('.')[0]))
if len(checkpoints) > 0:
checkpoints.sort()
print("%s : %s" % (f,checkpoints.__str__()))
print('-'*30)
print("Available Mel2Spec infered data:")
for f in listdir(args.mel2spec_results):
full_path = "%s/%s" % (args.mel2spec_results,f)
if not os.path.isdir(full_path):
continue
checkpoints = []
for e in listdir(full_path):
if e.split('_')[0] == "infer":
checkpoints.append(int(e.split('_')[1]))
if len(checkpoints) > 0:
checkpoints.sort()
print("%s : %s" % (f,checkpoints.__str__()))
print('-'*30)
if not args.network is None:
net_split = args.network.split(":")
networkDir = net_split[0]
networkEpoch = net_split[1]
if args.perf:
sub = "perf"
else:
sub = "quality"
if not args.mel2spec is None:
mel_split = args.mel2spec.split(":")
mel2specDir = mel_split[0]
mel2specEpoch = mel_split[1]
mel_dest = f"{args.mel2spec_results}/{mel2specDir}/infer_{mel2specEpoch}"
full_dest= f"{args.dest}/{sub}/{networkDir}_E{networkEpoch}_mel2spec_{mel2specDir}_E{mel2specEpoch}"
else:
mel_dest = f"~/deep-griffinlim-iteration/mel2spec/baseline_data"
full_dest= f"{args.dest}/{sub}/{networkDir}_E{networkEpoch}_baseline"
os.makedirs(args.dest, exist_ok=True)
command = "test"
if args.perf:
full_dest = full_dest + "_B%d" % args.batch_size
command = "perf"
cmd=f"python main.py --{command} --device {args.device} --from {networkEpoch} --logdir {args.network_results}/{networkDir} --path_feature {mel_dest} --dest_test {full_dest} --batch_size {args.batch_size}"
print(cmd)
| 32.35
| 208
| 0.641731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 935
| 0.289026
|
8329042f7336cfa333d46696e6595794b06050cc
| 11,603
|
py
|
Python
|
Disc_train.py
|
avinsit123/kpgen_GAN
|
e5ca04b9c6e43f8049dcf8e5b8fa44ab4e4702c3
|
[
"MIT"
] | 1
|
2020-05-28T23:18:51.000Z
|
2020-05-28T23:18:51.000Z
|
Disc_train.py
|
avinsit123/kpgen_GAN
|
e5ca04b9c6e43f8049dcf8e5b8fa44ab4e4702c3
|
[
"MIT"
] | null | null | null |
Disc_train.py
|
avinsit123/kpgen_GAN
|
e5ca04b9c6e43f8049dcf8e5b8fa44ab4e4702c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 15:10:45 2019
@author: r17935avinash
"""
################################ IMPORT LIBRARIES ###############################################################
import torch
import numpy as np
import pykp.io
import torch.nn as nn
from utils.statistics import RewardStatistics
from utils.time_log import time_since
import time
from sequence_generator import SequenceGenerator
from utils.report import export_train_and_valid_loss, export_train_and_valid_reward
import sys
import logging
import os
from evaluate import evaluate_reward
from pykp.reward import *
import math
EPS = 1e-8
import argparse
import config
import logging
import os
import json
from pykp.io import KeyphraseDataset
from pykp.model import Seq2SeqModel
from torch.optim import Adam
import pykp
from pykp.model import Seq2SeqModel
import train_ml
import train_rl
from utils.time_log import time_since
from utils.data_loader import load_data_and_vocab
from utils.string_helper import convert_list_to_kphs
import time
import numpy as np
import random
from torch import device
from hierarchal_attention_Discriminator import Discriminator
from torch.nn import functional as F
#####################################################################################################
#def Check_Valid_Loss(valid_data_loader,D_model,batch,generator,opt,perturb_std):
##### TUNE HYPERPARAMETERS ##############
## batch_reward_stat, log_selected_token_dist = train_one_batch(batch, generator, optimizer_rl, opt, perturb_std)
#########################################################
def train_one_batch(D_model,one2many_batch, generator, opt,perturb_std):
src, src_lens, src_mask, src_oov, oov_lists, src_str_list, trg_str_2dlist, trg, trg_oov, trg_lens, trg_mask, _, title, title_oov, title_lens, title_mask = one2many_batch
one2many = opt.one2many
one2many_mode = opt.one2many_mode
if one2many and one2many_mode > 1:
num_predictions = opt.num_predictions
else:
num_predictions = 1
if torch.cuda.is_available():
src = src.to(opt.device)
src_mask = src_mask.to(opt.device)
src_oov = src_oov.to(opt.device)
if opt.title_guided:
title = title.to(opt.device)
title_mask = title_mask.to(opt.device)
eos_idx = opt.word2idx[pykp.io.EOS_WORD]
delimiter_word = opt.delimiter_word
batch_size = src.size(0)
topk = opt.topk
reward_type = opt.reward_type
reward_shaping = opt.reward_shaping
baseline = opt.baseline
match_type = opt.match_type
regularization_type = opt.regularization_type ## DNT
regularization_factor = opt.regularization_factor ##DNT
devices = opt.device
if regularization_type == 2:
entropy_regularize = True
else:
entropy_regularize = False
start_time = time.time()
sample_list, log_selected_token_dist, output_mask, pred_eos_idx_mask, entropy, location_of_eos_for_each_batch, location_of_peos_for_each_batch = generator.sample(
src, src_lens, src_oov, src_mask, oov_lists, opt.max_length, greedy=False, one2many=one2many,
one2many_mode=one2many_mode, num_predictions=num_predictions, perturb_std=perturb_std, entropy_regularize=entropy_regularize, title=title, title_lens=title_lens, title_mask=title_mask)
pred_str_2dlist = sample_list_to_str_2dlist(sample_list, oov_lists, opt.idx2word, opt.vocab_size, eos_idx, delimiter_word, opt.word2idx[pykp.io.UNK_WORD], opt.replace_unk,
src_str_list, opt.separate_present_absent, pykp.io.PEOS_WORD)
target_str_2dlist = convert_list_to_kphs(trg)
"""
src = [batch_size,abstract_seq_len]
target_str_2dlist = list of list of true keyphrases
pred_str_2dlist = list of list of false keyphrases
"""
total_abstract_loss = 0
batch_mine = 0
abstract_t = torch.Tensor([]).to(devices)
abstract_f = torch.Tensor([]).to(devices)
kph_t = torch.Tensor([]).to(devices)
kph_f = torch.Tensor([]).to(devices)
h_kph_t_size = 0
h_kph_f_size = 0
len_list_t,len_list_f = [],[]
for idx, (src_list, pred_str_list,target_str_list) in enumerate(zip(src, pred_str_2dlist,target_str_2dlist)):
batch_mine+=1
if (len(target_str_list)==0 or len(pred_str_list)==0):
continue
h_abstract_t,h_kph_t = D_model.get_hidden_states(src_list,target_str_list)
h_abstract_f,h_kph_f = D_model.get_hidden_states(src_list,pred_str_list)
len_list_t.append(h_kph_t.size(1))
len_list_f.append(h_kph_f.size(1))
h_kph_t_size = max(h_kph_t_size,h_kph_t.size(1))
h_kph_f_size = max(h_kph_f_size,h_kph_f.size(1))
for idx, (src_list, pred_str_list,target_str_list) in enumerate(zip(src, pred_str_2dlist,target_str_2dlist)):
batch_mine+=1
if (len(target_str_list)==0 or len(pred_str_list)==0):
continue
h_abstract_t,h_kph_t = D_model.get_hidden_states(src_list,target_str_list)
h_abstract_f,h_kph_f = D_model.get_hidden_states(src_list,pred_str_list)
p1d = (0,0,0,h_kph_t_size - h_kph_t.size(1))
p2d = (0,0,0,h_kph_f_size - h_kph_f.size(1))
h_kph_t = F.pad(h_kph_t,p1d)
h_kph_f = F.pad(h_kph_f,p2d)
abstract_t = torch.cat((abstract_t,h_abstract_t),dim=0)
abstract_f = torch.cat((abstract_f,h_abstract_f),dim=0)
kph_t = torch.cat((kph_t,h_kph_t),dim=0)
kph_f = torch.cat((kph_f,h_kph_f),dim=0)
_,real_rewards,abstract_loss_real = D_model.calculate_context(abstract_t,kph_t,1,len_list_t)
_,fake_rewards,abstract_loss_fake = D_model.calculate_context(abstract_f,kph_f,0,len_list_f)
avg_batch_loss = ( abstract_loss_real + abstract_loss_fake )
avg_real = real_rewards
avg_fake = fake_rewards
return avg_batch_loss,avg_real,avg_fake
def main(opt):
clip = 5
start_time = time.time()
train_data_loader, valid_data_loader, word2idx, idx2word, vocab = load_data_and_vocab(opt, load_train=True)
load_data_time = time_since(start_time)
logging.info('Time for loading the data: %.1f' % load_data_time)
print("Data Successfully Loaded __.__.__.__.__.__.__.__.__.__.__.__.__.__.")
model = Seq2SeqModel(opt)
## if torch.cuda.is_available():
if torch.cuda.is_available():
model.load_state_dict(torch.load(opt.model_path))
model = model.to(opt.gpuid)
else:
model.load_state_dict(torch.load(opt.model_path,map_location="cpu"))
print("___________________ Generator Initialised and Loaded _________________________")
generator = SequenceGenerator(model,
bos_idx=opt.word2idx[pykp.io.BOS_WORD],
eos_idx=opt.word2idx[pykp.io.EOS_WORD],
pad_idx=opt.word2idx[pykp.io.PAD_WORD],
peos_idx=opt.word2idx[pykp.io.PEOS_WORD],
beam_size=1,
max_sequence_length=opt.max_length,
copy_attn=opt.copy_attention,
coverage_attn=opt.coverage_attn,
review_attn=opt.review_attn,
cuda=opt.gpuid > -1
)
init_perturb_std = opt.init_perturb_std
final_perturb_std = opt.final_perturb_std
perturb_decay_factor = opt.perturb_decay_factor
perturb_decay_mode = opt.perturb_decay_mode
hidden_dim = opt.D_hidden_dim
embedding_dim = opt.D_embedding_dim
n_layers = opt.D_layers
D_model = Discriminator(opt.vocab_size,embedding_dim,hidden_dim,n_layers,opt.word2idx[pykp.io.PAD_WORD])
print("The Discriminator Description is ",D_model)
if opt.pretrained_Discriminator :
if torch.cuda.is_available() :
D_model.load_state_dict(torch.load(opt.Discriminator_model_path))
D_model = D_model.to(opt.gpuid)
else:
D_model.load_state_dict(torch.load(opt.Discriminator_model_path,map_location="cpu"))
else :
if torch.cuda.is_available() :
D_model = D_model.to(opt.gpuid)
else:
D_model.load_state_dict(torch.load(opt.Discriminator_model_path,map_location="cpu"))
D_optimizer = torch.optim.Adam(D_model.parameters(),opt.learning_rate)
print("Beginning with training Discriminator")
print("########################################################################################################")
total_epochs = 5
for epoch in range(total_epochs):
        total_batch = 0
        best_valid_loss = 1000
        print("Starting with epoch:",epoch)
        for batch_i, batch in enumerate(train_data_loader):
D_model.train()
D_optimizer.zero_grad()
if perturb_decay_mode == 0: # do not decay
perturb_std = init_perturb_std
elif perturb_decay_mode == 1: # exponential decay
perturb_std = final_perturb_std + (init_perturb_std - final_perturb_std) * math.exp(-1. * total_batch * perturb_decay_factor)
elif perturb_decay_mode == 2: # steps decay
perturb_std = init_perturb_std * math.pow(perturb_decay_factor, math.floor((1+total_batch)/4000))
avg_batch_loss , _ , _ = train_one_batch( D_model , batch , generator , opt , perturb_std )
            avg_batch_loss.backward()
            torch.nn.utils.clip_grad_norm_( D_model.parameters() , clip)
D_optimizer.step()
D_model.eval()
if batch_i % 4000 == 0:
total = 0
valid_loss_total , valid_real_total , valid_fake_total = 0 , 0 , 0
for batch_j , valid_batch in enumerate(valid_data_loader):
total += 1
valid_loss , valid_real , valid_fake = train_one_batch( D_model , valid_batch , generator , opt ,perturb_std )
valid_loss_total += valid_loss.cpu().detach().numpy()
valid_real_total += valid_real.cpu().detach().numpy()
valid_fake_total += valid_fake.cpu().detach().numpy()
D_optimizer.zero_grad()
print("Currently loss is " , valid_loss_total.item() / total )
print("Currently real loss is " , valid_real_total.item() / total )
print("Currently fake loss is " , valid_fake_total.item() / total )
if best_valid_loss > valid_loss_total.item() / total :
print("Loss Decreases so saving the file ...............----------->>>>>")
state_dfs = D_model.state_dict()
torch.save(state_dfs,"Discriminator_checkpts/Attention_Disriminator_" + str(epoch) + ".pth.tar")
best_valid_loss = valid_loss_total.item() / total
######################################
| 46.78629
| 594
| 0.613807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,018
| 0.173921
|
832ab6e2559ee453e6521a8fd912db337cc8fa7d
| 4,568
|
py
|
Python
|
VQ3D/camera_pose_estimation/get_median_intrinsics.py
|
emulhall/episodic-memory
|
27bafec6e09c108f0efe5ac899eabde9d1ac40cc
|
[
"MIT"
] | 27
|
2021-10-16T02:39:17.000Z
|
2022-03-31T11:16:11.000Z
|
VQ3D/camera_pose_estimation/get_median_intrinsics.py
|
emulhall/episodic-memory
|
27bafec6e09c108f0efe5ac899eabde9d1ac40cc
|
[
"MIT"
] | 5
|
2022-03-23T04:53:36.000Z
|
2022-03-29T23:39:07.000Z
|
VQ3D/camera_pose_estimation/get_median_intrinsics.py
|
emulhall/episodic-memory
|
27bafec6e09c108f0efe5ac899eabde9d1ac40cc
|
[
"MIT"
] | 13
|
2021-11-25T19:17:29.000Z
|
2022-03-25T14:01:47.000Z
|
import os
import sys
import json
import argparse
import numpy as np
sys.path.append('Camera_Intrinsics_API/')
from get_camera_intrinsics import CameraIntrinsicsHelper
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
type=str,
default='data/videos_sfm/',
help="COLMAP output folder of videos",
)
parser.add_argument(
"--input_dir_greedy",
type=str,
default='data/videos_sfm_greedy/',
help="Folder for the COLMAP outputs - greedy.",
)
parser.add_argument(
"--annotation_dir",
type=str,
default='data/v1/annotations/',
help="annotation folder. Must contain the vq3d_<split>.json files.",
)
parser.add_argument(
"--output_filename",
type=str,
default='data/v1/scan_to_intrinsics.json',
)
args = parser.parse_args()
dataset = {}
for split in ['train', 'val']:
a = json.load(open(os.path.join(args.annotation_dir,
f'vq3d_{split}.json'), 'r'))
for video in a['videos']:
video_uid=video['video_uid']
scan_uid=video['scan_uid']
dataset[video_uid]=scan_uid
helper = CameraIntrinsicsHelper()
datadir=args.input_dir
datadir_2=args.input_dir_greedy
cpt=0
all_intrinsics = {}
for video_uid in os.listdir(datadir):
scan_uid=dataset[video_uid]
intrinsic_txt = os.path.join(datadir,
video_uid,
'sparse',
'0',
'cameras.txt')
if not os.path.isfile(intrinsic_txt):
intrinsic_txt = os.path.join(datadir_2,
video_uid,
'sparse',
'0',
'cameras.txt')
if not os.path.isfile(intrinsic_txt):
cpt+=1
else:
intrinsics = helper.parse_colmap_intrinsics(intrinsic_txt)
if scan_uid not in all_intrinsics:
all_intrinsics[scan_uid]={}
token = (intrinsics['width'], intrinsics['height'])
if token not in all_intrinsics[scan_uid]:
all_intrinsics[scan_uid][token] = []
all_intrinsics[scan_uid][token].append(
(
intrinsics['f'],
intrinsics['cx'],
intrinsics['cy'],
intrinsics['k1'],
intrinsics['k2'],
)
)
else:
intrinsics = helper.parse_colmap_intrinsics(intrinsic_txt)
if scan_uid not in all_intrinsics:
all_intrinsics[scan_uid]={}
token = (intrinsics['width'], intrinsics['height'])
if token not in all_intrinsics[scan_uid]:
all_intrinsics[scan_uid][token] = []
all_intrinsics[scan_uid][token].append(
(
intrinsics['f'],
intrinsics['cx'],
intrinsics['cy'],
intrinsics['k1'],
intrinsics['k2'],
)
)
outputs = {}
for scan_uid, d in all_intrinsics.items():
print(' ')
print('Scan uid: ', scan_uid)
outputs[scan_uid]={}
for resolution, v in d.items():
print(' -- resolution: ', resolution)
resolution_str = str(resolution)
outputs[scan_uid][resolution_str]={
'f': np.median([float(i[0]) for i in v]),
'cx': np.median([float(i[1]) for i in v]),
'cy': np.median([float(i[2]) for i in v]),
'k1': np.median([float(i[3]) for i in v]),
'k2': np.median([float(i[4]) for i in v]),
}
for i in v:
print(' -- -- -- : ', i)
print(' ')
print(' -- -- -- : ',
outputs[scan_uid][resolution_str]['f'],
outputs[scan_uid][resolution_str]['cx'],
outputs[scan_uid][resolution_str]['cy'],
outputs[scan_uid][resolution_str]['k1'],
outputs[scan_uid][resolution_str]['k2'],
)
    json.dump(outputs, open(args.output_filename, 'w'))
| 34.089552
| 76
| 0.479203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 628
| 0.137478
|
832b2f005e0af85ddb6e44118b2f277f3ecf6b06
| 571
|
py
|
Python
|
Dataset/Leetcode/valid/78/455.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/valid/78/455.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/valid/78/455.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
def __init__(self):
self.result = []
def XXX(self, nums):
return self.helper(nums, 0, [])
def helper(self, nums, index, temp):
if index == len(nums):
self.result.append(temp)
return
self.result.append(temp)
for i in range(index, len(nums)):
self.helper(nums, i + 1, temp + [nums[i]])
return self.result
| 27.190476
| 139
| 0.576182
| 416
| 0.728546
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.021016
|
832b736a0869d3dc222dea9d11955ffc80809ec5
| 1,322
|
py
|
Python
|
IDS/IDS/urls.py
|
YashwantChauhan/SDL
|
0d48dfa129d72316f35967df98ce2f1e6f949fc5
|
[
"MIT"
] | 2
|
2020-12-24T15:13:49.000Z
|
2021-06-05T15:43:58.000Z
|
IDS/IDS/urls.py
|
YashwantChauhan/SDL
|
0d48dfa129d72316f35967df98ce2f1e6f949fc5
|
[
"MIT"
] | 2
|
2021-12-28T14:06:20.000Z
|
2021-12-28T14:25:44.000Z
|
IDS/IDS/urls.py
|
YashwantChauhan/SDL
|
0d48dfa129d72316f35967df98ce2f1e6f949fc5
|
[
"MIT"
] | null | null | null |
"""IDS URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from Apps.home import views as home_views
from Apps.Signup import views as Signup_views
from Apps.Dashboard import urls as Dash_urls
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('' , home_views.home , name='home' ),
path('Signin/' , Signup_views.signin , name='Signin' ),
path('Signup/' , Signup_views.signup , name='Signup'),
path('Signout/', Signup_views.logout , name='logout'),
path('Dashboard/', include(Dash_urls.urlpatterns) ),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 38.882353
| 77
| 0.723147
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 706
| 0.534039
|
832ba0a49717dd57c782af2a65a1680399effe7f
| 1,574
|
py
|
Python
|
setup.py
|
preetmishra/nooz
|
e7ee6958bac7edcc85ab157b6dbe07071fde887c
|
[
"MIT"
] | 7
|
2020-03-18T06:30:55.000Z
|
2021-04-06T16:38:25.000Z
|
setup.py
|
preetmishra/nooz
|
e7ee6958bac7edcc85ab157b6dbe07071fde887c
|
[
"MIT"
] | 1
|
2020-06-29T16:12:45.000Z
|
2020-06-29T16:12:45.000Z
|
setup.py
|
preetmishra/nooz
|
e7ee6958bac7edcc85ab157b6dbe07071fde887c
|
[
"MIT"
] | 2
|
2021-03-21T02:52:39.000Z
|
2021-05-26T08:34:58.000Z
|
import codecs
import os
from setuptools import find_packages, setup
def long_description():
if not (os.path.isfile('README.md') and os.access('README.md', os.R_OK)):
return ''
with codecs.open('README.md', encoding='utf8') as f:
return f.read()
linting_deps = [
'mypy==0.761',
'pycodestyle==2.5.0',
]
setup(
name='nooz',
version='0.1.0',
description='Trending headlines right in your terminal.',
long_description=long_description(),
long_description_content_type='text/markdown',
url='https://github.com/preetmishra/nooz',
author='Preet Mishra',
author_email='ipreetmishra@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: End Users/Desktop',
'Topic :: Internet',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Operating System :: OS Independent',
],
python_requires='>=3.5, <=3.8',
keywords='news',
packages=find_packages(),
zip_safe=True,
entry_points={
'console_scripts': [
'nooz = nooz.run:main',
],
},
extras_require={
'linting': linting_deps,
},
install_requires=[
'mypy_extensions>=0.4',
'requests>=2.23.0',
'urwid==2.1.0',
'urllib3>=1.25.8'
],
)
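# A typical local development install with this configuration:
#   pip install -e .            # editable install, exposes the `nooz` console script
#   pip install -e .[linting]   # additionally pulls in the mypy/pycodestyle extras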
| 24.984127
| 77
| 0.584498
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 726
| 0.461245
|
832d8379190a88d84a40dc951ecd801770c36c11
| 11,454
|
py
|
Python
|
deeplodocus/callbacks/saver.py
|
amelgrenier/deeplodocus
|
0a017faae098cddc436e82e83b85e66caf18b522
|
[
"MIT"
] | null | null | null |
deeplodocus/callbacks/saver.py
|
amelgrenier/deeplodocus
|
0a017faae098cddc436e82e83b85e66caf18b522
|
[
"MIT"
] | null | null | null |
deeplodocus/callbacks/saver.py
|
amelgrenier/deeplodocus
|
0a017faae098cddc436e82e83b85e66caf18b522
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
import torch
from torch.nn import Module
import os
from deeplodocus.utils.notification import Notification
from deeplodocus.utils.flags.save import *
from deeplodocus.utils.flags.event import *
from deeplodocus.utils.flags.notif import *
from deeplodocus.utils.flags.ext import DEEP_EXT_PYTORCH, DEEP_EXT_ONNX
from deeplodocus.utils.flags.msg import DEEP_MSG_MODEL_SAVED, DEEP_MSG_SAVER_IMPROVED, DEEP_MSG_SAVER_NOT_IMPROVED
from deeplodocus.core.metrics.over_watch_metric import OverWatchMetric
from deeplodocus.brain.signal import Signal
from deeplodocus.brain.thalamus import Thalamus
from deeplodocus.utils.generic_utils import get_corresponding_flag
from deeplodocus.utils.flags.flag_lists import DEEP_LIST_SAVE_SIGNAL, DEEP_LIST_SAVE_FORMATS
class Saver(object):
"""
AUTHORS:
--------
:author: Alix Leroy
:author: Samuel Westlake
DESCRIPTION:
------------
Class to handle the saving of the model
"""
def __init__(
self,
name: str = "no_model_name",
save_directory: str = "weights",
save_signal: Flag = DEEP_EVENT_ON_EPOCH_END,
method: Flag = DEEP_SAVE_FORMAT_PYTORCH,
overwrite: bool = False
):
self.name = name
self.directory = save_directory
self.save_signal = get_corresponding_flag(DEEP_LIST_SAVE_SIGNAL, save_signal)
self.method = get_corresponding_flag(DEEP_LIST_SAVE_FORMATS, method) # Can be onnx or pt
self.best_overwatch_metric = None
self.training_loss = None
self.model = None
self.optimizer = None
self.epoch_index = -1
self.batch_index = -1
self.validation_loss = None
self.overwrite = overwrite
self.inp = None
# Set the extension
if DEEP_SAVE_FORMAT_PYTORCH.corresponds(self.method):
self.extension = DEEP_EXT_PYTORCH
elif DEEP_SAVE_FORMAT_ONNX.corresponds(self.method):
self.extension = DEEP_EXT_ONNX
        if not os.path.isdir(self.directory):
os.makedirs(self.directory, exist_ok=True)
# Connect the save to the computation of the overwatched metric
Thalamus().connect(
receiver=self.on_overwatch_metric_computed,
event=DEEP_EVENT_OVERWATCH_METRIC_COMPUTED,
expected_arguments=["current_overwatch_metric"]
)
Thalamus().connect(
receiver=self.on_training_end,
event=DEEP_EVENT_ON_TRAINING_END,
expected_arguments=[]
)
Thalamus().connect(
receiver=self.save_model,
event=DEEP_EVENT_SAVE_MODEL,
expected_arguments=[]
)
Thalamus().connect(
receiver=self.set_training_loss,
event=DEEP_EVENT_SEND_TRAINING_LOSS,
expected_arguments=["training_loss"]
)
Thalamus().connect(
receiver=self.set_save_params,
event=DEEP_EVENT_SEND_SAVE_PARAMS_FROM_TRAINER,
expected_arguments=[
"model",
"optimizer",
"epoch_index",
"validation_loss",
"inp"
]
)
"""
ON BATCH END NOT TO BE IMPLEMENTED FOR EFFICIENCY REASONS
def on_batch_end(self, model:Module):
pass
"""
def on_training_end(self) -> None:
"""
AUTHORS:
--------
:author: Alix Leroy
DESCRIPTION:
------------
Called once the training is finished
PARAMETERS:
-----------
RETURN:
-------
:return: None
"""
if DEEP_SAVE_SIGNAL_END_TRAINING.corresponds(self.save_signal):
self.save_model()
def on_overwatch_metric_computed(self, current_overwatch_metric: OverWatchMetric):
"""
AUTHORS:
--------
:author: Alix Leroy
:author: Samuel Westlake
DESCRIPTION:
------------
Check if saving the model is required
PARAMETERS:
-----------
:param current_overwatch_metric: float: The value of the metric to over watch
RETURN:
-------
:return -> bool: Whether the model should be saved or not
"""
# Save if there is no metric to compare against
if self.best_overwatch_metric is None:
self.best_overwatch_metric = current_overwatch_metric
save = True
else:
# If the new metric has to be smaller than the best one
if DEEP_SAVE_CONDITION_LESS.corresponds(current_overwatch_metric.get_condition()):
# If the model improved since last batch => Save
if self.best_overwatch_metric.get_value() > current_overwatch_metric.get_value():
Notification(
DEEP_NOTIF_SUCCESS,
DEEP_MSG_SAVER_IMPROVED % (
current_overwatch_metric.name,
"%.4e" % Decimal(
self.best_overwatch_metric.get_value()
- current_overwatch_metric.get_value()
)
)
)
self.best_overwatch_metric = current_overwatch_metric
save = True
# No improvement => Return False
else:
Notification(
DEEP_NOTIF_INFO,
DEEP_MSG_SAVER_NOT_IMPROVED % current_overwatch_metric.name
)
save = False
# If the new metric has to be bigger than the best one (e.g. The accuracy of a classification)
elif DEEP_SAVE_CONDITION_GREATER.corresponds(current_overwatch_metric.get_condition()):
# If the model improved since last batch => Save
if self.best_overwatch_metric.get_value() < current_overwatch_metric.get_value():
Notification(
DEEP_NOTIF_SUCCESS,
DEEP_MSG_SAVER_IMPROVED % (
current_overwatch_metric.name,
"%.4e" % Decimal(
current_overwatch_metric.get_value()
- self.best_overwatch_metric.get_value()
)
)
)
self.best_overwatch_metric = current_overwatch_metric
save = True
# No improvement => Return False
else:
Notification(
DEEP_NOTIF_INFO,
DEEP_MSG_SAVER_NOT_IMPROVED % current_overwatch_metric.name
)
save = False
else:
Notification(DEEP_NOTIF_FATAL, "The following saving condition does not exist : %s"
% current_overwatch_metric.get_condition())
save = False
if save is True:
self.save_model()
def save_model(self) -> None:
"""
AUTHORS:
--------
:author: Alix Leroy
:author: Samuel Westlake
DESCRIPTION:
------------
Save the model
PARAMETERS:
-----------
RETURN:
-------
:return: None
"""
# Set training_loss
Thalamus().add_signal(
Signal(
event=DEEP_EVENT_REQUEST_TRAINING_LOSS,
args=[]
)
)
# Set model and stuff
Thalamus().add_signal(
Signal(
event=DEEP_EVENT_REQUEST_SAVE_PARAMS_FROM_TRAINER,
args=[]
)
)
file_path = self.__get_file_path()
# If we want to save to the pytorch format
if DEEP_SAVE_FORMAT_PYTORCH.corresponds(self.method):
# TODO: Finish try except statements here after testing...
# try:
torch.save(
{
"model_state_dict": self.model.state_dict(),
"epoch": self.epoch_index,
"training_loss": self.training_loss,
"validation_loss": self.validation_loss,
"optimizer_state_dict": self.optimizer.state_dict()
},
file_path
)
# except:
# Notification(DEEP_NOTIF_ERROR, "Error while saving the pytorch model and weights" )
# self.__handle_error_saving(model)
# If we want to save to the ONNX format
elif DEEP_SAVE_FORMAT_ONNX.corresponds(self.method):
# TODO: and here. Also fix onnx export function
Notification(DEEP_NOTIF_FATAL, "Save as onnx format not implemented yet")
# try:
# torch.onnx._export(model, inp, file_path,
# export_params=True,
# verbose=True,
# input_names=input_names,
# output_names=output_names)
# except:
# Notification(DEEP_NOTIF_ERROR, "Error while saving the ONNX model and weights" )
# self.__handle_error_saving(model)
Notification(DEEP_NOTIF_SUCCESS, DEEP_MSG_MODEL_SAVED % file_path)
def set_training_loss(self, training_loss):
"""
:param training_loss:
:return:
"""
self.training_loss = training_loss
def set_save_params(self, model, optimizer, epoch_index, validation_loss, inp):
"""
:param model:
:param optimizer:
:param epoch_index:
:param validation_loss:
:param inp:
:return:
"""
self.model = model
self.optimizer = optimizer
self.epoch_index = epoch_index
self.validation_loss = validation_loss
self.inp = inp
def __get_file_path(self):
if self.epoch_index is None:
file_path = "%s/%s%s" % (
self.directory,
self.name,
self.extension
)
else:
if self.save_signal.corresponds(DEEP_SAVE_SIGNAL_END_BATCH):
# Set the file path as 'directory/name_epoch_batch.ext'
file_path = "%s/%s_%s_%s%s" % (
self.directory,
self.name,
str(self.epoch_index).zfill(3),
str(self.batch_index).zfill(8),
self.extension
)
# If saving at the end of each epoch
else:
# Set the file path as 'directory/name_epoch.ext'
file_path = "%s/%s_%s%s" % (
self.directory,
self.name,
str(self.epoch_index).zfill(3),
self.extension
)
return file_path
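# Illustrative usage sketch (flag values assumed from the imports above; not part of the module):
#   saver = Saver(name="my_model", save_directory="weights",
#                 save_signal=DEEP_SAVE_SIGNAL_END_TRAINING,
#                 method=DEEP_SAVE_FORMAT_PYTORCH)
# The saver then reacts to Thalamus signals; save_model() can also be called directly.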
| 34.293413
| 115
| 0.527589
| 10,655
| 0.930243
| 0
| 0
| 0
| 0
| 0
| 0
| 3,259
| 0.284529
|
832debbd59e85b8ca2ff3010595d819d90400d10
| 2,812
|
py
|
Python
|
mridc/collections/reconstruction/models/cascadenet/ccnn_block.py
|
jerke123/mridc
|
7e22ac50f8df73f2305d61979da2a5d59874546e
|
[
"Apache-2.0"
] | null | null | null |
mridc/collections/reconstruction/models/cascadenet/ccnn_block.py
|
jerke123/mridc
|
7e22ac50f8df73f2305d61979da2a5d59874546e
|
[
"Apache-2.0"
] | null | null | null |
mridc/collections/reconstruction/models/cascadenet/ccnn_block.py
|
jerke123/mridc
|
7e22ac50f8df73f2305d61979da2a5d59874546e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
__author__ = "Dimitrios Karkalousos"
import torch
from mridc.collections.common.parts.fft import fft2c, ifft2c
from mridc.collections.common.parts.utils import complex_conj, complex_mul
class CascadeNetBlock(torch.nn.Module):
"""
Model block for CascadeNet & Convolution Recurrent Neural Network.
This model applies a combination of soft data consistency with the input model as a regularizer.
A series of these blocks can be stacked to form the full variational network.
"""
def __init__(self, model: torch.nn.Module, fft_type: str = "orthogonal", no_dc: bool = False):
"""
Initialize the model block.
Args:
model: Model to apply soft data consistency.
fft_type: Type of FFT to use.
no_dc: Whether to remove the DC component.
"""
super().__init__()
self.model = model
self.fft_type = fft_type
self.no_dc = no_dc
self.dc_weight = torch.nn.Parameter(torch.ones(1))
def sens_expand(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
"""
Expand the sensitivity maps to the same size as the input.
Args:
x: Input data.
sens_maps: Sensitivity maps.
Returns:
SENSE reconstruction expanded to the same size as the input.
"""
return fft2c(complex_mul(x, sens_maps), fft_type=self.fft_type)
def sens_reduce(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
"""
Reduce the sensitivity maps to the same size as the input.
Args:
x: Input data.
sens_maps: Sensitivity maps.
Returns:
SENSE reconstruction reduced to the same size as the input.
"""
x = ifft2c(x, fft_type=self.fft_type)
return complex_mul(x, complex_conj(sens_maps)).sum(dim=1, keepdim=True)
def forward(
self,
pred: torch.Tensor,
ref_kspace: torch.Tensor,
sens_maps: torch.Tensor,
mask: torch.Tensor,
) -> torch.Tensor:
"""
Forward pass of the model.
Args:
pred: Predicted k-space data.
ref_kspace: Reference k-space data.
sens_maps: Sensitivity maps.
mask: Mask to apply to the data.
Returns
-------
Reconstructed image.
"""
zero = torch.zeros(1, 1, 1, 1, 1).to(pred)
soft_dc = torch.where(mask.bool(), pred - ref_kspace, zero) * self.dc_weight
eta = self.sens_reduce(pred, sens_maps)
eta = self.model(eta.squeeze(1).permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
eta = self.sens_expand(eta, sens_maps)
if not self.no_dc:
eta = pred - soft_dc - eta
return eta
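# Illustrative usage sketch (hypothetical regulariser and tensor shapes; not part of the module):
#   block = CascadeNetBlock(torch.nn.Conv2d(2, 2, kernel_size=3, padding=1), fft_type="orthogonal")
#   eta = block(pred_kspace, ref_kspace, sens_maps, mask)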
| 30.565217
| 100
| 0.604908
| 2,606
| 0.926743
| 0
| 0
| 0
| 0
| 0
| 0
| 1,353
| 0.481152
|
832fa03411fdc8cba2cd96e51a219e3ef9e4283a
| 975
|
py
|
Python
|
main.py
|
BL-Lac149597870/drugVQA
|
604703d66457c958ddc9eeb35268391edb6c4996
|
[
"MIT"
] | null | null | null |
main.py
|
BL-Lac149597870/drugVQA
|
604703d66457c958ddc9eeb35268391edb6c4996
|
[
"MIT"
] | null | null | null |
main.py
|
BL-Lac149597870/drugVQA
|
604703d66457c958ddc9eeb35268391edb6c4996
|
[
"MIT"
] | null | null | null |
'''
Author: QHGG
Date: 2021-02-27 13:42:43
LastEditTime: 2021-03-01 23:26:38
LastEditors: QHGG
Description:
FilePath: /drugVQA/main.py
'''
import torch
from sklearn import metrics
import warnings
warnings.filterwarnings("ignore")
torch.cuda.set_device(0)
print('cuda size == 1')
from trainAndTest import *
import time
def timeLable():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def main():
"""
Parsing command line parameters, reading data, fitting and scoring a SEAL-CI model.
"""
losses,accs,testResults = train(trainArgs)
with open("logs/"+ timeLable() +"losses.txt", "w") as f:
f.writelines([str(log) + '\n' for log in losses])
with open("logs/"+ timeLable() +"accs.txt", "w") as f:
f.writelines([str(log) + '\n' for log in accs])
with open("logs/"+ timeLable() +"testResults.txt", "w") as f:
f.writelines([str(log) + '\n' for log in testResults])
if __name__ == "__main__":
main()
| 28.676471
| 87
| 0.645128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 372
| 0.381538
|
83300c0b6a409b6ab5643fe5a44ff448c026f263
| 4,773
|
py
|
Python
|
networkx/algorithms/tests/test_cuts.py
|
jebogaert/networkx
|
8563c3313223a53c548530f39c8cfb6e433539d3
|
[
"BSD-3-Clause"
] | 2
|
2020-11-25T12:01:15.000Z
|
2021-02-02T03:46:23.000Z
|
networkx/algorithms/tests/test_cuts.py
|
jebogaert/networkx
|
8563c3313223a53c548530f39c8cfb6e433539d3
|
[
"BSD-3-Clause"
] | 1
|
2020-11-15T23:07:09.000Z
|
2020-11-15T23:07:09.000Z
|
networkx/algorithms/tests/test_cuts.py
|
jebogaert/networkx
|
8563c3313223a53c548530f39c8cfb6e433539d3
|
[
"BSD-3-Clause"
] | 2
|
2020-12-21T11:41:13.000Z
|
2021-01-08T17:09:21.000Z
|
"""Unit tests for the :mod:`networkx.algorithms.cuts` module."""
import networkx as nx
class TestCutSize:
"""Unit tests for the :func:`~networkx.cut_size` function."""
def test_symmetric(self):
"""Tests that the cut size is symmetric."""
G = nx.barbell_graph(3, 0)
S = {0, 1, 4}
T = {2, 3, 5}
assert nx.cut_size(G, S, T) == 4
assert nx.cut_size(G, T, S) == 4
def test_single_edge(self):
"""Tests for a cut of a single edge."""
G = nx.barbell_graph(3, 0)
S = {0, 1, 2}
T = {3, 4, 5}
assert nx.cut_size(G, S, T) == 1
assert nx.cut_size(G, T, S) == 1
def test_directed(self):
"""Tests that each directed edge is counted once in the cut."""
G = nx.barbell_graph(3, 0).to_directed()
S = {0, 1, 2}
T = {3, 4, 5}
assert nx.cut_size(G, S, T) == 2
assert nx.cut_size(G, T, S) == 2
def test_directed_symmetric(self):
"""Tests that a cut in a directed graph is symmetric."""
G = nx.barbell_graph(3, 0).to_directed()
S = {0, 1, 4}
T = {2, 3, 5}
assert nx.cut_size(G, S, T) == 8
assert nx.cut_size(G, T, S) == 8
def test_multigraph(self):
"""Tests that parallel edges are each counted for a cut."""
G = nx.MultiGraph(["ab", "ab"])
assert nx.cut_size(G, {"a"}, {"b"}) == 2
class TestVolume:
"""Unit tests for the :func:`~networkx.volume` function."""
def test_graph(self):
G = nx.cycle_graph(4)
assert nx.volume(G, {0, 1}) == 4
def test_digraph(self):
G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0)])
assert nx.volume(G, {0, 1}) == 2
def test_multigraph(self):
edges = list(nx.cycle_graph(4).edges())
G = nx.MultiGraph(edges * 2)
assert nx.volume(G, {0, 1}) == 8
def test_multidigraph(self):
edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
G = nx.MultiDiGraph(edges * 2)
assert nx.volume(G, {0, 1}) == 4
class TestNormalizedCutSize:
"""Unit tests for the :func:`~networkx.normalized_cut_size`
function.
"""
def test_graph(self):
G = nx.path_graph(4)
S = {1, 2}
T = set(G) - S
size = nx.normalized_cut_size(G, S, T)
# The cut looks like this: o-{-o--o-}-o
expected = 2 * ((1 / 4) + (1 / 2))
assert expected == size
def test_directed(self):
G = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
S = {1, 2}
T = set(G) - S
size = nx.normalized_cut_size(G, S, T)
# The cut looks like this: o-{->o-->o-}->o
expected = 2 * ((1 / 2) + (1 / 1))
assert expected == size
class TestConductance:
"""Unit tests for the :func:`~networkx.conductance` function."""
def test_graph(self):
G = nx.barbell_graph(5, 0)
# Consider the singleton sets containing the "bridge" nodes.
# There is only one cut edge, and each set has volume five.
S = {4}
T = {5}
conductance = nx.conductance(G, S, T)
expected = 1 / 5
assert expected == conductance
class TestEdgeExpansion:
"""Unit tests for the :func:`~networkx.edge_expansion` function."""
def test_graph(self):
G = nx.barbell_graph(5, 0)
S = set(range(5))
T = set(G) - S
expansion = nx.edge_expansion(G, S, T)
expected = 1 / 5
assert expected == expansion
class TestNodeExpansion:
"""Unit tests for the :func:`~networkx.node_expansion` function."""
def test_graph(self):
G = nx.path_graph(8)
S = {3, 4, 5}
expansion = nx.node_expansion(G, S)
# The neighborhood of S has cardinality five, and S has
# cardinality three.
expected = 5 / 3
assert expected == expansion
class TestBoundaryExpansion:
"""Unit tests for the :func:`~networkx.boundary_expansion` function."""
def test_graph(self):
G = nx.complete_graph(10)
S = set(range(4))
expansion = nx.boundary_expansion(G, S)
        # The node boundary of S has cardinality six, and S has
        # cardinality four.
expected = 6 / 4
assert expected == expansion
class TestMixingExpansion:
"""Unit tests for the :func:`~networkx.mixing_expansion` function."""
def test_graph(self):
G = nx.barbell_graph(5, 0)
S = set(range(5))
T = set(G) - S
expansion = nx.mixing_expansion(G, S, T)
# There is one cut edge, and the total number of edges in the
# graph is twice the total number of edges in a clique of size
# five, plus one more for the bridge.
expected = 1 / (2 * (5 * 4 + 1))
assert expected == expansion
| 29.83125
| 75
| 0.550178
| 4,660
| 0.976325
| 0
| 0
| 0
| 0
| 0
| 0
| 1,388
| 0.290802
|
8330e631a49e6776f2efa9742d5ed0e6a7e38620
| 6,556
|
py
|
Python
|
src/utility.py
|
bbookman/demand
|
47101843ab84f4161e618edfa5a8e8fea2e1d955
|
[
"MIT"
] | null | null | null |
src/utility.py
|
bbookman/demand
|
47101843ab84f4161e618edfa5a8e8fea2e1d955
|
[
"MIT"
] | null | null | null |
src/utility.py
|
bbookman/demand
|
47101843ab84f4161e618edfa5a8e8fea2e1d955
|
[
"MIT"
] | null | null | null |
import sys, re, pdb
from bs4 import BeautifulSoup as beautiful
from datetime import datetime
import requests, logging
import timeout_decorator, pandas as pd
import socket, urllib3
def read_input_file():
    #todo - what if argument is not there or invalid?
    print_and_log('Reading input file')
    file = sys.argv[2]
    # Assumption: the input file is read into a list of stripped lines
    with open(file) as input_file:
        results = [line.strip() for line in input_file]
    return results
def get_zip_code():
# First argument ..
# todo what if arg is not there or invalid
print_and_log(f'Got command line zip code {sys.argv[1]} ', 'info')
return sys.argv[1]
def make_date_string():
stamp = datetime.now()
date_string = stamp.strftime('%Y-%d-%m-%H-%M-%S')
return date_string
def make_time_string():
stamp = datetime.now()
time_string = stamp.strftime('%H:%M')
return time_string
def build_site_url(template, title, zipcode='', radius='90', age='60'):
""" Makes an url with each query item inserted into the url template
site_id: type = str, value of site id like 'indeed' or 'careerbuilder'
template: type = str, the url template. example: 'http://indeed.com?{}&afg=&rfr=&title={}'
title: type = str, job title using escape characters that are site dependent. example: 'software+quality+engineer'
zipcode: type = str, ZIP CODE
radius: type = str, represents the radius of the job search. example: '50' (miles)
age: type = str, the number of days the job description has been posted. example: '30' (days)
returns an url string
"""
url = template.format(title = title, zipcode = zipcode, radius = radius, age = age)
print_and_log(f'Built site url: {url}')
return url
def build_job_title(title, title_separator):
""" Takes list of title words and adds site specific separator between words
title: string
separator: type = string
returns string
"""
result =''
words = title.split()
for word in words:
result+= word + title_separator
return result[:-1]
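# Example (illustrative): build_job_title('software quality engineer', '+')
# returns 'software+quality+engineer'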
@timeout_decorator.timeout(10)
def get_all_anchors(soup):
print_and_log('Getting All Anchors')
return soup('a')
@timeout_decorator.timeout(10)
def get_anchors_by_selector(title_selector, soup):
print_and_log(f'Getting Anchors by selector: {title_selector}')
return soup('a', title_selector)
def add_site_id(site_id, ref):
print_and_log('Adding site id to href for complete url')
return f'http://{site_id}.com{ref}'
def title_meets_threshold(title, title_word_values, threshold=90):
print('Evaluating job title against threshold')
total = 0
if not title:
return False
t = re.sub(r"(?<=[A-z])\&(?=[A-z])", " ", title.lower())
t = re.sub(r"(?<=[A-z])\-(?=[A-z])", " ", t)
for word, value in title_word_values.items():
if word.lower() in t:
total+=value
if total >= threshold:
print_and_log(f'Met threshold: {title}')
return True
print_and_log(f'Not met threshold: {title}')
return False
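# Example (illustrative, hypothetical word values): with
# title_word_values = {'software': 50, 'engineer': 50} and the default threshold of 90,
# title_meets_threshold('Senior Software Engineer', title_word_values) returns True,
# because the matched word values sum to 100.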
@timeout_decorator.timeout(10)
def get_soup(url, skill_dict):
soup = None
if url == 'http://dice.com/jobs/browsejobs':
print_and_log(make_data_frame(skill_dict))
sys.exit()
    elif url in ('http://simplyhired.com', 'https://www.simplyhired'):
return soup
else:
print_and_log(f'Getting raw html from: {url}' )
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:63.0) Gecko/20100101 Firefox/63.0'
session = requests.Session()
session.headers.update({'User-Agent': user_agent})
try:
response = session.get(url)
body = response.text
soup = beautiful(body, 'html.parser')
print_and_log('Got raw html')
except urllib3.exceptions.NewConnectionError as e:
print_and_log(e, 'error')
write_file(skill_dict, title='new_connection_error_encountered_captured_results')
except socket.gaierror as s:
print_and_log(s, 'error')
write_file(skill_dict, title='socket_error_encountered_captured_results')
except socket.error as e:
print_and_log(e, 'error')
write_file(skill_dict, title='socket_error_encountered_captured_results')
except Exception as e:
print_and_log(e, 'error')
write_file(skill_dict, title='exception_encountered_captured_results')
except BaseException as b:
print_and_log(b, 'error')
write_file(skill_dict, title='exception_encountered_captured_results')
return soup
def clean_text(text):
body = re.split(r'\W+', text)
return [word.lower() for word in body]
@timeout_decorator.timeout(10)
def get_title_by_tag(selector, tag, soup):
print_and_log(f'Getting job title by tag: {tag}, selector: {selector}')
data = soup(tag, selector)
text = ''
if data:
text = data[0].text
text = text.strip('\n')
text = text.strip()
text = text.rstrip()
text = text.lstrip()
print_and_log(f'Got title: {text}')
return text
@timeout_decorator.timeout(10)
def filter_links(links, link_selector):
print_and_log(f'Filtering links, selector:{link_selector}')
return [link for link in links if link_selector.lower() in link.lower()]
def like(string):
"""
Return a compiled regular expression that matches the given
string with any prefix and postfix, e.g. if string = "hello",
the returned regex matches r".*hello.*"
"""
    MATCH_ALL = r'.*'  # wildcard for any (possibly empty) prefix/postfix
    string_ = string
    if not isinstance(string_, str):
        string_ = str(string_)
    regex = MATCH_ALL + re.escape(string_) + MATCH_ALL
return re.compile(regex, flags=re.DOTALL)
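# Example (illustrative): like('hello') compiles to r'.*hello.*', so
# bool(like('hello').match('say hello world')) is True.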
def set_log(filename, level): #todo level options
logging.basicConfig(filename=filename, level=level)
def report(e: Exception):
logging.exception(str(e))
def print_and_log(text, level = 'info'):
print(text)
if level == 'debug':
logging.debug(text)
elif level == 'info':
logging.info(text)
elif level == 'warning':
logging.warning(text)
def make_data_frame(skill_dict):
series = pd.Series(skill_dict)
df = series.to_frame('skill_count')
df.sort_values('skill_count', ascending=False, inplace=True)
df['percent'] = df['skill_count'] / df['skill_count'].sum() * 100
df.round(2)
return df
def write_file(skill_dict, zipcode = '99999', title = 'RESULTS', ):
d = make_date_string()
file_name = f'{title}_{zipcode}_{d}results.txt'
with open(file_name, 'w') as file:
file.write(f'[{title}: [{zipcode}: {skill_dict} ]]')
| 33.111111
| 119
| 0.657413
| 0
| 0
| 0
| 0
| 2,492
| 0.38011
| 0
| 0
| 2,310
| 0.352349
|
8331c341859f7ceb90f3dad9bbc18d41377413e5
| 1,940
|
py
|
Python
|
section_11_(api)/dicts_and_lists.py
|
hlcooll/python_lessons
|
3790f98cbc5a0721fcfc9e5f52ba79a64878f362
|
[
"MIT"
] | 425
|
2015-01-13T03:19:10.000Z
|
2022-03-13T00:34:44.000Z
|
section_11_(api)/dicts_and_lists.py
|
Supercodero/python-lessons
|
38409c318e7a62d30b2ffd68f8a7a5a5ec00778d
|
[
"MIT"
] | null | null | null |
section_11_(api)/dicts_and_lists.py
|
Supercodero/python-lessons
|
38409c318e7a62d30b2ffd68f8a7a5a5ec00778d
|
[
"MIT"
] | 178
|
2015-01-08T05:01:05.000Z
|
2021-12-02T00:56:58.000Z
|
# Dictionaries and lists, together
# Loading from https://raw.githubusercontent.com/shannonturner/education-compliance-reports/master/investigations.json
investigations = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-112.073032,
33.453527
]
},
"properties": {
"marker-symbol": "marker",
"marker-color": "#D4500F",
"address": " AZ ",
"name": "Arizona State University"
}
},
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-121.645734,
39.648248
]
},
"properties": {
"marker-symbol": "marker",
"marker-color": "#D4500F",
"address": " CA ",
"name": "Butte-Glen Community College District"
}
},
]
}
# The first level is a dictionary with two keys: type and features
# type's value is a string: FeatureCollection
# features' value is a list of dictionaries
# We're going to focus on the features list.
# Each item in the features list is a dictionary that has three keys: type, geometry, and properties
# If we wanted to access all of the properies for the first map point, here's how:
print(investigations['features'][0]['properties'])
# list of dictionaries ^ ^ ^
# first map point | | properties
# {
# "marker-symbol": "marker",
# "marker-color": "#D4500F",
# "address": " AZ ",
# "name": "Arizona State University"
# }
# As we see above, properties is itself a dictionary
# To get the name of that map point:
print(investigations['features'][0]['properties']['name'])
# Arizona State University
# Generally speaking, if what's between the square brackets is a number, you're accessing a list.
# If it's a string, you're accessing a dictionary.
# If you get stuck or are getting errors, try printing out the item and the key or index.
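# A further example (illustrative, not in the original lesson): loop over the
# features list and print each investigation's name.
for feature in investigations['features']:
    print(feature['properties']['name'])
# Arizona State University
# Butte-Glen Community College District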
| 26.216216
| 118
| 0.625258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,515
| 0.780928
|
833213154f6c6064adf75a6066412d88861a6169
| 19,345
|
py
|
Python
|
stickers/__init__.py
|
secretisdead/stickers
|
5159c637de2c204fdbdc6aafbebca949c492c203
|
[
"MIT"
] | null | null | null |
stickers/__init__.py
|
secretisdead/stickers
|
5159c637de2c204fdbdc6aafbebca949c492c203
|
[
"MIT"
] | null | null | null |
stickers/__init__.py
|
secretisdead/stickers
|
5159c637de2c204fdbdc6aafbebca949c492c203
|
[
"MIT"
] | 1
|
2021-09-05T06:18:01.000Z
|
2021-09-05T06:18:01.000Z
|
import uuid
import time
import re
from ipaddress import ip_address
from enum import Enum
from datetime import datetime, timezone
from sqlalchemy import Table, Column, PrimaryKeyConstraint, LargeBinary as sqla_binary, Float
from sqlalchemy import Integer, String, MetaData, distinct
from sqlalchemy.dialects.mysql import VARBINARY as mysql_binary
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func, and_, or_
from statement_helper import sort_statement, paginate_statement, id_filter
from statement_helper import time_cutoff_filter, string_like_filter
from statement_helper import string_equal_filter
from statement_helper import bitwise_filter
from idcollection import IDCollection
from parse_id import parse_id, get_id_bytes, generate_or_parse_id
class Sticker:
def __init__(
self,
id=None,
creation_time=None,
name='',
display='',
category='',
category_order=0,
group_bits=0,
):
self.id, self.id_bytes = generate_or_parse_id(id)
if None == creation_time:
creation_time = time.time()
self.creation_time = int(creation_time)
self.creation_datetime = datetime.fromtimestamp(
self.creation_time,
timezone.utc,
)
self.name = str(name)
self.display = str(display)
self.category = str(category)
self.category_order = int(category_order)
if isinstance(group_bits, int):
group_bits = group_bits.to_bytes(2, 'big')
else:
group_bits = bytes(group_bits)
self.group_bits = group_bits
class CollectedSticker:
def __init__(
self,
id=None,
receive_time=None,
user_id='',
sticker_id='',
):
self.id, self.id_bytes = generate_or_parse_id(id)
if None == receive_time:
receive_time = time.time()
self.receive_time = int(receive_time)
self.receive_datetime = datetime.fromtimestamp(
self.receive_time,
timezone.utc,
)
self.user_id, self.user_id_bytes = parse_id(user_id)
self.sticker_id, self.sticker_id_bytes = parse_id(sticker_id)
self.sticker = None
class StickerPlacement:
def __init__(
self,
id=None,
placement_time=None,
subject_id='',
user_id='',
sticker_id='',
position_x=0.0,
position_y=0.0,
rotation=0.0,
scale=0.0,
):
self.id, self.id_bytes = generate_or_parse_id(id)
if None == placement_time:
placement_time = time.time()
self.placement_time = int(placement_time)
self.placement_datetime = datetime.fromtimestamp(
self.placement_time,
timezone.utc,
)
self.subject_id, self.subject_id_bytes = generate_or_parse_id(subject_id)
self.user_id, self.user_id_bytes = parse_id(user_id)
self.sticker_id, self.sticker_id_bytes = parse_id(sticker_id)
self.position_x = float(position_x)
self.position_y = float(position_y)
self.rotation = float(rotation)
self.scale = float(scale)
self.sticker = None
class Stickers:
def __init__(self, engine, db_prefix='', install=False, connection=None):
self.engine = engine
self.engine_session = sessionmaker(bind=self.engine)()
self.db_prefix = db_prefix
self.name_length = 16
self.display_length = 32
self.category_length = 16
metadata = MetaData()
		default_bytes = b'\x00' * 16  # 16 zero bytes to match the Binary(16) columns
if 'mysql' == self.engine_session.bind.dialect.name:
Binary = mysql_binary
else:
Binary = sqla_binary
# stickers tables
self.stickers = Table(
self.db_prefix + 'stickers',
metadata,
Column('id', Binary(16), default=default_bytes),
Column('creation_time', Integer, default=0),
Column('name', String(self.name_length)),
Column('display', String(self.display_length)),
Column('category', String(self.category_length)),
Column('category_order', Integer, default=0),
Column('group_bits', Integer, default=0),
PrimaryKeyConstraint('id'),
)
# collected stickers tables
self.collected_stickers = Table(
self.db_prefix + 'collected_stickers',
metadata,
Column('id', Binary(16), default=default_bytes),
Column('receive_time', Integer, default=0),
Column('user_id', Binary(16), default=default_bytes),
Column('sticker_id', Binary(16), default=default_bytes),
PrimaryKeyConstraint('id'),
)
# placed stickers tables
self.sticker_placements = Table(
self.db_prefix + 'sticker_placements',
metadata,
Column('id', Binary(16), default=default_bytes),
Column('placement_time', Integer, default=0),
Column('subject_id', Binary(16), default=default_bytes),
Column('user_id', Binary(16), default=default_bytes),
Column('sticker_id', Binary(16), default=default_bytes),
Column('position_x', Float, default=0),
Column('position_y', Float, default=0),
Column('rotation', Float, default=0),
Column('scale', Float, default=0),
PrimaryKeyConstraint('id'),
)
if connection:
self.connection = connection
else:
self.connection = self.engine.connect()
if install:
for table in [
self.stickers,
self.collected_stickers,
self.sticker_placements,
]:
table.create(bind=self.engine, checkfirst=True)
def uninstall(self):
for table in [
self.stickers,
self.collected_stickers,
self.sticker_placements,
]:
table.drop(self.engine)
# retrieve stickers
def get_sticker(self, id):
stickers = self.search_stickers(filter={'ids': id})
return stickers.get(id)
def prepare_stickers_search_statement(self, filter):
conditions = []
conditions += id_filter(filter, 'ids', self.stickers.c.id)
conditions += time_cutoff_filter(
filter,
'created',
self.stickers.c.creation_time,
)
conditions += string_like_filter(
filter,
'name',
self.stickers.c.name,
)
conditions += string_like_filter(
filter,
'display',
self.stickers.c.display,
)
conditions += string_equal_filter(
filter,
'category',
self.stickers.c.category,
)
conditions += bitwise_filter(
filter,
'group_bits',
self.stickers.c.group_bits,
)
statement = self.stickers.select()
if conditions:
statement = statement.where(and_(*conditions))
return statement
def count_stickers(self, filter={}):
statement = self.prepare_stickers_search_statement(filter)
statement = statement.with_only_columns(
[func.count(self.stickers.c.id)]
)
return self.connection.execute(statement).fetchone()[0]
def search_stickers(
self,
filter={},
sort='',
order='',
page=0,
perpage=None
):
statement = self.prepare_stickers_search_statement(filter)
statement = sort_statement(
statement,
self.stickers,
sort,
order,
'creation_time',
True,
[
'creation_time',
'id',
],
)
statement = paginate_statement(statement, page, perpage)
result = self.connection.execute(statement).fetchall()
if 0 == len(result):
return IDCollection()
stickers = IDCollection()
for row in result:
sticker = Sticker(
id=row[self.stickers.c.id],
creation_time=row[self.stickers.c.creation_time],
name=row[self.stickers.c.name],
display=row[self.stickers.c.display],
category=row[self.stickers.c.category],
category_order=row[self.stickers.c.category_order],
group_bits=row[self.stickers.c.group_bits],
)
stickers.add(sticker)
return stickers
# manipulate stickers
def create_sticker(self, **kwargs):
sticker = Sticker(**kwargs)
# preflight check for existing id
if self.count_stickers(filter={'ids': sticker.id_bytes}):
raise ValueError('Sticker ID collision')
self.connection.execute(
self.stickers.insert(),
id=sticker.id_bytes,
creation_time=int(sticker.creation_time),
name=str(sticker.name),
display=str(sticker.display),
category=str(sticker.category),
category_order=int(sticker.category_order),
group_bits=int.from_bytes(sticker.group_bits, 'big'),
)
return sticker
def update_sticker(self, id, **kwargs):
sticker = Sticker(id=id, **kwargs)
updates = {}
if 'creation_time' in kwargs:
updates['creation_time'] = int(sticker.creation_time)
if 'name' in kwargs:
updates['name'] = str(sticker.name)
if 'display' in kwargs:
updates['display'] = str(sticker.display)
if 'category' in kwargs:
updates['category'] = str(sticker.category)
if 'category_order' in kwargs:
updates['category_order'] = int(sticker.category_order)
if 'group_bits' in kwargs:
updates['group_bits'] = int.from_bytes(sticker.group_bits, 'big')
if 0 == len(updates):
return
self.connection.execute(
self.stickers.update().values(**updates).where(
self.stickers.c.id == sticker.id_bytes
)
)
def delete_sticker(self, id):
id = get_id_bytes(id)
self.connection.execute(
self.collected_stickers.delete().where(
self.collected_stickers.c.id == id
)
)
self.connection.execute(
self.sticker_placements.delete().where(
self.sticker_placements.c.id == id
)
)
self.connection.execute(
self.stickers.delete().where(self.stickers.c.id == id)
)
# retrieve collected stickers
def get_collected_sticker(self, id):
collected_stickers = self.search_collected_stickers(filter={'ids': id})
return collected_stickers.get(id)
def prepare_collected_stickers_search_statement(self, filter):
conditions = []
conditions += id_filter(filter, 'ids', self.collected_stickers.c.id)
conditions += time_cutoff_filter(
filter,
'received',
self.collected_stickers.c.receive_time,
)
conditions += id_filter(
filter,
'user_ids',
self.collected_stickers.c.user_id,
)
conditions += id_filter(
filter,
'sticker_ids',
self.collected_stickers.c.sticker_id,
)
statement = self.collected_stickers.select()
if conditions:
statement = statement.where(and_(*conditions))
return statement
def count_collected_stickers(self, filter={}):
statement = self.prepare_collected_stickers_search_statement(filter)
statement = statement.with_only_columns(
[func.count(self.collected_stickers.c.id)]
)
return self.connection.execute(statement).fetchone()[0]
def search_collected_stickers(
self,
filter={},
sort='',
order='',
page=0,
perpage=None
):
statement = self.prepare_collected_stickers_search_statement(filter)
statement = sort_statement(
statement,
self.collected_stickers,
sort,
order,
'receive_time',
True,
[
'receive_time',
'id',
],
)
statement = paginate_statement(statement, page, perpage)
result = self.connection.execute(statement).fetchall()
if 0 == len(result):
return IDCollection()
sticker_ids = []
for row in result:
sticker_ids.append(row[self.collected_stickers.c.sticker_id])
		stickers = self.search_stickers(filter={'ids': sticker_ids})
collected_stickers = IDCollection()
for row in result:
collected_sticker = CollectedSticker(
id=row[self.collected_stickers.c.id],
receive_time=row[self.collected_stickers.c.receive_time],
user_id=row[self.collected_stickers.c.user_id],
sticker_id=row[self.collected_stickers.c.sticker_id],
)
if collected_sticker.sticker_id_bytes in stickers:
collected_sticker.sticker = stickers.get(
collected_sticker.sticker_id_bytes
)
collected_stickers.add(collected_sticker)
return collected_stickers
# manipulate collected stickers
def grant_sticker(self, sticker_id, user_id, receive_time=None):
sticker_id = get_id_bytes(sticker_id)
user_id = get_id_bytes(user_id)
collected_stickers = self.search_collected_stickers(
filter={'user_ids': user_id, 'sticker_ids': sticker_id},
)
if 0 < len(collected_stickers):
raise ValueError('Specified user already has the specified sticker')
collected_sticker = CollectedSticker(
user_id=user_id,
sticker_id=sticker_id,
receive_time=receive_time,
)
if self.count_collected_stickers(filter={'ids': collected_sticker.id_bytes}):
raise ValueError('Collected sticker ID collision')
self.connection.execute(
self.collected_stickers.insert(),
id=collected_sticker.id_bytes,
receive_time=int(collected_sticker.receive_time),
user_id=collected_sticker.user_id_bytes,
sticker_id=collected_sticker.sticker_id_bytes,
)
return collected_sticker
def revoke_sticker(self, id):
id = get_id_bytes(id)
self.connection.execute(
self.collected_stickers.delete().where(
self.collected_stickers.c.id == id,
)
)
def get_collected_stickers(self, user_id):
return self.search_collected_stickers(filter={'user_ids': user_id})
# retrieve sticker placements
def get_sticker_placement(self, id):
sticker_placements = self.search_sticker_placements(filter={'ids': id})
return sticker_placements.get(id)
def prepare_sticker_placements_search_statement(self, filter):
conditions = []
conditions += id_filter(filter, 'ids', self.sticker_placements.c.id)
conditions += time_cutoff_filter(
filter,
'placed',
self.sticker_placements.c.placement_time,
)
conditions += id_filter(
filter,
'subject_ids',
self.sticker_placements.c.subject_id,
)
conditions += id_filter(
filter,
'user_ids',
self.sticker_placements.c.user_id,
)
conditions += id_filter(
filter,
'sticker_ids',
self.sticker_placements.c.sticker_id,
)
statement = self.sticker_placements.select()
if conditions:
statement = statement.where(and_(*conditions))
return statement
def count_sticker_placements(self, filter={}):
statement = self.prepare_sticker_placements_search_statement(filter)
statement = statement.with_only_columns(
[func.count(self.sticker_placements.c.id)]
)
return self.connection.execute(statement).fetchone()[0]
def search_sticker_placements(
self,
filter={},
sort='',
order='',
page=0,
perpage=None
):
statement = self.prepare_sticker_placements_search_statement(filter)
statement = sort_statement(
statement,
self.sticker_placements,
sort,
order,
'placement_time',
True,
[
'placement_time',
'id',
],
)
statement = paginate_statement(statement, page, perpage)
result = self.connection.execute(statement).fetchall()
if 0 == len(result):
return IDCollection()
sticker_ids = []
for row in result:
sticker_ids.append(row[self.sticker_placements.c.sticker_id])
		stickers = self.search_stickers(filter={'ids': sticker_ids})
sticker_placements = IDCollection()
for row in result:
sticker_placement = StickerPlacement(
id=row[self.sticker_placements.c.id],
placement_time=row[self.sticker_placements.c.placement_time],
subject_id=row[self.sticker_placements.c.subject_id],
user_id=row[self.sticker_placements.c.user_id],
sticker_id=row[self.sticker_placements.c.sticker_id],
position_x=row[self.sticker_placements.c.position_x],
position_y=row[self.sticker_placements.c.position_y],
rotation=row[self.sticker_placements.c.rotation],
scale=row[self.sticker_placements.c.scale],
)
if sticker_placement.sticker_id_bytes in stickers:
sticker_placement.sticker = stickers.get(
sticker_placement.sticker_id_bytes
)
sticker_placements.add(sticker_placement)
return sticker_placements
# manipulate sticker placements
def place_sticker(self, **kwargs):
sticker_placement = StickerPlacement(**kwargs)
self.connection.execute(
self.sticker_placements.insert(),
id=sticker_placement.id_bytes,
placement_time=int(sticker_placement.placement_time),
subject_id=sticker_placement.subject_id_bytes,
user_id=sticker_placement.user_id_bytes,
sticker_id=sticker_placement.sticker_id_bytes,
position_x=float(sticker_placement.position_x),
position_y=float(sticker_placement.position_y),
rotation=float(sticker_placement.rotation),
scale=float(sticker_placement.scale),
)
return sticker_placement
def unplace_sticker(self, id):
id = get_id_bytes(id)
self.connection.execute(
self.sticker_placements.delete().where(
self.sticker_placements.c.id == id
)
)
#TODO tests
def prune_user_sticker_placements(self, subject_id, user_id, maximum_stickers):
try:
subject_id = get_id_bytes(subject_id)
#TODO narrow catch
except:
return
try:
user_id = get_id_bytes(user_id)
#TODO narrow catch
except:
return
placements = self.search_sticker_placements(
filter={
'subject_ids': subject_id,
'user_ids': user_id,
},
sort='placement_time',
order='desc',
)
conditions = []
i = 0
for placement in placements.values():
i += 1
if i < maximum_stickers:
continue
conditions.append(self.sticker_placements.c.id == placement.id_bytes)
if not conditions:
return
statement = self.sticker_placements.delete().where(
and_(
self.sticker_placements.c.subject_id == subject_id,
self.sticker_placements.c.user_id == user_id,
or_(*conditions),
)
)
self.connection.execute(statement)
#TODO tests
def unplace_by_user(self, user_id):
try:
user_id = get_id_bytes(user_id)
#TODO narrow catch
except:
return
self.connection.execute(
self.sticker_placements.delete().where(
self.sticker_placements.c.user_id == user_id
)
)
# unique categories
def get_unique_categories(self):
statement = self.stickers.select().with_only_columns(
[self.stickers.c.category]
).group_by(self.stickers.c.category)
result = self.engine.execute(statement).fetchall()
unique_categories = []
for row in result:
unique_categories.append(row[self.stickers.c.category])
return unique_categories
#TODO tests
def get_user_unique_sticker_placement_counts(self, user_id):
user_id = get_id_bytes(user_id)
statement = self.sticker_placements.select().where(
self.sticker_placements.c.user_id == user_id
).with_only_columns(
[
self.sticker_placements.c.sticker_id,
func.count(distinct(self.sticker_placements.c.subject_id)),
]
).group_by(
self.sticker_placements.c.sticker_id
)
result = self.connection.execute(statement).fetchall()
unique_sticker_placement_counts = {}
for row in result:
sticker_id, count = row
sticker_id, sticker_id_bytes = parse_id(sticker_id)
unique_sticker_placement_counts[sticker_id] = count
return unique_sticker_placement_counts
#TODO tests
def get_subject_sticker_placement_counts(self, subject_ids):
if list != type(subject_ids):
subject_ids = [subject_ids]
conditions = []
for subject_id in subject_ids:
subject_id, subject_id_bytes = parse_id(subject_id)
conditions.append(
self.sticker_placements.c.subject_id == subject_id_bytes
)
statement = self.sticker_placements.select().where(
or_(*conditions)
).with_only_columns(
[
self.sticker_placements.c.subject_id,
func.count(self.sticker_placements.c.id),
]
).group_by(
self.sticker_placements.c.subject_id
)
result = self.connection.execute(statement).fetchall()
subject_sticker_placement_counts = {}
for row in result:
subject_id, count = row
subject_id, subject_id_bytes = parse_id(subject_id)
subject_sticker_placement_counts[subject_id] = count
return subject_sticker_placement_counts
# anonymization
def anonymize_id(self, id, new_id=None):
id = get_id_bytes(id)
if not new_id:
new_id = uuid.uuid4().bytes
self.connection.execute(
self.collected_stickers.update().values(user_id=new_id).where(
self.collected_stickers.c.user_id == id,
)
)
self.connection.execute(
self.sticker_placements.update().values(user_id=new_id).where(
self.sticker_placements.c.user_id == id,
)
)
return new_id
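# Illustrative usage sketch (assumes an in-memory SQLite engine; not part of the module):
#   from sqlalchemy import create_engine
#   stickers = Stickers(create_engine('sqlite://'), install=True)
#   star = stickers.create_sticker(name='star', display='Gold Star', category='basic')
#   stickers.grant_sticker(star.id, uuid.uuid4().bytes)
#   stickers.place_sticker(subject_id=uuid.uuid4().bytes, user_id=uuid.uuid4().bytes,
#                          sticker_id=star.id, position_x=0.5, position_y=0.5)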
| 27.208158
| 93
| 0.730887
| 18,566
| 0.959731
| 0
| 0
| 0
| 0
| 0
| 0
| 1,321
| 0.068286
|
833402a878296c2dae40def1c9fff8397df42c38
| 3,035
|
py
|
Python
|
include/MPE3.py
|
jhgalino/MPv2
|
2f5e29d67bccc4538c5aaad2e69e817041414199
|
[
"MIT"
] | null | null | null |
include/MPE3.py
|
jhgalino/MPv2
|
2f5e29d67bccc4538c5aaad2e69e817041414199
|
[
"MIT"
] | null | null | null |
include/MPE3.py
|
jhgalino/MPv2
|
2f5e29d67bccc4538c5aaad2e69e817041414199
|
[
"MIT"
] | null | null | null |
def differentiate(fxn: str) -> str:
if fxn == "x":
return "1"
dividedFxn = getFirstLevel(fxn)
coeffOrTrig: str = dividedFxn[0]
exponent: str = dividedFxn[2]
insideParentheses: str = dividedFxn[1]
if coeffOrTrig.isalpha():
ans = computeTrig(coeffOrTrig, insideParentheses)
ans = ans + "*" + differentiate(insideParentheses)
if ans.endswith("*1"):
ans = list(ans)
ans.pop()
ans.pop()
ans = "".join(ans)
return ans
if len(exponent) != 0:
if len(coeffOrTrig) != 0 and coeffOrTrig.isnumeric():
ans = computeExpWithCoeff(coeffOrTrig, insideParentheses, exponent)
ans = ans + "*" + differentiate(insideParentheses)
ans = ans.replace("^1", "")
if ans.endswith("*1"):
ans = list(ans)
ans.pop()
ans.pop()
ans = "".join(ans)
return ans
else:
ans = computeExpWithoutCoeff(insideParentheses, exponent)
ans = ans + "*" + differentiate(insideParentheses)
ans = ans.replace("^1", "")
if ans.endswith("*1"):
ans = list(ans)
ans.pop()
ans.pop()
ans = "".join(ans)
return ans
if len(coeffOrTrig) == 0 and len(exponent) == 0:
ans = "1" + "*" + differentiate(insideParentheses)
ans = ans.replace("^1", "")
if ans.endswith("*1"):
ans = list(ans)
ans.pop()
ans.pop()
ans = "".join(ans)
return ans
def getFirstLevel(function: str) -> list:
indexOfOpen = function.find("(")
indexOfClose = function.rfind(")")
function = list(function)
function[indexOfOpen] = "|"
function[indexOfClose] = "|"
function = "".join(function)
assert function.count("|") == 2, "| != 2" # assert division by 2
return function.split("|")
def computeTrig(trig: str, inside: str) -> str:
if trig == "sin":
return "(cos({}))".format(inside)
elif trig == "cos":
return "(-sin({}))".format(inside)
elif trig == "tan":
return "(sec({})^2)".format(inside)
if trig == "sec":
return "(sec({})tan({}))".format(inside, inside)
if trig == "csc":
return "(-csc({})cot({}))".format(inside, inside)
if trig == "cot":
return "(-csc({})^2)".format(inside)
def computeExpWithCoeff(coeff: str, inside: str, exp: str) -> str:
cf = int(coeff)
expnt = int(exp.replace("^", ""))
cf = cf * expnt
expnt -= 1
return "{}({})^{}".format(cf, inside, expnt)
def computeExpWithoutCoeff(inside: str, exp: str) -> str:
expnt = int(exp.replace("^", ""))
cf = int(exp.replace("^", ""))
expnt -= 1
return "{}({})^{}".format(cf, inside, expnt)
OTHER_RECURSIVE_FUNCTIONS = [
"getFirstLevel",
"computeTrig",
"computeExpWithCoeff",
"computeExpWithoutCoeff",
]
print(differentiate("3(x)^3"))
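# Expected output for the call above: 9(x)^2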
| 30.35
| 79
| 0.524547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 348
| 0.114662
|
8334d38451b05f8a06133e98e01f204b3df51a55
| 3,072
|
py
|
Python
|
obsolete_object_wise_scoring_ben.py
|
agk2000/catalyst_project
|
6bae324f24d6d6382e84dcf1f2fedf0d896371e1
|
[
"MIT"
] | null | null | null |
obsolete_object_wise_scoring_ben.py
|
agk2000/catalyst_project
|
6bae324f24d6d6382e84dcf1f2fedf0d896371e1
|
[
"MIT"
] | null | null | null |
obsolete_object_wise_scoring_ben.py
|
agk2000/catalyst_project
|
6bae324f24d6d6382e84dcf1f2fedf0d896371e1
|
[
"MIT"
] | 1
|
2021-09-11T14:55:26.000Z
|
2021-09-11T14:55:26.000Z
|
import sys
from mrs_utils import misc_utils, vis_utils
from mrs_utils import eval_utils
import os
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
# Creat object scorer class
osc = eval_utils.ObjectScorer(min_th=0.5, link_r=20, eps=2)
# Define the source
data_dir = '/scratch/sr365/Catalyst_data/2021_03_21_15_C_90/H3_raw'
conf_dir = '/scratch/sr365/Catalyst_data/2021_03_21_15_C_90/save_root/H3_img_H2_model'
save_name = 'H3_img_H2_model'
def get_conf_true_from_img(lbl_file, conf_file):
"""
The function to get the p r curve (object-wise) from a labelled photo and the
"""
lbl_img, conf_img = misc_utils.load_file(lbl_file)[:,:,0]/255, misc_utils.load_file(conf_file)
# Group objects
lbl_groups = osc.get_object_groups(lbl_img)
conf_groups = osc.get_object_groups(conf_img)
lbl_group_img = eval_utils.display_group(lbl_groups, lbl_img.shape[:2], need_return=True)
conf_group_img = eval_utils.display_group(conf_groups, conf_img.shape[:2], need_return=True)
# Score the conf map
conf_list, true_list = eval_utils.score(conf_img, lbl_img, min_th=0.5, link_r=10, iou_th=0.5)
return conf_list, true_list
def plot_PR_curve(conf_list, true_list, save_name='PR_curve'):
"""
The function to plot PR curve from a list of confidence and true list
"""
ap, p, r, _ = eval_utils.get_precision_recall(conf_list, true_list)
plt.plot(r[1:], p[1:])
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('recall')
plt.ylabel('precision')
plt.title('AP={:.2f}'.format(ap))
plt.tight_layout()
plt.savefig('../PR_curves/' + save_name + '.png')
if __name__ == '__main__':
large_conf_list, large_true_list = [], []
for file in os.listdir(conf_dir):
print("processing file: ", file)
if not file.endswith('_conf.png'):
continue
# get the file names
conf_file = os.path.join(conf_dir, file)
lbl_file = os.path.join(data_dir, file.replace('_conf',''))
# get the conf_list and true list
conf_list, true_list = get_conf_true_from_img(lbl_file, conf_file)
if len(conf_list) == 0 or len(true_list) == 0:
print("Either you don't have a true file or a ground truth", file)
continue
print("conf_list shape:", np.shape(conf_list))
print("true_list shape:", np.shape(true_list))
print("large conf list shape:", np.shape(large_conf_list))
print("large true list shape:", np.shape(large_true_list))
if len(large_conf_list) == 0:
large_conf_list = conf_list
large_true_list = true_list
else:
large_conf_list = np.concatenate((large_conf_list, conf_list), axis=0)
large_true_list = np.concatenate((large_true_list, true_list), axis=0)
np.save('../PR_curves/conf_list.npy', large_conf_list)
np.save('../PR_curves/true_list.npy', large_true_list)
plot_PR_curve(np.reshape(large_conf_list, [-1,]), np.reshape(large_true_list, [-1,]), save_name = save_name)
| 37.925926
| 112
| 0.682617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 764
| 0.248698
|
8335f3aa44031d6db4debfb0403cae80df9a5fe1
| 28,012
|
py
|
Python
|
compare.py
|
dreamersnme/future
|
87462ea1ef2dfd056e26ede85448af160df7d2ac
|
[
"MIT"
] | 86
|
2019-03-24T16:53:12.000Z
|
2022-02-25T11:48:57.000Z
|
compare.py
|
dreamersnme/future
|
87462ea1ef2dfd056e26ede85448af160df7d2ac
|
[
"MIT"
] | 1
|
2020-11-15T16:36:54.000Z
|
2020-11-15T16:36:54.000Z
|
compare.py
|
dreamersnme/future
|
87462ea1ef2dfd056e26ede85448af160df7d2ac
|
[
"MIT"
] | 33
|
2019-03-22T00:26:20.000Z
|
2022-03-25T02:56:17.000Z
|
# --------------------------- IMPORT LIBRARIES -------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
import data_preprocessing as dp
from sklearn.preprocessing import MinMaxScaler
import keras
from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model
from keras.layers import Dense, Dropout
# ------------------------- GLOBAL PARAMETERS -------------------------
# Start and end period of historical data in question
START_TRAIN = datetime(2008, 12, 31)
END_TRAIN = datetime(2017, 2, 12)
START_TEST = datetime(2017, 2, 12)
END_TEST = datetime(2019, 2, 22)
STARTING_ACC_BALANCE = 100000
NUMBER_NON_CORR_STOCKS = 5
# Number of times of no-improvement before training is stop.
PATIENCE = 30
# Pools of stocks to trade
DJI = ['MMM', 'AXP', 'AAPL', 'BA', 'CAT', 'CVX', 'CSCO', 'KO', 'DIS', 'XOM', 'GE', 'GS', 'HD', 'IBM', 'INTC', 'JNJ',
'JPM', 'MCD', 'MRK', 'MSFT', 'NKE', 'PFE', 'PG', 'UTX', 'UNH', 'VZ', 'WMT']
DJI_N = ['3M', 'American Express', 'Apple', 'Boeing', 'Caterpillar', 'Chevron', 'Cisco Systems', 'Coca-Cola', 'Disney'
, 'ExxonMobil', 'General Electric', 'Goldman Sachs', 'Home Depot', 'IBM', 'Intel', 'Johnson & Johnson',
'JPMorgan Chase', 'McDonalds', 'Merck', 'Microsoft', 'NIKE', 'Pfizer', 'Procter & Gamble',
'United Technologies', 'UnitedHealth Group', 'Verizon Communications', 'Wal Mart']
# Market and macroeconomic data to be used as context data
CONTEXT_DATA = ['^GSPC', '^DJI', '^IXIC', '^RUT', 'SPY', 'QQQ', '^VIX', 'GLD', '^TYX', '^TNX', 'SHY', 'SHV']
# --------------------------------- CLASSES ------------------------------------
class Trading:
def __init__(self, recovered_data_lstm, portfolio_stock_price, portfolio_stock_volume, test_set, non_corr_stocks):
self.test_set = test_set
self.ncs = non_corr_stocks
self.stock_price = portfolio_stock_price
self.stock_volume = portfolio_stock_volume
self.generate_signals(recovered_data_lstm)
def generate_signals(self, predicted_tomorrow_close):
"""
Generate trade signla from the prediction of the LSTM model
:param predicted_tomorrow_close:
:return:
"""
predicted_tomorrow_close.columns = self.stock_price.columns
predicted_next_day_returns = (predicted_tomorrow_close / predicted_tomorrow_close.shift(1) - 1).dropna()
next_day_returns = (self.stock_price / self.stock_price.shift(1) - 1).dropna()
signals = pd.DataFrame(index=predicted_tomorrow_close.index, columns=self.stock_price.columns)
for s in self.stock_price.columns:
for d in next_day_returns.index:
if predicted_tomorrow_close[s].loc[d] > self.stock_price[s].loc[d] and next_day_returns[s].loc[
d] > 0 and predicted_next_day_returns[s].loc[d] > 0:
signals[s].loc[d] = 2
elif predicted_tomorrow_close[s].loc[d] < self.stock_price[s].loc[d] and next_day_returns[s].loc[
d] < 0 and predicted_next_day_returns[s].loc[d] < 0:
signals[s].loc[d] = -2
elif predicted_tomorrow_close[s].loc[d] > self.stock_price[s].loc[d]:
signals[s].loc[d] = 2
elif next_day_returns[s].loc[d] > 0:
signals[s].loc[d] = 1
elif next_day_returns[s].loc[d] < 0:
signals[s].loc[d] = -1
elif predicted_next_day_returns[s].loc[d] > 0:
signals[s].loc[d] = 2
elif predicted_next_day_returns[s].loc[d] < 0:
signals[s].loc[d] = -1
else:
signals[s].loc[d] = 0
        signals.loc[self.stock_price.index[0]] = [0] * len(self.stock_price.columns)
self.signals = signals
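        # Note: each signal's sign says buy (+) / sell (-) / hold (0), and its magnitude
        # is reused later as the number of shares to trade in _buy() / _sell() below.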
def _sell(self, stock, sig, day):
"""
Perform and record sell transactions.
"""
# Get the index of the stock
idx = self.ncs.index(stock)
# Only need to sell the unit recommended by the trading agent, not necessarily all stock unit.
        num_share = min(abs(int(sig)), self.state[idx + 2])
commission = dp.Trading.commission(num_share, self.stock_price.loc[day][stock])
# Calculate slipped price. Though, at max trading volume of 10 shares, there's hardly any slippage
transacted_price = dp.Trading.slippage_price(self.stock_price.loc[day][stock], -num_share,
self.stock_volume.loc[day][stock])
        # If there is existing stock holding (holdings start at state index 2: [cash, unrealized PnL, holdings...])
        if self.state[idx + 2] > 0:
            # Only need to sell the unit recommended by the trading agent, not necessarily all stock unit.
            # Update account balance after transaction
            self.state[0] += (transacted_price * num_share) - commission
            # Update stock holding
            self.state[idx + 2] -= num_share
            # Reset transacted buy price record to 0.0 if there is no more stock holding
            if self.state[idx + 2] == 0.0:
                self.buy_price[idx] = 0.0
else:
pass
def _buy(self, stock, sig, day):
"""
Perform and record buy transactions.
"""
idx = self.ncs.index(stock)
# Calculate the maximum possible number of stock unit the current cash can buy
available_unit = self.state[0] // self.stock_price.loc[day][stock]
num_share = min(available_unit, int(sig))
# Deduct the traded amount from account balance. If available balance is not enough to purchase stock unit
# recommended by trading agent's action, just use what is left.
commission = dp.Trading.commission(num_share, self.stock_price.loc[day][stock])
# Calculate slipped price. Though, at max trading volume of 10 shares, there's hardly any slippage
transacted_price = dp.Trading.slippage_price(self.stock_price.loc[day][stock], num_share,
self.stock_volume.loc[day][stock])
# Revise number of share to trade if account balance does not have enough
if (self.state[0] - commission) < transacted_price * num_share:
num_share = (self.state[0] - commission) // transacted_price
self.state[0] -= (transacted_price * num_share) + commission
# If there are existing stock holding already, calculate the average buy price
if self.state[idx + 2] > 0.0:
existing_unit = self.state[idx + 2]
previous_buy_price = self.buy_price[idx]
additional_unit = min(available_unit, int(sig))
new_holding = existing_unit + additional_unit
self.buy_price[idx] = ((existing_unit * previous_buy_price) + (
self.stock_price.loc[day][stock] * additional_unit)) / new_holding
# if there is no existing stock holding, simply record the current buy price
elif self.state[idx + 2] == 0.0:
self.buy_price[idx] = self.stock_price.loc[day][stock]
# Update stock holding at its index
        self.state[idx + 2] += min(available_unit, int(sig))
def execute_trading(self, non_corr_stocks):
"""
This function performs long only trades for the LSTM model.
"""
# The money in the trading account
self.acc_balance = [STARTING_ACC_BALANCE]
self.total_asset = self.acc_balance
self.portfolio_asset = [0.0]
self.buy_price = np.zeros((1, len(non_corr_stocks))).flatten()
# Unrealized profit and loss
self.unrealized_pnl = [0.0]
# The value of all-stock holdings
self.portfolio_value = 0.0
# The state of the trading environment, defined by account balance, unrealized profit and loss, relevant
# stock technical data & current stock holdings
self.state = self.acc_balance + self.unrealized_pnl + [0 for i in range(len(non_corr_stocks))]
# Slide through the timeline
for d in self.test_set.index[:-1]:
signals = self.signals.loc[d]
# Get the stocks to be sold
sell_stocks = signals[signals < 0].sort_values(ascending=True)
# Get the stocks to be bought
buy_stocks = signals[signals > 0].sort_values(ascending=True)
for idx, sig in enumerate(sell_stocks):
self._sell(sell_stocks.index[idx], sig, d)
for idx, sig in enumerate(buy_stocks):
self._buy(buy_stocks.index[idx], sig, d)
self.unrealized_pnl = np.sum(np.array(self.stock_price.loc[d] - self.buy_price) * np.array(
self.state[2:]))
# Current state space
self.state = [self.state[0]] + [self.unrealized_pnl] + list(self.state[2:])
# Portfolio value is the current stock prices multiply with their respective holdings
portfolio_value = sum(np.array(self.stock_price.loc[d]) * np.array(self.state[2:]))
# Total asset = account balance + portfolio value
total_asset_ending = self.state[0] + portfolio_value
# Update account balance statement
self.acc_balance = np.append(self.acc_balance, self.state[0])
# Update portfolio value statement
self.portfolio_asset = np.append(self.portfolio_asset, portfolio_value)
# Update total asset statement
self.total_asset = np.append(self.total_asset, total_asset_ending)
trading_book = pd.DataFrame(index=self.test_set.index,
columns=["Cash balance", "Portfolio value", "Total asset", "Returns", "CumReturns"])
trading_book["Cash balance"] = self.acc_balance
trading_book["Portfolio value"] = self.portfolio_asset
trading_book["Total asset"] = self.total_asset
trading_book["Returns"] = trading_book["Total asset"] / trading_book["Total asset"].shift(1) - 1
trading_book["CumReturns"] = trading_book["Returns"].add(1).cumprod().fillna(1)
trading_book.to_csv('./test_result/trading_book_backtest.csv')
kpi = dp.MathCalc.calc_kpi(trading_book)
kpi.to_csv('./test_result/kpi_backtest.csv')
print("\n")
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print(
" KPI of RNN-LSTM modelled trading strategy for a portfolio of {} non-correlated stocks".format(
NUMBER_NON_CORR_STOCKS))
print(kpi)
print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
return trading_book, kpi
class Data_ScaleSplit:
"""
    This class preprocesses data for the LSTM model.
"""
def __init__(self, X, selected_stocks_price, train_portion):
self.X = X
self.stock_price = selected_stocks_price
self.generate_labels()
self.scale_data()
self.split_data(train_portion)
def generate_labels(self):
"""
Generate label data for tomorrow's prediction.
"""
self.Y = self.stock_price.shift(-1)
self.Y.columns = [c + '_Y' for c in self.Y.columns]
def scale_data(self):
"""
        Scale the X and Y data with a MinMax scaler.
        The scaler is fitted on the train set only and then applied to the test set to avoid look-ahead bias.
"""
self.XY = pd.concat([self.X, self.Y], axis=1).dropna()
train_set = self.XY.loc[START_TRAIN:END_TRAIN]
test_set = self.XY.loc[START_TEST:END_TEST]
# MinMax scaling
minmaxed_scaler = MinMaxScaler(feature_range=(0, 1))
self.minmaxed = minmaxed_scaler.fit(train_set)
train_set_matrix = minmaxed_scaler.transform(train_set)
test_set_matrix = minmaxed_scaler.transform(test_set)
self.train_set_matrix_df = pd.DataFrame(train_set_matrix, index=train_set.index, columns=train_set.columns)
self.test_set_matrix_df = pd.DataFrame(test_set_matrix, index=test_set.index, columns=test_set.columns)
self.XY = pd.concat([self.train_set_matrix_df, self.test_set_matrix_df], axis=0)
# print ("Train set shape: ", train_set_matrix.shape)
# print ("Test set shape: ", test_set_matrix.shape)
def split_data(self, train_portion):
"""
Perform train test split with cut off date defined.
"""
df_values = self.XY.values
# split into train and test sets
train = df_values[:int(train_portion), :]
test = df_values[int(train_portion):, :]
# split into input and outputs
train_X, self.train_y = train[:, :-5], train[:, -5:]
test_X, self.test_y = test[:, :-5], test[:, -5:]
# reshape input to be 3D [samples, timesteps, features]
self.train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
self.test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print("\n")
print("Dataset shapes >")
print("Train feature data shape:", self.train_X.shape)
print("Train label data shape:", self.train_y.shape)
print("Test feature data shape:", self.test_X.shape)
print("Test label data shape:", self.test_y.shape)
def get_prediction(self, model_lstm):
"""
Get the model prediction, inverse transform scaling to get back to original price and
reassemble the full XY dataframe.
"""
# Get the model to predict test_y
predicted_y_lstm = model_lstm.predict(self.test_X, batch_size=None, verbose=0, steps=None)
# Get the model to generate train_y
trained_y_lstm = model_lstm.predict(self.train_X, batch_size=None, verbose=0, steps=None)
# combine the model generated train_y and test_y to create the full_y
y_lstm = pd.DataFrame(data=np.vstack((trained_y_lstm, predicted_y_lstm)),
columns=[c + '_LSTM' for c in self.XY.columns[-5:]], index=self.XY.index)
# Combine the original full length y with model generated y
lstm_y_df = pd.concat([self.XY[self.XY.columns[-5:]], y_lstm], axis=1)
# Get the full length XY data with the length of model generated y
lstm_df = self.XY.loc[lstm_y_df.index]
# Replace the full length XY data's Y with the model generated Y
lstm_df[lstm_df.columns[-5:]] = lstm_y_df[lstm_y_df.columns[-5:]]
# Inverse transform it to get back the original data, the model generated y would be transformed to reveal its true predicted value
recovered_data_lstm = self.minmaxed.inverse_transform(lstm_df)
# Create a dataframe from it
self.recovered_data_lstm = pd.DataFrame(data=recovered_data_lstm, columns=self.XY.columns, index=lstm_df.index)
return self.recovered_data_lstm
def get_train_test_set(self):
"""
Get the split X and y data.
"""
return self.train_X, self.train_y, self.test_X, self.test_y
def get_all_data(self):
"""
Get the full XY data and the original stock price.
"""
return self.XY, self.stock_price
class Model:
"""
    This class contains all the functions required to build an LSTM or LSTM-CNN model.
    It also offers an option to load a pre-built model.
"""
@staticmethod
def train_model(model, train_X, train_y, model_type):
"""
Try to load a pre-built model.
        Otherwise fit a new model with the training data. Once training is done, save the model.
"""
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=PATIENCE)
if model_type == "LSTM":
batch_size = 4
mc = ModelCheckpoint('./model/best_lstm_model.h5', monitor='val_loss', save_weights_only=False,
mode='min', verbose=1, save_best_only=True)
try:
model = load_model('./model/best_lstm_model.h5')
print("\n")
print("Loading pre-saved model ...")
except:
print("\n")
print("No pre-saved model, training new model.")
pass
elif model_type == "CNN":
batch_size = 8
            mc = ModelCheckpoint('./model/best_cnn_model.h5', monitor='val_loss', save_weights_only=False,
                                 mode='min', verbose=1, save_best_only=True)
try:
model = load_model('./model/best_cnn_model.h5')
print("\n")
print("Loading pre-saved model ...")
except:
print("\n")
print("No pre-saved model, training new model.")
pass
# fit network
history = model.fit(
train_X,
train_y,
epochs=500,
batch_size=batch_size,
validation_split=0.2,
verbose=2,
shuffle=True,
# callbacks=[es, mc, tb, LearningRateTracker()])
callbacks=[es, mc])
if model_type == "LSTM":
model.save('./model/best_lstm_model.h5')
elif model_type == "CNN":
model.save('./model/best_cnn_model.h5')
return history, model
@staticmethod
def plot_training(history,nn):
"""
Plot the historical training loss.
"""
# plot history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.title('Training loss history for {} model'.format(nn))
plt.savefig('./train_result/training_loss_history_{}.png'.format(nn))
plt.show()
@staticmethod
def build_rnn_model(train_X):
"""
Build the RNN model architecture.
"""
# design network
print("\n")
print("RNN LSTM model architecture >")
model = Sequential()
model.add(LSTM(128, kernel_initializer='random_uniform',
bias_initializer='zeros', return_sequences=True,
recurrent_dropout=0.2,
input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dropout(0.5))
model.add(LSTM(64, kernel_initializer='random_uniform',
return_sequences=True,
# bias_regularizer=regularizers.l2(0.01),
# kernel_regularizer=regularizers.l1_l2(l1=0.01,l2=0.01),
# activity_regularizer=regularizers.l2(0.01),
bias_initializer='zeros'))
model.add(Dropout(0.5))
model.add(LSTM(64, kernel_initializer='random_uniform',
# bias_regularizer=regularizers.l2(0.01),
# kernel_regularizer=regularizers.l1_l2(l1=0.01,l2=0.01),
# activity_regularizer=regularizers.l2(0.01),
bias_initializer='zeros'))
model.add(Dropout(0.5))
model.add(Dense(5))
# optimizer = keras.optimizers.RMSprop(lr=0.25, rho=0.9, epsilon=1e-0)
# optimizer = keras.optimizers.Adagrad(lr=0.0001, epsilon=1e-08, decay=0.00002)
# optimizer = keras.optimizers.Adam(lr=0.0001)
# optimizer = keras.optimizers.Nadam(lr=0.0002, beta_1=0.9, beta_2=0.999, schedule_decay=0.004)
# optimizer = keras.optimizers.Adamax(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
optimizer = keras.optimizers.Adadelta(lr=0.2, rho=0.95, epsilon=None, decay=0.00001)
model.compile(loss='mae', optimizer=optimizer, metrics=['mse', 'mae'])
model.summary()
print("\n")
return model
# ------------------------------ Main Program ---------------------------------
def main():
print("\n")
print("######################### This program compare performance of trading strategies ############################")
print("\n")
print( "1. Simple Buy and hold strategy of a portfolio with {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
print( "2. Sharpe ratio optimized portfolio of {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
print( "3. Minimum variance optimized portfolio of {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
print( "4. Simple Buy and hold strategy ")
print( "1. Simple Buy and hold strategy ")
print("\n")
print("Starting to pre-process data for trading environment construction ... ")
# Data Preprocessing
dataset = dp.DataRetrieval()
dow_stocks_train, dow_stocks_test = dataset.get_all()
train_portion = len(dow_stocks_train)
dow_stock_volume = dataset.components_df_v[DJI]
portfolios = dp.Trading(dow_stocks_train, dow_stocks_test, dow_stock_volume.loc[START_TEST:END_TEST])
_, _, non_corr_stocks = portfolios.find_non_correlate_stocks(NUMBER_NON_CORR_STOCKS)
non_corr_stocks_data = dataset.get_adj_close(non_corr_stocks)
print("\n")
print("Base on non-correlation preference, {} stocks are selected for portfolio construction:".format(NUMBER_NON_CORR_STOCKS))
for stock in non_corr_stocks:
print(DJI_N[DJI.index(stock)])
print("\n")
sharpe_portfolio, min_variance_portfolio = portfolios.find_efficient_frontier(non_corr_stocks_data, non_corr_stocks)
print("Risk-averse portfolio with low variance:")
print(min_variance_portfolio.T)
print("High return portfolio with high Sharpe ratio")
print(sharpe_portfolio.T)
dow_stocks = pd.concat([dow_stocks_train, dow_stocks_test], axis=0)
test_values_buyhold, test_returns_buyhold, test_kpi_buyhold = \
portfolios.diversified_trade(non_corr_stocks, dow_stocks.loc[START_TEST:END_TEST][non_corr_stocks])
print("\n")
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print(" KPI of a simple buy and hold strategy for a portfolio of {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
print("------------------------------------------------------------------------------------")
print(test_kpi_buyhold)
print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
test_values_sharpe_optimized_buyhold, test_returns_sharpe_optimized_buyhold, test_kpi_sharpe_optimized_buyhold =\
portfolios.optimized_diversified_trade(non_corr_stocks, sharpe_portfolio, dow_stocks.loc[START_TEST:END_TEST][non_corr_stocks])
print("\n")
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print(" KPI of a simple buy and hold strategy for a Sharpe ratio optimized portfolio of {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
print("------------------------------------------------------------------------------------")
print(test_kpi_sharpe_optimized_buyhold)
print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
test_values_minvar_optimized_buyhold, test_returns_minvar_optimized_buyhold, test_kpi_minvar_optimized_buyhold = \
portfolios.optimized_diversified_trade(non_corr_stocks, min_variance_portfolio, dow_stocks.loc[START_TEST:END_TEST][non_corr_stocks])
print("\n")
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print(" KPI of a simple buy and hold strategy for a Minimum variance optimized portfolio of {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
print("------------------------------------------------------------------------------------")
print(test_kpi_minvar_optimized_buyhold)
print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
plot = dp.UserDisplay()
test_returns = dp.MathCalc.assemble_returns(test_returns_buyhold['Returns'],
test_returns_sharpe_optimized_buyhold['Returns'],
test_returns_minvar_optimized_buyhold['Returns'])
test_cum_returns = dp.MathCalc.assemble_cum_returns(test_returns_buyhold['CumReturns'],
test_returns_sharpe_optimized_buyhold['CumReturns'],
test_returns_minvar_optimized_buyhold['CumReturns'])
print("\n")
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print("Buy and hold strategies computation completed. Now creating prediction model using RNN LSTM architecture")
print("--------------------------------------------------------------------------------------------------------")
print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
    # Use feature data preprocessed by StarTrader, so that both models train on the same data for a fair comparison
input_states = pd.read_csv("./data/ddpg_input_states.csv", index_col='Date', parse_dates=True)
scale_split = Data_ScaleSplit(input_states, dow_stocks[non_corr_stocks], train_portion)
train_X, train_y, test_X, test_y = scale_split.get_train_test_set()
modelling = Model
model_lstm = modelling.build_rnn_model(train_X)
history_lstm, model_lstm = modelling.train_model(model_lstm, train_X, train_y, "LSTM")
print("RNN model loaded, now training the model again, training will stop after {} episodes no improvement")
modelling.plot_training(history_lstm, "LSTM")
print("Training completed, loading prediction using the trained RNN model >")
recovered_data_lstm = scale_split.get_prediction(model_lstm)
plot.plot_prediction(dow_stocks[non_corr_stocks].loc[recovered_data_lstm.index], recovered_data_lstm[recovered_data_lstm.columns[-5:]] , len(train_X), "LSTM")
# Get the original stock price with the prediction length
original_portfolio_stock_price = dow_stocks[non_corr_stocks].loc[recovered_data_lstm.index]
# Get the predicted stock price with the prediction length
predicted_portfolio_stock_price = recovered_data_lstm[recovered_data_lstm.columns[-5:]]
print("Bactesting the RNN-LSTM model now")
    # Run the backtest; the backtester is similar to the one used by StarTrader
backtest = Trading(predicted_portfolio_stock_price, original_portfolio_stock_price, dow_stock_volume[non_corr_stocks].loc[recovered_data_lstm.index], dow_stocks_test[non_corr_stocks], non_corr_stocks)
trading_book, kpi = backtest.execute_trading(non_corr_stocks)
# Load backtest result for StarTrader using DDPG as learning algorithm
ddpg_backtest = pd.read_csv('./test_result/trading_book_test_1.csv', index_col='Unnamed: 0', parse_dates=True)
print("Backtesting completed, plotting comparison of trading models")
# Compare performance on all 4 trading type
djia_daily = dataset._get_daily_data(CONTEXT_DATA[1]).loc[START_TEST:END_TEST]['Close']
#print(djia_daily)
all_benchmark_returns = test_returns
all_benchmark_returns['DJIA'] = dp.MathCalc.calc_return(djia_daily)
all_benchmark_returns['RNN LSTM'] = trading_book['Returns']
all_benchmark_returns['DDPG'] = ddpg_backtest['Returns']
all_benchmark_returns.to_csv('./test_result/all_strategies_returns.csv')
plot.plot_portfolio_risk(all_benchmark_returns)
all_benchmark_cum_returns = test_cum_returns
all_benchmark_cum_returns['DJIA'] = all_benchmark_returns['DJIA'].add(1).cumprod().fillna(1)
all_benchmark_cum_returns['RNN LSTM'] = trading_book['CumReturns']
all_benchmark_cum_returns['DDPG'] = ddpg_backtest['CumReturns']
all_benchmark_cum_returns.to_csv('./test_result/all_strategies_cum_returns.csv')
plot.plot_portfolio_return(all_benchmark_cum_returns)
if __name__ == '__main__':
main()
| 50.021429
| 205
| 0.63023
| 18,078
| 0.645366
| 0
| 0
| 4,380
| 0.156362
| 0
| 0
| 10,130
| 0.361631
|
83363c0ef913ccccece0efe1dc580e5eb1715e0d
| 239
|
py
|
Python
|
veinmind-backdoor/register.py
|
Jqqzzz/veinmind-tools
|
d7d35880efb4f5f5ad4c3f4685f5d0f4ec8e404f
|
[
"MIT"
] | 364
|
2022-02-09T07:05:00.000Z
|
2022-03-31T15:12:52.000Z
|
veinmind-backdoor/register.py
|
lionkgxu/veinmind-tools
|
415aae9da5f0e31275ecdf61a2cef088c766d381
|
[
"MIT"
] | 9
|
2022-03-03T01:02:15.000Z
|
2022-03-28T03:24:30.000Z
|
veinmind-backdoor/register.py
|
lionkgxu/veinmind-tools
|
415aae9da5f0e31275ecdf61a2cef088c766d381
|
[
"MIT"
] | 62
|
2022-02-10T09:54:15.000Z
|
2022-03-31T09:43:00.000Z
|
class register:
    # Maps each registered plugin name to its callable.
    plugin_dict = {}
    plugin_name = []
    @classmethod
    def register(cls, plugin_name):
        # Decorator factory: stores the decorated plugin in plugin_dict under
        # `plugin_name` and returns the plugin unchanged.
        def wrapper(plugin):
            cls.plugin_dict[plugin_name] = plugin
            return plugin
        return wrapper
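A minimal usage sketch for the registry above (illustrative only, not part of the original file; the plugin name "dummy" and the check() function are invented for this sketch):
@register.register("dummy")
def check(image):
    # The decorator stores check() in register.plugin_dict under "dummy"
    # and returns it unchanged, so it can still be called directly.
    return []
assert register.plugin_dict["dummy"] is check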
| 23.9
| 49
| 0.598326
| 239
| 1
| 0
| 0
| 176
| 0.736402
| 0
| 0
| 0
| 0
|
83383133f1e2636bee0ef87328b2ad1c26e323fd
| 1,288
|
py
|
Python
|
Desafio horario atual/__init__.py
|
pinheirogus/Curso-Python-Udemy
|
d6d52320426172e924081b9df619490baa8c6016
|
[
"MIT"
] | 1
|
2021-09-01T01:58:13.000Z
|
2021-09-01T01:58:13.000Z
|
Desafio horario atual/__init__.py
|
pinheirogus/Curso-Python-Udemy
|
d6d52320426172e924081b9df619490baa8c6016
|
[
"MIT"
] | null | null | null |
Desafio horario atual/__init__.py
|
pinheirogus/Curso-Python-Udemy
|
d6d52320426172e924081b9df619490baa8c6016
|
[
"MIT"
] | null | null | null |
# num1 = input("Digite um número inteiro: ")
#
#
# try:
#
# if num1.isnumeric() :
# num1 = int(num1)
# if (num1 % 2) == 0 :
# print("Você digitou um número par.")
# elif (num1 % 2) != 0:
# print("Você digitou um número ímpar.")
# else:
# print("Você não digitou um número válido.")
# else:
# print("Você não digitou um número inteiro.")
# except:
# print("Você não digitou um número.")
###################################################################################################################################
#hora_atual = input("Qual o horário atual? ")
###################################################################################################################################
nome = input("Por favor, digite seu primeiro nome: ")
try:
if nome.isnumeric():
print("Você não digitou um nome válido.")
else:
if len(nome) <= 4:
print("Seu nome é curto.")
elif (len(nome) == 5) or (len(nome) == 6):
print("Seu nome é normal.")
elif len(nome) > 6:
print("Seu nome é muito grande.")
else:
print("Você não digitou um nome válido.1")
except:
print("Você não digitou um nome válido.")
| 30.666667
| 131
| 0.420807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,000
| 0.759301
|
8338456e9d4d6099460e1bd2a49c5b5cf56d90a9
| 223
|
py
|
Python
|
05/b_average.py
|
koshin117/python-learning
|
68dd99e2f72fff7507a874c11511415fef3c9354
|
[
"MIT"
] | 1
|
2021-03-29T08:30:19.000Z
|
2021-03-29T08:30:19.000Z
|
05/b_average.py
|
koshin117/python-learning
|
68dd99e2f72fff7507a874c11511415fef3c9354
|
[
"MIT"
] | null | null | null |
05/b_average.py
|
koshin117/python-learning
|
68dd99e2f72fff7507a874c11511415fef3c9354
|
[
"MIT"
] | null | null | null |
#B
def average(As :list) -> float:
return float(sum(As)/len(As))
def main():
# input
As = list(map(int, input().split()))
# compute
# output
print(average(As))
if __name__ == '__main__':
main()
| 17.153846
| 40
| 0.565022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 36
| 0.161435
|
8338723c7e22b26ca6c647d1d2092f73e2a758fb
| 3,224
|
py
|
Python
|
tests/test_js.py
|
tinachou28/dataIO-project
|
cc8592edf5a2f03ba3cebcbc83b13764729ad839
|
[
"MIT"
] | 7
|
2016-04-23T03:33:42.000Z
|
2019-01-02T01:02:44.000Z
|
tests/test_js.py
|
tinachou28/dataIO-project
|
cc8592edf5a2f03ba3cebcbc83b13764729ad839
|
[
"MIT"
] | 2
|
2018-05-22T07:08:13.000Z
|
2019-05-14T19:39:16.000Z
|
tests/test_js.py
|
tinachou28/dataIO-project
|
cc8592edf5a2f03ba3cebcbc83b13764729ad839
|
[
"MIT"
] | 4
|
2017-08-19T16:05:34.000Z
|
2020-12-08T10:43:11.000Z
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import warnings
import pytest
from pytest import approx
import os
from os.path import join
from datetime import datetime
from dataIO import py23
from dataIO import js
from dataIO import textfile
path_json = os.path.abspath("test.json")
path_gz = os.path.abspath("test.json.gz")
data_simple = {
"int": 100,
"float": 3.1415926535,
"str": "string 字符串",
"boolean": True,
}
data_complex = {
"int": 100,
"float": 3.1415926535,
"str": "string 字符串",
"bytes": "bytes 比特串".encode("utf-8"),
"boolean": True,
"datetime": datetime.now(),
}
def test_is_json_file():
assert js.is_json_file("test.json") is True
assert js.is_json_file("test.JSON") is True
assert js.is_json_file("test.js") is True
assert js.is_json_file("test.JS") is True
assert js.is_json_file("test.json.tmp") is True
assert js.is_json_file("test.js.tmp") is True
assert js.is_json_file("test.gz") is False
assert js.is_json_file("test.GZ") is False
assert js.is_json_file("test.gz.tmp") is False
with pytest.raises(js.JsonExtError) as exc_info:
js.is_json_file("test.txt")
def test_prevent_overwrite(tmpdir):
"""Test whether file overwrite alert is working.
"""
textfile.write("hello", path_json)
js.dump([1, 2, 3], path_json)
os.remove(path_json)
def test_float_precision():
"""Test whether ``float_precision`` keywork is working.
"""
js.safe_dump({"value": 1.23456789}, path_json, indent_format=False,
float_precision=2, enable_verbose=False)
try:
assert js.load(path_json, enable_verbose=False)[
"value"] == approx(1.23)
except:
warnings.warn("float_precision argument is not working.")
os.remove(path_json)
def test_compress():
"""Test whether data compression is working.
"""
js.safe_dump({"value": 1}, path_gz, enable_verbose=False)
assert js.load(path_gz, enable_verbose=False) == {"value": 1}
os.remove(path_gz)
try:
from bson import json_util
def test_bytes_and_datetime():
js.safe_dump(data_complex, path_json, ensure_ascii=True, enable_verbose=False)
d = js.load(path_json, enable_verbose=False)
assert d["int"] == data_complex["int"]
assert d["float"] == data_complex["float"]
assert d["str"] == data_complex["str"]
assert d["boolean"] == data_complex["boolean"]
if py23.is_py3:
assert d["bytes"].decode("utf-8") == "bytes 比特串"
dt1 = d["datetime"]
dt2 = data_complex["datetime"]
assert dt1.date() == dt2.date()
assert dt1.hour == dt2.hour
assert dt1.minute == dt2.minute
assert dt1.second == dt2.second
assert abs(dt1.microsecond - dt2.microsecond) <= 1000
os.remove(path_json)
except:
pass
def test_pretty_dumps():
data = {"id": 1,
"path": r"C:\用户\麦克\\",
"create_time": datetime.now(),}
s = js.pretty_dumps(data)
assert r"C:\\用户\\麦克\\\\" in s
if __name__ == "__main__":
import os
pytest.main([os.path.basename(__file__), "--tb=native", "-s", ])
| 26.644628
| 86
| 0.638337
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 755
| 0.231311
|
8338c6c065505edebe32c2e1b457eb24e32e6163
| 34,731
|
py
|
Python
|
remerkleable/complex.py
|
hwwhww/remerkleable
|
b52dce6b0beae7fffbb826fb9945dca9c40504fd
|
[
"MIT"
] | 1
|
2020-07-22T14:51:20.000Z
|
2020-07-22T14:51:20.000Z
|
remerkleable/complex.py
|
hwwhww/remerkleable
|
b52dce6b0beae7fffbb826fb9945dca9c40504fd
|
[
"MIT"
] | null | null | null |
remerkleable/complex.py
|
hwwhww/remerkleable
|
b52dce6b0beae7fffbb826fb9945dca9c40504fd
|
[
"MIT"
] | null | null | null |
from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, \
runtime_checkable
from types import GeneratorType
from textwrap import indent
from collections.abc import Sequence as ColSequence
from itertools import chain
import io
from remerkleable.core import View, BasicView, OFFSET_BYTE_LENGTH, ViewHook, ObjType, ObjParseException
from remerkleable.basic import uint256, uint8, uint32
from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents,\
zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth
from remerkleable.subtree import SubtreeView
from remerkleable.readonly_iters import PackedIter, ComplexElemIter, ComplexFreshElemIter, ContainerElemIter
V = TypeVar('V', bound=View)
def decode_offset(stream: BinaryIO) -> uint32:
return cast(uint32, uint32.deserialize(stream, OFFSET_BYTE_LENGTH))
def encode_offset(stream: BinaryIO, offset: int):
return uint32(offset).serialize(stream)
class ComplexView(SubtreeView):
def encode_bytes(self) -> bytes:
stream = io.BytesIO()
self.serialize(stream)
stream.seek(0)
return stream.read()
@classmethod
def decode_bytes(cls: Type[V], bytez: bytes) -> V:
stream = io.BytesIO()
stream.write(bytez)
stream.seek(0)
return cls.deserialize(stream, len(bytez))
M = TypeVar('M', bound="MonoSubtreeView")
class MonoSubtreeView(ColSequence, ComplexView):
def length(self) -> int:
raise NotImplementedError
@classmethod
def coerce_view(cls: Type[M], v: Any) -> M:
return cls(*v)
@classmethod
def element_cls(cls) -> Type[View]:
raise NotImplementedError
@classmethod
def item_elem_cls(cls, i: int) -> Type[View]:
return cls.element_cls()
@classmethod
def to_chunk_length(cls, elems_length: int) -> int:
if cls.is_packed():
elem_type: Type[View] = cls.element_cls()
if issubclass(elem_type, BasicView):
elems_per_chunk = 32 // elem_type.type_byte_length()
return (elems_length + elems_per_chunk - 1) // elems_per_chunk
else:
raise Exception("cannot append a packed element that is not a basic type")
else:
return elems_length
@classmethod
def views_into_chunks(cls, views: PyList[View]) -> PyList[Node]:
if cls.is_packed():
elem_type: Type[View] = cls.element_cls()
if issubclass(elem_type, BasicView):
# cast the list as a whole, checking each element takes too long.
return elem_type.pack_views(cast(PyList[BasicView], views))
else:
raise Exception("cannot append a packed element that is not a basic type")
else:
return [v.get_backing() for v in views]
@classmethod
def is_valid_count(cls, count: int) -> bool:
raise NotImplementedError
def __iter__(self):
return iter(self.get(i) for i in range(self.length()))
def readonly_iter(self):
tree_depth = self.tree_depth()
length = self.length()
backing = self.get_backing()
elem_type: Type[View] = self.element_cls()
if self.is_packed():
return PackedIter(backing, tree_depth, length, cast(Type[BasicView], elem_type))
else:
if issubclass(elem_type, bytes): # is the element type the raw-bytes? Then not re-use views.
return ComplexFreshElemIter(backing, tree_depth, length, cast(Type[View], elem_type))
else:
return ComplexElemIter(backing, tree_depth, length, elem_type)
@classmethod
def deserialize(cls: Type[M], stream: BinaryIO, scope: int) -> M:
elem_cls = cls.element_cls()
if elem_cls.is_fixed_byte_length():
elem_byte_length = elem_cls.type_byte_length()
if scope % elem_byte_length != 0:
raise Exception(f"scope {scope} does not match element byte length {elem_byte_length} multiple")
count = scope // elem_byte_length
if not cls.is_valid_count(count):
raise Exception(f"count {count} is invalid")
return cls(elem_cls.deserialize(stream, elem_byte_length) for _ in range(count)) # type: ignore
else:
if scope == 0:
if not cls.is_valid_count(0):
raise Exception("scope cannot be 0, count must not be 0")
return cls()
first_offset = decode_offset(stream)
if first_offset > scope:
raise Exception(f"first offset is too big: {first_offset}, scope: {scope}")
if first_offset % OFFSET_BYTE_LENGTH != 0:
raise Exception(f"first offset {first_offset} is not a multiple of offset length {OFFSET_BYTE_LENGTH}")
count = first_offset // OFFSET_BYTE_LENGTH
if not cls.is_valid_count(count):
raise Exception(f"count {count} is invalid")
# count - 1: we already have the first offset
offsets = [first_offset] + [decode_offset(stream) for _ in range(count - 1)] + [uint32(scope)]
elem_min, elem_max = elem_cls.min_byte_length(), elem_cls.max_byte_length()
elems = []
for i in range(count):
start, end = offsets[i], offsets[i+1]
if end < start:
raise Exception(f"offsets[{i}] value {start} is invalid, next offset is {end}")
elem_size = end - start
if not (elem_min <= elem_size <= elem_max):
raise Exception(f"offset[{i}] value {start} is invalid, next offset is {end},"
f" implied size is {elem_size}, size bounds: [{elem_min}, {elem_max}]")
elems.append(elem_cls.deserialize(stream, elem_size))
return cls(*elems) # type: ignore
def serialize(self, stream: BinaryIO) -> int:
elem_cls = self.__class__.element_cls()
if issubclass(elem_cls, uint8):
out = bytes(iter(self))
stream.write(out)
return len(out)
if elem_cls.is_fixed_byte_length():
for v in self.readonly_iter():
v.serialize(stream)
return elem_cls.type_byte_length() * self.length()
else:
temp_dyn_stream = io.BytesIO()
offset = OFFSET_BYTE_LENGTH * self.length() # the offsets are part of the fixed-size-bytes prologue
for v in self:
encode_offset(stream, offset)
offset += cast(View, v).serialize(temp_dyn_stream)
temp_dyn_stream.seek(0)
stream.write(temp_dyn_stream.read(offset))
return offset
@classmethod
def from_obj(cls: Type[M], obj: ObjType) -> M:
if not isinstance(obj, (list, tuple)):
raise ObjParseException(f"obj '{obj}' is not a list or tuple")
elem_cls = cls.element_cls()
return cls(elem_cls.from_obj(el) for el in obj) # type: ignore
@classmethod
def navigate_type(cls, key: Any) -> Type[View]:
if key < 0:
raise KeyError
return cls.element_cls()
@classmethod
def key_to_static_gindex(cls, key: Any) -> Gindex:
if key < 0:
raise KeyError
if cls.is_packed():
elems_per_chunk = 32 // cls.element_cls().type_byte_length()
chunk_i = key // elems_per_chunk
else:
chunk_i = key
return to_gindex(chunk_i, cls.tree_depth())
def navigate_view(self, key: Any) -> View:
return self.__getitem__(key)
def __len__(self):
return self.length()
def __add__(self, other):
if issubclass(self.element_cls(), uint8):
return bytes(self) + bytes(other)
else:
return list(chain(self, other))
def __getitem__(self, k):
if isinstance(k, slice):
start = 0 if k.start is None else k.start
end = self.length() if k.stop is None else k.stop
return [self.get(i) for i in range(start, end)]
else:
return self.get(k)
def __setitem__(self, k, v):
if type(k) == slice:
i = 0 if k.start is None else k.start
end = self.length() if k.stop is None else k.stop
for item in v:
self.set(i, item)
i += 1
if i != end:
raise Exception("failed to do full slice-set, not enough values")
else:
self.set(k, v)
def _repr_sequence(self):
length: int
try:
length = self.length()
except NavigationError:
return f"{self.type_repr()}( *summary root, no length known* )"
vals: Dict[int, View] = {}
partial = False
for i in range(length):
try:
vals[i] = self.get(i)
except NavigationError:
partial = True
continue
basic_elems = isinstance(self.element_cls(), BasicView)
shortened = length > (64 if basic_elems else 8)
summary_length = (10 if basic_elems else 3)
        separator = ', ' if basic_elems else ',\n'
        contents = separator.join(f"... {length - (summary_length * 2)} omitted ..."
if (shortened and i == summary_length)
else (f"{i}: {repr(v)}" if partial else repr(v))
for i, v in vals.items()
if (not shortened) or i <= summary_length or i >= length - summary_length)
if '\n' in contents:
contents = '\n' + indent(contents, ' ') + '\n'
if partial:
return f"{self.type_repr()}~partial~<<len={length}>>({contents})"
else:
return f"{self.type_repr()}<<len={length}>>({contents})"
class List(MonoSubtreeView):
def __new__(cls, *args, backing: Optional[Node] = None, hook: Optional[ViewHook] = None, **kwargs):
if backing is not None:
if len(args) != 0:
raise Exception("cannot have both a backing and elements to init List")
return super().__new__(cls, backing=backing, hook=hook, **kwargs)
elem_cls = cls.element_cls()
vals = list(args)
if len(vals) == 1:
val = vals[0]
if isinstance(val, (GeneratorType, list, tuple)):
vals = list(val)
if issubclass(elem_cls, uint8):
if isinstance(val, bytes):
vals = list(val)
if isinstance(val, str):
if val[:2] == '0x':
val = val[2:]
vals = list(bytes.fromhex(val))
if len(vals) > 0:
limit = cls.limit()
if len(vals) > limit:
raise Exception(f"too many list inputs: {len(vals)}, limit is: {limit}")
input_views = []
for el in vals:
if isinstance(el, View):
input_views.append(el)
else:
input_views.append(elem_cls.coerce_view(el))
input_nodes = cls.views_into_chunks(input_views)
contents = subtree_fill_to_contents(input_nodes, cls.contents_depth())
backing = PairNode(contents, uint256(len(input_views)).get_backing())
return super().__new__(cls, backing=backing, hook=hook, **kwargs)
def __class_getitem__(cls, params) -> Type["List"]:
(element_type, limit) = params
contents_depth = 0
packed = False
if isinstance(element_type, BasicView):
elems_per_chunk = 32 // element_type.type_byte_length()
contents_depth = get_depth((limit + elems_per_chunk - 1) // elems_per_chunk)
packed = True
else:
contents_depth = get_depth(limit)
class SpecialListView(List):
@classmethod
def is_packed(cls) -> bool:
return packed
@classmethod
def contents_depth(cls) -> int:
return contents_depth
@classmethod
def element_cls(cls) -> Type[View]:
return element_type
@classmethod
def limit(cls) -> int:
return limit
SpecialListView.__name__ = SpecialListView.type_repr()
return SpecialListView
def length(self) -> int:
ll_node = super().get_backing().get_right()
ll = cast(uint256, uint256.view_from_backing(node=ll_node, hook=None))
return int(ll)
def value_byte_length(self) -> int:
elem_cls = self.__class__.element_cls()
if elem_cls.is_fixed_byte_length():
return elem_cls.type_byte_length() * self.length()
else:
return sum(OFFSET_BYTE_LENGTH + cast(View, el).value_byte_length() for el in iter(self))
def append(self, v: View):
ll = self.length()
if ll >= self.__class__.limit():
raise Exception("list is maximum capacity, cannot append")
i = ll
elem_type: Type[View] = self.__class__.element_cls()
if not isinstance(v, elem_type):
v = elem_type.coerce_view(v)
target: Gindex
if self.__class__.is_packed():
next_backing = self.get_backing()
if isinstance(v, BasicView):
elems_per_chunk = 32 // elem_type.type_byte_length()
chunk_i = i // elems_per_chunk
target = to_gindex(chunk_i, self.__class__.tree_depth())
chunk: Node
if i % elems_per_chunk == 0:
set_last = next_backing.setter(target, expand=True)
chunk = zero_node(0)
else:
set_last = next_backing.setter(target)
chunk = next_backing.getter(target)
chunk = v.backing_from_base(chunk, i % elems_per_chunk)
next_backing = set_last(chunk)
else:
raise Exception("cannot append a packed element that is not a basic type")
else:
target = to_gindex(i, self.__class__.tree_depth())
set_last = self.get_backing().setter(target, expand=True)
next_backing = set_last(v.get_backing())
set_length = next_backing.rebind_right
new_length = uint256(ll + 1).get_backing()
next_backing = set_length(new_length)
self.set_backing(next_backing)
def pop(self):
ll = self.length()
if ll == 0:
raise Exception("list is empty, cannot pop")
i = ll - 1
target: Gindex
can_summarize: bool
if self.__class__.is_packed():
next_backing = self.get_backing()
elem_type: Type[View] = self.__class__.element_cls()
if issubclass(elem_type, BasicView):
elems_per_chunk = 32 // elem_type.type_byte_length()
chunk_i = i // elems_per_chunk
target = to_gindex(chunk_i, self.__class__.tree_depth())
if i % elems_per_chunk == 0:
chunk = zero_node(0)
else:
chunk = next_backing.getter(target)
set_last = next_backing.setter(target)
chunk = elem_type.default(None).backing_from_base(chunk, i % elems_per_chunk)
next_backing = set_last(chunk)
can_summarize = (target & 1) == 0 and i % elems_per_chunk == 0
else:
raise Exception("cannot pop a packed element that is not a basic type")
else:
target = to_gindex(i, self.__class__.tree_depth())
set_last = self.get_backing().setter(target)
next_backing = set_last(zero_node(0))
can_summarize = (target & 1) == 0
# if possible, summarize
if can_summarize:
# summarize to the highest node possible.
# I.e. the resulting target must be a right-hand, unless it's the only content node.
while (target & 1) == 0 and target != 0b10:
target >>= 1
summary_fn = next_backing.summarize_into(target)
next_backing = summary_fn()
set_length = next_backing.rebind_right
new_length = uint256(ll - 1).get_backing()
next_backing = set_length(new_length)
self.set_backing(next_backing)
def get(self, i: int) -> View:
if i < 0 or i >= self.length():
raise IndexError
return super().get(i)
def set(self, i: int, v: View) -> None:
if i < 0 or i >= self.length():
raise IndexError
super().set(i, v)
def __repr__(self):
return self._repr_sequence()
@classmethod
def type_repr(cls) -> str:
return f"List[{cls.element_cls().__name__}, {cls.limit()}]"
@classmethod
def is_packed(cls) -> bool:
raise NotImplementedError
@classmethod
def contents_depth(cls) -> int:
raise NotImplementedError
@classmethod
def tree_depth(cls) -> int:
return cls.contents_depth() + 1 # 1 extra for length mix-in
@classmethod
def item_elem_cls(cls, i: int) -> Type[View]:
return cls.element_cls()
@classmethod
def limit(cls) -> int:
raise NotImplementedError
@classmethod
def is_valid_count(cls, count: int) -> bool:
return 0 <= count <= cls.limit()
@classmethod
def navigate_type(cls, key: Any) -> Type[View]:
if key >= cls.limit():
raise KeyError
return super().navigate_type(key)
@classmethod
def key_to_static_gindex(cls, key: Any) -> Gindex:
if key >= cls.limit():
raise KeyError
return super().key_to_static_gindex(key)
@classmethod
def default_node(cls) -> Node:
return PairNode(zero_node(cls.contents_depth()), zero_node(0)) # mix-in 0 as list length
@classmethod
def is_fixed_byte_length(cls) -> bool:
return False
@classmethod
def min_byte_length(cls) -> int:
return 0
@classmethod
def max_byte_length(cls) -> int:
elem_cls = cls.element_cls()
bytes_per_elem = elem_cls.max_byte_length()
if not elem_cls.is_fixed_byte_length():
bytes_per_elem += OFFSET_BYTE_LENGTH
return bytes_per_elem * cls.limit()
def to_obj(self) -> ObjType:
return list(el.to_obj() for el in self.readonly_iter())
class Vector(MonoSubtreeView):
def __new__(cls, *args, backing: Optional[Node] = None, hook: Optional[ViewHook] = None, **kwargs):
if backing is not None:
if len(args) != 0:
raise Exception("cannot have both a backing and elements to init Vector")
return super().__new__(cls, backing=backing, hook=hook, **kwargs)
elem_cls = cls.element_cls()
vals = list(args)
if len(vals) == 1:
val = vals[0]
if isinstance(val, (GeneratorType, list, tuple)):
vals = list(val)
if issubclass(elem_cls, uint8):
if isinstance(val, bytes):
vals = list(val)
if isinstance(val, str):
if val[:2] == '0x':
val = val[2:]
vals = list(bytes.fromhex(val))
if len(vals) > 0:
vector_length = cls.vector_length()
if len(vals) != vector_length:
raise Exception(f"invalid inputs length: {len(vals)}, vector length is: {vector_length}")
input_views = []
for el in vals:
if isinstance(el, View):
input_views.append(el)
else:
input_views.append(elem_cls.coerce_view(el))
input_nodes = cls.views_into_chunks(input_views)
backing = subtree_fill_to_contents(input_nodes, cls.tree_depth())
return super().__new__(cls, backing=backing, hook=hook, **kwargs)
def __class_getitem__(cls, params) -> Type["Vector"]:
(element_view_cls, length) = params
if length <= 0:
raise Exception(f"Invalid vector length: {length}")
tree_depth = 0
packed = False
if isinstance(element_view_cls, BasicView):
elems_per_chunk = 32 // element_view_cls.type_byte_length()
tree_depth = get_depth((length + elems_per_chunk - 1) // elems_per_chunk)
packed = True
else:
tree_depth = get_depth(length)
class SpecialVectorView(Vector):
@classmethod
def is_packed(cls) -> bool:
return packed
@classmethod
def tree_depth(cls) -> int:
return tree_depth
@classmethod
def element_cls(cls) -> Type[View]:
return element_view_cls
@classmethod
def vector_length(cls) -> int:
return length
out_typ = SpecialVectorView
# for fixed-size vectors, pre-compute the size.
if element_view_cls.is_fixed_byte_length():
byte_length = element_view_cls.type_byte_length() * length
class FixedSpecialVectorView(SpecialVectorView):
@classmethod
def type_byte_length(cls) -> int:
return byte_length
@classmethod
def min_byte_length(cls) -> int:
return byte_length
@classmethod
def max_byte_length(cls) -> int:
return byte_length
out_typ = FixedSpecialVectorView
out_typ.__name__ = out_typ.type_repr()
return out_typ
def get(self, i: int) -> View:
if i < 0 or i >= self.__class__.vector_length():
raise IndexError
return super().get(i)
def set(self, i: int, v: View) -> None:
if i < 0 or i >= self.__class__.vector_length():
raise IndexError
super().set(i, v)
def length(self) -> int:
return self.__class__.vector_length()
def value_byte_length(self) -> int:
if self.__class__.is_fixed_byte_length():
return self.__class__.type_byte_length()
else:
return sum(OFFSET_BYTE_LENGTH + cast(View, el).value_byte_length() for el in iter(self))
def __repr__(self):
return self._repr_sequence()
@classmethod
def type_repr(cls) -> str:
return f"Vector[{cls.element_cls().__name__}, {cls.vector_length()}]"
@classmethod
def vector_length(cls) -> int:
raise NotImplementedError
@classmethod
def is_valid_count(cls, count: int) -> bool:
return count == cls.vector_length()
@classmethod
def navigate_type(cls, key: Any) -> Type[View]:
if key >= cls.vector_length():
raise KeyError
return super().navigate_type(key)
@classmethod
def key_to_static_gindex(cls, key: Any) -> Gindex:
if key >= cls.vector_length():
raise KeyError
return super().key_to_static_gindex(key)
@classmethod
def default_node(cls) -> Node:
elem_type: Type[View] = cls.element_cls()
length = cls.to_chunk_length(cls.vector_length())
elem: Node
if cls.is_packed():
elem = zero_node(0)
else:
elem = elem_type.default_node()
return subtree_fill_to_length(elem, cls.tree_depth(), length)
@classmethod
def is_fixed_byte_length(cls) -> bool:
return cls.element_cls().is_fixed_byte_length() # only if the element type is fixed byte length.
@classmethod
def min_byte_length(cls) -> int:
elem_cls = cls.element_cls()
bytes_per_elem = elem_cls.min_byte_length()
if not elem_cls.is_fixed_byte_length():
bytes_per_elem += OFFSET_BYTE_LENGTH
return bytes_per_elem * cls.vector_length()
@classmethod
def max_byte_length(cls) -> int:
elem_cls = cls.element_cls()
bytes_per_elem = elem_cls.max_byte_length()
if not elem_cls.is_fixed_byte_length():
bytes_per_elem += OFFSET_BYTE_LENGTH
return bytes_per_elem * cls.vector_length()
def to_obj(self) -> ObjType:
return tuple(el.to_obj() for el in self.readonly_iter())
Fields = Dict[str, Type[View]]
class FieldOffset(NamedTuple):
key: str
typ: Type[View]
offset: int
@runtime_checkable
class _ContainerLike(Protocol):
@classmethod
def fields(cls) -> Fields:
...
CV = TypeVar('CV', bound="Container")
class Container(ComplexView):
# Container types should declare fields through class annotations.
# If none are specified, it will fall back on this (to avoid annotations of super classes),
# and error on construction, since empty container types are invalid.
_empty_annotations: bool
_field_indices: Dict[str, int]
def __new__(cls, *args, backing: Optional[Node] = None, hook: Optional[ViewHook] = None, **kwargs):
if backing is not None:
if len(args) != 0:
raise Exception("cannot have both a backing and elements to init List")
return super().__new__(cls, backing=backing, hook=hook, **kwargs)
input_nodes = []
for fkey, ftyp in cls.fields().items():
fnode: Node
if fkey in kwargs:
finput = kwargs.pop(fkey)
if isinstance(finput, View):
fnode = finput.get_backing()
else:
fnode = ftyp.coerce_view(finput).get_backing()
else:
fnode = ftyp.default_node()
input_nodes.append(fnode)
# check if any keys are remaining to catch unrecognized keys
if len(kwargs) > 0:
raise AttributeError(f'The field names [{"".join(kwargs.keys())}] are not defined in {cls}')
backing = subtree_fill_to_contents(input_nodes, cls.tree_depth())
out = super().__new__(cls, backing=backing, hook=hook)
return out
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
cls._field_indices = {fkey: i for i, fkey in enumerate(cls.__annotations__.keys()) if fkey[0] != '_'}
if len(cls._field_indices) == 0:
raise Exception(f"Container {cls.__name__} must have at least one field!")
@classmethod
def coerce_view(cls: Type[CV], v: Any) -> CV:
return cls(**{fkey: getattr(v, fkey) for fkey in cls.fields().keys()})
@classmethod
def fields(cls) -> Fields:
return cls.__annotations__
@classmethod
def is_fixed_byte_length(cls) -> bool:
return all(f.is_fixed_byte_length() for f in cls.fields().values())
@classmethod
def type_byte_length(cls) -> int:
if cls.is_fixed_byte_length():
return cls.min_byte_length()
else:
raise Exception("dynamic length container does not have a fixed byte length")
@classmethod
def min_byte_length(cls) -> int:
total = 0
for ftyp in cls.fields().values():
if not ftyp.is_fixed_byte_length():
total += OFFSET_BYTE_LENGTH
total += ftyp.min_byte_length()
return total
@classmethod
def max_byte_length(cls) -> int:
total = 0
for ftyp in cls.fields().values():
if not ftyp.is_fixed_byte_length():
total += OFFSET_BYTE_LENGTH
total += ftyp.max_byte_length()
return total
@classmethod
def is_packed(cls) -> bool:
return False
@classmethod
def tree_depth(cls) -> int:
return get_depth(len(cls.fields()))
@classmethod
def item_elem_cls(cls, i: int) -> Type[View]:
return list(cls.fields().values())[i]
@classmethod
def default_node(cls) -> Node:
return subtree_fill_to_contents([field.default_node() for field in cls.fields().values()], cls.tree_depth())
def value_byte_length(self) -> int:
if self.__class__.is_fixed_byte_length():
return self.__class__.type_byte_length()
else:
total = 0
fields = self.fields()
for fkey, ftyp in fields.items():
if ftyp.is_fixed_byte_length():
total += ftyp.type_byte_length()
else:
total += OFFSET_BYTE_LENGTH
total += cast(View, getattr(self, fkey)).value_byte_length()
return total
def __getattr__(self, item):
if item[0] == '_':
return super().__getattribute__(item)
else:
try:
i = self.__class__._field_indices[item]
except KeyError:
raise AttributeError(f"unknown attribute {item}")
return super().get(i)
def __setattr__(self, key, value):
if key[0] == '_':
super().__setattr__(key, value)
else:
try:
i = self.__class__._field_indices[key]
except KeyError:
raise AttributeError(f"unknown attribute {key}")
super().set(i, value)
def _get_field_val_repr(self, fkey: str, ftype: Type[View]) -> str:
field_start = ' ' + fkey + ': ' + ftype.__name__ + ' = '
try:
field_repr = repr(getattr(self, fkey))
if '\n' in field_repr: # if multiline, indent it, but starting from the value.
i = field_repr.index('\n')
field_repr = field_repr[:i+1] + indent(field_repr[i+1:], ' ' * len(field_start))
return field_start + field_repr
except NavigationError:
return f"{field_start} *omitted from partial*"
def __repr__(self):
return f"{self.__class__.__name__}(Container)\n" + '\n'.join(
indent(self._get_field_val_repr(fkey, ftype), ' ')
for fkey, ftype in self.__class__.fields().items())
@classmethod
def type_repr(cls) -> str:
return f"{cls.__name__}(Container)\n" + '\n'.join(
(' ' + fkey + ': ' + ftype.__name__) for fkey, ftype in cls.fields().items())
def __iter__(self):
tree_depth = self.tree_depth()
backing = self.get_backing()
return ContainerElemIter(backing, tree_depth, list(self.__class__.fields().values()))
@classmethod
def decode_bytes(cls: Type[V], bytez: bytes) -> V:
stream = io.BytesIO()
stream.write(bytez)
stream.seek(0)
return cls.deserialize(stream, len(bytez))
@classmethod
def deserialize(cls: Type[CV], stream: BinaryIO, scope: int) -> CV:
fields = cls.fields()
field_values: Dict[str, View]
if cls.is_fixed_byte_length():
field_values = {fkey: ftyp.deserialize(stream, ftyp.type_byte_length()) for fkey, ftyp in fields.items()}
else:
field_values = {}
dyn_fields: PyList[FieldOffset] = []
fixed_size = 0
for fkey, ftyp in fields.items():
if ftyp.is_fixed_byte_length():
fsize = ftyp.type_byte_length()
field_values[fkey] = ftyp.deserialize(stream, fsize)
fixed_size += fsize
else:
dyn_fields.append(FieldOffset(key=fkey, typ=ftyp, offset=int(decode_offset(stream))))
fixed_size += OFFSET_BYTE_LENGTH
if len(dyn_fields) > 0:
if dyn_fields[0].offset < fixed_size:
raise Exception(f"first offset is smaller than expected fixed size")
for i, (fkey, ftyp, foffset) in enumerate(dyn_fields):
next_offset = dyn_fields[i + 1].offset if i + 1 < len(dyn_fields) else scope
if foffset > next_offset:
raise Exception(f"offset {i} is invalid: {foffset} larger than next offset {next_offset}")
fsize = next_offset - foffset
f_min_size, f_max_size = ftyp.min_byte_length(), ftyp.max_byte_length()
if not (f_min_size <= fsize <= f_max_size):
raise Exception(f"offset {i} is invalid, size out of bounds: {foffset}, next {next_offset},"
f" implied size: {fsize}, size bounds: [{f_min_size}, {f_max_size}]")
field_values[fkey] = ftyp.deserialize(stream, fsize)
return cls(**field_values) # type: ignore
def serialize(self, stream: BinaryIO) -> int:
fields = self.__class__.fields()
is_fixed_size = self.is_fixed_byte_length()
temp_dyn_stream: BinaryIO
written = sum(map((lambda x: x.type_byte_length() if x.is_fixed_byte_length() else OFFSET_BYTE_LENGTH),
fields.values()))
if not is_fixed_size:
temp_dyn_stream = io.BytesIO()
for fkey, ftyp in fields.items():
v: View = getattr(self, fkey)
if ftyp.is_fixed_byte_length():
v.serialize(stream)
else:
encode_offset(stream, written)
written += v.serialize(temp_dyn_stream) # type: ignore
if not is_fixed_size:
temp_dyn_stream.seek(0)
stream.write(temp_dyn_stream.read(written))
return written
@classmethod
def from_obj(cls: Type[CV], obj: ObjType) -> CV:
if not isinstance(obj, dict):
raise ObjParseException(f"obj '{obj}' is not a dict")
fields = cls.fields()
for k in obj.keys():
if k not in fields:
raise ObjParseException(f"obj '{obj}' has unknown key {k}")
return cls(**{k: fields[k].from_obj(v) for k, v in obj.items()}) # type: ignore
def to_obj(self) -> ObjType:
return {f_k: f_v.to_obj() for f_k, f_v in zip(self.__class__.fields().keys(), self.__iter__())}
@classmethod
def key_to_static_gindex(cls, key: Any) -> Gindex:
fields = cls.fields()
try:
field_index = list(fields.keys()).index(key)
except ValueError: # list.index raises ValueError if the element (a key here) is missing
raise KeyError
return to_gindex(field_index, cls.tree_depth())
@classmethod
def navigate_type(cls, key: Any) -> Type[View]:
return cls.fields()[key]
def navigate_view(self, key: Any) -> View:
return self.__getattr__(key)
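A minimal usage sketch for the views defined above (illustrative only, not part of the original module; the Example type and its field names are invented, uint8/uint32 are reused from the imports at the top of this file, and hash_tree_root() is assumed from the View base class in remerkleable.core):
class Example(Container):
    count: uint32
    payload: List[uint8, 32]
ex = Example(count=3, payload=[1, 2, 3])  # plain Python values are coerced to their view types
data = ex.encode_bytes()                  # SSZ serialization via Container.serialize()
root = ex.hash_tree_root()                # 32-byte Merkle root of the backing tree (assumed View API)
assert Example.decode_bytes(data).encode_bytes() == data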
| 37.792165
| 119
| 0.582477
| 33,574
| 0.966687
| 0
| 0
| 13,837
| 0.398405
| 0
| 0
| 3,390
| 0.097607
|
83399c09776772609094ffc2ac08102d789dfc9b
| 21,383
|
py
|
Python
|
cave/com.raytheon.viz.gfe/python/autotest/RoutineLevel4_1_TestScript.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
cave/com.raytheon.viz.gfe/python/autotest/RoutineLevel4_1_TestScript.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
cave/com.raytheon.viz.gfe/python/autotest/RoutineLevel4_1_TestScript.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | 1
|
2021-10-30T00:03:05.000Z
|
2021-10-30T00:03:05.000Z
|
# #
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
# #
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# RoutineLevel4_1_TestScript Local Effects
#
# Author:
# ----------------------------------------------------------------------------
# First run setupTextEA
windLE1 = """Definition["windLE_list"] = 1"""
windLE2 = """Definition["windLE_list"] = 2"""
tempLE1 = """Definition["tempLE_list"] = 1"""
tempLE2 = """Definition["tempLE_list"] = 2"""
periodLE1 = """Definition["Period_1_version"] = 1"""
periodLE2 = """Definition["Period_1_version"] = 2"""
periodLE3 = """Definition["Period_1_version"] = 3"""
tempLE_method1 = """Definition["tempLE_method"] = 1"""
tempLE_method2 = """Definition["tempLE_method"] = 2"""
snowLE1 = """## (self.weather_phrase,self._wxLocalEffects_list()),
## (self.snow_phrase,self._snowAmtLocalEffects_list()),
## (self.total_snow_phrase,self._totalSnowAmtLocalEffects_list()),
"""
snowLE2 = """ (self.weather_phrase,self._wxLocalEffects_list()),
(self.snow_phrase,self._snowAmtLocalEffects_list()),
(self.total_snow_phrase,self._totalSnowAmtLocalEffects_list()),
"""
snow2LE1 = """## ("Period_2_3", 12), """
snow2LE2 = """ ("Period_2_3", 12), """
# Runs LE_Test_Local for each test
scripts = [
{
"name": "LE1",
"commentary": "Local Effects: MaxT (21,40), Wind (N30,N10), Gust 0",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 21, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Highs around 40, except in the lower 20s in the mountains",
"North winds around 10 mph, except north around 35 mph in the mountains",
],
},
{
"name": "LE2",
"commentary": "Local Effects: Wind (N20,N10) -> (N30,N20), Gust 0",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (20, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"North winds around 10 mph increasing to around 25 mph in the afternoon",
"In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon",
],
},
{
"name": "LE3",
"commentary": "Local Effects: Wind (N20,0), Gust 0",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 12, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (0, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Light winds, except north around 25 mph in the mountains",
],
},
{
"name": "LE4",
"commentary": "Local Effects: Wind (N20,0) -> (N30,0), Gust 0",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (0, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (0, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Light winds",
"In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon",
],
},
{
"name": "LE5",
"commentary": "Local Effects: Wind (N20,N10), Gust 0, windLE_list=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 21, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"North winds around 25 mph in the mountains, otherwise north around 10 mph",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (windLE1, windLE2), "undo")
],
},
{
"name": "LE6",
"commentary": "Local Effects: Wind (N20,N10) -> (N30,N20), Gust 0, windLE_list=1",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (20, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon",
"In the valleys, north winds around 10 mph increasing to around 25 mph in the afternoon",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (windLE1, windLE2), "undo")
],
},
{
"name": "LE7",
"commentary": "Local Effects: Temp (21, 40), Wind (N20,N10), Gust 0, tempLE_list=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 21, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Highs around 40, except in the lower 20s in the mountains",
"North winds around 10 mph, except north around 25 mph in the mountains",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (tempLE1, tempLE2), "undo")
],
},
{
"name": "LE8",
"commentary": "Local Effects: MaxT (20,20,20), Period_1_version=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area2"]),
],
"checkStrings": [
"Highs around 20",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE2), "undo")
],
},
{
"name": "LE9",
"commentary": "Local Effects: MaxT (20,20,40), Period_1_version=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20, except around 40 in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE2), "undo")
],
},
{
"name": "LE10",
"commentary": "Local Effects: MaxT (20,30,40), Period_1_version=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 30, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20, except around 30 in the rush valley",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE2), "undo")
],
},
{
"name": "LE11",
"commentary": "Local Effects: MaxT (20,30,40), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 30, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city, and around 30 in the rush valley, and around 40 in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo"),
],
},
{
"name": "LE12",
"commentary": "Local Effects: MaxT (20,40,20), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city and in the benches, and around 40 in the rush valley",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo")
],
},
{
"name": "LE13",
"commentary": "Local Effects: MaxT (20,40,40), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city, and around 40 in the rush valley and in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo"),
],
},
{
"name": "LE14",
"commentary": "Local Effects: MaxT (20,20,40), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city and in the rush valley, and around 40 in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo"),
],
},
{
"name": "LE15",
"commentary": "Local Effects: SnowAmt",
"createGrids": [
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Lkly:S:-:<NoVis>:", "all"),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 3, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 3, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 3, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 5, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 5, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 5, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["BelowElev"]),
],
"checkStrings": [
".TODAY...", "Snow accumulation around 3 inches",
".TONIGHT...", "Snow accumulation around 5 inches",
"...", "Snow accumulation around 1 inch",
"...", "No snow accumulation",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(snowLE1, snowLE2), (snow2LE1, snow2LE2)], "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE16",
"commentary": "Local Effects: SnowAmt",
"createGrids": [
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Lkly:S:-:<NoVis>:", "all"),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 5, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 2, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 4, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 1, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 3, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["BelowElev"]),
],
"checkStrings": [
".TODAY...", "Snow accumulation around 2 inches, except around 5 inches above timberline",
".TONIGHT...", "Snow accumulation around 1 inch, except around 4 inches above timberline",
"...", "Snow accumulation of 1 to 3 inches",
"Total snow accumulation around 4 inches, except around 12 inches above timberline",
"...", "No snow accumulation",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(snowLE1, snowLE2), (snow2LE1, snow2LE2)], "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE17", # Wade and Ballard
"commentary": "Local Effects: Wind (N20,N10) -> (N30,N10)",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"North winds around 10 mph. In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon.",
],
},
{
"name": "LE18", # Wade and Ballard
"commentary": "Local Effects: Wind (N10,N20) -> (N10,N30)",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (10, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
# "North winds around 25 mph increasing to around 35 mph in the afternoon. North winds around 10 mph in the mountains.",
"North winds around 25 mph increasing to around 35 mph in the afternoon. In the mountains, north winds around 10 mph.",
],
},
{
"name": "LE19",
"commentary": "Local Effects for non-intersecting areas -- CASE 3 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "NoWx", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the rush valley, patchy fog in the rush valley, a 50 percent chance of snow showers in the benches, patchy fog in the benches.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE20",
"commentary": "Local Effects for non-intersecting areas -- CASE 3 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 12, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 12, "NoWx", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:RW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:SW:-:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"In the rush valley, chance of thunderstorms in the morning, then chance of showers in the afternoon.",
"In the benches, chance of thunderstorms in the morning, then chance of snow showers in the afternoon.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE21",
"commentary": "Local Effects for non-intersecting areas -- CASE 3 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 12, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 12, "Chc:T:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:RW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:SW:-:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"In the city, a 50 percent chance of thunderstorms.",
"In the rush valley, chance of thunderstorms in the morning, then chance of showers in the afternoon.",
"In the benches, chance of thunderstorms in the morning, then chance of snow showers in the afternoon.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE22",
"commentary": "Local Effects for non-intersecting areas -- CASE 2 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Patchy:F:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the rush valley, a 50 percent chance of snow showers in the benches, chance of showers in the rush valley, chance of snow showers in the benches.",
"Patchy fog.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE23",
"commentary": "Local Effects for non-intersecting areas",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "NoWx", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the rush valley, a 50 percent chance of snow showers in the benches.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE24",
"commentary": "Local Effects for non-intersecting areas -- no consolidation necessary",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the city and in the rush valley, a 50 percent chance of snow showers in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
]
import CreateGrids
import TestScript
def testScript(self, dataMgr):
defaults = {
"cmdLineVars" :"{('Product Issuance', 'productIssuance'): 'Morning', ('Issuance Type', 'issuanceType'): 'ROUTINE', ('Issued By', 'issuedBy'): None}",
"deleteGrids": CreateGrids.Delete_grids,
"productType": "LE_Test_Local",
}
return TestScript.generalTestScript(self, dataMgr, scripts, defaults)
| 41.520388
| 189
| 0.533087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14,055
| 0.657298
|
8339dd90862b3868393e86e2c87682f87414e27c
| 12,569
|
py
|
Python
|
AutoPano/Phase2/Code/Test_files/TrainUnsup.py
|
akathpal/ComputerVision-CMSC733
|
f5fa21a0ada8ab8ea08a6c558f6df9676570a2df
|
[
"MIT"
] | 1
|
2019-09-26T02:06:17.000Z
|
2019-09-26T02:06:17.000Z
|
AutoPano/Phase2/Code/Test_files/TrainUnsup.py
|
akathpal/UMD-CMSC733-ComputerVision
|
f5fa21a0ada8ab8ea08a6c558f6df9676570a2df
|
[
"MIT"
] | null | null | null |
AutoPano/Phase2/Code/Test_files/TrainUnsup.py
|
akathpal/UMD-CMSC733-ComputerVision
|
f5fa21a0ada8ab8ea08a6c558f6df9676570a2df
|
[
"MIT"
] | 1
|
2022-03-30T05:03:09.000Z
|
2022-03-30T05:03:09.000Z
|
#!/usr/bin/env python
"""
CMSC733 Spring 2019: Classical and Deep Learning Approaches for
Geometric Computer Vision
Project 1: MyAutoPano: Phase 2 Starter Code
Author(s):
Nitin J. Sanket (nitinsan@terpmail.umd.edu)
PhD Candidate in Computer Science,
University of Maryland, College Park
Abhishek Kathpal
University of Maryland,College Park
"""
# Dependencies:
# opencv, do (pip install opencv-python)
# skimage, do (apt install python-skimage)
# termcolor, do (pip install termcolor)
import tensorflow as tf
import pickle
import cv2
import sys
import os
import glob
# import Misc.ImageUtils as iu
import random
from skimage import data, exposure, img_as_float
import matplotlib.pyplot as plt
from Network.Network import Supervised_HomographyModel,Unsupervised_HomographyModel
from Misc.MiscUtils import *
from Misc.DataUtils import *
import numpy as np
import time
import argparse
import shutil
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3: the StringIO module was folded into io
import string
from termcolor import colored, cprint
import math as m
from tqdm import tqdm
from matplotlib import pyplot as plt
from Misc.TFSpatialTransformer import *
# Don't generate pyc codes
sys.dont_write_bytecode = True
def extract(data):
"""
Extracting training data and labels from pickle files
"""
f = open(data, 'rb')
out = pickle.load(f)
features = np.array(out['features'])
labels = np.array(out['labels'])
f.close()
return features,labels
def GenerateBatch(BasePath, DirNamesTrain, TrainLabels, ImageSize, MiniBatchSize,ModelType):
"""
Inputs:
BasePath - Path to COCO folder without "/" at the end
DirNamesTrain - Variable with Subfolder paths to train files
NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
TrainLabels - Labels corresponding to Train
NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
ImageSize - Size of the Image
MiniBatchSize is the size of the MiniBatch
Outputs:
    For the unsupervised model: I1FullBatch (full images), PatchBatch (stacked patch pairs),
    CornerBatch (patch corner coordinates) and I2Batch (warped patches)
    For the supervised model: I1Batch (stacked patch pairs) and LabelBatch (8-value homography labels)
"""
ImageNum = 0
I1Batch = []
LabelBatch = []
if (ModelType.lower() == 'supervised'):
print("Supervised_approach")
features,labels=extract('training.pkl')
ImageNum = 0
while ImageNum < MiniBatchSize:
# Generate random image
NumTrainImages=5000
RandIdx = random.randint(0, NumTrainImages-1)
ImageNum += 1
##########################################################
# Add any standardization or data augmentation here!
##########################################################
I1 = np.float32(features[RandIdx])
I1=(I1-np.mean(I1))/255
t = labels[RandIdx].reshape((1,8))
label = t[0]
            # Append All Images and Mask
            I1Batch.append(I1)
            LabelBatch.append(label)
        # Supervised mode: return the patch pairs and their 8-value homography labels directly,
        # since the unsupervised batch lists below are never populated on this branch.
        return I1Batch, LabelBatch
else:
# print("Unsupervised Approach")
I1FullBatch = []
PatchBatch = []
CornerBatch = []
I2Batch = []
ImageNum = 0
while ImageNum < MiniBatchSize:
# Generate random image
RandIdx = random.randint(0, len(DirNamesTrain)-1)
# print(len(DirNamesTrain))
RandImageName = BasePath + os.sep + DirNamesTrain[RandIdx] + '.jpg'
ImageNum += 1
patchSize = 128
r = 32
img_orig = plt.imread(RandImageName)
img_orig = np.float32(img_orig)
# plt.imshow(img_orig)
# plt.show()
if(len(img_orig.shape)==3):
img = cv2.cvtColor(img_orig,cv2.COLOR_RGB2GRAY)
else:
img = img_orig
img=(img-np.mean(img))/255
img = cv2.resize(img,(320,240))
# img = cv2.resize(img,(ImageSize[0],ImageSize[1]))
# print(img.shape[1]-r-patchSize)
x = np.random.randint(r, img.shape[1]-r-patchSize)
y = np.random.randint(r, img.shape[0]-r-patchSize)
# print(x)
p1 = (x,y)
p2 = (patchSize+x, y)
p3 = (patchSize+x, patchSize+y)
p4 = (x, patchSize+y)
src = [p1, p2, p3, p4]
src = np.array(src)
dst = []
for pt in src:
dst.append((pt[0]+np.random.randint(-r, r), pt[1]+np.random.randint(-r, r)))
H = cv2.getPerspectiveTransform(np.float32(src), np.float32(dst))
H_inv = np.linalg.inv(H)
warpImg = cv2.warpPerspective(img, H_inv, (img.shape[1],img.shape[0]))
patch1 = img[y:y + patchSize, x:x + patchSize]
patch2 = warpImg[y:y + patchSize, x:x + patchSize]
imgData = np.dstack((patch1, patch2))
# Append All Images and Mask
I1FullBatch.append(np.float32(img))
PatchBatch.append(imgData)
CornerBatch.append(np.float32(src))
I2Batch.append(np.float32(patch2.reshape(128,128,1)))
return I1FullBatch, PatchBatch, CornerBatch, I2Batch
def PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile):
"""
Prints all stats with all arguments
"""
print('Number of Epochs Training will run for ' + str(NumEpochs))
print('Factor of reduction in training data is ' + str(DivTrain))
print('Mini Batch Size ' + str(MiniBatchSize))
print('Number of Training Images ' + str(NumTrainSamples))
if LatestFile is not None:
print('Loading latest checkpoint with the name ' + LatestFile)
def TrainOperation(ImgPH, CornerPH, I2PH, I1FullPH, DirNamesTrain, TrainLabels, NumTrainSamples, ImageSize,
NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
DivTrain, LatestFile, BasePath, LogsPath, ModelType):
"""
Inputs:
    ImgPH is the stacked patch-pair placeholder
    CornerPH is the patch corner coordinates placeholder
    I2PH is the warped patch placeholder
    I1FullPH is the full input image placeholder
DirNamesTrain - Variable with Subfolder paths to train files
TrainLabels - Labels corresponding to Train/Test
NumTrainSamples - length(Train)
ImageSize - Size of the image
NumEpochs - Number of passes through the Train data
MiniBatchSize is the size of the MiniBatch
SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
CheckPointPath - Path to save checkpoints/model
DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of dataor for debugging code
LatestFile - Latest checkpointfile to continue training
BasePath - Path to COCO folder without "/" at the end
LogsPath - Path to save Tensorboard Logs
ModelType - Supervised or Unsupervised Model
Outputs:
Saves Trained network in CheckPointPath and Logs to LogsPath
"""
# Predict output with forward pass
if ModelType.lower() == 'supervised':
H4pt = Supervised_HomographyModel(ImgPH, ImageSize, MiniBatchSize)
        with tf.name_scope('Loss'):
            # NOTE: this supervised branch expects a label placeholder (LabelPH) that is not
            # passed into TrainOperation in this unsupervised training script.
            loss = tf.sqrt(tf.reduce_sum((tf.squared_difference(H4pt, LabelPH))))
with tf.name_scope('Adam'):
Optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)
else:
# print(ImageSize)
pred_I2,I2 = Unsupervised_HomographyModel(ImgPH, CornerPH, I2PH, ImageSize, MiniBatchSize)
with tf.name_scope('Loss'):
loss = tf.reduce_mean(tf.abs(pred_I2 - I2))
with tf.name_scope('Adam'):
Optimizer = tf.train.AdamOptimizer(learning_rate=1e-5).minimize(loss)
# Tensorboard
# Create a summary to monitor loss tensor
EpochLossPH = tf.placeholder(tf.float32, shape=None)
loss_summary = tf.summary.scalar('LossEveryIter', loss)
epoch_loss_summary = tf.summary.scalar('LossPerEpoch', EpochLossPH)
# tf.summary.image('Anything you want', AnyImg)
# Merge all summaries into a single operation
MergedSummaryOP1 = tf.summary.merge([loss_summary])
MergedSummaryOP2 = tf.summary.merge([epoch_loss_summary])
# MergedSummaryOP = tf.summary.merge_all()
# Setup Saver
Saver = tf.train.Saver()
AccOverEpochs=np.array([0,0])
with tf.Session() as sess:
if LatestFile is not None:
Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
# Extract only numbers from the name
StartEpoch = int(''.join(c for c in LatestFile.split('a')[0] if c.isdigit()))
print('Loaded latest checkpoint with the name ' + LatestFile + '....')
else:
sess.run(tf.global_variables_initializer())
StartEpoch = 0
print('New model initialized....')
# Tensorboard
Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())
for Epochs in tqdm(range(StartEpoch, NumEpochs)):
NumIterationsPerEpoch = int(NumTrainSamples/MiniBatchSize/DivTrain)
Loss=[]
epoch_loss=0
for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
I1FullBatch, PatchBatch, CornerBatch, I2Batch = GenerateBatch(BasePath, DirNamesTrain, TrainLabels, ImageSize, MiniBatchSize,ModelType)
FeedDict = {ImgPH: PatchBatch, CornerPH: CornerBatch, I2PH: I2Batch}
_, LossThisBatch, Summary = sess.run([Optimizer, loss, MergedSummaryOP1], feed_dict=FeedDict)
#print(shapeH4pt,shapeLabel).
Loss.append(LossThisBatch)
epoch_loss = epoch_loss + LossThisBatch
# Save checkpoint every some SaveCheckPoint's iterations
if PerEpochCounter % SaveCheckPoint == 0:
# Save the Model learnt in this epoch
SaveName = CheckPointPath + str(Epochs) + 'a' + str(PerEpochCounter) + 'model.ckpt'
Saver.save(sess, save_path=SaveName)
print('\n' + SaveName + ' Model Saved...')
# Tensorboard
Writer.add_summary(Summary, Epochs*NumIterationsPerEpoch + PerEpochCounter)
epoch_loss = epoch_loss/NumIterationsPerEpoch
print(np.mean(Loss))
# Save model every epoch
SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
Saver.save(sess, save_path=SaveName)
print('\n' + SaveName + ' Model Saved...')
Summary_epoch = sess.run(MergedSummaryOP2,feed_dict={EpochLossPH: epoch_loss})
Writer.add_summary(Summary_epoch,Epochs)
Writer.flush()
def main():
"""
Inputs:
None
Outputs:
Runs the Training and testing code based on the Flag
"""
# Parse Command Line arguments
Parser = argparse.ArgumentParser()
Parser.add_argument('--BasePath', default='../Data', help='Base path of images, Default:/media/nitin/Research/Homing/SpectralCompression/COCO')
Parser.add_argument('--CheckPointPath', default='../Checkpoints/', help='Path to save Checkpoints, Default: ../Checkpoints/')
Parser.add_argument('--ModelType', default='unsupervised', help='Model type, Supervised or Unsupervised? Choose from Sup and Unsup, Default:Unsup')
Parser.add_argument('--NumEpochs', type=int, default=50, help='Number of Epochs to Train for, Default:50')
Parser.add_argument('--DivTrain', type=int, default=1, help='Factor to reduce Train data by per epoch, Default:1')
    Parser.add_argument('--MiniBatchSize', type=int, default=32, help='Size of the MiniBatch to use, Default:32')
Parser.add_argument('--LoadCheckPoint', type=int, default=0, help='Load Model from latest Checkpoint from CheckPointsPath?, Default:0')
Parser.add_argument('--LogsPath', default='Logs/', help='Path to save Logs for Tensorboard, Default=Logs/')
Args = Parser.parse_args()
NumEpochs = Args.NumEpochs
BasePath = Args.BasePath
DivTrain = float(Args.DivTrain)
MiniBatchSize = Args.MiniBatchSize
LoadCheckPoint = Args.LoadCheckPoint
CheckPointPath = Args.CheckPointPath
LogsPath = Args.LogsPath
ModelType = Args.ModelType
# Setup all needed parameters including file reading
DirNamesTrain, SaveCheckPoint, ImageSize, NumTrainSamples, TrainLabels, NumClasses = SetupAll(BasePath, CheckPointPath)
print("here")
# Find Latest Checkpoint File
if LoadCheckPoint==1:
LatestFile = FindLatestModel(CheckPointPath)
else:
LatestFile = None
# Pretty print stats
PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile)
# Define PlaceHolder variables for Input and Predicted output
ImgPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 128, 128, 2))
CornerPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 4,2))
I2PH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 128, 128,1))
I1FullPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, ImageSize[0], ImageSize[1],ImageSize[2]))
TrainOperation(ImgPH, CornerPH, I2PH, I1FullPH, DirNamesTrain, TrainLabels, NumTrainSamples, ImageSize,
NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
DivTrain, LatestFile, BasePath, LogsPath, ModelType)
if __name__ == '__main__':
main()
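# Example invocation (a sketch -- the paths are placeholders; the flags mirror the argparse
# options defined in main() above):
#
#   python TrainUnsup.py --BasePath ../Data --CheckPointPath ../Checkpoints/ \
#       --ModelType unsupervised --NumEpochs 50 --MiniBatchSize 32 --LogsPath Logs/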
| 36.32659
| 151
| 0.693691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,548
| 0.361843
|
833a1a0c360f3cdcf8d7b6c1f70840aed091b251
| 699
|
py
|
Python
|
Lista 2/Exercicio 14.py
|
GiovannaPazello/Projetos-em-Python
|
3cf7edbdf2a2350605a775389f7fe2cc7fe8032e
|
[
"MIT"
] | null | null | null |
Lista 2/Exercicio 14.py
|
GiovannaPazello/Projetos-em-Python
|
3cf7edbdf2a2350605a775389f7fe2cc7fe8032e
|
[
"MIT"
] | null | null | null |
Lista 2/Exercicio 14.py
|
GiovannaPazello/Projetos-em-Python
|
3cf7edbdf2a2350605a775389f7fe2cc7fe8032e
|
[
"MIT"
] | null | null | null |
'''Write a program that generates random numbers between 0 and 50 until the number 32 is
generated. When that happens, report:
a. The sum of all the generated numbers
b. How many of the generated numbers are odd
c. The smallest generated number'''
import random
cont = 32  # stop once this value is drawn
somaNumeros = 0
qqntImpares = 0
menorNumero = 51
x = -1  # sentinel so the loop runs at least once and every draw is counted
while cont != x:
    x = random.randint(0, 50)
    somaNumeros = somaNumeros + x
    if x % 2 != 0:
        qqntImpares = qqntImpares + 1
    if menorNumero > x:
        menorNumero = x
print('The sum of all the numbers is {}'.format(somaNumeros))
print('The number of odd numbers is {}'.format(qqntImpares))
print('The smallest number is {}'.format(menorNumero))
| 23.3
| 80
| 0.690987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 340
| 0.475524
|
833a4ecb5ab38b8de2e042cd613f15a274dee6fa
| 1,556
|
py
|
Python
|
mavsim_python/chap4/wind_simulation.py
|
eyler94/mavsim_template_files
|
181a76f15dc454f5a6f58f4596d9039cbe388cd9
|
[
"MIT"
] | null | null | null |
mavsim_python/chap4/wind_simulation.py
|
eyler94/mavsim_template_files
|
181a76f15dc454f5a6f58f4596d9039cbe388cd9
|
[
"MIT"
] | null | null | null |
mavsim_python/chap4/wind_simulation.py
|
eyler94/mavsim_template_files
|
181a76f15dc454f5a6f58f4596d9039cbe388cd9
|
[
"MIT"
] | 1
|
2021-11-15T09:53:42.000Z
|
2021-11-15T09:53:42.000Z
|
"""
Class to determine wind velocity at any given moment,
calculates a steady wind speed and uses a stochastic
process to represent wind gusts. (Follows section 4.4 in uav book)
"""
import sys
sys.path.append('..')
import numpy as np
class wind_simulation:
def __init__(self, Ts):
# steady state wind defined in the inertial frame
self._steady_state = np.array([[0., 0., 0.]]).T
# self.steady_state = np.array([[3., 1., 0.]]).T
# Dryden gust model parameters (pg 56 UAV book)
# HACK: Setting Va to a constant value is a hack. We set a nominal airspeed for the gust model.
# Could pass current Va into the gust function and recalculate A and B matrices.
Va = 17
        # Dryden model intensities and length scales (assumed values for low altitude,
        # light turbulence; treat the exact numbers as placeholders for table 4.1 in the UAV book)
        Lu = Lv = 200.
        Lw = 50.
        sigma_u = sigma_v = 1.06
        sigma_w = 0.7
        # State-space realization of the Dryden gust transfer functions
        # (one state for the u gust, two each for the v and w gusts)
        self._A = np.array([[-Va / Lu, 0., 0., 0., 0.],
                            [0., -2. * Va / Lv, -(Va / Lv) ** 2, 0., 0.],
                            [0., 1., 0., 0., 0.],
                            [0., 0., 0., -2. * Va / Lw, -(Va / Lw) ** 2],
                            [0., 0., 0., 1., 0.]])
        self._B = np.array([[1.], [1.], [0.], [1.], [0.]])
        self._C = np.array(
            [[sigma_u * np.sqrt(2. * Va / Lu), 0., 0., 0., 0.],
             [0., sigma_v * np.sqrt(3. * Va / Lv), sigma_v * np.sqrt(3. * Va / Lv) * Va / (np.sqrt(3.) * Lv), 0., 0.],
             [0., 0., 0., sigma_w * np.sqrt(3. * Va / Lw), sigma_w * np.sqrt(3. * Va / Lw) * Va / (np.sqrt(3.) * Lw)]])
        self._gust_state = np.zeros((5, 1))
self._Ts = Ts
def update(self):
# returns a six vector.
# The first three elements are the steady state wind in the inertial frame
# The second three elements are the gust in the body frame
return np.concatenate(( self._steady_state, self._gust() ))
def _gust(self):
# calculate wind gust using Dryden model. Gust is defined in the body frame
w = np.random.randn() # zero mean unit variance Gaussian (white noise)
# propagate Dryden model (Euler method): x[k+1] = x[k] + Ts*( A x[k] + B w[k] )
self._gust_state += self._Ts * (self._A @ self._gust_state + self._B * w)
# output the current gust: y[k] = C x[k]
return self._C @ self._gust_state
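# A short usage sketch (the sample time below is an assumed value): step the gust model at
# 100 Hz and print the combined wind vector, [steady-state wind in the inertial frame;
# gust in the body frame].
if __name__ == '__main__':
    wind = wind_simulation(Ts=0.01)
    for _ in range(5):
        print(wind.update().T)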
| 38.9
| 105
| 0.628535
| 1,319
| 0.847686
| 0
| 0
| 0
| 0
| 0
| 0
| 910
| 0.584833
|
833a7aa9cb8a7c6a6aacafb0a6fb6428d1abdec9
| 2,779
|
py
|
Python
|
dx/geometric_brownian_motion.py
|
yehuihe/dx
|
6a8c6a1605fd4314c481561ecceaaddf4528c43d
|
[
"Apache-2.0"
] | null | null | null |
dx/geometric_brownian_motion.py
|
yehuihe/dx
|
6a8c6a1605fd4314c481561ecceaaddf4528c43d
|
[
"Apache-2.0"
] | null | null | null |
dx/geometric_brownian_motion.py
|
yehuihe/dx
|
6a8c6a1605fd4314c481561ecceaaddf4528c43d
|
[
"Apache-2.0"
] | null | null | null |
"""Simulation Class -- Geometric Brownian Motion
"""
# Author: Yehui He <yehui.he@hotmail.com>
# License: Apache-2.0 License
import numpy as np
from .sn_random_numbers import sn_random_numbers
from .simulation_class import SimulationClass
class GeometricBrownianMotion(SimulationClass):
"""Class to generate simulated paths based on
the Black-Scholes-Merton geometric Brownian motion model.
Parameters
----------
name : str
Name of the object.
mar_env : MarketEnvironment
Market environment data for simulation.
corr : bool
True if correlated with other model object
"""
def __init__(self, name, mar_env, corr=False):
super().__init__(name, mar_env, corr)
def update(self, initial_value=None, volatility=None, final_date=None):
if initial_value:
self.initial_value = initial_value
if volatility:
self.volatility = volatility
if final_date:
self.final_date = final_date
self.instrument_values = None
def generate_paths(self, fixed_seed=False, day_count=365.):
if not self.time_grid.any():
# method from generic simulation class
self.generate_time_grid()
# number of dates for time grid
M = len(self.time_grid)
# number of paths
I = self.paths
# ndarray initialization for path simulation
paths = np.zeros((M, I))
# initialize first date with initial_value
paths[0] = self.initial_value
if not self.correlated:
# if not correlated, generate random numbers
rand = sn_random_numbers((1, M, I),
fixed_seed=fixed_seed)
else:
# if correlated, use random number object as provided
# in market environment
rand = self.random_numbers
short_rate = self.discount_curve.short_rate
# get short_rate for drift of process
for t in range(1, len(self.time_grid)):
# select the right time slice from the relevant
# random number set
if not self.correlated:
ran = rand[t]
else:
ran = np.dot(self.cholesky_matrix, rand[:, t, :])
ran = ran[self.rn_set]
dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count
# difference between two dates as year fraction
paths[t] = paths[t - 1] * np.exp((short_rate - 0.5 *
self.volatility ** 2) * dt +
self.volatility * np.sqrt(dt) * ran)
# generate simulated values for the respective date
self.instrument_values = paths
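# A minimal, self-contained sketch of the discretization used in generate_paths() above:
# S_t = S_{t-1} * exp((r - 0.5 * sigma**2) * dt + sigma * sqrt(dt) * z), with z ~ N(0, 1).
# The numbers below are illustrative only and do not come from a MarketEnvironment.
if __name__ == '__main__':
    r, sigma, dt, s0 = 0.05, 0.2, 1. / 365, 100.
    z = np.random.standard_normal(250)
    # the cumulative sum of log-returns reproduces the path recursion in closed form
    path = s0 * np.exp(np.cumsum((r - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * z))
    print(path[:5])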
| 35.628205
| 82
| 0.594458
| 2,534
| 0.911839
| 0
| 0
| 0
| 0
| 0
| 0
| 953
| 0.342929
|
833ab5ac04df4cc2bfa2f945d2155461c52e1071
| 1,039
|
py
|
Python
|
yibai-sms-python-sdk-1.0.0/yibai/api/Yibai.py
|
100sms/yibai-python-sdk
|
9907d0fbf147b5b3ce10e4afed2ac7f19d52af3f
|
[
"MIT"
] | null | null | null |
yibai-sms-python-sdk-1.0.0/yibai/api/Yibai.py
|
100sms/yibai-python-sdk
|
9907d0fbf147b5b3ce10e4afed2ac7f19d52af3f
|
[
"MIT"
] | null | null | null |
yibai-sms-python-sdk-1.0.0/yibai/api/Yibai.py
|
100sms/yibai-python-sdk
|
9907d0fbf147b5b3ce10e4afed2ac7f19d52af3f
|
[
"MIT"
] | 1
|
2019-11-26T11:49:54.000Z
|
2019-11-26T11:49:54.000Z
|
# encoding=utf8
from . import HttpUtils  # explicit relative import so this also resolves inside the yibai.api package on Python 3
class YibaiApiError(Exception):
def __init__(self, code, message):
super(YibaiApiError, self).__init__(message)
self.code = code
class YibaiClient(object):
def __init__(self, server_url, apikey):
self.serverUrl = server_url
self.apikey = apikey
def sms_batch_submit(self, submits):
return self.__execute({'submits': submits}, '/sms/batchSubmit')
def sms_pull_status_report(self):
return self.__execute({}, '/sms/pullStatusReport')
def sms_pull_reply_message(self):
return self.__execute({}, '/sms/pullReply')
def user_info(self):
return self.__execute({}, '/user/info')
def __execute(self, request, url_path):
request['apikey'] = self.apikey
req_url = self.serverUrl + url_path
res = HttpUtils.post_json(req_url, request)
if res['code'] == 200:
return res['response']
raise YibaiApiError(res['code'], res['message'])
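# A hedged usage sketch (not part of the SDK): the server URL and API key below are
# placeholders, and the exact fields of each submit dict are defined by the 100sms API docs.
if __name__ == '__main__':
    client = YibaiClient('https://sms.example.com/api', 'your-apikey')
    try:
        print(client.user_info())
    except YibaiApiError as e:
        print('Yibai API error %s: %s' % (e.code, e))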
| 28.861111
| 72
| 0.624639
| 990
| 0.952839
| 0
| 0
| 0
| 0
| 0
| 0
| 133
| 0.128008
|
833b47331d2a097b8a77501f425210bc65eeddac
| 1,194
|
py
|
Python
|
setup.py
|
nattster/lettuce_webdriver
|
26b910ceef67d5b81030640ebbab0504bd59d643
|
[
"MIT"
] | 24
|
2015-02-04T14:49:51.000Z
|
2021-03-23T17:17:09.000Z
|
setup.py
|
nattster/lettuce_webdriver
|
26b910ceef67d5b81030640ebbab0504bd59d643
|
[
"MIT"
] | 4
|
2015-07-13T22:41:22.000Z
|
2016-10-03T20:17:22.000Z
|
setup.py
|
nattster/lettuce_webdriver
|
26b910ceef67d5b81030640ebbab0504bd59d643
|
[
"MIT"
] | 12
|
2015-01-24T02:05:39.000Z
|
2016-12-30T07:30:28.000Z
|
__version__ = '0.3.5'
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
setup(name='lettuce_webdriver',
version=__version__,
description='Selenium webdriver extension for lettuce',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
'Topic :: Software Development :: Testing',
],
keywords='web lettuce bdd',
author="Nick Pilon, Ben Bangert",
author_email="npilon@gmail.com, ben@groovie.org",
url="https://github.com/bbangert/lettuce_webdriver/",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
      tests_require=['lettuce', 'selenium', 'nose'],
      install_requires=['lettuce', 'selenium>=2.30.0'],
test_suite="lettuce_webdriver",
entry_points="""
[console_scripts]
lettuce_webdriver=lettuce_webdriver.parallel_bin:main
"""
)
| 32.27027
| 61
| 0.649079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 535
| 0.448074
|
833c0720b2fa02e3aacf53733cbb5dfadce129a9
| 326
|
py
|
Python
|
project4/network/migrations/0005_remove_post_likers.py
|
mjs375/cs50_Network
|
31a2399f4429931b15721861a2940b57811ae844
|
[
"MIT"
] | null | null | null |
project4/network/migrations/0005_remove_post_likers.py
|
mjs375/cs50_Network
|
31a2399f4429931b15721861a2940b57811ae844
|
[
"MIT"
] | null | null | null |
project4/network/migrations/0005_remove_post_likers.py
|
mjs375/cs50_Network
|
31a2399f4429931b15721861a2940b57811ae844
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-11-15 16:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('network', '0004_auto_20201111_2224'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='likers',
),
]
| 18.111111
| 47
| 0.588957
| 241
| 0.739264
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 0.291411
|
833ff2663454d251a149619c7bf5edfd07d118d9
| 942
|
py
|
Python
|
Commands/interested.py
|
hanss314/TheBrainOfTWOWCentral
|
a97d40ebb73904f236d7b3db6ec9f8c3fe999f4e
|
[
"MIT"
] | null | null | null |
Commands/interested.py
|
hanss314/TheBrainOfTWOWCentral
|
a97d40ebb73904f236d7b3db6ec9f8c3fe999f4e
|
[
"MIT"
] | null | null | null |
Commands/interested.py
|
hanss314/TheBrainOfTWOWCentral
|
a97d40ebb73904f236d7b3db6ec9f8c3fe999f4e
|
[
"MIT"
] | null | null | null |
from Config._const import PREFIX
HELP = {
"COOLDOWN": 3,
"MAIN": "Toggles whether or not you have the `Interested in the Bot` role",
"FORMAT": "",
"CHANNEL": 0,
"USAGE": f"""Using `{PREFIX}interested` will add the `Interested in the Bot` to you, or remove it if you already
have it.""".replace("\n", "").replace("\t", "")
}
PERMS = 0 # Member
ALIASES = ["I"]
REQ = ["BOT_ROLE", "TWOW_CENTRAL"]
async def MAIN(message, args, level, perms, BOT_ROLE, TWOW_CENTRAL):
person = TWOW_CENTRAL.get_member(message.author.id)
if BOT_ROLE in person.roles: # If they already have the role...
await person.remove_roles(BOT_ROLE) # remove it.
await message.channel.send(f"<@{message.author.id}>, you no longer have `Interested in the Bot`.")
return
# If they don't have the role yet...
await person.add_roles(BOT_ROLE) # add it.
await message.channel.send(f"**<@{message.author.id}>, you now have `Interested in the Bot`!**")
return
| 34.888889
| 114
| 0.686837
| 0
| 0
| 0
| 0
| 0
| 0
| 538
| 0.571125
| 501
| 0.531847
|
83404f40a03d9276b97c34aee6e5fb4ad81499f8
| 101
|
py
|
Python
|
gen_newsletter.py
|
pnijjar/google-calendar-rss
|
6f4e6b9acbeffcf74112e6b33d99eaf1ea912be4
|
[
"Apache-2.0"
] | 1
|
2021-06-29T04:10:48.000Z
|
2021-06-29T04:10:48.000Z
|
gen_newsletter.py
|
pnijjar/google-calendar-rss
|
6f4e6b9acbeffcf74112e6b33d99eaf1ea912be4
|
[
"Apache-2.0"
] | 1
|
2021-06-29T05:03:36.000Z
|
2021-06-29T05:03:36.000Z
|
gen_newsletter.py
|
pnijjar/google-calendar-rss
|
6f4e6b9acbeffcf74112e6b33d99eaf1ea912be4
|
[
"Apache-2.0"
] | 2
|
2019-08-07T15:33:25.000Z
|
2021-06-29T04:37:21.000Z
|
#!/usr/bin/env python3
from gcal_helpers import helpers
helpers.write_transformation("newsletter")
| 16.833333
| 42
| 0.811881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.336634
|
8340e8e017d3e1c1641789fc6d116198178f84f1
| 2,550
|
py
|
Python
|
qiskit/pulse/instructions/delay.py
|
gadial/qiskit-terra
|
0fc83f44a6e80969875c738b2cee7bc33223e45f
|
[
"Apache-2.0"
] | 1
|
2021-10-05T11:56:53.000Z
|
2021-10-05T11:56:53.000Z
|
qiskit/pulse/instructions/delay.py
|
gadial/qiskit-terra
|
0fc83f44a6e80969875c738b2cee7bc33223e45f
|
[
"Apache-2.0"
] | 24
|
2021-01-27T08:20:27.000Z
|
2021-07-06T09:42:28.000Z
|
qiskit/pulse/instructions/delay.py
|
gadial/qiskit-terra
|
0fc83f44a6e80969875c738b2cee7bc33223e45f
|
[
"Apache-2.0"
] | 4
|
2021-10-05T12:07:27.000Z
|
2022-01-28T18:37:28.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""An instruction for blocking time on a channel; useful for scheduling alignment."""
from typing import Optional, Union, Tuple
from qiskit.circuit import ParameterExpression
from qiskit.pulse.channels import Channel
from qiskit.pulse.instructions.instruction import Instruction
class Delay(Instruction):
"""A blocking instruction with no other effect. The delay is used for aligning and scheduling
other instructions.
Example:
To schedule an instruction at time = 10, on a channel assigned to the variable ``channel``,
the following could be used::
sched = Schedule(name="Delay instruction example")
sched += Delay(10, channel)
sched += Gaussian(duration, amp, sigma, channel)
The ``channel`` will output no signal from time=0 up until time=10.
"""
def __init__(self, duration: Union[int, ParameterExpression],
channel: Channel,
name: Optional[str] = None):
"""Create a new delay instruction.
No other instruction may be scheduled within a ``Delay``.
Args:
duration: Length of time of the delay in terms of dt.
channel: The channel that will have the delay.
name: Name of the delay for display purposes.
"""
super().__init__(operands=(duration, channel), name=name)
@property
def channel(self) -> Channel:
"""Return the :py:class:`~qiskit.pulse.channels.Channel` that this instruction is
scheduled on.
"""
return self.operands[1]
@property
def channels(self) -> Tuple[Channel]:
"""Returns the channels that this schedule uses."""
return (self.channel, )
@property
def duration(self) -> Union[int, ParameterExpression]:
"""Duration of this instruction."""
return self.operands[0]
def is_parameterized(self) -> bool:
"""Return ``True`` iff the instruction is parameterized."""
return isinstance(self.duration, ParameterExpression) or super().is_parameterized()
| 35.915493
| 99
| 0.671373
| 1,789
| 0.701569
| 0
| 0
| 486
| 0.190588
| 0
| 0
| 1,642
| 0.643922
|
83419d745e57d76be4f84f2cf4a69352d320b89f
| 738
|
py
|
Python
|
users/urls.py
|
mahmutcankurt/DjangoBlogSite
|
8597bbe7ed066b50e02367a98f0062deb37d251d
|
[
"Apache-2.0"
] | 3
|
2021-01-24T13:14:33.000Z
|
2022-01-25T22:17:59.000Z
|
users/urls.py
|
mahmutcankurt1/staj
|
8597bbe7ed066b50e02367a98f0062deb37d251d
|
[
"Apache-2.0"
] | null | null | null |
users/urls.py
|
mahmutcankurt1/staj
|
8597bbe7ed066b50e02367a98f0062deb37d251d
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url
from .views import signupView, activate, account_activation_sent, user_login, user_logout, user_edit_profile, user_change_password
urlpatterns = [
url(r'^register/$', signupView, name='register'),
url(r'^account_activation_sent/$', account_activation_sent, name='account_activation_sent'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', activate,
name='activate'),
url(r'^login/$', user_login, name='user_login'),
url(r'^logout/$', user_logout, name='user_logout'),
url(r'^user_edit_profile/$', user_edit_profile, name='user_edit_profile'),
url(r'^change_password/$', user_change_password, name='change_password'),
]
| 43.411765
| 130
| 0.703252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 305
| 0.413279
|
8341a4470393cc4df708339799fbfe8844ec3b50
| 739
|
py
|
Python
|
mosasaurus/chromaticlc/mptest.py
|
zkbt/mosasaurus
|
8ddeaa359adda36e4c48c3c6c476c34fdc09d952
|
[
"MIT"
] | 2
|
2018-08-03T16:22:27.000Z
|
2018-09-03T22:46:31.000Z
|
mosasaurus/chromaticlc/mptest.py
|
zkbt/mosasaurus
|
8ddeaa359adda36e4c48c3c6c476c34fdc09d952
|
[
"MIT"
] | 15
|
2016-11-23T19:59:33.000Z
|
2019-07-10T13:40:40.000Z
|
mosasaurus/chromaticlc/mptest.py
|
zkbt/mosasaurus
|
8ddeaa359adda36e4c48c3c6c476c34fdc09d952
|
[
"MIT"
] | 1
|
2016-12-02T20:53:08.000Z
|
2016-12-02T20:53:08.000Z
|
import TransmissionSpectrum
import multiprocessing
obs = 'wasp94_140805.obs'
ncpu = multiprocessing.cpu_count()
def fastfit(i):
t = TransmissionSpectrum.TransmissionSpectrum(obs)
t.speak('starting fit for bin {0}'.format(i))
t.bins[i].fit(plot=False, slow=False, interactive=False, remake=True)
def slowfit(i):
t = TransmissionSpectrum.TransmissionSpectrum(obs)
t.speak('starting fit for bin {0}'.format(i))
t.bins[i].fit(plot=False, slow=True, interactive=False, nburnin=500, ninference=500)
pool = multiprocessing.Pool(ncpu)
t = TransmissionSpectrum.TransmissionSpectrum(obs)
for i in range(len(t.bins)):
fastfit(i)
#pool.map_async(fastfit, range(len(t.bins)))
#pool.map_async(slowfit, range(len(t.bins)))
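# A sketch of the parallel variant that the commented-out calls above gesture at (left
# commented so the serial loop above remains the default behaviour):
#
# pool.map(fastfit, range(len(t.bins)))
# pool.close()
# pool.join()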
| 33.590909
| 88
| 0.741543
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 159
| 0.215156
|
8342f7c7f2effcfa796c1cab9266d9d3d82726f5
| 1,867
|
py
|
Python
|
semeval_filter.py
|
krzysztoffiok/twitter_sentiment_to_usnavy
|
673e01336242348d9aa79e6e9b3385222bcd62d7
|
[
"MIT"
] | 2
|
2021-02-19T11:17:03.000Z
|
2021-11-04T06:30:48.000Z
|
semeval_filter.py
|
krzysztoffiok/twitter_sentiment_to_usnavy
|
673e01336242348d9aa79e6e9b3385222bcd62d7
|
[
"MIT"
] | null | null | null |
semeval_filter.py
|
krzysztoffiok/twitter_sentiment_to_usnavy
|
673e01336242348d9aa79e6e9b3385222bcd62d7
|
[
"MIT"
] | 1
|
2020-05-03T09:10:21.000Z
|
2020-05-03T09:10:21.000Z
|
import pandas as pd
import numpy as np
import datatable as dt
import re
"""
Basic pre-processing of Twitter text from SemEval2017 data set.
"""
# replace repeating characters so that only 2 repeats remain
def repoo(x):
repeat_regexp = re.compile(r'(\S+)(\1{2,})')
    # keep only the repeat group; applied iteratively below until at most 2 repeats remain
    return repeat_regexp.sub(repl=r'\2', string=x)
file_names = ["./semeval_data/source_data/semtrain.csv", "./semeval_data/source_data/semtest.csv"]
for file_name in file_names:
df = dt.fread(file_name).to_pandas()
df_sampled = df.copy()
sample_size = len(df_sampled)
    # preprocess data (re is already imported above)
# change all pic.twitter.com to "IMAGE"
df_sampled["text"] = df_sampled["text"].str.replace(
'pic.twitter.com/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' _IMAGE ', regex=True)
# # get rid of some instances of IMG
df_sampled["text"] = df_sampled["text"].str.replace(
'https://pbs.twimg.com/media/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', 'IMAGE ',
regex=True)
# get rid of some instances of https://twitter.com -> to RETWEET
df_sampled["text"] = df_sampled["text"].str.replace(
'https://twitter.com(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' _RETWEET ',
regex=True)
# change all URLS to "URL"
df_sampled["text"] = df_sampled["text"].str.replace(
'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' _URL ', regex=True)
# get rid of character repeats
for i in range(10):
df_sampled["text"] = df_sampled["text"].map(lambda x: repoo(str(x)))
# get rid of endline signs
df_sampled["text"] = df_sampled["text"].str.replace("\n", "")
# save to file the sampled DF
df_sampled[["sentiment", "text"]].to_csv(f"{file_name[:-4]}_filtered.csv")
| 34.574074
| 119
| 0.591859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,013
| 0.542582
|
8343385a22dd30ea40482bf144f766b74f99b606
| 6,969
|
py
|
Python
|
tutorials/rhythm/plot_SlidingWindowMatching.py
|
bcmartinb/neurodsp
|
36d8506f3bd916f83b093a62843ffb77647a6e1e
|
[
"Apache-2.0"
] | 154
|
2019-01-30T04:10:48.000Z
|
2022-03-30T12:55:00.000Z
|
tutorials/rhythm/plot_SlidingWindowMatching.py
|
bcmartinb/neurodsp
|
36d8506f3bd916f83b093a62843ffb77647a6e1e
|
[
"Apache-2.0"
] | 159
|
2019-01-28T22:49:36.000Z
|
2022-03-17T16:42:48.000Z
|
tutorials/rhythm/plot_SlidingWindowMatching.py
|
bcmartinb/neurodsp
|
36d8506f3bd916f83b093a62843ffb77647a6e1e
|
[
"Apache-2.0"
] | 42
|
2019-05-31T21:06:44.000Z
|
2022-03-25T23:17:57.000Z
|
"""
Sliding Window Matching
=======================
Find recurring patterns in neural signals using Sliding Window Matching.
This tutorial primarily covers the :func:`~.sliding_window_matching` function.
"""
###################################################################################################
# Overview
# --------
#
# Non-periodic or non-sinusoidal properties can be difficult to assess in frequency domain
# methods. To try and address this, the sliding window matching (SWM) algorithm has been
# proposed for detecting and measuring recurring, but unknown, patterns in time series data.
# Patterns of interest may be transient events, and/or the waveform shape of neural oscillations.
#
# In this example, we will explore applying the SWM algorithm to some LFP data.
#
# The SWM approach tries to find recurring patterns (or motifs) in the data, using sliding
# windows. An iterative process samples windows randomly, and compares each to the average
# window. The goal is to find a selection of windows that look maximally like the average
# window, at which point the occurrences of the window have been detected, and the average
# window pattern can be examined.
#
# The sliding window matching algorithm is described in
# `Gips et al, 2017 <https://doi.org/10.1016/j.jneumeth.2016.11.001>`_
#
###################################################################################################
# sphinx_gallery_thumbnail_number = 2
import numpy as np
# Import the sliding window matching function
from neurodsp.rhythm import sliding_window_matching
# Import utilities for loading and plotting data
from neurodsp.utils.download import load_ndsp_data
from neurodsp.plts.rhythm import plot_swm_pattern
from neurodsp.plts.time_series import plot_time_series
from neurodsp.utils import set_random_seed, create_times
from neurodsp.utils.norm import normalize_sig
###################################################################################################
# Set random seed, for reproducibility
set_random_seed(0)
###################################################################################################
# Load neural signal
# ------------------
#
# First, we will load a segment of ECoG data, as an example time series.
#
###################################################################################################
# Download, if needed, and load example data files
sig = load_ndsp_data('sample_data_1.npy', folder='data')
sig = normalize_sig(sig, mean=0, variance=1)
# Set sampling rate, and create a times vector for plotting
fs = 1000
times = create_times(len(sig)/fs, fs)
###################################################################################################
#
# Next, we can visualize this data segment. As we can see this segment of data has
# some prominent bursts of oscillations, in this case, in the beta frequency.
#
###################################################################################################
# Plot example signal
plot_time_series(times, sig)
###################################################################################################
# Apply sliding window matching
# -----------------------------
#
# The beta oscillation in our data segment looks like it might have some non-sinusoidal
# properties. We can investigate this with sliding window matching.
#
# Sliding window matching can be applied with the
# :func:`~.sliding_window_matching` function.
#
###################################################################################################
# Data Preprocessing
# ~~~~~~~~~~~~~~~~~~
#
# Typically, the input signal does not have to be filtered into a band of interest to use SWM.
#
# If the goal is to characterize non-sinusoidal rhythms, you typically won't want to
# apply a filter that will smooth out the features of interest.
#
# However, if the goal is to characterize higher frequency activity, it can be useful to
# apply a highpass filter, so that the method does not converge on a lower frequency motif.
#
# In our case, the beta rhythm of interest is the most prominent, low frequency, feature of the
# data, so we won't apply a filter.
#
###################################################################################################
# Algorithm Settings
# ~~~~~~~~~~~~~~~~~~
#
# The SWM algorithm has some algorithm specific settings that need to be applied, including:
#
# - `win_len` : the length of the window, defined in seconds
# - `win_spacing` : the minimum distance between windows, also defined in seconds
#
# The length of the window influences the patterns that are extracted from the data.
# Typically, you want to set the window length to match the expected timescale of the
# patterns under study.
#
# For our purposes, we will define the window length to be about 1 cycle of a beta oscillation,
# which should help the algorithm to find the waveform shape of the neural oscillation.
#
###################################################################################################
# Define window length & minimum window spacing, both in seconds
win_len = .055
win_spacing = .055
###################################################################################################
# Apply the sliding window matching algorithm to the time series
windows, window_starts = sliding_window_matching(sig, fs, win_len, win_spacing, var_thresh=.5)
###################################################################################################
# Examine the Results
# ~~~~~~~~~~~~~~~~~~~
#
# What we got back from the SWM function are the extracted windows themselves and the list
# of indices in the data at which each window starts. Averaging across the windows gives
# the recurring pattern.
#
# In order to visualize the resulting pattern, we can use
# :func:`~.plot_swm_pattern`.
#
###################################################################################################
# Compute the average window
avg_window = np.mean(windows, 0)
# Plot the discovered pattern
plot_swm_pattern(avg_window)
###################################################################################################
#
# The average pattern above appears to capture a beta rhythm, and we can see some of the
# waveform shape of the extracted rhythm.
#
###################################################################################################
# Concluding Notes
# ~~~~~~~~~~~~~~~~
#
# One thing to keep in mind is that the SWM algorithm includes a random element of sampling
# and comparing the windows - meaning it is not deterministic. Because of this, results
# can change with different random seeds.
#
# To explore this, go back and change the random seed, and see how the output changes.
#
# You can also set the number of iterations that the algorithm sweeps through. Increasing
# the number of iterations, and using longer data segments, can help improve the robustness
# of the algorithm results.
#
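###################################################################################################
# As a quick example of that, we can re-run the algorithm with a different random seed,
# using the same settings as before, and compare the resulting average pattern to the one
# plotted above.
#

# Re-run sliding window matching with a different random seed
set_random_seed(1)
windows2, window_starts2 = sliding_window_matching(sig, fs, win_len, win_spacing, var_thresh=.5)

# Plot the pattern found with the new seed
plot_swm_pattern(np.mean(windows2, 0))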
| 39.822857
| 99
| 0.578275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,126
| 0.879036
|
8343d14fcff75c3593b87cced0b3013a8661f9e3
| 719
|
py
|
Python
|
forge/auth/backends.py
|
django-forge/forge
|
6223d2a4e7a570dfba87c3ae2e14927010fe7fd9
|
[
"MIT"
] | 3
|
2022-03-30T22:14:35.000Z
|
2022-03-31T22:04:42.000Z
|
forge/auth/backends.py
|
django-forge/forge
|
6223d2a4e7a570dfba87c3ae2e14927010fe7fd9
|
[
"MIT"
] | null | null | null |
forge/auth/backends.py
|
django-forge/forge
|
6223d2a4e7a570dfba87c3ae2e14927010fe7fd9
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
UserModel = get_user_model()
class EmailModelBackend(ModelBackend):
def authenticate(self, request, username=None, password=None, **kwargs):
if username is None:
email = kwargs.get(UserModel.EMAIL_FIELD)
else:
email = username
email = UserModel._default_manager.normalize_email(email)
try:
user = UserModel.objects.get(email=email)
except UserModel.DoesNotExist:
return None
else:
if user.check_password(password) and self.user_can_authenticate(user):
return user
return None
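# Configuration sketch (the dotted path follows this module's location, forge/auth/backends.py):
# list the backend in the project settings, optionally keeping Django's default username
# backend as a fallback.
#
#   AUTHENTICATION_BACKENDS = [
#       "forge.auth.backends.EmailModelBackend",
#       "django.contrib.auth.backends.ModelBackend",
#   ]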
| 28.76
| 82
| 0.66064
| 585
| 0.81363
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
83441a6b6c5d79e325330fcd2de68468db5ae8e3
| 8,923
|
py
|
Python
|
Macro/WorkFeature/Utils/WF_curve.py
|
myao9494/FreeCAD_Factory
|
6bf3209f2295d306d4c2c8c2ded25839c837e869
|
[
"MIT"
] | null | null | null |
Macro/WorkFeature/Utils/WF_curve.py
|
myao9494/FreeCAD_Factory
|
6bf3209f2295d306d4c2c8c2ded25839c837e869
|
[
"MIT"
] | null | null | null |
Macro/WorkFeature/Utils/WF_curve.py
|
myao9494/FreeCAD_Factory
|
6bf3209f2295d306d4c2c8c2ded25839c837e869
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 1 06:59:10 2016
@author: laurent
"""
from __future__ import division
from math import factorial
# Pascal's triangle
p_t = [ [1], # n=0
[1,1], # n=1
[1,2,1], # n=2
[1,3,3,1], # n=3
[1,4,6,4,1], # n=4
[1,5,10,10,5,1], # n=5
[1,6,15,20,15,6,1]] # n=6
#==============================================================================
# binomial(n,k):
# while(n >= lut.length):
# s = lut.length
# nextRow = new array(size=s+1)
# nextRow[0] = 1
# for(i=1, prev=s-1; i<prev; i++):
# nextRow[i] = lut[prev][i-1] + lut[prev][i]
# nextRow[s] = 1
# lut.add(nextRow)
# return lut[n][k]
#==============================================================================
def binomial(n, i):
""" return binomial terms from Pascal triangle from predefined list or
calculate the terms if not already in the list.
"""
global p_t
m_l = len(p_t)
while n >= m_l:
m_next_row = []
m_next_row.append(1)
for m_i in range(1,m_l):
m_next_row.append(p_t[m_l-1][m_i-1]+p_t[m_l-1][m_i])
m_next_row.append(1)
# print m_next_row
p_t.append(m_next_row)
m_l = len(p_t)
return p_t[n][i]
def binomial_term(n, i):
""" binomial coefficient = n! / (i!(n - i)!)
"""
return factorial(n) / (factorial(i) * factorial(n - i))
#==============================================================================
# function Bezier(n,t):
# sum = 0
# for(k=0; k<n; k++):
# sum += n!/(k!*(n-k)!) * (1-t)^(n-k) * t^(k)
# return sum
#==============================================================================
def bezier_base(n, t):
""" Basis Bezier function.
"""
m_sum = 0.
m_C = binomial_term
for i in range(n):
m_sum += m_C(n, i) * (1 - t)**(n - i) * t**i
return m_sum
#==============================================================================
# function Bezier(2,t):
# t2 = t * t
# mt = 1-t
# mt2 = mt * mt
# return mt2 + 2*mt*t + t2
#==============================================================================
def bezier_quadratic_terms(t):
""" Simplified Bezier quadratic curve.
Return 3 terms in list ()
"""
m_terms = list()
# n=2 i=0
# m_C(n, i) * (1 - t)**(n - i) * t**i
# m_C(2, 0) * (1 - t)**(2 - 0) * t**0
# 1 * (1 - t)*(1 - t) * 1
m_terms.append((1 - t)*(1 - t))
# n=2 i=1
# m_C(n, i) * (1 - t)**(n - i) * t**i
# m_C(2, 1) * (1 - t)**(2 - 1) * t**1
# 2 * (1 - t) * t
m_terms.append(2 * (1 - t) * t)
m_terms.append(t*t)
return m_terms
#==============================================================================
# function Bezier(3,t):
# t2 = t * t
# t3 = t2 * t
# mt = 1-t
# mt2 = mt * mt
# mt3 = mt2 * mt
# return mt3 + 3*mt2*t + 3*mt*t2 + t3
#==============================================================================
def bezier_cubic_terms(t):
""" Simplified Bezier cubic curve.
Return 4 terms in list ()
"""
m_terms = list()
# n=3 i=0
# m_C(n, i) * (1 - t)**(n - i) * t**i
# m_C(3, 0) * (1 - t)**(3 - 0) * t**0
# (1 - t)*(1 - t)*(1 - t)
m_terms.append((1 - t)*(1 - t)*(1 - t))
# n=3 i=1
# m_C(n, i) * (1 - t)**(n - i) * t**i
# m_C(3, 1) * (1 - t)**(3 - 1) * t**1
# 3 * (1 - t)*(1 - t) * t
m_terms.append(3 * (1 - t)*(1 - t) * t)
# n=3 i=2
# m_C(n, i) * (1 - t)**(n - i) * t**i
# m_C(3, 2) * (1 - t)**(3 - 2) * t**2
# 3 * (1 - t) * t * t
m_terms.append(3 * (1 - t) * t * t)
m_terms.append(t * t * t)
return m_terms
def bezier_terms(n, t):
""" Bezier curve.
Return n+1 terms in list ()
"""
m_terms = list()
m_C = binomial_term
for i in range(n):
m_terms.append( m_C(n, i) * (1 - t)**(n - i) * t**i )
m_terms.append(t ** n)
return m_terms
#==============================================================================
# function Bezier(n,t,w[]):
# sum = 0
# for(k=0; k<n; k++):
# sum += w[k] * binomial(n,k) * (1-t)^(n-k) * t^(k)
# return sum
#==============================================================================
def bezier_curve(n, t, weigths):
""" Basis Bezier function.
"""
m_sum = 0.
m_C = binomial_term
for i,w in zip(range(n+1),weigths):
m_sum += m_C(n, i) * (1 - t)**(n - i) * t**i * w
return m_sum
#==============================================================================
# function Bezier(2,t,w[]):
# t2 = t * t
# mt = 1-t
# mt2 = mt * mt
# return w[0]*mt2 + w[1]*2*mt*t + w[2]*t2
#==============================================================================
def bezier_quadratic_curve(t, weigths):
if len(weigths) != 3:
return None
t2 = t * t
mt = 1-t
mt2 = mt * mt
return weigths[0]*mt2 + weigths[1]*2*mt*t + weigths[2]*t2
#==============================================================================
# function Bezier(3,t,w[]):
# t2 = t * t
# t3 = t2 * t
# mt = 1-t
# mt2 = mt * mt
# mt3 = mt2 * mt
# return w[0]*mt3 + 3*w[1]*mt2*t + 3*w[2]*mt*t2 + w[3]*t3
#==============================================================================
def bezier_cubic_curve(t, weigths):
if len(weigths) != 4:
return None
t2 = t * t
t3 = t2 * t
mt = 1-t
mt2 = mt * mt
mt3 = mt2 * mt
return weigths[0]*mt3 + weigths[1]*3*mt2*t + weigths[2]*3*mt*t2 + weigths[3]*t3
class Bezier():
""" bezier curve object
points : list of control points
points = [(-1,-1,0.0),(0,3,0.0)]
"""
def __init__(self, points):
if (None in [points]) :
print "\nERROR in : bezier.__init__"
print "'points' not defined !"
return None
n = len(t)
pass
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
t = np.arange(0.0, 1.0, 0.01)
b1 = bezier_base(1, t)
plt.plot(t, b1)
b2 = bezier_base(2, t)
plt.plot(t, b2)
b3 = bezier_base(3, t)
plt.plot(t, b3)
plt.xlabel('t values')
plt.ylabel('')
plt.title('Bezier basis functions : b1(blue), b2(green) and b3(red)')
plt.grid(True)
plt.show()
# print str(binomial(0, 0))
# print str(binomial(1, 0)),
# print str(binomial(1, 1))
print ("Pascal's triangle :")
for j in range(0,10):
for i in range(0,j+1):
            print(str(binomial(j, i)), end=' ')
        print("")
# m_points = [(-1,-1,0.0),(0,3,0.0)]
    # bz = Bezier(m_points)
t = np.arange(0.0, 1.0, 0.01)
b12,b22,b32 = bezier_quadratic_terms(t)
plt.plot(t, b12)
plt.plot(t, b22)
plt.plot(t, b32)
plt.xlabel('t values')
plt.ylabel('')
plt.title('Bezier basis functions terms : quadratic')
plt.grid(True)
plt.show()
t = np.arange(0.0, 1.0, 0.01)
b13,b23,b33,b43 = bezier_cubic_terms(t)
plt.plot(t, b13)
plt.plot(t, b23)
plt.plot(t, b33)
plt.plot(t, b43)
plt.title('Bezier basis functions terms : cubic')
plt.show()
t = np.arange(0.0, 1.0, 0.01)
m_terms = list()
m_terms = bezier_terms(15,t)
for term in m_terms:
plt.plot(t, term)
plt.title('Bezier basis functions terms : 15')
plt.show()
pt1 = (120,160)
pt2 = (35,200)
pt3 = (220,260)
pt4 = (220,40)
x = (120,35,220,220)
y = (160,200,260,40)
t = np.arange(0.0, 1.0, 0.01)
m_dim = len(x)-1
m_Xs = bezier_curve(m_dim, t, x)
m_Xs = bezier_cubic_curve(t, x)
plt.plot(t, m_Xs)
plt.title('Bezier curve : X')
plt.show()
m_dim = len(y)-1
m_Ys = bezier_curve(m_dim, t, y)
m_Ys = bezier_cubic_curve(t, y)
plt.plot(t, m_Ys)
plt.title('Bezier curve : Y')
plt.show()
plt.plot(m_Xs, m_Ys)
plt.plot(x, y, 'o-')
plt.show()
t = np.arange(-0.2, 1.1, 0.01)
m_Xs = bezier_curve(m_dim, t, x)
m_Ys = bezier_curve(m_dim, t, y)
plt.plot(m_Xs, m_Ys)
plt.plot(x, y, 'o-')
plt.show()
#==============================================================================
# import matplotlib as mpl
# from mpl_toolkits.mplot3d import Axes3D
# import numpy as np
# import matplotlib.pyplot as plt
#
# mpl.rcParams['legend.fontsize'] = 10
#
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
# z = np.linspace(-2, 2, 100)
# r = z**2 + 1
# x = r * np.sin(theta)
# y = r * np.cos(theta)
# ax.plot(x, y, z, label='parametric curve')
# ax.legend()
#
# plt.show()
#==============================================================================
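A small standalone check (not part of the original macro) of the partition-of-unity property behind the term helpers above; it uses only numpy, and the control points are the same ones used in the __main__ block.
import numpy as np

# Bernstein terms sum to 1 for any t, so a Bezier point is a convex
# combination of its control points.
t = np.linspace(0.0, 1.0, 5)
quad_sum = (1 - t)**2 + 2 * (1 - t) * t + t**2
cubic_sum = (1 - t)**3 + 3 * (1 - t)**2 * t + 3 * (1 - t) * t**2 + t**3
assert np.allclose(quad_sum, 1.0) and np.allclose(cubic_sum, 1.0)

# At t = 0.5 the cubic weights are (0.125, 0.375, 0.375, 0.125):
x = (120, 35, 220, 220)
x_mid = 0.125 * x[0] + 0.375 * x[1] + 0.375 * x[2] + 0.125 * x[3]
assert np.isclose(x_mid, 138.125)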
| 27.625387
| 87
| 0.420262
| 350
| 0.039224
| 0
| 0
| 0
| 0
| 0
| 0
| 4,557
| 0.510703
|
834811bba2b38dd1f90f60e0f432be19f153a845
| 1,428
|
py
|
Python
|
LeetCode/z_arrange.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
LeetCode/z_arrange.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
LeetCode/z_arrange.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2018/11/1 19:03
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description :
将字符串 "PAYPALISHIRING" 以Z字形排列成给定的行数:
P A H N
A P L S I I G
Y I R
Afterwards, read the characters row by row from left to right: "PAHNAPLSIIGYIR"
Implement a function that performs this transformation of a string for a given number of rows:
string convert(string s, int numRows);
Example 1:
Input: s = "PAYPALISHIRING", numRows = 3
Output: "PAHNAPLSIIGYIR"
Example 2:
Input: s = "PAYPALISHIRING", numRows = 4
Output: "PINALSIGYAHRPI"
Explanation:
P I N
A L S I G
Y A H R
P I
-------------------------------------------------
"""
import time
__author__ = 'Max_Pengjb'
start = time.time()
# the code block starts below
def z_arrange(s, numrows):
if numrows < 2:
return s
res = [[] for _ in range(numrows)]
circle = numrows * 2 - 2
length = len(s)
for i in range(length):
t = i % circle if i % circle < numrows else circle - i % circle
# if t < numrows:
# res[t].append(s[i])
# else:
# t = circle - t
res[t].append(s[i])
print(res)
return ''.join(map(lambda x: ''.join(x), res))
ss = "PAYPALISHIRING"
print(z_arrange(ss, 4))
# the code block above ends here
end = time.time()
print('Running time: %s Seconds' % (end - start))
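An illustrative check (not part of the original exercise file) of z_arrange against the two examples translated in the docstring; note that the function also prints its intermediate row lists.
# Hedged sanity check, assuming z_arrange defined above is in scope.
assert z_arrange("PAYPALISHIRING", 3) == "PAHNAPLSIIGYIR"
assert z_arrange("PAYPALISHIRING", 4) == "PINALSIGYAHRPI"
assert z_arrange("AB", 1) == "AB"  # numrows < 2 returns the input unchanged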
| 20.112676
| 71
| 0.497199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,080
| 0.680958
|
8348305c9172017dde4aba4393d6db7827e9ab1f
| 970
|
py
|
Python
|
old/INSTADOWNLOAD.py
|
Nibba2018/INSTADOWNLOAD
|
4f4b831df14d2cfdcb2cf91e3710576432bc4845
|
[
"MIT"
] | 1
|
2019-08-12T06:24:17.000Z
|
2019-08-12T06:24:17.000Z
|
old/INSTADOWNLOAD.py
|
Nibba2018/INSTADOWNLOAD
|
4f4b831df14d2cfdcb2cf91e3710576432bc4845
|
[
"MIT"
] | 2
|
2019-08-12T05:29:57.000Z
|
2019-08-12T10:18:24.000Z
|
old/INSTADOWNLOAD.py
|
tre3x/INSTADOWNLOAD
|
c8bd6f12a0abfcbac4fdeeb2994ba75067ca592d
|
[
"MIT"
] | 1
|
2019-08-12T10:02:14.000Z
|
2019-08-12T10:02:14.000Z
|
import sys
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication,QDialog
from PyQt5.uic import loadUi
import requests
import urllib.request
from selenium import webdriver
class INSTADOWNLOAD(QDialog):
def __init__(self):
super(INSTADOWNLOAD,self).__init__()
loadUi('instadownload.ui',self)
self.setWindowTitle('INSTADOWNLOAD')
self.pushButton.clicked.connect(self.on_pushButton_clicked)
@pyqtSlot()
def on_pushButton_clicked(self):
driver = webdriver.Firefox()
driver.get(self.lineEdit.text())
get_div = driver.find_element_by_class_name('FFVAD')
photolink = get_div.get_attribute('src')
print(photolink)
urllib.request.urlretrieve(photolink, 'INSTAPHOTO.jpg')
        self.label_3.setText('Your download link is: ' + photolink)
app=QApplication(sys.argv)
widget=INSTADOWNLOAD()
widget.show()
sys.exit(app.exec_())
| 30.3125
| 68
| 0.694845
| 661
| 0.681443
| 0
| 0
| 399
| 0.41134
| 0
| 0
| 85
| 0.087629
|
83499ec97a8ebaba9f0df370c50f48f1b192aa91
| 719
|
py
|
Python
|
ved/migrations/0010_auto_20180303_1353.py
|
mjovanc/tidlundsved
|
da55a07d02f04bc636299fe4d236aa19188a359b
|
[
"MIT"
] | 1
|
2019-04-19T20:39:39.000Z
|
2019-04-19T20:39:39.000Z
|
ved/migrations/0010_auto_20180303_1353.py
|
mjovanc/tidlundsved
|
da55a07d02f04bc636299fe4d236aa19188a359b
|
[
"MIT"
] | 3
|
2020-01-15T22:21:14.000Z
|
2020-01-15T22:21:15.000Z
|
ved/migrations/0010_auto_20180303_1353.py
|
mjovanc/tidlundsved
|
da55a07d02f04bc636299fe4d236aa19188a359b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.2 on 2018-03-03 13:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ved', '0009_auto_20180302_1839'),
]
operations = [
migrations.AlterField(
model_name='order',
name='firewood_choice',
field=models.CharField(max_length=50, verbose_name='Val'),
),
migrations.AlterField(
model_name='order',
name='order_status',
field=models.CharField(choices=[('Ej påbörjad', 'Ej påbörjad'), ('Påbörjad', 'Påbörjad'), ('Levererad', 'Levererad')], default='Ej påbörjad', max_length=30, verbose_name='Status på order'),
),
]
| 29.958333
| 201
| 0.606398
| 637
| 0.872603
| 0
| 0
| 0
| 0
| 0
| 0
| 236
| 0.323288
|
834ad9cbfb170166d5394332db47b29bcb81eb73
| 163
|
py
|
Python
|
examples/plot_kde_2d.py
|
awesome-archive/arviz
|
e11432bc065d0b2280f27c901beb4ac9fc5c5dba
|
[
"Apache-2.0"
] | 2
|
2018-12-01T03:41:54.000Z
|
2018-12-01T22:04:59.000Z
|
examples/plot_kde_2d.py
|
awesome-archive/arviz
|
e11432bc065d0b2280f27c901beb4ac9fc5c5dba
|
[
"Apache-2.0"
] | null | null | null |
examples/plot_kde_2d.py
|
awesome-archive/arviz
|
e11432bc065d0b2280f27c901beb4ac9fc5c5dba
|
[
"Apache-2.0"
] | 1
|
2020-10-16T12:57:48.000Z
|
2020-10-16T12:57:48.000Z
|
"""
2d KDE
======
_thumb: .1, .8
"""
import arviz as az
import numpy as np
az.style.use('arviz-darkgrid')
az.plot_kde(np.random.rand(100), np.random.rand(100))
| 12.538462
| 53
| 0.650307
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.325153
|
834c8fddbb55c2d6f805fb0cea2ee12883df1ec1
| 331
|
py
|
Python
|
debug/read_depth_from_exr_file.py
|
ccj5351/hmr_rgbd
|
d1dcf81d72c11e1f502f2c494cd86425f384d9cc
|
[
"MIT"
] | null | null | null |
debug/read_depth_from_exr_file.py
|
ccj5351/hmr_rgbd
|
d1dcf81d72c11e1f502f2c494cd86425f384d9cc
|
[
"MIT"
] | 1
|
2020-12-09T07:29:00.000Z
|
2020-12-09T07:29:00.000Z
|
debug/read_depth_from_exr_file.py
|
ccj5351/hmr_rgbd
|
d1dcf81d72c11e1f502f2c494cd86425f384d9cc
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python3
# -*-coding:utf-8-*-
# @file: read_depth_from_exr_file.py
# @brief:
# @author: Changjiang Cai, ccai1@stevens.edu, caicj5351@gmail.com
# @version: 0.0.1
# @creation date: 10-06-2019
# @last modified: Mon 10 Jun 2019 06:18:44 PM EDT
import cv2
dep = cv2.imread("0.exr",-1) # "-1" means any depth or channel;
| 27.583333
| 65
| 0.682779
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 288
| 0.870091
|
834cfa268aa66defcd3a6263fa4f402f1287f7c1
| 2,732
|
py
|
Python
|
azplugins/test-py/test_analyze_group_velocity.py
|
mphoward/azplugins
|
a4e3f92090dea78645b4e84cda96709cc9372ffa
|
[
"BSD-3-Clause"
] | 10
|
2019-02-27T16:13:33.000Z
|
2020-02-21T01:07:08.000Z
|
azplugins/test-py/test_analyze_group_velocity.py
|
mphoward/azplugins
|
a4e3f92090dea78645b4e84cda96709cc9372ffa
|
[
"BSD-3-Clause"
] | 18
|
2019-02-26T17:22:15.000Z
|
2020-04-22T20:20:43.000Z
|
azplugins/test-py/test_analyze_group_velocity.py
|
mphoward/azplugins
|
a4e3f92090dea78645b4e84cda96709cc9372ffa
|
[
"BSD-3-Clause"
] | 3
|
2019-06-18T18:15:42.000Z
|
2020-02-21T01:07:16.000Z
|
# Copyright (c) 2018-2020, Michael P. Howard
# Copyright (c) 2021-2022, Auburn University
# This file is part of the azplugins project, released under the Modified BSD License.
import hoomd
from hoomd import md
hoomd.context.initialize()
try:
from hoomd import azplugins
except ImportError:
import azplugins
import unittest
import numpy as np
class analyze_group_velocity_tests(unittest.TestCase):
def setUp(self):
snap = hoomd.data.make_snapshot(N=2, box=hoomd.data.boxdim(Lx=10,Ly=10,Lz=10), particle_types=['A','B'])
if hoomd.comm.get_rank() == 0:
snap.particles.position[:] = [[0,0,-5],[0,0,5]]
snap.particles.velocity[:] = [[1,-2,3],[-2,4,-6]]
snap.particles.typeid[:] = [0,1]
snap.particles.mass[:] = [1,2]
self.s = hoomd.init.read_snapshot(snap)
def test_compute_all(self):
all_ = hoomd.group.all()
azplugins.analyze.group_velocity(group=all_)
log = hoomd.analyze.log(filename=None, quantities=['vx_all','vy_all','vz_all'], period=1)
hoomd.run(1)
v = [log.query('vx_all'), log.query('vy_all'), log.query('vz_all')]
np.testing.assert_allclose(v, [-1,2,-3])
def test_compute_subset(self):
typeA = hoomd.group.type('A',name='A')
azplugins.analyze.group_velocity(group=typeA)
log = hoomd.analyze.log(filename=None, quantities=['vx_A','vy_A','vz_A'], period=1)
hoomd.run(1)
v = [log.query('vx_A'), log.query('vy_A'), log.query('vz_A')]
np.testing.assert_allclose(v, [1,-2,3])
def test_compute_empty_group(self):
notatype = hoomd.group.type('C')
azplugins.analyze.group_velocity(group=notatype)
log = hoomd.analyze.log(filename=None, quantities=['vx_C','vy_C','vz_C'], period=1)
hoomd.run(1)
v = [log.query('vx_C'), log.query('vy_C'), log.query('vz_C')]
np.testing.assert_allclose(v, [0,0,0])
def test_compute_suffix(self):
typeB = hoomd.group.type('B',name='B')
azplugins.analyze.group_velocity(group=typeB,suffix='_foo')
log = hoomd.analyze.log(filename=None, quantities=['vx_foo','vy_foo','vz_foo'], period=1)
hoomd.run(1)
v = [log.query('vx_foo'), log.query('vy_foo'), log.query('vz_foo')]
np.testing.assert_allclose(v, [-2,4,-6])
def test_unique_suffix(self):
all_ = hoomd.group.all()
azplugins.analyze.group_velocity(group=all_,suffix='_1')
with self.assertRaises(ValueError):
azplugins.analyze.group_velocity(group=all_,suffix='_1')
def tearDown(self):
del self.s
hoomd.context.initialize()
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
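The expected vectors in these tests are mass-weighted averages of the snapshot velocities; a standalone numpy check of the all-group case, independent of HOOMD:
import numpy as np

# Two particles with masses 1 and 2 and velocities (1,-2,3), (-2,4,-6),
# as set up in the snapshot above.
m = np.array([1.0, 2.0])
v = np.array([[1.0, -2.0, 3.0], [-2.0, 4.0, -6.0]])
v_com = (m[:, None] * v).sum(axis=0) / m.sum()
np.testing.assert_allclose(v_com, [-1.0, 2.0, -3.0])  # value expected by test_compute_all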
| 39.028571
| 112
| 0.63287
| 2,306
| 0.84407
| 0
| 0
| 0
| 0
| 0
| 0
| 400
| 0.146413
|
83516db70951f52393c19b6cbc942b802e4f1c1e
| 1,310
|
py
|
Python
|
tests/test_settings.py
|
jpadilla/apistar
|
3e0faafd0d6a7c59e2b7a1e3017e15d005c5cc3a
|
[
"BSD-3-Clause"
] | 1
|
2021-07-07T13:14:20.000Z
|
2021-07-07T13:14:20.000Z
|
tests/test_settings.py
|
jpadilla/apistar
|
3e0faafd0d6a7c59e2b7a1e3017e15d005c5cc3a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_settings.py
|
jpadilla/apistar
|
3e0faafd0d6a7c59e2b7a1e3017e15d005c5cc3a
|
[
"BSD-3-Clause"
] | null | null | null |
from apistar import App, Route, TestClient
from apistar.settings import Setting, Settings
def get_settings(settings: Settings):
return settings
def get_setting(ABC: Setting):
return {'ABC': ABC}
routes = [
Route('/settings/', 'GET', get_settings),
Route('/setting/', 'GET', get_setting),
]
settings = {
'ABC': 123,
'XYZ': 456
}
app = App(routes=routes, settings=settings)
client = TestClient(app)
def test_settings():
response = client.get('/settings/')
assert response.status_code == 200
assert response.json() == {
'ABC': 123,
'XYZ': 456
}
def test_setting():
response = client.get('/setting/')
assert response.status_code == 200
assert response.json() == {
'ABC': 123,
}
def test_use_setting_as_argument():
abc = Setting(789)
assert get_setting(abc) == {'ABC': 789}
def test_settings_lookup():
settings = Settings(
ABC=123,
DEF={'XYZ': 456}
)
assert settings.get('ABC') == 123
assert settings.get(['DEF']) == {'XYZ': 456}
assert settings.get(['DEF', 'XYZ']) == 456
assert settings.get('missing') is None
assert settings.get(['ABC', 'missing']) is None
assert settings.get(['DEF', 'missing']) is None
assert settings.get(['DEF', 'missing'], '') == ''
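The list-style lookups exercised above behave like a nested dict traversal with a default; a rough, framework-free equivalent for illustration (this is not apistar's actual implementation):
def nested_get(mapping, key, default=None):
    # Accept either a single key or a list/tuple of keys describing a path.
    path = key if isinstance(key, (list, tuple)) else [key]
    current = mapping
    for part in path:
        if not isinstance(current, dict) or part not in current:
            return default
        current = current[part]
    return current

data = {'ABC': 123, 'DEF': {'XYZ': 456}}
assert nested_get(data, ['DEF', 'XYZ']) == 456
assert nested_get(data, ['DEF', 'missing'], '') == ''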
| 21.129032
| 53
| 0.61145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 176
| 0.134351
|
8351e3e4666f0e2916bbcd985c19442107e57895
| 1,825
|
py
|
Python
|
api/views/reminder_views.py
|
OlegKlimenko/Plamber
|
a3536b864d05abb6b6bba0f2971ab4b7b9c60db6
|
[
"Apache-2.0"
] | 13
|
2017-03-30T12:19:35.000Z
|
2019-12-09T03:15:22.000Z
|
api/views/reminder_views.py
|
OlegKlimenko/Plamber
|
a3536b864d05abb6b6bba0f2971ab4b7b9c60db6
|
[
"Apache-2.0"
] | 213
|
2017-02-18T11:48:40.000Z
|
2022-03-11T23:20:36.000Z
|
api/views/reminder_views.py
|
OlegKlimenko/Plamber
|
a3536b864d05abb6b6bba0f2971ab4b7b9c60db6
|
[
"Apache-2.0"
] | 3
|
2018-06-17T11:54:49.000Z
|
2019-10-22T16:19:28.000Z
|
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from ..serializers.request_serializers import GetReminderRequest, UpdateReminderRequest
from ..utils import invalid_data_response, validate_api_secret_key
from app.models import TheUser
# ----------------------------------------------------------------------------------------------------------------------
@api_view(['POST'])
def get_reminders(request):
"""
Returns the reminders status.
"""
validate_api_secret_key(request.data.get('app_key'))
request_serializer = GetReminderRequest(data=request.data)
if request_serializer.is_valid():
the_user = get_object_or_404(TheUser, auth_token=request.data.get('user_token'))
return Response({'detail': 'successful',
'data': the_user.get_api_reminders()},
status=status.HTTP_200_OK)
else:
return invalid_data_response(request_serializer)
# ----------------------------------------------------------------------------------------------------------------------
@api_view(['POST'])
def update_reminder(request):
"""
Changes the status of the reminder.
"""
validate_api_secret_key(request.data.get('app_key'))
request_serializer = UpdateReminderRequest(data=request.data)
if request_serializer.is_valid():
the_user = get_object_or_404(TheUser, auth_token=request.data.get('user_token'))
the_user.update_reminder(request.data.get('field'), request.data.get('value'))
return Response({'detail': 'successful'},
status=status.HTTP_200_OK)
else:
return invalid_data_response(request_serializer)
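A hedged client-side sketch of the payload these views expect; the URL prefix, token values, and the reminder field name are placeholders, only the app_key/user_token/field/value keys come from the view code above.
import requests

payload = {
    "app_key": "<api-secret-key>",      # checked by validate_api_secret_key()
    "user_token": "<user-auth-token>",  # used to look up TheUser
    "field": "some_reminder_field",     # hypothetical reminder field name
    "value": True,
}
# The route itself is an assumption; only the view function is defined in this module.
resp = requests.post("https://example.com/api/update_reminder/", data=payload)
print(resp.status_code, resp.json())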
| 35.096154
| 120
| 0.61863
| 0
| 0
| 0
| 0
| 1,191
| 0.652603
| 0
| 0
| 473
| 0.259178
|
8352e7131c0b9ae8dac198efa815d926f7e58c34
| 2,815
|
py
|
Python
|
anaf/documents/migrations/0001_initial.py
|
tovmeod/anaf
|
80e4a00532ce6f4ce76c5ffc858ff90c759a9879
|
[
"BSD-3-Clause"
] | 2
|
2016-03-15T13:17:26.000Z
|
2017-03-22T15:39:01.000Z
|
anaf/documents/migrations/0001_initial.py
|
tovmeod/anaf
|
80e4a00532ce6f4ce76c5ffc858ff90c759a9879
|
[
"BSD-3-Clause"
] | 4
|
2021-03-19T21:42:58.000Z
|
2022-03-11T23:13:07.000Z
|
anaf/documents/migrations/0001_initial.py
|
tovmeod/anaf
|
80e4a00532ce6f4ce76c5ffc858ff90c759a9879
|
[
"BSD-3-Clause"
] | 4
|
2016-08-31T16:55:41.000Z
|
2020-04-22T18:48:54.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import anaf.documents.models
import anaf.documents.files
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('title', models.CharField(max_length=255)),
('body', models.TextField(null=True, blank=True)),
],
options={
'ordering': ['-last_updated'],
},
bases=('core.object',),
),
migrations.CreateModel(
name='File',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('name', models.CharField(max_length=255)),
('content', models.FileField(storage=anaf.documents.files.FileStorage(), upload_to=anaf.documents.models.generate_filename)),
],
options={
'ordering': ['-last_updated'],
},
bases=('core.object',),
),
migrations.CreateModel(
name='Folder',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('name', models.CharField(max_length=255)),
('parent', models.ForeignKey(related_name='child_set', blank=True, to='documents.Folder', null=True)),
],
options={
},
bases=('core.object',),
),
migrations.CreateModel(
name='WebLink',
fields=[
('object_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Object')),
('title', models.CharField(max_length=255)),
('url', models.CharField(max_length=255)),
('folder', models.ForeignKey(to='documents.Folder')),
],
options={
'ordering': ['-last_updated'],
},
bases=('core.object',),
),
migrations.AddField(
model_name='file',
name='folder',
field=models.ForeignKey(to='documents.Folder'),
preserve_default=True,
),
migrations.AddField(
model_name='document',
name='folder',
field=models.ForeignKey(to='documents.Folder'),
preserve_default=True,
),
]
| 36.558442
| 143
| 0.54032
| 2,649
| 0.94103
| 0
| 0
| 0
| 0
| 0
| 0
| 480
| 0.170515
|
83538fea40955920ad2d4b405a77ec50fa91b2b3
| 8,188
|
py
|
Python
|
mbpo/models/utils.py
|
anyboby/ConstrainedMBPO
|
036f4ffefc464e676a287c35c92cc5c0b8925fcf
|
[
"MIT"
] | 5
|
2020-02-12T17:09:09.000Z
|
2021-09-29T16:06:40.000Z
|
mbpo/models/utils.py
|
anyboby/ConstrainedMBPO
|
036f4ffefc464e676a287c35c92cc5c0b8925fcf
|
[
"MIT"
] | 10
|
2020-08-31T02:50:02.000Z
|
2022-02-09T23:36:43.000Z
|
mbpo/models/utils.py
|
anyboby/ConstrainedMBPO
|
036f4ffefc464e676a287c35c92cc5c0b8925fcf
|
[
"MIT"
] | 2
|
2022-03-15T01:45:26.000Z
|
2022-03-15T06:46:47.000Z
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
EPS = 1e-10
def get_required_argument(dotmap, key, message, default=None):
val = dotmap.get(key, default)
if val is default:
raise ValueError(message)
return val
def gaussian_kl_np(mu0, log_std0, mu1, log_std1):
"""interprets each entry in mu_i and log_std_i as independent,
preserves shape
output clipped to {0, 1e10}
"""
var0, var1 = np.exp(2 * log_std0), np.exp(2 * log_std1)
pre_sum = 0.5*(((mu1- mu0)**2 + var0)/(var1+EPS) - 1) + log_std1 - log_std0
all_kls = pre_sum
#all_kls = np.mean(all_kls)
all_kls = np.clip(all_kls, 0, 1/EPS) ### for stability
return all_kls
def gaussian_jsd_np(mu0, log_std0, mu1, log_std1):
pass
def average_dkl(mu, std):
"""
    Calculates the average Kullback-Leibler divergence of multiple univariate Gaussian distributions.
    K(P1, ..., Pk) = 1/(k(k-1)) * Σ_{i,j=1..k} DKL(Pi || Pj)
(Andrea Sgarro, Informational divergence and the dissimilarity of probability distributions.)
expects the distributions along axis 0, and samples along axis 1.
Output is reduced by axis 0
Args:
mu: array-like means
std: array-like stds
"""
## clip log
log_std = np.log(std)
log_std = np.clip(log_std, -100, 1e8)
assert len(mu.shape)>=2 and len(log_std.shape)>=2
num_models = len(mu)
d_kl = None
for i in range(num_models):
for j in range(num_models):
if d_kl is None:
d_kl = gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
else: d_kl+= gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
d_kl = d_kl/(num_models*(num_models-1)+EPS)
return d_kl
def median_dkl(mu, std):
"""
    Calculates the median of the pairwise Kullback-Leibler divergences of multiple univariate Gaussian distributions.
    median over i != j of DKL(Pi || Pj)
(Andrea Sgarro, Informational divergence and the dissimilarity of probability distributions.)
expects the distributions along axis 0, and samples along axis 1.
Output is reduced by axis 0
Args:
mu: array-like means
std: array-like stds
"""
## clip log
log_std = np.log(std)
log_std = np.clip(log_std, -100, 1e8)
assert len(mu.shape)>=2 and len(log_std.shape)>=2
num_models = len(mu)
d_kl = np.zeros(shape=(num_models*(num_models-1),) + mu.shape[1:])
n = 0
for i in range(num_models):
for j in range(num_models):
if i != j:
d_kl[n] = gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
n += 1
d_kl_med = np.median(d_kl, axis=0)
return d_kl_med
class TensorStandardScaler:
"""Helper class for automatically normalizing inputs into the network.
"""
def __init__(self, x_dim, sc_factor=1, name='Scaler'):
"""Initializes a scaler.
Arguments:
x_dim (int): The dimensionality of the inputs into the scaler.
Returns: None.
"""
self.fitted = False
with tf.variable_scope(name):
self.count = tf.get_variable(
name=name+'_count', shape=(), initializer=tf.constant_initializer(0),
trainable=False
)
self.mu = tf.get_variable(
name=name+'_mu', shape=[1, x_dim], initializer=tf.constant_initializer(0.0),
trainable=False
)
self.var = tf.get_variable(
name=name+'_std', shape=[1, x_dim], initializer=tf.constant_initializer(1.0),
trainable=False
)
self.cached_count, self.cached_mu, self.cached_var = 0, np.zeros([1, x_dim]), np.ones([1, x_dim])
self.sc_factor = sc_factor
def fit(self, data):
"""Runs two ops, one for assigning the mean of the data to the internal mean, and
another for assigning the standard deviation of the data to the internal standard deviation.
This function must be called within a 'with <session>.as_default()' block.
Arguments:
data (np.ndarray): A numpy array containing the input
Returns: None.
"""
batch_count = data.shape[0]
batch_mu = np.mean(data, axis=0, keepdims=True)
batch_var = np.var(data, axis=0, keepdims=True)
new_mean, new_var, new_count = self.running_mean_var_from_batch(batch_mu, batch_var, batch_count)
#sigma[sigma < 1e-8] = 1.0
self.mu.load(new_mean)
self.var.load(new_var)
self.count.load(new_count)
self.fitted = True
self.cache()
def transform(self, data):
"""Transforms the input matrix data using the parameters of this scaler.
can be adjusted to scale with a factor, to control sensitivity to ood data:
d = (d-mu)/sigma = d + (d-mu)/sigma - d = d + (d(1-sigma)-mu)/sigma
and the version with scaling factor thus becomes
d = d + sc_factor*(d(1-sigma)-mu)/sigma
Arguments:
data (np.array): A numpy array containing the points to be transformed.
sc_factor: Factor to what degree the original dataset is transformed
Returns: (np.array) The transformed dataset.
"""
#scaled_transform = data + self.sc_factor * (data* (1-self.sigma) - self.mu) / self.sigma
# scaling = 1+self.sc_factor*(self.sigma-1)
# scaling = tf.clip_by_value(scaling, 1.0e-8, 1.0e8)
scaled_transform = (data-self.mu)/(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2))
return scaled_transform
def inverse_transform(self, data):
"""Undoes the transformation performed by this scaler.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return (tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) * data + self.mu
def inverse_transform_var(self, data):
"""Undoes the transformation performed by this scaler for variances.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return tf.square(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) * data
def inverse_transform_logvar(self, data):
"""Undoes the transformation performed by this scaler for variances.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return 2*tf.log(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) + data
def get_vars(self):
"""Returns a list of variables managed by this object.
Returns: (list<tf.Variable>) The list of variables.
"""
return [self.mu, self.var]
def get_mu(self):
return self.mu
def get_var(self):
return self.var
def cache(self):
"""Caches current values of this scaler.
Returns: None.
"""
self.cached_mu = self.mu.eval()
self.cached_var = self.var.eval()
self.cached_count = self.count.eval()
def load_cache(self):
"""Loads values from the cache
Returns: None.
"""
self.mu.load(self.cached_mu)
self.var.load(self.cached_var)
self.count.load(self.cached_count)
def decay_count(self, decay_rate=0.99):
self.count.load(self.cached_count*decay_rate)
def running_mean_var_from_batch(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.cached_mu
tot_count = self.cached_count + batch_count
new_mean = self.cached_mu + delta * batch_count / tot_count
m_a = self.cached_var * self.cached_count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * self.cached_count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
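The batch update at the end of the scaler is the standard parallel mean/variance combination; a standalone numpy sketch verifying that the same algebra reproduces the moments of the concatenated data:
import numpy as np

rng = np.random.default_rng(0)
old = rng.normal(size=(50, 3))
new = rng.normal(loc=1.0, size=(30, 3))

mu_a, var_a, n_a = old.mean(0), old.var(0), len(old)
mu_b, var_b, n_b = new.mean(0), new.var(0), len(new)

# Same algebra as running_mean_var_from_batch above.
delta = mu_b - mu_a
n = n_a + n_b
mu = mu_a + delta * n_b / n
m2 = var_a * n_a + var_b * n_b + np.square(delta) * n_a * n_b / n
var = m2 / n

both = np.concatenate([old, new])
np.testing.assert_allclose(mu, both.mean(0))
np.testing.assert_allclose(var, both.var(0))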
| 34.116667
| 105
| 0.624695
| 5,366
| 0.65439
| 0
| 0
| 0
| 0
| 0
| 0
| 3,508
| 0.427805
|
8353ff3c9e015b9857f33992c111e498b4c778a1
| 6,391
|
py
|
Python
|
ext/generate-models.py
|
gerhardtdatsomor/pytest-nunit
|
8e27275337af3216a3bddc16e793ee9637902361
|
[
"MIT"
] | null | null | null |
ext/generate-models.py
|
gerhardtdatsomor/pytest-nunit
|
8e27275337af3216a3bddc16e793ee9637902361
|
[
"MIT"
] | null | null | null |
ext/generate-models.py
|
gerhardtdatsomor/pytest-nunit
|
8e27275337af3216a3bddc16e793ee9637902361
|
[
"MIT"
] | null | null | null |
"""
A script for generating attrs-models from an XSD.
Built especially for this model, but feel free to reuse it elsewhere.
Licensed under MIT.
Written by Anthony Shaw.
"""
import logging
import xmlschema
import xmlschema.qnames
try:
import black
import click
except ImportError:
print("Install black and click before use.")
logging.basicConfig()
log = logging.getLogger("__name__")
log.setLevel(logging.DEBUG)
# Python reserved keywords. TODO : Sure this is in stdlib somewhere? maybe tokens
KEYWORDS = ["id", "type", "class", "if", "else", "and", "for", "not", "or", "filter"]
# Map XML atomic builtin types to Python std types
XS_ATOMIC_MAP = {
xmlschema.qnames.XSD_STRING: "str",
xmlschema.qnames.XSD_INTEGER: "int",
xmlschema.qnames.XSD_INT: "int",
xmlschema.qnames.XSD_BOOLEAN: "bool",
xmlschema.qnames.XSD_DECIMAL: "float",
}
# Make an Attrs attr.ib from an Element.
def make_attrib(attrib, type_, optional=False):
"""
Make attrs attribute from XmlAttribute
:return: `str`
"""
args = ["metadata={\"name\": '%s', \"type\": '%s', \"optional\": %s}" % (attrib.name, type_, optional)]
# Put type hints on XSD atomic types
if isinstance(attrib.type, xmlschema.validators.XsdAtomicBuiltin):
_atomic_type = XS_ATOMIC_MAP.get(attrib.type.name, "object")
args.append("type={0}".format(_atomic_type))
if hasattr(attrib, "use") and attrib.use == "required":
args.append(
"validator=attr.validators.instance_of({0})".format(_atomic_type)
)
elif isinstance(attrib.type, xmlschema.validators.XsdAtomicRestriction):
if hasattr(attrib, "use") and attrib.use == "required":
# If type is an enumeration facet
if (
attrib.type.facets
and xmlschema.qnames.XSD_ENUMERATION in attrib.type.facets
and attrib.type.name
):
args.append(
"validator=attr.validators.in_({0})".format(attrib.type.name)
)
# If simple restriction type, use the base type instead (this isn't java)
elif attrib.type.base_type.name in (XS_ATOMIC_MAP.keys()):
args.append(
"validator=attr.validators.instance_of({0})".format(
XS_ATOMIC_MAP.get(attrib.type.base_type.name, "object")
)
)
else:
args.append(
"validator=attr.validators.instance_of({0})".format(
attrib.type.name
)
)
elif isinstance(attrib.type, xmlschema.validators.XsdComplexType):
args.append("type='{0}'".format(attrib.type.name))
# args.append('validator=attr.validators.instance_of({0})'.format(attrib.type.name))
if hasattr(attrib, "use") and attrib.use == "optional":
optional = True
if optional:
args.append("default=attr.NOTHING")
name = attrib.name.replace("-", "_")
if name in KEYWORDS:
name = name + "_"
return "{0} = attr.ib({1})".format(name, ", ".join(args))
@click.command()
@click.argument("xsd_path", type=click.Path(exists=True))
@click.argument("output_path", type=click.Path(exists=False))
def main(xsd_path, output_path):
xsd = xmlschema.XMLSchema(xsd_path)
out = "import attr\n" "import enum\n\n\n"
for name, type_ in xsd.types.items():
has_parts = False
# Write basic atomic restriction types
if isinstance(type_, xmlschema.validators.XsdAtomicRestriction):
is_enum_type = (
type_.facets and xmlschema.qnames.XSD_ENUMERATION in type_.facets
)
if is_enum_type:
out += "class {0}(enum.Enum):\n".format(name)
enums = type_.facets[xmlschema.qnames.XSD_ENUMERATION].enumeration
for e in enums:
out += " {0} = '{0}'\n".format(e)
has_parts = True
else:
out += "class {0}({1}):\n".format(
name, XS_ATOMIC_MAP.get(type_.base_type.name, "object")
)
# Write complex types as new attrs classes
elif isinstance(type_, xmlschema.validators.XsdComplexType):
log.info("Name %s" % name)
out += "@attr.s\nclass {0}(object):\n".format(name)
attribs = {}
# Write element groups and sequences
for group in type_.iter_components(xmlschema.validators.XsdGroup):
log.info("Suite %s : %s" % (name, group))
if group.model == "sequence":
for elem in group.iter_elements():
log.info(elem)
attribs[elem.name] = " {0}\n".format(make_attrib(elem, "element", optional=elem.min_occurs == 0))
elif group.model == "choice":
for elem in group.iter_elements():
log.info(elem)
attribs[elem.name] = " {0}\n".format(make_attrib(elem, "element", optional=True))
else:
for elem in group.iter_elements():
log.info(elem)
attribs[elem.name] = " {0}\n".format(
make_attrib(elem, "element", optional=elem.min_occurs == 0)
)
# Write element attributes
for attrib in type_.attributes.values():
attribs[attrib.name] = " {0}\n".format(make_attrib(attrib, "attrib", attrib.use == "optional"))
for attrib in attribs.values():
out += attrib
has_parts = (len(attribs) > 0)
if not has_parts:
out += " pass\n" # avoid having empty class
out += "\n\n"
out = black.format_str(
out,
mode=black.FileMode(
target_versions={
black.TargetVersion.PY27,
black.TargetVersion.PY35,
black.TargetVersion.PY36,
black.TargetVersion.PY37,
},
line_length=101,
string_normalization=False,
is_pyi=False,
),
)
with open(output_path, "w") as out_f:
out_f.write(out)
if __name__ == "__main__":
main()
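For orientation, a hypothetical example of the attrs classes this script emits; the class and attribute names are invented, only the attr.ib(...) pattern mirrors make_attrib() above.
import attr


@attr.s
class TestCaseType(object):
    # Required atomic attribute -> type hint plus instance_of validator.
    name = attr.ib(
        metadata={"name": "name", "type": "attrib", "optional": False},
        type=str,
        validator=attr.validators.instance_of(str),
    )
    # Optional element -> make_attrib appends default=attr.NOTHING.
    failure = attr.ib(
        metadata={"name": "failure", "type": "element", "optional": True},
        default=attr.NOTHING,
    )


tc = TestCaseType(name="test_addition", failure=None)
print(attr.asdict(tc))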
| 34.923497
| 124
| 0.55985
| 0
| 0
| 0
| 0
| 3,175
| 0.496792
| 0
| 0
| 1,627
| 0.254577
|
8354f3e967b4c8a5432e55702c43dd8c0b61efde
| 415
|
py
|
Python
|
OrderService/Order/migrations/0003_order_payment_details.py
|
surajkendhey/Kart
|
458bee955d1569372fc8b3facb2602063a6ec6f5
|
[
"Apache-2.0"
] | null | null | null |
OrderService/Order/migrations/0003_order_payment_details.py
|
surajkendhey/Kart
|
458bee955d1569372fc8b3facb2602063a6ec6f5
|
[
"Apache-2.0"
] | null | null | null |
OrderService/Order/migrations/0003_order_payment_details.py
|
surajkendhey/Kart
|
458bee955d1569372fc8b3facb2602063a6ec6f5
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-10-18 09:41
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('Order', '0002_auto_20201018_1503'),
]
operations = [
migrations.AddField(
model_name='order',
name='payment_details',
field=jsonfield.fields.JSONField(default=dict),
),
]
| 20.75
| 59
| 0.621687
| 306
| 0.737349
| 0
| 0
| 0
| 0
| 0
| 0
| 103
| 0.248193
|
8355344375b34bffec11d50ae6b74005d1c2e2fb
| 1,166
|
py
|
Python
|
src/deepproblog/examples/Forth/Sort/sort.py
|
vossenwout/gtadeepproblog
|
65509b740518af422b96e84ef10716e0ac246e75
|
[
"Apache-2.0"
] | 54
|
2021-06-23T08:03:23.000Z
|
2022-03-10T01:02:43.000Z
|
src/deepproblog/examples/Forth/Sort/sort.py
|
Damzwan/deepproblog
|
56bcf5208e79c17510b5d288068fabc6cd64f3cf
|
[
"Apache-2.0"
] | 2
|
2021-06-30T23:48:25.000Z
|
2022-03-18T10:45:05.000Z
|
src/deepproblog/examples/Forth/Sort/sort.py
|
Damzwan/deepproblog
|
56bcf5208e79c17510b5d288068fabc6cd64f3cf
|
[
"Apache-2.0"
] | 12
|
2021-06-30T10:47:52.000Z
|
2022-03-09T23:51:48.000Z
|
import torch
from deepproblog.dataset import DataLoader, QueryDataset
from deepproblog.engines import ExactEngine
from deepproblog.evaluate import get_confusion_matrix
from deepproblog.examples.Forth import EncodeModule
from deepproblog.model import Model
from deepproblog.network import Network
from deepproblog.train import train_model
train = 2
test = 8
train_queries = QueryDataset("data/train{}_test{}_train.txt".format(train, test))
dev_queries = QueryDataset("data/train{}_test{}_dev.txt".format(train, test))
test_queries = QueryDataset("data/train{}_test{}_test.txt".format(train, test))
fc1 = EncodeModule(20, 20, 2)
model = Model(
"compare.pl",
[Network(fc1, "swap_net", optimizer=torch.optim.Adam(fc1.parameters(), 1.0))],
)
model.set_engine(ExactEngine(model), cache=True)
test_model = Model("compare.pl", [Network(fc1, "swap_net", k=1)])
test_model.set_engine(ExactEngine(test_model), cache=False)
train_obj = train_model(
model,
DataLoader(train_queries, 16),
40,
log_iter=50,
test_iter=len(train_queries),
test=lambda x: [
("Accuracy", get_confusion_matrix(test_model, dev_queries).accuracy())
],
)
| 29.897436
| 82
| 0.752144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 144
| 0.123499
|
835b2ca04f52a867e7d976e3a7d13af46d16320d
| 624
|
py
|
Python
|
adapters/base_adapter.py
|
juangallostra/moonboard
|
d4a35857d480ee4bed06faee44e0347e1070b6b8
|
[
"MIT"
] | null | null | null |
adapters/base_adapter.py
|
juangallostra/moonboard
|
d4a35857d480ee4bed06faee44e0347e1070b6b8
|
[
"MIT"
] | null | null | null |
adapters/base_adapter.py
|
juangallostra/moonboard
|
d4a35857d480ee4bed06faee44e0347e1070b6b8
|
[
"MIT"
] | null | null | null |
from models.problem import Problem
class BaseProblemAdapter():
"""
Map problem data to a Python object that the renderer can use.
"""
def map_problem(self, problem_data) -> Problem:
"""
Given the raw data of a problem, convert it to a Problem object and return it.
:param problem_data: Source from which to map the problem
:type problem_data: dict
:return: Problem object with the parsed problem data as attributes
:rtype: Problem
:raises NotImplementedError: If the method is not implemented
"""
raise NotImplementedError
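A minimal concrete adapter sketch; it assumes problem_data is a dict and that Problem accepts the parsed fields as keyword arguments, since the Problem constructor is not shown in this file.
class DictProblemAdapter(BaseProblemAdapter):
    """
    Hypothetical adapter for problems already stored as plain dicts.
    """
    def map_problem(self, problem_data) -> Problem:
        # Assumes Problem takes the parsed fields as keyword arguments.
        return Problem(**problem_data)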
| 31.2
| 86
| 0.658654
| 586
| 0.939103
| 0
| 0
| 0
| 0
| 0
| 0
| 458
| 0.733974
|
835d3a5a9f1f473d9972b7066265ae37781c89a5
| 7,439
|
py
|
Python
|
meteo_inversion_matrix.py
|
yandex-research/classification-measures
|
210fbc107d5f41e64cc4e6990f0b970973d25995
|
[
"Apache-2.0"
] | 6
|
2021-12-07T03:15:03.000Z
|
2022-02-10T20:39:44.000Z
|
meteo_inversion_matrix.py
|
yandex-research/classification-measures
|
210fbc107d5f41e64cc4e6990f0b970973d25995
|
[
"Apache-2.0"
] | null | null | null |
meteo_inversion_matrix.py
|
yandex-research/classification-measures
|
210fbc107d5f41e64cc4e6990f0b970973d25995
|
[
"Apache-2.0"
] | null | null | null |
from glob import glob
from collections import defaultdict, Counter
import sys
import math
import numpy as np
import random
random.seed(42)
EPS = 1e-5
if len(sys.argv)<2 or sys.argv[1] not in ('10m', '2h'):
use_fcs = list(range(12))
elif sys.argv[1] == '10m':
use_fcs = (0,)
else:
use_fcs = (11,)
def alt_mcc_bin(tp, fn, fp, tn):
n = tp+ fn+ fp+ tn
c2i = (0,1)
m = len(c2i)
cs = defaultdict(lambda:defaultdict(int))
cs[1][1] = tp
cs[0][1] = fp
cs[1][0] = fn
cs[0][0] = tn
ts = defaultdict(int)
ps = defaultdict(int)
for c1 in range(m):
for c2 in range(m):
ts[c1] += cs[c1][c2]
ps[c2] += cs[c1][c2]
sum1 = cs[1][1]*cs[0][0]-cs[0][1]*cs[1][0]
sum2 = ps[1]*ts[1]*ps[0]*ts[0]
return sum1/np.sqrt(1.*sum2)
def alt_cohen_bin4(tp, fn, fp, tn):
n = tp+ fn+ fp+ tn
c2i = (0,1)
m = len(c2i)
cs = defaultdict(lambda:defaultdict(int))
cs[1][1] = tp
cs[0][1] = fp
cs[1][0] = fn
cs[0][0] = tn
ts = defaultdict(int)
ps = defaultdict(int)
for c1 in range(m):
for c2 in range(m):
ts[c1] += cs[c1][c2]
ps[c2] += cs[c1][c2]
sum1 = 0.
sum2 = 0.
for i in range(m):
sum1 += cs[i][i]
sum2 += ps[i]*ts[i]
return (sum1-sum2/n)/(n-sum2/n)
def alt_confent_bin4(tp, fn, fp, tn):
n = tp+ fn+ fp+ tn
c2i = (0,1)
m = len(c2i)
cs = defaultdict(lambda:defaultdict(int))
cs[1][1] = tp
cs[0][1] = fp
cs[1][0] = fn
cs[0][0] = tn
ts = defaultdict(int)
ps = defaultdict(int)
for c1 in range(m):
for c2 in range(m):
ts[c1] += cs[c1][c2]
ps[c2] += cs[c1][c2]
pis = defaultdict(lambda:defaultdict(float))
pjs = defaultdict(lambda:defaultdict(float))
pijs = defaultdict(lambda:defaultdict(lambda:defaultdict(float)))
for c1 in range(m):
for c2 in range(m):
if cs[c1][c2]:
pijs[c1][c1][c2] = cs[c1][c2]/(ts[c1]+ps[c1])
pijs[c2][c1][c2] = cs[c1][c2]/(ts[c2]+ps[c2])
else:
pijs[c1][c1][c2] = 0
pijs[c2][c1][c2] = 0
sum1 = 0.
for c1 in range(m):
sum2 = 0.
for c2 in range(m):
if c1!=c2:
if pijs[c1][c1][c2]: sum2 += pijs[c1][c1][c2]*np.log(pijs[c1][c1][c2])/np.log(2*m-2)
if pijs[c1][c2][c1]: sum2 += pijs[c1][c2][c1]*np.log(pijs[c1][c2][c1])/np.log(2*m-2)
sum1 += sum2*(ts[c1]+ps[c1])/(2.*n)
return -sum1
def alt_sba_bin(tp, fn, fp, tn):
n = tp+ fn+ fp+ tn
c2i = (0,1)
m = len(c2i)
cs = defaultdict(lambda:defaultdict(int))
cs[1][1] = tp
cs[0][1] = fp
cs[1][0] = fn
cs[0][0] = tn
ts = defaultdict(int)
ps = defaultdict(int)
for c1 in range(m):
for c2 in range(m):
ts[c1] += cs[c1][c2]
ps[c2] += cs[c1][c2]
min_agr = True
for i in range(m):
if cs[i][i]>0:
min_agr = False
if min_agr: return 0
sum1 = 0.
for i in range(m):
sum1 += cs[i][i]/ts[i] if ts[i] else ps[i]/n
sum1 += cs[i][i]/ps[i] if ps[i] else ts[i]/n
sum1 /= 2*m
return sum1
def alt_gm1_bin(tp, fn, fp, tn):
r = 1
n = tp+ fn+ fp+ tn
c2i = (0,1)
m = len(c2i)
cs = defaultdict(lambda:defaultdict(int))
cs[1][1] = tp
cs[0][1] = fp
cs[1][0] = fn
cs[0][0] = tn
rr = []
_tp, _fn, _fp, _tn = tp, fn, fp, tn
t = _tp+_fn
p = _tp+_fp
t0 = _tn+_fp
p0 = _tn+_fn
return (n*_tp-t*p)/pow( ( pow(t*t0,r)+pow(p*p0,r) )/2., 1./r)
def alt_CD(tp, fn, fp, tn):
return -np.arccos(alt_mcc_bin(tp, fn, fp, tn))
metrics_impl = dict([
('f1', lambda tp, fn, fp, tn: (2*tp)/(2*tp+fp+fn)),
('jaccard', lambda tp, fn, fp, tn: tp/(tp+fp+fn)),
('ba', lambda tp, fn, fp, tn: ((tp)/(tp+fn)+(tn)/(tn+fp))/2.),
('acc', lambda tp, fn, fp, tn: (tp+tn)/(tp+tn+fp+fn)),
('iba', lambda tp, fp, fn, tn: ((tp)/(tp+fn)+(tn)/(tn+fp))/2.),
('gm1', alt_gm1_bin),
('ce', lambda tp, fn, fp, tn:-alt_confent_bin4(tp, fn, fp, tn)),
('sba', alt_sba_bin),
('kappa', alt_cohen_bin4),
('cc', alt_mcc_bin),
('cd',alt_CD),
])
_cache = dict()
def get_bin_indices(tp, fn, fp, tn):
global _cache
handle = (tp, fn, fp, tn)
if handle in _cache:
return _cache[handle]
_cache[handle] = dict((m, mfn(tp, fn, fp, tn)) for m, mfn in metrics_impl.items())
return _cache[handle]
found_examples = defaultdict(list)
discr_examples = defaultdict(list)
sampled_metrics = defaultdict(dict)
for fn in glob('data/meteo/*.tsv'):
for idx, line in enumerate(open(fn, encoding='utf-8')):
if not idx: continue
exp_group, utc_date, tn, tp, fn, fp = line.strip().split('\t')
tn = list(map(int,tn.split(',')))
tp = list(map(int,tp.split(',')))
fn = list(map(int,fn.split(',')))
fp = list(map(int,fp.split(',')))
for fc in use_fcs:
sampled_metrics[(utc_date, fc)][exp_group] = get_bin_indices(tp[fc], fn[fc], fp[fc], tn[fc])
total = set()
for ds in sampled_metrics:
for i, (a1, m1) in enumerate(sampled_metrics[ds].items()):
for j, (a2, m2) in enumerate(sampled_metrics[ds].items()):
if i<j:
left_winners = []
right_winners = []
draw_cases = []
for m in metrics_impl:
if np.isnan(m1[m]) or np.isnan(m2[m]):
continue
if m1[m]>m2[m] and abs(m1[m]-m2[m])>EPS:
left_winners.append( (m,i,j) )
if m1[m]<m2[m] and abs(m1[m]-m2[m])>EPS:
right_winners.append( (m,i,j) )
if abs(m1[m]-m2[m])<=EPS:
draw_cases.append( (m,i,j) )
handle = frozenset((ds,a1,a2))
if left_winners and right_winners:
for r1 in left_winners:
for r2 in right_winners:
found_examples[handle].append( tuple(sorted([r1[0],r2[0]])) )
discr_examples[ tuple(sorted([r1[0],r2[0]])) ].append( handle )
elif left_winners and draw_cases:
for r1 in left_winners:
for r2 in draw_cases:
found_examples[handle].append( tuple(sorted([r1[0],r2[0]])) )
discr_examples[ tuple(sorted([r1[0],r2[0]])) ].append( handle )
elif right_winners and draw_cases:
for r1 in right_winners:
for r2 in draw_cases:
found_examples[handle].append( tuple(sorted([r1[0],r2[0]])) )
discr_examples[ tuple(sorted([r1[0],r2[0]])) ].append( handle )
else:
if handle not in found_examples:
found_examples[handle] = list()
print('total',len(found_examples))
print('\t'+'\t'.join(sorted(metrics_impl)))
for r1 in sorted(metrics_impl):
r = [r1]
for r2 in sorted(metrics_impl):
n = len(discr_examples[
tuple(sorted([r1,r2]))
]
)
if n:
r.append( str( n ) )
else:
r.append( '' )
print('\t'.join(r))
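A worked numeric check of the binary MCC helper defined above (assuming alt_mcc_bin is in scope); the confusion-matrix counts are arbitrary and the reference value is the textbook formula.
import numpy as np

tp, fn, fp, tn = 40, 10, 5, 45
mcc_ref = (tp * tn - fp * fn) / np.sqrt(
    float((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
assert np.isclose(alt_mcc_bin(tp, fn, fp, tn), mcc_ref)  # ~0.70 for these counts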
| 29.058594
| 104
| 0.491329
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 132
| 0.017744
|
835de6ecaa9ce8488f1f8c676c899a539e8ca67c
| 1,217
|
py
|
Python
|
terrain_following/src/image_processor.py
|
ZhiangChen/ros_vision
|
4c8e6580f6b3ab05d8d782a5a0abdbdf44b0c2de
|
[
"MIT"
] | null | null | null |
terrain_following/src/image_processor.py
|
ZhiangChen/ros_vision
|
4c8e6580f6b3ab05d8d782a5a0abdbdf44b0c2de
|
[
"MIT"
] | 1
|
2019-12-07T00:48:36.000Z
|
2019-12-07T00:48:36.000Z
|
terrain_following/src/image_processor.py
|
ZhiangChen/ros_vision
|
4c8e6580f6b3ab05d8d782a5a0abdbdf44b0c2de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Zhiang Chen
Nov 2019
"""
import rospy
from sensor_msgs.msg import Image
from sensor_msgs.msg import Imu
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv2
class Image_Processor(object):
def __init__(self):
rospy.Subscriber("/r200/depth/image_raw", Image, self.record_callback)
self.delta_time = 0
self.bridge = CvBridge()
print("Image processor node initialized!")
def start_recording(self, save_path, delta_time=1):
self.save_path = save_path
self.frame = 0
self.delta_time = delta_time
def record_callback(self, data):
print("callback works")
if self.delta_time >0:
image = self.bridge.imgmsg_to_cv2(data, "bgr8")
print(image.shape)
cv2.imwrite(self.save_path + str(self.frame) + ".png", image)
rospy.sleep(self.delta_time)
self.frame += 1
if __name__ == '__main__':
rospy.init_node('image_processor', anonymous=True)
IMP = Image_Processor()
IMP.start_recording("/home/zhiang/Pictures/terrain_boulder/")
try:
rospy.spin()
except rospy.ROSInterruptException:
print("Node killed!")
| 28.97619
| 78
| 0.659819
| 732
| 0.601479
| 0
| 0
| 0
| 0
| 0
| 0
| 216
| 0.177486
|
835e51b25ab23118471502fce1356174cfc1f9cc
| 137
|
py
|
Python
|
lab_4/start.py
|
AnastasiaZheleznyakova/2020-2-level-labs
|
926c7abde05f545f27d09a4d96b8014d5a668789
|
[
"MIT"
] | null | null | null |
lab_4/start.py
|
AnastasiaZheleznyakova/2020-2-level-labs
|
926c7abde05f545f27d09a4d96b8014d5a668789
|
[
"MIT"
] | null | null | null |
lab_4/start.py
|
AnastasiaZheleznyakova/2020-2-level-labs
|
926c7abde05f545f27d09a4d96b8014d5a668789
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
pass
RESULT = 1
# DO NOT REMOVE NEXT LINE - KEEP IT INTENTIONALLY LAST
assert RESULT == 1, ''
| 22.833333
| 58
| 0.613139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 66
| 0.481752
|
835f4e7f9614427e618dd0d65cdbcc8a97ccc269
| 157
|
py
|
Python
|
testtarget.py
|
epopisces/template_api_wrapper
|
e581eb31f6123ca2d93803453f2a1ab25c3c1981
|
[
"MIT"
] | null | null | null |
testtarget.py
|
epopisces/template_api_wrapper
|
e581eb31f6123ca2d93803453f2a1ab25c3c1981
|
[
"MIT"
] | null | null | null |
testtarget.py
|
epopisces/template_api_wrapper
|
e581eb31f6123ca2d93803453f2a1ab25c3c1981
|
[
"MIT"
] | null | null | null |
class ToolNameAPI:
thing = 'thing'
toolname_tool = 'example'
tln = ToolNameAPI()
the_repo = "reponame"
author = "authorname"
profile = "authorprofile"
| 15.7
| 25
| 0.713376
| 38
| 0.242038
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.33758
|
835f677bc91f7df84f3075940f43de8d60abf297
| 84
|
py
|
Python
|
learning/__init__.py
|
aleisalem/Maat
|
702c88a6a86f0b56e504df8f4d7ba18e8a39c887
|
[
"Apache-2.0"
] | 4
|
2019-10-11T12:19:29.000Z
|
2020-08-06T21:45:10.000Z
|
learning/__init__.py
|
aleisalem/Maat
|
702c88a6a86f0b56e504df8f4d7ba18e8a39c887
|
[
"Apache-2.0"
] | null | null | null |
learning/__init__.py
|
aleisalem/Maat
|
702c88a6a86f0b56e504df8f4d7ba18e8a39c887
|
[
"Apache-2.0"
] | 1
|
2021-01-05T11:50:22.000Z
|
2021-01-05T11:50:22.000Z
|
__all__ = ["feature_extraction", "hmm_learner", "scikit_learners", "string_kernel"]
| 42
| 83
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 65
| 0.77381
|
835f9384035a1bd549616f5ba14cfd3f214b0f26
| 2,509
|
py
|
Python
|
Metrics/reporter.py
|
augdomingues/SPEX
|
412034eb662b6cac466d7c96ac04c399ff2617c5
|
[
"CC0-1.0"
] | null | null | null |
Metrics/reporter.py
|
augdomingues/SPEX
|
412034eb662b6cac466d7c96ac04c399ff2617c5
|
[
"CC0-1.0"
] | null | null | null |
Metrics/reporter.py
|
augdomingues/SPEX
|
412034eb662b6cac466d7c96ac04c399ff2617c5
|
[
"CC0-1.0"
] | 1
|
2021-09-14T06:28:07.000Z
|
2021-09-14T06:28:07.000Z
|
from os.path import join
from math import ceil
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams["font.size"] = 14
plt.rcParams["figure.figsize"] = [15, 8]
class reporter:
def __init__(self, name, folder, counts, interactive=True):
plt.subplot(221)
plt.title("Histogram")
sns.distplot(counts)
plt.subplot(222)
plt.title("Boxplot")
plt.boxplot(counts)
plt.subplot(223)
clist = [ceil(c) for c in counts]
clist = np.array(sorted(clist))
integers = np.unique([int(c) for c in clist])
cdf = np.array([sum(clist <= i)/len(clist) for i in integers])
plt.title("CDF - P(x $\leq$ X)")
plt.grid(alpha=0.25)
plt.plot(cdf)
plt.subplot(224)
plt.plot(1 - cdf)
plt.title("CCDF - P(x > X)")
plt.grid(alpha=0.25)
plt.suptitle(name)
plt.savefig(join(folder, name))
plt.clf()
if interactive:
import pygal as pg
box_plot = pg.Box()
box_plot.title = name
box_plot.add("Values", counts)
boxplot_name = name + "_boxplot.svg"
box_plot.render_to_file(join(folder, boxplot_name))
hist = pg.Bar(show_x_labels=False)
clist = [ceil(c) for c in counts]
freqs = [clist.count(i) for i in range(0, int(max(clist)))]
hist.add("Values", freqs)
hist.title = name
hist.x_labels = map(str, integers)
histogram_name = name + "_histogram.svg"
hist.render_to_file(join(folder, histogram_name))
line = pg.Line()
line.title = name
line.add("CDF", cdf)
line.add("CCDF", 1 - cdf)
line.x_labels = map(str, integers)
# line.x_labels = map(str, counts)
line_name = name + "_cdf_ccdf.svg"
line.render_to_file(join(folder, line_name))
with open(join(folder, "report_{}.html".format(name)), "w+") as out:
obj0 = "<object type='image/svg+xml' data='"
obj1 = "'></object>\n"
out.write("<html><head align='center'>Report - {}</head><body>\n".format(name))
out.write("{}{}{}".format(obj0, boxplot_name, obj1))
out.write("{}{}{}".format(obj0, histogram_name, obj1))
out.write("{}{}{}".format(obj0, line_name, obj1))
out.write("</body></html>")
| 31.759494
| 95
| 0.539657
| 2,311
| 0.921084
| 0
| 0
| 0
| 0
| 0
| 0
| 358
| 0.142686
|
835fad4f88ef3c40e122cf474982c0fc18e561fb
| 21,322
|
py
|
Python
|
contrib/translate_test.py
|
csilvers/kake
|
51465b12d267a629dd61778918d83a2a134ec3b2
|
[
"MIT"
] | null | null | null |
contrib/translate_test.py
|
csilvers/kake
|
51465b12d267a629dd61778918d83a2a134ec3b2
|
[
"MIT"
] | null | null | null |
contrib/translate_test.py
|
csilvers/kake
|
51465b12d267a629dd61778918d83a2a134ec3b2
|
[
"MIT"
] | null | null | null |
"""Tests the translate_* files."""
from __future__ import absolute_import
import cPickle
import os
import shutil
from shared.testutil import testsize
from third_party import polib
from kake import compile_all_pot
from kake import compile_small_mo
from kake import translate_handlebars
from kake import translate_javascript
from kake import translate_util
import kake.lib.compile_rule
import kake.lib.testutil
import kake.make
class TranslateString(translate_util.TranslateBase):
"""Treats the input file as a single nltext string."""
def translate(self, infile_name, outfile_lang_moentries_context):
file_contents = self._read_input(infile_name)
for (outfile, lang, mo_entries, _) in outfile_lang_moentries_context:
translated_contents = mo_entries.get_singular_translation(
file_contents.strip())
if translated_contents == file_contents:
translated_contents = None
self._write_output(infile_name, outfile, translated_contents)
class TestBase(kake.lib.testutil.KakeTestBase):
def setUp(self, make_small_mo_file=True):
super(TestBase, self).setUp()
os.makedirs(self._abspath('javascript'))
os.makedirs(self._abspath('caps'))
os.makedirs(self._abspath('intl', 'translations', 'pofiles'))
os.makedirs(self._abspath('intl', 'translations', 'approved_pofiles'))
os.makedirs(self._abspath('genfiles', 'translations', 'caps'))
os.makedirs(self._abspath('genfiles', 'extracted_strings', 'caps'))
os.makedirs(self._abspath('kake'))
shutil.copyfile(os.path.join(self.real_ka_root,
'kake', 'compile_js.js'),
os.path.join(self.tmpdir,
'kake', 'compile_js.js'))
with open(self._abspath('f1'), 'w') as f:
print >>f, 'Graphing linear equations'
with open(self._abspath('javascript', 'f1.js'), 'w') as f:
print >>f, 'a = i18n._("Graphing linear equations");'
print >>f, 'b = i18n._("Hello %(where)s", {where: "world"});'
with open(self._abspath('javascript', 'f1.jsx'), 'w') as f:
print >>f, 'a = i18n._("Graphing linear equations");'
print >>f, 'b = i18n._("Hello %(where)s", {where: "world"});'
# The actual jsx would be: <$_ where="world">Hello %(where)s</$_>
# But our fake jsx-compiler won't correctly 'compile' this, so
# I cheat and put in the post-compiled value.
print >>f, 'c = $_({where: "world"}, "Hello %(where)s", etc, etc);'
with open(self._abspath('javascript', 'f1.handlebars'), 'w') as f:
print >>f, '{{#_}}Graphing linear equations{{/_}}'
# Also test plural functionality
with open(self._abspath('javascript', 'f2.js'), 'w') as f:
print >>f, 'a = $.ngettext("1 second", "%(num)s seconds");'
with open(self._abspath('javascript', 'f2.handlebars'), 'w') as f:
print >>f, ('{{#ngettext num}}1 minute{{else}}'
'{{num}} minutes{{/ngettext}}')
# A plural used in a singular context.
with open(self._abspath('f.html'), 'w') as f:
print >>f, '<div title="1 second">1 minute</div>'
with open(self._abspath('f.js'), 'w') as f:
print >>f, 'a = i18n._("1 minute");'
with open(self._abspath('f.handlebars'), 'w') as f:
print >>f, '{{#_}}1 minute{{/_}}'
# An exercise with no translations.
with open(self._abspath('f_no.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, ('<span data-if="alert(i18n._(\'Banana\'));">'
'Canteloupe'
'</span>')
print >>f, '<input type="text" value="Durian" />'
print >>f, '<var>alert(i18n._("Eggplant"));</var>'
print >>f, ('<span data-if="isSingular(A)"><var>A</var> Fig</span>'
'<span data-else=""><var>A</var> Figs</span>')
        # Exercise files with partial translations in different kinds of nltext
# positions.
with open(self._abspath('f_p1.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, '<script>alert(i18n._("Addition 1"));</script>'
with open(self._abspath('f_p2.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, '<span data-if="alert(i18n._(\'Addition 1\'));"></span>'
with open(self._abspath('f_p3.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, '<span>Addition 1</span>'
with open(self._abspath('f_p4.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, '<input type="text" value="Addition 1" />'
with open(self._abspath('f_p5.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, '<var>alert(i18n._("Addition 1"));</var>'
with open(self._abspath('f_p6.html'), 'w') as f:
print >>f, '<script>alert(i18n._("Apple"));</script>'
print >>f, ('<span data-if="isSingular(n)">1 hour</span>'
'<span data-else=""><var>n</var> hours</span>')
with open(self._abspath('f_p7.html'), 'w') as f:
print >>f, ('<script>'
'alert(i18n._("Apple")); alert(i18n._("Addition 1"));'
'</script>')
with open(self._abspath('f_p8.html'), 'w') as f:
print >>f, ('<script>'
'alert(i18n._("Apple")); '
'alert(i18n._("Subtraction 1"));'
'</script>')
# A file without a translation
with open(self._abspath('f_no'), 'w') as f:
print >>f, 'Hello, world'
        # Make the .po file. We don't need 'occurrences' fields for
# our tests, but _write_pofile() wants them, so we make up
# some fake ones.
e1 = polib.POEntry(msgid='Hello %(where)s',
msgstr='HELLO %(where)s',
occurrences=[('a', 1)])
e2 = polib.POEntry(msgid='Graphing linear equations',
msgstr='GRAPHING LINEAR EQUATIONS',
occurrences=[('a', 1)])
e3 = polib.POEntry(msgid='Addition 1',
msgstr='ADDITION 1',
occurrences=[('a', 1)])
e4 = polib.POEntry(msgid='1 second',
msgid_plural='%(num)s seconds',
msgstr_plural={'0': '1 SECOND',
'1': '%(num)s SECONDS',
'2': '%(num)s SECS'},
occurrences=[('a', 1)])
e5 = polib.POEntry(msgid='1 minute',
msgid_plural='{{num}} minutes',
msgstr_plural={'0': '1 MINUTE',
'1': '{{num}} MINUTES',
'2': '{{num}} MINS'},
occurrences=[('a', 1)])
e6 = polib.POEntry(msgid='1 hour',
msgid_plural='<var>n</var> hours',
msgstr_plural={'0': '1 HOUR',
'1': '<var>n</var> HOURS',
'2': '<var>n</var> H'},
occurrences=[('a', 1)])
# This entry differs between the approved pofiles and the unapproved
# pofiles
e3_unapproved = polib.POEntry(msgid='Addition 1',
msgstr='ADDITION ONE',
occurrences=[('a', 1)])
        # These entries only exist in the unapproved pofile
e7_unapproved = polib.POEntry(msgid='Subtraction 1',
msgstr='SUBTRACTION ONE',
occurrences=[('a', 1)])
e8_unapproved = polib.POEntry(msgid='1 fortnight',
msgid_plural='{{num}} fortnights',
msgstr_plural={'0': '1 FORTNIGHT',
'1': '{{num}} FORTNIGHTS',
'2': '{{num}} FORTNS'},
occurrences=[('a', 1)])
def save_po_file(entries, outpath):
po_file = polib.POFile()
po_file.extend(entries)
po_file.save(outpath)
save_po_file((e1, e2, e3_unapproved, e4, e5, e6, e7_unapproved,
e8_unapproved),
self._abspath('intl', 'translations',
'pofiles', 'caps.rest.po'))
save_po_file((e1, e2, e3, e4, e5, e6),
self._abspath('intl', 'translations',
'approved_pofiles', 'caps.rest.po'))
# Also make the .pot.pickle files.
po_entry_map = {
'f1': [e2],
'javascript/f1.js': [e2, e1],
'javascript/f1.jsx': [e2, e1],
'javascript/f1.handlebars': [e2],
'javascript/f2.js': [e4],
'javascript/f2.handlebars': [e5],
'f.html': [e4, e5, e8_unapproved],
'f.js': [e5],
'f.handlebars': [e5],
'f_no': [],
'f_no.html': [],
'f_p1.html': [e3],
'f_p2.html': [e3],
'f_p3.html': [e3],
'f_p4.html': [e3],
'f_p5.html': [e3],
'f_p6.html': [e6],
'f_p7.html': [e3],
'f_p8.html': [e7_unapproved],
}
for (fname, po_entries) in po_entry_map.iteritems():
fname = 'genfiles/extracted_strings/en/%s.pot.pickle' % fname
if not os.path.isdir(os.path.dirname(self._abspath(fname))):
os.makedirs(os.path.dirname(self._abspath(fname)))
compile_all_pot._write_pofile(po_entries, self._abspath(fname))
if make_small_mo_file:
for f in po_entry_map:
fout = 'genfiles/extracted_strings/caps/%s.small_mo.pickle' % f
if not os.path.isdir(os.path.dirname(self._abspath(fout))):
os.makedirs(os.path.dirname(self._abspath(fout)))
compile_small_mo.SplitPOFile().build_many([
(fout,
['genfiles/extracted_strings/en/%s.pot.pickle' % f,
'intl/translations/pofiles/caps.rest.po',
'intl/translations/approved_pofiles/caps.rest.po'],
['intl/translations/pofiles/caps.rest.po'],
{})])
def build(self, translator, infile, outfile):
translator.build_many([(
outfile,
[infile,
'genfiles/extracted_strings/caps/%s.small_mo.pickle' % infile],
[outfile],
{'{lang}': 'caps'}
)])
@testsize.tiny
class TestSmallMo(TestBase):
def test_approval_flag(self):
with open(self._abspath('genfiles/extracted_strings/caps/'
'f.html.small_mo.pickle')) as f:
small_mo = cPickle.load(f)
        # We have translations for both "1 second" and "1 fortnight"
self.assertIsNotNone(small_mo.get_plural_translation(
"1 second", approved_only=False))
self.assertIsNotNone(small_mo.get_singular_translation(
"1 second", approved_only=False))
self.assertIsNotNone(small_mo.get_plural_translation(
"1 fortnight", approved_only=False))
self.assertIsNotNone(small_mo.get_singular_translation(
"1 fortnight", approved_only=False))
# ...but the translation for "1 fortnight" is not approved.
self.assertIsNotNone(small_mo.get_plural_translation(
"1 second", approved_only=True))
self.assertIsNotNone(small_mo.get_singular_translation(
"1 second", approved_only=True))
self.assertIsNone(small_mo.get_plural_translation(
"1 fortnight", approved_only=True))
self.assertIsNone(small_mo.get_singular_translation(
"1 fortnight", approved_only=True))
@testsize.tiny
class TestTranslations(TestBase):
def test_simple(self):
translator = TranslateString()
self.build(translator, 'f1', 'f1_caps')
self.assertFile('f1_caps', 'GRAPHING LINEAR EQUATIONS')
def test_symlink_when_there_is_no_translation(self):
translator = TranslateString()
self.build(translator, 'f_no', 'caps/f1_symlink')
self.assertFile('caps/f1_symlink', 'Hello, world\n')
self.assertTrue(os.path.islink(self._abspath('caps', 'f1_symlink')))
self.assertEqual(os.path.join('..', 'f_no'),
os.readlink(self._abspath('caps', 'f1_symlink')))
@testsize.tiny
class TestJavascript(TestBase):
def test_singular(self):
translator = translate_javascript.TranslateJavascript()
self.build(translator, 'javascript/f1.js', 'caps/f1.js')
self.assertFile('caps/f1.js',
'a = i18n._("GRAPHING LINEAR EQUATIONS");\n'
'b = i18n._("HELLO %(where)s", {where: "world"});\n')
def test_plural(self):
translator = translate_javascript.TranslateJavascript()
self.build(translator, 'javascript/f2.js', 'caps/f2.js')
self.assertFile('caps/f2.js',
'a = $.ngettext({"lang": "caps", '
'"messages": ["1 SECOND", "%(num)s SECONDS", '
'"%(num)s SECS"]});\n')
def test_ngettext_entry_used_in_singular_context(self):
translator = translate_javascript.TranslateJavascript()
self.build(translator, 'f.js', 'caps/f.js')
self.assertFile('caps/f.js',
'a = i18n._("1 MINUTE");\n')
def test_should_not_translate_file(self):
self.mock_function('intl.english_only.should_not_translate_file',
lambda f: f == 'javascript/f1.js')
translator = translate_javascript.TranslateJavascript()
# caps/f1.js should be a symlink since it's in do-not-translate
self.build(translator, 'javascript/f1.js', 'caps/f1.js')
self.assertTrue(os.path.islink(self._abspath('caps', 'f1.js')))
self.assertEqual('../javascript/f1.js',
os.readlink(self._abspath('caps', 'f1.js')))
# But f2.js is a different story...
self.build(translator, 'javascript/f2.js', 'caps/f2.js')
self.assertFile('caps/f2.js',
'a = $.ngettext({"lang": "caps", '
'"messages": ["1 SECOND", "%(num)s SECONDS", '
'"%(num)s SECS"]});\n')
@testsize.tiny
class TestHandlebars(TestBase):
def test_singular(self):
translator = translate_handlebars.TranslateHandlebars()
self.build(translator, 'javascript/f1.handlebars', 'caps/f1.hbars')
self.assertFile('caps/f1.hbars',
'GRAPHING LINEAR EQUATIONS\n')
def test_plural(self):
translator = translate_handlebars.TranslateHandlebars()
self.build(translator, 'javascript/f2.handlebars', 'caps/f2.hbars')
self.assertFile('caps/f2.hbars',
'{{#ngettext num "caps" 0}}1 MINUTE{{else}}'
'{{#ngettext num "caps" 1}}{{num}} MINUTES{{else}}'
'{{num}} MINS{{/ngettext}}{{/ngettext}}\n')
def test_ngettext_entry_used_in_singular_context(self):
translator = translate_handlebars.TranslateHandlebars()
self.build(translator, 'f.handlebars', 'caps/f.hbars')
self.assertFile('caps/f.hbars',
'1 MINUTE\n')
def test_gettext_entry_used_in_plural_context(self):
with open(self._abspath('f.handlebars'), 'w') as f:
print >>f, ('{{#ngettext num}}Addition 1{{else}}Additions 1'
'{{/ngettext}}')
translator = translate_handlebars.TranslateHandlebars()
self.build(translator, 'f.handlebars', 'caps/f.hbars')
# Shouldn't translate our string since it's a singular string
# used in a plural context, and it doesn't know how to
# translate the plural.
self.assertFile('caps/f.hbars',
'{{#ngettext num}}Addition 1{{else}}Additions 1'
'{{/ngettext}}\n')
@testsize.tiny
class TestBuild(TestBase):
"""Test make.build() on translate targets."""
def setUp(self):
# make.build should make the small-mo file for us.
super(TestBuild, self).setUp(make_small_mo_file=False)
def test_javascript(self):
kake.make.build('genfiles/translations/caps/javascript/f1.js')
self.assertFile('genfiles/translations/caps/javascript/f1.js',
'a = i18n._("GRAPHING LINEAR EQUATIONS");\n'
'b = i18n._("HELLO %(where)s", {where: "world"});\n')
def test_handlebars(self):
kake.make.build('genfiles/translations/caps/javascript/f1.handlebars')
self.assertFile('genfiles/translations/caps/javascript/f1.handlebars',
'GRAPHING LINEAR EQUATIONS\n')
def test_incremental_rebuilds(self):
"""Test we don't re-translate when irrelevant translations change."""
kake.make.build('genfiles/translations/caps/javascript/f1.handlebars')
kake.make.build('genfiles/translations/caps/javascript/f2.handlebars')
po_path = self._abspath('intl', 'translations', 'approved_pofiles',
'caps.rest.po')
with open(po_path) as f:
old_po = f.read()
new_po = old_po.replace('MINUTE', 'MINUUUUTE') # used in f2, not f1
with open(po_path, 'w') as f:
print >>f, new_po
self.assertFileLacks(
'genfiles/translations/caps/javascript/f2.handlebars',
'MINUUUUTE')
# Now rebuilding f1 should be a noop.
cr = kake.lib.compile_rule.find_compile_rule(
'genfiles/translations/caps/javascript/f1.handlebars')
with self.assertCalled(cr.compile_instance.translate, 0):
kake.make.build(
'genfiles/translations/caps/javascript/f1.handlebars')
        # Rebuilding f2, on the other hand, should not be a noop.
with self.assertCalled(cr.compile_instance.translate, 1):
kake.make.build(
'genfiles/translations/caps/javascript/f2.handlebars')
self.assertFileContains(
'genfiles/translations/caps/javascript/f2.handlebars',
'MINUUUUTE')
class TestBuildForFakeLang(TestBase):
"""Test make.build() using the special codepath for fake languages."""
# Note we don't make any fake boxes.po file at all. kake
# automatically extracts the strings from the input file,
# fake-translates them, and inserts them into the translated file,
# all on the fly.
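    # A rough sketch of the fake translation (inferred from the expected
    # values below, not from any real boxes.po): every letter of the English
    # string is replaced with U+25A1 (WHITE SQUARE), while word boundaries and
    # %(where)s-style placeholders are left intact.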
_BOX = u'\u25a1'.encode('utf-8')
_UTF8_GRAPHING_LINEAR_EQUATIONS = '%s %s %s' % (_BOX * len('GRAPHING'),
_BOX * len('LINEAR'),
_BOX * len('EQUATIONS'))
_S_GRAPHING_LINEAR_EQUATIONS = '%s %s %s' % (r'\u25a1' * len('GRAPHING'),
r'\u25a1' * len('LINEAR'),
r'\u25a1' * len('EQUATIONS'))
_S_HELLO_WORLD = '%s %%(where)s' % (r'\u25a1' * len('HELLO'))
_S_ADDITION_1 = '%s %s' % (r'\u25a1' * len('ADDITION'),
r'\u25a1' * len('1'))
def test_javascript(self):
kake.make.build('genfiles/translations/boxes/javascript/f1.js')
self.assertFile('genfiles/translations/boxes/javascript/f1.js',
'a = i18n._("%s");\n'
'b = i18n._("%s", {where: "world"});\n'
% (self._S_GRAPHING_LINEAR_EQUATIONS,
self._S_HELLO_WORLD))
def test_jsx(self):
kake.make.build('genfiles/compiled_jsx/boxes/javascript/f1.jsx.js')
self.assertFile('genfiles/compiled_jsx/boxes/javascript/f1.jsx.js',
'a = i18n._("%s");\n'
'b = i18n._("%s", {where: "world"});\n'
'c = $_({where: "world"}, "%s", etc, etc);\n'
% (self._S_GRAPHING_LINEAR_EQUATIONS,
self._S_HELLO_WORLD,
self._S_HELLO_WORLD))
def test_handlebars(self):
kake.make.build('genfiles/translations/boxes/javascript/f1.handlebars')
self.assertFile('genfiles/translations/boxes/javascript/f1.handlebars',
'%s\n' % self._UTF8_GRAPHING_LINEAR_EQUATIONS)
| 45.657388
| 79
| 0.535409
| 20,793
| 0.97519
| 0
| 0
| 7,889
| 0.369993
| 0
| 0
| 7,761
| 0.36399
|
83609972eefc4a7ddcf363f8e89f7408af9885f3
| 115
|
py
|
Python
|
backend/backend/urls.py
|
lucasrafaldini/SpaceXLaunches
|
abcd3686677bc3e25903bc2ed1e084e00090ba33
|
[
"MIT"
] | 1
|
2021-09-21T17:51:11.000Z
|
2021-09-21T17:51:11.000Z
|
backend/backend/urls.py
|
lucasrafaldini/SpaceXLaunches
|
abcd3686677bc3e25903bc2ed1e084e00090ba33
|
[
"MIT"
] | 9
|
2020-06-06T00:42:57.000Z
|
2022-02-27T17:29:18.000Z
|
backend/backend/urls.py
|
lucasrafaldini/SpaceXLaunches
|
abcd3686677bc3e25903bc2ed1e084e00090ba33
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from django.urls import include
urlpatterns = [url("api/", include("api.urls"))]
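# The single pattern above routes requests matching "api/" to the api app's
# URLconf (api.urls, defined elsewhere in the project).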
| 23
| 48
| 0.73913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.13913
|
8360d7fa109831cb6f6bca8e81a94ffadbaafea4
| 223
|
py
|
Python
|
primitives_ubc/regCCFS/__init__.py
|
tonyjo/ubc_primitives
|
bc94a403f176fe28db2a9ac9d1a48cb9db021f90
|
[
"Apache-2.0"
] | null | null | null |
primitives_ubc/regCCFS/__init__.py
|
tonyjo/ubc_primitives
|
bc94a403f176fe28db2a9ac9d1a48cb9db021f90
|
[
"Apache-2.0"
] | 4
|
2020-07-19T00:45:29.000Z
|
2020-12-10T18:25:41.000Z
|
primitives_ubc/regCCFS/__init__.py
|
tonyjo/ubc_primitives
|
bc94a403f176fe28db2a9ac9d1a48cb9db021f90
|
[
"Apache-2.0"
] | 1
|
2021-04-30T18:13:49.000Z
|
2021-04-30T18:13:49.000Z
|
from .ccfsReg import CanonicalCorrelationForestsRegressionPrimitive
__all__ = ['CanonicalCorrelationForestsRegressionPrimitive']
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__) # type: ignore
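# extend_path makes this a pkgutil-style namespace package: if another
# directory on sys.path also contains primitives_ubc/regCCFS, its modules are
# merged into this package's __path__ instead of being shadowed.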
| 27.875
| 67
| 0.847534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.278027
|
8361ead530441db9333244ae0f2f8927bfbf291f
| 2,305
|
py
|
Python
|
bprof/profile.py
|
joelfrederico/bprof
|
09ada1e8cb0b2ddee3e212e1721c81964fc5f7a4
|
[
"MIT"
] | null | null | null |
bprof/profile.py
|
joelfrederico/bprof
|
09ada1e8cb0b2ddee3e212e1721c81964fc5f7a4
|
[
"MIT"
] | null | null | null |
bprof/profile.py
|
joelfrederico/bprof
|
09ada1e8cb0b2ddee3e212e1721c81964fc5f7a4
|
[
"MIT"
] | 1
|
2019-10-19T21:22:02.000Z
|
2019-10-19T21:22:02.000Z
|
class BaseFunction:
def __init__(self, name, n_calls, internal_ns):
self._name = name
self._n_calls = n_calls
self._internal_ns = internal_ns
@property
def name(self):
return self._name
@property
def n_calls(self):
return self._n_calls
@property
def internal_ns(self):
return self._internal_ns
class Lines:
def __init__(self, line_str, n_calls, internal, external):
self._line_str = line_str
self._n_calls = n_calls
self._internal = internal
self._external = external
@property
def text(self):
return self._line_str
@property
def n_calls(self):
return self._n_calls
@property
def internal(self):
return self._internal
@property
def external(self):
return self._external
@property
def total(self):
return self.internal + self.external
class Function(BaseFunction):
def __init__(self, name, lines, n_calls, internal_ns):
self._name = name
self._lines = lines
self._n_calls = n_calls
self._internal_ns = internal_ns
@property
def lines(self):
return self._lines
@property
def name(self):
return self._name
@property
def n_calls(self):
return self._n_calls
@property
def internal_ns(self):
return self._internal_ns
@property
def total(self):
tot = 0
for line in self.lines:
tot += line.total
return tot + self.internal_ns
class Profile:
@staticmethod
def from_data(data):
profile = Profile()
profile._functions = []
for key, fdata in data['functions'].items():
lines = []
for line in fdata['lines']:
line = Lines(line['line_str'], line['n_calls'],
line['internal_ns'], line['external_ns'])
lines.append(line)
func = Function(lines=lines, name=fdata['name'],
n_calls=fdata['n_calls'],
internal_ns=fdata['internal_ns'])
profile._functions.append(func)
return profile
@property
def functions(self):
return self._functions
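# A usage sketch (not part of bprof itself): the nested-dict layout that
# Profile.from_data expects, inferred from the parsing code above.  The
# function name and all timings below are made up.
def _example_profile():
    sample = {
        'functions': {
            'main': {
                'name': 'main',
                'n_calls': 1,
                'internal_ns': 500,
                'lines': [
                    {'line_str': 'x = 1', 'n_calls': 1,
                     'internal_ns': 100, 'external_ns': 0},
                    {'line_str': 'f(x)', 'n_calls': 1,
                     'internal_ns': 50, 'external_ns': 2000},
                ],
            },
        },
    }
    profile = Profile.from_data(sample)
    func = profile.functions[0]
    # total = per-line internal + external time, plus the function's own
    # internal time: (100 + 0) + (50 + 2000) + 500 = 2650 ns.
    return func.name, func.n_calls, func.total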
| 22.598039
| 70
| 0.577007
| 2,295
| 0.995662
| 0
| 0
| 1,600
| 0.694143
| 0
| 0
| 91
| 0.039479
|
836261e038e930e3ea31c7a6628689b091e5c9d1
| 8,108
|
py
|
Python
|
src/compas/numerical/dr/dr_numpy.py
|
arpastrana/compas
|
ed677a162c14dbe562c82d72f370279259faf7da
|
[
"MIT"
] | null | null | null |
src/compas/numerical/dr/dr_numpy.py
|
arpastrana/compas
|
ed677a162c14dbe562c82d72f370279259faf7da
|
[
"MIT"
] | 9
|
2019-09-11T08:53:19.000Z
|
2019-09-16T08:35:39.000Z
|
src/compas/numerical/dr/dr_numpy.py
|
Licini/compas
|
34f65adb3d0abc3f403312ffba62aa76f3376292
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy import array
from numpy import isnan
from numpy import isinf
from numpy import ones
from numpy import zeros
from scipy.linalg import norm
from scipy.sparse import diags
from compas.numerical import connectivity_matrix
from compas.numerical import normrow
__all__ = ['dr_numpy']
K = [
[0.0],
[0.5, 0.5],
[0.5, 0.0, 0.5],
[1.0, 0.0, 0.0, 1.0],
]
class Coeff():
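    """Damping coefficients for the dynamic relaxation scheme: `c` is the
    damping factor, `a` scales the previous velocities and `b` scales the
    residual-driven acceleration (see the update loop in `dr_numpy`)."""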
def __init__(self, c):
self.c = c
self.a = (1 - c * 0.5) / (1 + c * 0.5)
self.b = 0.5 * (1 + self.a)
def dr_numpy(vertices, edges, fixed, loads, qpre, fpre, lpre, linit, E, radius,
callback=None, callback_args=None, **kwargs):
"""Implementation of the dynamic relaxation method for form findong and analysis
of articulated networks of axial-force members.
Parameters
----------
vertices : list
XYZ coordinates of the vertices.
edges : list
Connectivity of the vertices.
fixed : list
Indices of the fixed vertices.
loads : list
XYZ components of the loads on the vertices.
qpre : list
Prescribed force densities in the edges.
fpre : list
Prescribed forces in the edges.
lpre : list
Prescribed lengths of the edges.
linit : list
Initial length of the edges.
E : list
Stiffness of the edges.
radius : list
Radius of the edges.
callback : callable, optional
User-defined function that is called at every iteration.
callback_args : tuple, optional
Additional arguments passed to the callback.
Returns
-------
xyz : array
XYZ coordinates of the equilibrium geometry.
q : array
Force densities in the edges.
f : array
Forces in the edges.
l : array
Lengths of the edges
r : array
Residual forces.
Notes
-----
For more info, see [1]_.
References
----------
.. [1] De Laet L., Veenendaal D., Van Mele T., Mollaert M. and Block P.,
*Bending incorporated: designing tension structures by integrating bending-active elements*,
           Proceedings of Tensinet Symposium 2013, Istanbul, Turkey, 2013.
Examples
--------
>>>
"""
# --------------------------------------------------------------------------
# callback
# --------------------------------------------------------------------------
if callback:
assert callable(callback), 'The provided callback is not callable.'
# --------------------------------------------------------------------------
# configuration
# --------------------------------------------------------------------------
kmax = kwargs.get('kmax', 10000)
dt = kwargs.get('dt', 1.0)
tol1 = kwargs.get('tol1', 1e-3)
tol2 = kwargs.get('tol2', 1e-6)
coeff = Coeff(kwargs.get('c', 0.1))
ca = coeff.a
cb = coeff.b
# --------------------------------------------------------------------------
# attribute lists
# --------------------------------------------------------------------------
num_v = len(vertices)
num_e = len(edges)
free = list(set(range(num_v)) - set(fixed))
# --------------------------------------------------------------------------
# attribute arrays
# --------------------------------------------------------------------------
x = array(vertices, dtype=float).reshape((-1, 3)) # m
p = array(loads, dtype=float).reshape((-1, 3)) # kN
qpre = array(qpre, dtype=float).reshape((-1, 1))
fpre = array(fpre, dtype=float).reshape((-1, 1)) # kN
lpre = array(lpre, dtype=float).reshape((-1, 1)) # m
linit = array(linit, dtype=float).reshape((-1, 1)) # m
E = array(E, dtype=float).reshape((-1, 1)) # kN/mm2 => GPa
radius = array(radius, dtype=float).reshape((-1, 1)) # mm
# --------------------------------------------------------------------------
# sectional properties
# --------------------------------------------------------------------------
A = 3.14159 * radius ** 2 # mm2
EA = E * A # kN
# --------------------------------------------------------------------------
# create the connectivity matrices
# after spline edges have been aligned
# --------------------------------------------------------------------------
C = connectivity_matrix(edges, 'csr')
Ct = C.transpose()
Ci = C[:, free]
Cit = Ci.transpose()
Ct2 = Ct.copy()
Ct2.data **= 2
# --------------------------------------------------------------------------
# if none of the initial lengths are set,
# set the initial lengths to the current lengths
# --------------------------------------------------------------------------
if all(linit == 0):
linit = normrow(C.dot(x))
# --------------------------------------------------------------------------
# initial values
# --------------------------------------------------------------------------
q = ones((num_e, 1), dtype=float)
l = normrow(C.dot(x)) # noqa: E741
f = q * l
v = zeros((num_v, 3), dtype=float)
r = zeros((num_v, 3), dtype=float)
# --------------------------------------------------------------------------
# helpers
# --------------------------------------------------------------------------
def rk(x0, v0, steps=2):
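        # Explicit Runge-Kutta update of the velocities: `a` evaluates the
        # damped acceleration at an intermediate position, and the 1-, 2- and
        # 4-stage schemes below combine those evaluations into a velocity
        # increment dv.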
def a(t, v):
dx = v * t
x[free] = x0[free] + dx[free]
# update residual forces
r[free] = p[free] - D.dot(x)
return cb * r / mass
if steps == 1:
return a(dt, v0)
if steps == 2:
B = [0.0, 1.0]
K0 = dt * a(K[0][0] * dt, v0)
K1 = dt * a(K[1][0] * dt, v0 + K[1][1] * K0)
dv = B[0] * K0 + B[1] * K1
return dv
if steps == 4:
B = [1. / 6., 1. / 3., 1. / 3., 1. / 6.]
K0 = dt * a(K[0][0] * dt, v0)
K1 = dt * a(K[1][0] * dt, v0 + K[1][1] * K0)
K2 = dt * a(K[2][0] * dt, v0 + K[2][1] * K0 + K[2][2] * K1)
K3 = dt * a(K[3][0] * dt, v0 + K[3][1] * K0 + K[3][2] * K1 + K[3][3] * K2)
dv = B[0] * K0 + B[1] * K1 + B[2] * K2 + B[3] * K3
return dv
raise NotImplementedError
# --------------------------------------------------------------------------
# start iterating
# --------------------------------------------------------------------------
for k in range(kmax):
# print(k)
q_fpre = fpre / l
q_lpre = f / lpre
q_EA = EA * (l - linit) / (linit * l)
q_lpre[isinf(q_lpre)] = 0
q_lpre[isnan(q_lpre)] = 0
q_EA[isinf(q_EA)] = 0
q_EA[isnan(q_EA)] = 0
q = qpre + q_fpre + q_lpre + q_EA
Q = diags([q[:, 0]], [0])
D = Cit.dot(Q).dot(C)
mass = 0.5 * dt ** 2 * Ct2.dot(qpre + q_fpre + q_lpre + EA / linit)
# RK
x0 = x.copy()
v0 = ca * v.copy()
dv = rk(x0, v0, steps=4)
v[free] = v0[free] + dv[free]
dx = v * dt
x[free] = x0[free] + dx[free]
# update
u = C.dot(x)
l = normrow(u) # noqa: E741
f = q * l
r = p - Ct.dot(Q).dot(u)
# crits
crit1 = norm(r[free])
crit2 = norm(dx[free])
# callback
if callback:
callback(k, x, [crit1, crit2], callback_args)
# convergence
if crit1 < tol1:
break
if crit2 < tol2:
break
return x, q, f, l, r
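# A minimal usage sketch (not part of the library's documented examples): a
# three-node cable with both ends fixed and a unit downward load on the middle
# node.  All numbers are arbitrary; prescribed forces, lengths and stiffnesses
# are left at zero so only the force densities `qpre` drive the form finding.
def _example_three_node_cable():
    vertices = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]]
    edges = [(0, 1), (1, 2)]
    fixed = [0, 2]
    loads = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 0.0, 0.0]]
    qpre = [1.0, 1.0]
    zero = [0.0, 0.0]
    xyz, q, f, l, r = dr_numpy(vertices, edges, fixed, loads,
                               qpre, zero, zero, zero, zero, zero)
    return xyz  # the middle node should end up below the supports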
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
| 34.067227
| 103
| 0.398249
| 143
| 0.017637
| 0
| 0
| 0
| 0
| 0
| 0
| 3,773
| 0.465343
|
8362ff8cbd0cfe323812bd28b2652a04191c1026
| 462
|
py
|
Python
|
getColorFromNumber.py
|
clean-code-craft-tcq-1/modular-python-preetikadyan
|
0775e7e62edbbb0d7c3506b2bd072562a44d7f8b
|
[
"MIT"
] | null | null | null |
getColorFromNumber.py
|
clean-code-craft-tcq-1/modular-python-preetikadyan
|
0775e7e62edbbb0d7c3506b2bd072562a44d7f8b
|
[
"MIT"
] | null | null | null |
getColorFromNumber.py
|
clean-code-craft-tcq-1/modular-python-preetikadyan
|
0775e7e62edbbb0d7c3506b2bd072562a44d7f8b
|
[
"MIT"
] | null | null | null |
from main import *
def get_color_from_pair_number(pair_number):
zero_based_pair_number = pair_number - 1
major_index = zero_based_pair_number // len(MINOR_COLORS)
if major_index >= len(MAJOR_COLORS):
raise Exception('Major index out of range')
minor_index = zero_based_pair_number % len(MINOR_COLORS)
if minor_index >= len(MINOR_COLORS):
raise Exception('Minor index out of range')
return MAJOR_COLORS[major_index], MINOR_COLORS[minor_index]
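# Illustrative call (a sketch): MAJOR_COLORS and MINOR_COLORS are imported from
# main.py, which is not shown here.  With the conventional 25-pair colour code
# (5 major x 5 minor colours), pair number 7 resolves to the 2nd major colour
# paired with the 2nd minor colour.
if __name__ == '__main__':
    print(get_color_from_pair_number(7))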
| 42
| 61
| 0.779221
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.112554
|
83638a87db865ba288d6ca6639d585c34a522b6e
| 98
|
py
|
Python
|
raspy/io/pwm_channel.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
raspy/io/pwm_channel.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
raspy/io/pwm_channel.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
"""The PWM channel to use."""
CHANNEL0 = 0
"""Channel zero."""
CHANNEL1 = 1
"""Channel one."""
| 10.888889
| 29
| 0.581633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 66
| 0.673469
|
836427cbbb35895144687ddb6c7a92d78b59686e
| 10,157
|
py
|
Python
|
xls/dslx/interpreter/concrete_type_helpers.py
|
hafixo/xls
|
21009ec2165d04d0037d9cf3583b207949ef7a6d
|
[
"Apache-2.0"
] | null | null | null |
xls/dslx/interpreter/concrete_type_helpers.py
|
hafixo/xls
|
21009ec2165d04d0037d9cf3583b207949ef7a6d
|
[
"Apache-2.0"
] | null | null | null |
xls/dslx/interpreter/concrete_type_helpers.py
|
hafixo/xls
|
21009ec2165d04d0037d9cf3583b207949ef7a6d
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
#
# Copyright 2020 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for dealing with concrete types and interpreter values."""
from typing import Tuple, Optional
from absl import logging
from xls.dslx import bit_helpers
from xls.dslx.interpreter.errors import FailureError
from xls.dslx.interpreter.value import Tag
from xls.dslx.interpreter.value import Value
from xls.dslx.python import cpp_ast as ast
from xls.dslx.python.cpp_concrete_type import ArrayType
from xls.dslx.python.cpp_concrete_type import BitsType
from xls.dslx.python.cpp_concrete_type import ConcreteType
from xls.dslx.python.cpp_concrete_type import EnumType
from xls.dslx.python.cpp_concrete_type import is_ubits
from xls.dslx.python.cpp_concrete_type import TupleType
from xls.dslx.python.cpp_pos import Span
from xls.dslx.python.cpp_scanner import Keyword
from xls.dslx.python.cpp_scanner import Token
from xls.dslx.python.cpp_scanner import TokenKind
from xls.dslx.python.cpp_scanner import TYPE_KEYWORDS_TO_SIGNEDNESS_AND_BITS
def _strength_reduce_enum(type_: ast.Enum, bit_count: int) -> ConcreteType:
"""Turns an enum to corresponding (bits) concrete type (w/signedness).
For example, used in conversion checks.
Args:
type_: AST node (enum definition) to convert.
bit_count: The bit count of the underlying bits type for the enum
definition, as determined by type inference or interpretation.
Returns:
The concrete type that represents the enum's underlying bits type.
"""
assert isinstance(type_, ast.Enum), type_
signed = type_.signed
assert isinstance(signed, bool), type_
return BitsType(signed, bit_count)
def concrete_type_from_value(value: Value) -> ConcreteType:
"""Returns the concrete type of 'value'.
Note that:
* Non-zero-length arrays are assumed (for zero length arrays we can't
currently deduce the type from the value because the concrete element type
    is not reified in the array value).
* Enums are strength-reduced to their underlying bits (storage) type.
Args:
value: Value to determine the concrete type for.
"""
if value.tag in (Tag.UBITS, Tag.SBITS):
signed = value.tag == Tag.SBITS
return BitsType(signed, value.bits_payload.bit_count)
elif value.tag == Tag.ARRAY:
element_type = concrete_type_from_value(value.array_payload.index(0))
return ArrayType(element_type, len(value))
elif value.tag == Tag.TUPLE:
return TupleType(
tuple(concrete_type_from_value(m) for m in value.tuple_members))
else:
assert value.tag == Tag.ENUM, value
return _strength_reduce_enum(value.type_, value.bits_payload.bit_count)
def concrete_type_from_element_type_and_dims(
element_type: ConcreteType, dims: Tuple[int, ...]) -> ConcreteType:
"""Wraps element_type in arrays according to `dims`, dims[0] as most minor."""
t = element_type
for dim in dims:
t = ArrayType(t, dim)
return t
def concrete_type_from_dims(primitive: Token,
dims: Tuple[int, ...]) -> 'ConcreteType':
"""Creates a concrete type from the primitive type token and dims.
Args:
primitive: The token holding the primitive type as a keyword.
dims: Dimensions to apply to the primitive type; e.g. () is scalar, (5) is
1-D array of 5 elements having the primitive type.
Returns:
A concrete type object.
Raises:
ValueError: If the primitive keyword is unrecognized or dims are empty.
"""
if primitive.is_keyword(Keyword.BITS) or primitive.is_keyword(Keyword.UN):
base_type = BitsType(signed=False, size=dims[-1])
elif primitive.is_keyword(Keyword.SN):
base_type = BitsType(signed=True, size=dims[-1])
else:
assert primitive.kind == TokenKind.KEYWORD
signedness, bits = TYPE_KEYWORDS_TO_SIGNEDNESS_AND_BITS[primitive.value]
element_type = BitsType(signedness, bits)
while dims:
dims, minor = dims[:-1], dims[-1]
element_type = ArrayType(element_type, minor)
return element_type
result = concrete_type_from_element_type_and_dims(base_type, dims[:-1])
logging.vlog(4, '%r %r => %r', primitive, dims, result)
return result
def _value_compatible_with_type(module: ast.Module, type_: ConcreteType,
value: Value) -> bool:
"""Returns whether value is compatible with type_ (recursively)."""
assert isinstance(value, Value), value
if isinstance(type_, TupleType) and value.is_tuple():
return all(
_value_compatible_with_type(module, ct, m)
for ct, m in zip(type_.get_unnamed_members(), value.tuple_members))
if isinstance(type_, ArrayType) and value.is_array():
et = type_.get_element_type()
return all(
_value_compatible_with_type(module, et, m)
for m in value.array_payload.elements)
if isinstance(type_, EnumType) and value.tag == Tag.ENUM:
return type_.get_nominal_type(module) == value.type_
if isinstance(type_,
BitsType) and not type_.signed and value.tag == Tag.UBITS:
return value.bits_payload.bit_count == type_.get_total_bit_count()
if isinstance(type_, BitsType) and type_.signed and value.tag == Tag.SBITS:
return value.bits_payload.bit_count == type_.get_total_bit_count()
if value.tag == Tag.ENUM and isinstance(type_, BitsType):
return (value.type_.get_signedness() == type_.signed and
value.bits_payload.bit_count == type_.get_total_bit_count())
if value.tag == Tag.ARRAY and is_ubits(type_):
flat_bit_count = value.array_payload.flatten().bits_payload.bit_count
return flat_bit_count == type_.get_total_bit_count()
if isinstance(type_, EnumType) and value.is_bits():
return (type_.signed == (value.tag == Tag.SBITS) and
type_.get_total_bit_count() == value.get_bit_count())
raise NotImplementedError(type_, value)
def concrete_type_accepts_value(module: ast.Module, type_: ConcreteType,
value: Value) -> bool:
"""Returns whether 'value' conforms to this concrete type."""
if value.tag == Tag.UBITS:
return (isinstance(type_, BitsType) and not type_.signed and
value.bits_payload.bit_count == type_.get_total_bit_count())
if value.tag == Tag.SBITS:
return (isinstance(type_, BitsType) and type_.signed and
value.bits_payload.bit_count == type_.get_total_bit_count())
if value.tag in (Tag.ARRAY, Tag.TUPLE, Tag.ENUM):
return _value_compatible_with_type(module, type_, value)
raise NotImplementedError(type_, value)
def concrete_type_convert_value(module: ast.Module, type_: ConcreteType,
value: Value, span: Span,
enum_values: Optional[Tuple[Value, ...]],
enum_signed: Optional[bool]) -> Value:
"""Converts 'value' into a value of this concrete type."""
logging.vlog(3, 'Converting value %s to type %s', value, type_)
if value.tag == Tag.UBITS and isinstance(type_, ArrayType):
bits_per_element = type_.get_element_type().get_total_bit_count().value
bits = value.bits_payload
def bit_slice_value_at_index(i):
return Value(
Tag.UBITS,
bits.slice(
i * bits_per_element, (i + 1) * bits_per_element, lsb_is_0=False))
return Value.make_array(
tuple(bit_slice_value_at_index(i) for i in range(type_.size.value)))
if (isinstance(type_, EnumType) and
value.tag in (Tag.UBITS, Tag.SBITS, Tag.ENUM) and
value.get_bit_count() == type_.get_total_bit_count()):
# Check that the bits we're converting from are present in the enum type
# we're converting to.
nominal_type = type_.get_nominal_type(module)
for enum_value in enum_values:
if value.bits_payload == enum_value.bits_payload:
break
else:
raise FailureError(
span,
'Value is not valid for enum {}: {}'.format(nominal_type.identifier,
value))
return Value.make_enum(value.bits_payload, nominal_type)
if (value.tag == Tag.ENUM and isinstance(type_, BitsType) and
type_.get_total_bit_count() == value.get_bit_count()):
constructor = Value.make_sbits if type_.signed else Value.make_ubits
bit_count = type_.get_total_bit_count().value
return constructor(bit_count, value.bits_payload.value)
def zero_ext() -> Value:
assert isinstance(type_, BitsType)
constructor = Value.make_sbits if type_.signed else Value.make_ubits
bit_count = type_.get_total_bit_count().value
return constructor(bit_count,
value.get_bits_value() & bit_helpers.to_mask(bit_count))
def sign_ext() -> Value:
assert isinstance(type_, BitsType)
constructor = Value.make_sbits if type_.signed else Value.make_ubits
bit_count = type_.get_total_bit_count().value
logging.vlog(3, 'Sign extending %s to %s', value, bit_count)
return constructor(bit_count, value.bits_payload.sign_ext(bit_count).value)
if value.tag == Tag.UBITS:
return zero_ext()
if value.tag == Tag.SBITS:
return sign_ext()
if value.tag == Tag.ENUM:
assert enum_signed is not None
return sign_ext() if enum_signed else zero_ext()
# If we're converting an array into bits, flatten the array payload.
if value.tag == Tag.ARRAY and isinstance(type_, BitsType):
return value.array_payload.flatten()
if concrete_type_accepts_value(module, type_, value): # Vacuous conversion.
return value
raise FailureError(
span,
'Interpreter failure: cannot convert value %s (of type %s) to type %s' %
(value, concrete_type_from_value(value), type_))
| 39.216216
| 80
| 0.713104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,488
| 0.244954
|
83655e2b69ea8d94a79a740f034c0045712e2d9d
| 97
|
py
|
Python
|
ABC/131/a.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABC/131/a.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABC/131/a.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
S = str(input())
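# "Bad" if any two consecutive characters of the 4-character input coincide,
# otherwise "Good".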
if S[0]==S[1] or S[1]==S[2] or S[2]==S[3]:
print("Bad")
else:
print("Good")
| 16.166667
| 42
| 0.494845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.113402
|
83663c35c9a7b7d5e9b6087f0826f94225c82bb6
| 15,354
|
py
|
Python
|
model_neu/optimized/hyperutils.py
|
lelange/cu-ssp
|
9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f
|
[
"MIT"
] | null | null | null |
model_neu/optimized/hyperutils.py
|
lelange/cu-ssp
|
9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f
|
[
"MIT"
] | null | null | null |
model_neu/optimized/hyperutils.py
|
lelange/cu-ssp
|
9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f
|
[
"MIT"
] | null | null | null |
from bson import json_util
import json
import os
import numpy as np
import tensorflow as tf
from keras.layers.core import K #import keras.backend as K
import time
import pandas as pd
import multiprocessing
#
from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
RESULTS_DIR = "results/"
MAXLEN_SEQ = 700
data_root = '/nosave/lange/cu-ssp/data/'
residue_list = list('ACEDGFIHKMLNQPSRTWVYX') + ['NoSeq']
q8_list = list('LBEGIHST') + ['NoSeq']
"""Json utils to print, save and load training results."""
def print_json(result):
"""Pretty-print a jsonable structure (e.g.: result)."""
print(json.dumps(
result,
default=json_util.default, sort_keys=True,
indent=4, separators=(',', ': ')
))
def save_json_result(model_name, result):
"""Save json to a directory and a filename."""
result_name = '{}.txt.json'.format(model_name)
if not os.path.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
with open(os.path.join(RESULTS_DIR, result_name), 'w') as f:
json.dump(
result, f,
default=json_util.default, sort_keys=True,
indent=4, separators=(',', ': ')
)
def load_json_result(best_result_name):
"""Load json from a path (directory + filename)."""
result_path = os.path.join(RESULTS_DIR, best_result_name)
with open(result_path, 'r') as f:
return json.JSONDecoder().decode(
f.read()
# default=json_util.default,
# separators=(',', ': ')
)
def load_best_hyperspace(name = 'json'):
results = [
f for f in list(sorted(os.listdir(RESULTS_DIR))) if name in f
]
if len(results) == 0:
return None
best_result_name = results[-1]
return load_json_result(best_result_name)["space"]
# transformations for pssm:
from scipy.stats import logistic  # sigmoid_p relies on scipy's logistic CDF
def sigmoid_p(data):
return logistic.cdf(data)
# transformations for hmm:
def normal_h(data):
return 2**((-data/1000))
# for both:
def standard(data):
mean = np.mean(data)
std = np.std(data)
data_ = (data - mean) / std
return data_
# Computes and returns the n-grams of each sequence; the default n=1 yields
# unigrams (single residues), not trigrams.
def seq2ngrams(seqs, n = 1):
return np.array([[seq[i : i + n] for i in range(len(seq))] for seq in seqs])
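# e.g. seq2ngrams(['ACE'], n=2) -> [['AC', 'CE', 'E']]: one gram per position,
# so the grams at the tail of each sequence are shorter than n.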
## metrics for this task:
# The custom accuracy metric used for this task: argmax accuracy computed only
# over positions whose true label is not the padding class 0.
def accuracy(y_true, y_predicted):
y = tf.argmax(y_true, axis =- 1)
y_ = tf.argmax(y_predicted, axis =- 1)
mask = tf.greater(y, 0)
return K.cast(K.equal(tf.boolean_mask(y, mask), tf.boolean_mask(y_, mask)), K.floatx())
def weighted_accuracy(y_true, y_pred):
return K.sum(K.equal(K.argmax(y_true, axis=-1),
K.argmax(y_pred, axis=-1)) * K.sum(y_true, axis=-1)) / K.sum(y_true)
def kullback_leibler_divergence(y_true, y_pred):
'''Calculates the Kullback-Leibler (KL) divergence between prediction
and target values.
'''
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
return K.sum(y_true * K.log(y_true / y_pred), axis=-1)
def matthews_correlation(y_true, y_pred):
'''Calculates the Matthews correlation coefficient measure for quality
of binary classification problems.
'''
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
tp = K.sum(y_pos * y_pred_pos)
tn = K.sum(y_neg * y_pred_neg)
fp = K.sum(y_neg * y_pred_pos)
fn = K.sum(y_pos * y_pred_neg)
numerator = (tp * tn - fp * fn)
denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return numerator / (denominator + K.epsilon())
def precision(y_true, y_pred):
'''Calculates the precision, a metric for multi-label classification of
how many selected items are relevant.
'''
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def recall(y_true, y_pred):
'''Calculates the recall, a metric for multi-label classification of
how many relevant items are selected.
'''
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def fbeta_score(y_true, y_pred, beta=1):
'''Calculates the F score, the weighted harmonic mean of precision and recall.
This is useful for multi-label classification, where input samples can be
classified as sets of labels. By only using accuracy (precision) a model
would achieve a perfect score by simply assigning every class to every
input. In order to avoid this, a metric should penalize incorrect class
assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)
computes this, as a weighted mean of the proportion of correct class
assignments vs. the proportion of incorrect class assignments.
With beta = 1, this is equivalent to a F-measure. With beta < 1, assigning
correct classes becomes more important, and with beta > 1 the metric is
instead weighted towards penalizing incorrect class assignments.
'''
if beta < 0:
raise ValueError('The lowest choosable beta is zero (only precision).')
# If there are no true positives, fix the F score at 0 like sklearn.
if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
return 0
p = precision(y_true, y_pred)
r = recall(y_true, y_pred)
bb = beta ** 2
fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
return fbeta_score
# losses:
def nll(y_true, y_pred):
""" Negative log likelihood. """
    # keras.losses.binary_crossentropy gives the mean
    # over the last axis; we require the sum.
return K.sum(K.binary_crossentropy(y_true, y_pred), axis=-1)
'''
def get_data(npy_path, normalize_profiles):
    # shuffle the data?
data = np.load(npy_path+'.npy')
max_len = 700
data_reshape = data.reshape(data.shape[0], 700, -1)
residue_onehot = data_reshape[:,:,0:22]
residue_q8_onehot = data_reshape[:,:,22:31]
profile = data_reshape[:,:,35:57]
#pad profiles to same length
zero_arr = np.zeros((profile.shape[0], max_len - profile.shape[1], profile.shape[2]))
profile_padded = np.concatenate([profile, zero_arr], axis=1)
residue_array = np.array(residue_list)[residue_onehot.argmax(2)]
q8_array = np.array(q8_list)[residue_q8_onehot.argmax(2)]
residue_str_list = []
q8_str_list = []
for vec in residue_array:
x = ''.join(vec[vec != 'NoSeq'])
residue_str_list.append(x)
for vec in q8_array:
x = ''.join(vec[vec != 'NoSeq'])
q8_str_list.append(x)
id_list = np.arange(1, len(residue_array) + 1)
len_list = np.array([len(x) for x in residue_str_list])
train_df = pd.DataFrame({'id': id_list, 'len': len_list, 'input': residue_str_list, 'expected': q8_str_list})
input_one_hot = residue_onehot
q8_onehot = residue_q8_onehot
train_input_seqs, train_target_seqs= train_df[['input', 'expected']][(train_df.len <= 700)].values.T
input_seqs
input_pssm = profile_padded
#SPÄTERE::
#nput_hmm = None
#rsa_onehot = None; output_data = [q8_onehot, rsa_onehot]
#input_data = [input_one_hot, input_seqs, input_pssm, input_hmm]
input_data = [input_one_hot, input_seqs, input_pssm]
output_data = q8_onehot
return input_data, output_data
'''
def load_augmented_data(npy_path, max_len):
data = np.load(npy_path)
data_reshape = data.reshape(data.shape[0], 700, -1)
residue_onehot = data_reshape[:,:,0:22]
residue_q8_onehot = data_reshape[:,:,22:31]
profile = data_reshape[:,:,35:57]
#pad profiles to same length
zero_arr = np.zeros((profile.shape[0], max_len - profile.shape[1], profile.shape[2]))
profile_padded = np.concatenate([profile, zero_arr], axis=1)
residue_array = np.array(residue_list)[residue_onehot.argmax(2)]
q8_array = np.array(q8_list)[residue_q8_onehot.argmax(2)]
residue_str_list = []
q8_str_list = []
for vec in residue_array:
x = ''.join(vec[vec != 'NoSeq'])
residue_str_list.append(x)
for vec in q8_array:
x = ''.join(vec[vec != 'NoSeq'])
q8_str_list.append(x)
id_list = np.arange(1, len(residue_array) + 1)
len_list = np.array([len(x) for x in residue_str_list])
train_df = pd.DataFrame({'id': id_list, 'len': len_list, 'input': residue_str_list, 'expected': q8_str_list})
return train_df, profile_padded
def get_data():
cb513filename = data_root+'data_princeton/cb513.npy'
cb6133filteredfilename = data_root+'data_princeton/cb6133filtered.npy'
maxlen_seq = 700
# load train and test and cut length to maxlen_seq
train_df, X_aug_train = load_augmented_data(cb6133filteredfilename, maxlen_seq)
train_input_seqs, train_target_seqs = train_df[['input', 'expected']][(train_df.len <= maxlen_seq)].values.T
test_df, X_aug_test = load_augmented_data(cb513filename, maxlen_seq)
test_input_seqs, test_target_seqs = test_df[['input', 'expected']][(test_df.len <= maxlen_seq)].values.T
# Using the tokenizer to encode and decode the sequences for use in training
# use preprocessing tools for text from keras to encode input sequence as word rank numbers and target sequence as one hot.
# To ensure easy to use training and testing, all sequences are padded with zeros to the maximum sequence length
# transform sequences to trigrams
train_input_grams = seq2ngrams(train_input_seqs)
# transform sequences
# fit alphabet on train basis
tokenizer_encoder = Tokenizer()
tokenizer_encoder.fit_on_texts(train_input_grams)
tokenizer_decoder = Tokenizer(char_level=True)
tokenizer_decoder.fit_on_texts(train_target_seqs)
# train
train_input_data = tokenizer_encoder.texts_to_sequences(train_input_grams)
X_train = sequence.pad_sequences(train_input_data, maxlen=maxlen_seq, padding='post')
# transform targets to one-hot
train_target_data = tokenizer_decoder.texts_to_sequences(train_target_seqs)
train_target_data = sequence.pad_sequences(train_target_data, maxlen=maxlen_seq, padding='post')
y_train = to_categorical(train_target_data)
input_one_hot = to_categorical(X_train)
# test
test_input_grams = seq2ngrams(test_input_seqs)
test_input_data = tokenizer_encoder.texts_to_sequences(test_input_grams)
X_test = sequence.pad_sequences(test_input_data, maxlen=maxlen_seq, padding='post')
test_target_data = tokenizer_decoder.texts_to_sequences(test_target_seqs)
test_target_data = sequence.pad_sequences(test_target_data, maxlen=maxlen_seq, padding='post')
y_test = to_categorical(test_target_data)
input_one_hot_test = to_categorical(X_test)
#### validation data
'''
n_samples = len(train_df)
np.random.seed(0)
validation_idx = np.random.choice(np.arange(n_samples), size=300, replace=False)
training_idx = np.array(list(set(np.arange(n_samples)) - set(validation_idx)))
X_val = X_train[validation_idx]
X_train = X_train[training_idx]
y_val = y_train[validation_idx]
y_train = y_train[training_idx]
X_aug_val = X_aug_train[validation_idx]
X_aug_train = X_aug_train[training_idx]
'''
#hmm profiles
input_hmm = np.load(data_root+'data_princeton/hmm_train.npy', allow_pickle=True)[:,:700,:]
input_hmm_test = np.load(data_root+'data_princeton/hmm_cb513.npy', allow_pickle=True)[:,:700,:]
#elmo embedding
input_elmo_train = np.load(data_root+'data_princeton/train_input_embedding.npy')
input_elmo_test = np.load(data_root+'data_princeton/cb513_input_embedding.npy')
print(input_elmo_train.shape)
print(input_elmo_test.shape)
input_data_train = [input_one_hot, X_train, input_elmo_train, standard(X_aug_train), input_hmm]
output_data_train = y_train
print(len(y_train))
print(input_hmm.shape)
print(len(y_test))
print(input_hmm_test.shape)
input_data_test = [input_one_hot_test, X_test, input_elmo_test, standard(X_aug_test), input_hmm_test]
output_data_test = y_test
return input_data_train, output_data_train, input_data_test, output_data_test
# fit_on_texts Updates internal vocabulary based on a list of texts
# texts_to_sequences Transforms each text in texts to a sequence of integers, 0 is reserved for padding
# finished; only get_data still to do
def evaluate_model(model, load_file, hype_space, X_test, y_test):
start_time = time.time()
file_test = ['cb513'] #add more later
test_accs = []
for test in file_test:
model.load_weights(load_file)
score = model.evaluate(X_test, y_test, verbose=2, batch_size=1)
for metric, s in zip(model.metrics_names, score):
print(test + ' test ', metric, ': ', s)
test_accs.append(score[1])
m, s = divmod(time.time() - start_time, 60)
print("Needed {:.0f}min {:.0f}s to evaluate model.".format(m, s))
return dict(zip(file_test, test_accs))
def load_6133_filted():
'''
TRAIN data Cullpdb+profile_6133_filtered
Test data CB513\CASP10\CASP11
'''
print("Loading train data (Cullpdb_filted)...")
    data = np.load()  # NOTE: the path to the Cullpdb+profile_6133_filtered .npy file is missing here
data = np.reshape(data, (-1, 700, 57))
# print data.shape
datahot = data[:, :, 0:21] # sequence feature
# print 'sequence feature',dataonehot[1,:3,:]
datapssm = data[:, :, 35:56] # profile feature
# print 'profile feature',datapssm[1,:3,:]
labels = data[:, :, 22:30] # secondary struture label , 8-d
# shuffle data
# np.random.seed(2018)
num_seqs, seqlen, feature_dim = np.shape(data)
num_classes = labels.shape[2]
seq_index = np.arange(0, num_seqs) #
np.random.shuffle(seq_index)
# train data
trainhot = datahot[seq_index[:5278]] # 21
trainlabel = labels[seq_index[:5278]] # 8
trainpssm = datapssm[seq_index[:5278]] # 21
# val data
vallabel = labels[seq_index[5278:5534]] # 8
valpssm = datapssm[seq_index[5278:5534]] # 21
valhot = datahot[seq_index[5278:5534]] # 21
train_hot = np.ones((trainhot.shape[0], trainhot.shape[1]))
    for i in range(trainhot.shape[0]):
        for j in range(trainhot.shape[1]):
if np.sum(trainhot[i, j, :]) != 0:
train_hot[i, j] = np.argmax(trainhot[i, j, :])
val_hot = np.ones((valhot.shape[0], valhot.shape[1]))
    for i in range(valhot.shape[0]):
        for j in range(valhot.shape[1]):
if np.sum(valhot[i, j, :]) != 0:
val_hot[i, j] = np.argmax(valhot[i, j, :])
solvindex = range(33, 35)
trainsolvlabel = data[:5600, :, solvindex]
trainsolvvalue = trainsolvlabel[:, :, 0] * 2 + trainsolvlabel[:, :, 1]
trainsolvlabel = np.zeros((trainsolvvalue.shape[0], trainsolvvalue.shape[1], 4))
    for i in range(trainsolvvalue.shape[0]):
        for j in range(trainsolvvalue.shape[1]):
if np.sum(trainlabel[i, j, :]) != 0:
trainsolvlabel[i, j, trainsolvvalue[i, j]] = 1
return train_hot, trainpssm, trainlabel, val_hot, valpssm, vallabel
| 36.557143
| 127
| 0.681972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,824
| 0.379265
|
8369920fc0165d90314e66e5b7970c7cffdf56d6
| 106
|
py
|
Python
|
spark_application/transformations/__init__.py
|
ketanvatsalya/pyspark_project_template
|
72f6cc843ce04cbbf15eaf49c2435b7f31366194
|
[
"MIT"
] | null | null | null |
spark_application/transformations/__init__.py
|
ketanvatsalya/pyspark_project_template
|
72f6cc843ce04cbbf15eaf49c2435b7f31366194
|
[
"MIT"
] | null | null | null |
spark_application/transformations/__init__.py
|
ketanvatsalya/pyspark_project_template
|
72f6cc843ce04cbbf15eaf49c2435b7f31366194
|
[
"MIT"
] | null | null | null |
"""
Package to hold the Transformation Classes
"""
from . import base
from . import spend_per_department
| 15.142857
| 42
| 0.764151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.471698
|