blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
126bc1c31467b15f3249bc26ab009233ac079281 | 463c053bcf3f4a7337b634890720ea9467f14c87 | /doc/source/ray-core/doc_code/actor-sync.py | 48bc4eb410c11b92e875b05571c3a5d87c840fce | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | pdames/ray | e8faddc4440976211a6bcead8f8b6e62c1dcda01 | 918d3601c6519d333f10910dc75eb549cbb82afa | refs/heads/master | 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 | Apache-2.0 | 2023-01-14T08:02:21 | 2020-03-06T20:59:04 | Python | UTF-8 | Python | false | false | 881 | py | import asyncio
import ray
# We set num_cpus to zero because this actor will mostly just block on I/O.
@ray.remote(num_cpus=0)
class SignalActor:
def __init__(self):
self.ready_event = asyncio.Event()
def send(self, clear=False):
self.ready_event.set()
if clear:
self.ready_event.clear()
async def wait(self, should_wait=True):
if should_wait:
await self.ready_event.wait()
@ray.remote
def wait_and_go(signal):
ray.get(signal.wait.remote())
print("go!")
signal = SignalActor.remote()
tasks = [wait_and_go.remote(signal) for _ in range(4)]
print("ready...")
# Tasks will all be waiting for the signals.
print("set..")
ray.get(signal.send.remote())
# Tasks are unblocked.
ray.get(tasks)
# Output is:
# ready...
# set..
# (pid=77366) go!
# (pid=77372) go!
# (pid=77367) go!
# (pid=77358) go!
| [
"noreply@github.com"
] | pdames.noreply@github.com |
b54029177b1f51498c2ca5f68014f1adc259274a | 2e1863e159b036b3e2983c310d471608fb6381be | /tests/test_select_multiple_field.py | a5280b4c22725ca481bad8dc8fd1204f4d5658cc | [] | no_license | ltvolks/wtforms-json | 8c33104b2f0fe1929b7c1352c38a93ef8e3329ce | 026f421beeac405d01b2c42e0caf585ee0a6e707 | refs/heads/master | 2021-01-24T00:04:48.906797 | 2013-09-21T12:27:22 | 2013-09-21T12:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,183 | py | import wtforms_json
from wtforms import *
wtforms_json.init()
def test_select_field():
fixtures = [
{'name': 'Scarlet Witch', 'childs': 3, '__result': True},
{'name': 'Black Cat', 'childs': 0, '__result': False},
{'name': 'Tigra', 'childs': 1, '__result': True},
]
class MomForm(Form):
name = TextField()
childs = SelectField(choices=((1, 1), (2, 2), (3, 3)), coerce=int)
for fixture in fixtures:
result = fixture.pop('__result')
assert MomForm.from_json(fixture).validate() == result
def test_select_multiple_field():
fixtures = [
{'name': 'Juggernaut', 'gadgets': [1, 2, 3, 4], '__result': True},
{'name': 'Wolverine', 'gadgets': [], '__result': False},
{'name': 'Beast', 'gadgets': [4], '__result': True},
]
class AppleFanBoyForm(Form):
name = TextField()
gadgets = SelectMultipleField(
choices=(
(1, 'Macbook Pro'),
(2, 'Macbook Air'),
(3, 'iPhone'),
(4, 'iPad')
),
validators=[validators.required()],
coerce=int
)
for fixture in fixtures:
result = fixture.pop('__result')
assert AppleFanBoyForm.from_json(fixture).validate() == result
def test_custom_field():
# a custom field that returns a list
# it doesn't inherits from SelectMultipleField
class SuperPowersField(SelectFieldBase):
POWERS = [
('fly', ''),
('super strength', ''),
('regeneration', ''),
('stamina', ''),
('agility', ''),
]
widget = widgets.Select(multiple=True)
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for item in self.POWERS:
label = self.label_attr or ''
selected = item[0] in self.data
yield (item[0], item[1], selected)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self.data = [
item[0] for item in self.POWERS
if str(item[0]) in valuelist]
if not len(self.data):
self.data = None
def _is_selected(self, item):
return item in self.data
class SuperHeroForm(Form):
name = TextField()
powers = SuperPowersField(
validators=[validators.required()]
)
fixtures = [
{'name': 'Juggernaut', 'powers': ['super strength'], '__result': True},
{
'name': 'Wolverine',
'powers': ['stamina', 'agility', 'regeneration'],
'__result': True
},
{'name': 'Beast', 'powers': ['agility'], '__result': True},
{'name': 'Rocket Rackoon', 'powers': [], '__result': False}
]
for fixture in fixtures:
result = fixture.pop('__result')
assert SuperHeroForm.from_json(fixture).validate() == result
| [
"hans.r.69@gmail.com"
] | hans.r.69@gmail.com |
89134400ccc28a26b516bbb2c28f2c8435a1331f | cdd87af340f1431f7866211551b826c5d8295f51 | /tests/samplers_tests/test_grid.py | 968915cf1fb678ab8efcd4ffba076372c2252405 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | VladSkripniuk/optuna | ce6dc19cc83c9d92da531f1a8782c0ac85063a7a | 81d5b67a81ae14d606e6d6120ce50d02e90b0942 | refs/heads/master | 2021-01-26T01:46:50.574361 | 2020-03-02T14:25:01 | 2020-03-02T14:25:01 | 243,262,903 | 0 | 0 | MIT | 2020-02-26T12:57:39 | 2020-02-26T12:57:39 | null | UTF-8 | Python | false | false | 5,821 | py | from collections import OrderedDict
import itertools
import numpy as np
import pytest
import optuna
from optuna import samplers
if optuna.type_checking.TYPE_CHECKING:
from collections import ValuesView # NOQA
from typing import Dict # NOQA
from typing import List # NOQA
from typing import Union # NOQA
from optuna.samplers.grid import GridValueType # NOQA
from optuna.trial import Trial # NOQA
def _n_grids(search_space):
# type: (Dict[str, List[Union[str, float, None]]]) -> int
return int(np.prod([len(v) for v in search_space.values()]))
def test_grid_sampler_experimental_warning():
# type: () -> None
with pytest.warns(optuna.exceptions.ExperimentalWarning):
optuna.samplers.GridSampler({'some_param': [0, 1]})
def test_study_optimize_with_single_search_space():
# type: () -> None
def objective(trial):
# type: (Trial) -> float
a = trial.suggest_int('a', 0, 100)
b = trial.suggest_uniform('b', -0.1, 0.1)
c = trial.suggest_categorical('c', ('x', 'y'))
d = trial.suggest_discrete_uniform('d', -5, 5, 1)
e = trial.suggest_loguniform('e', 0.0001, 1)
if c == 'x':
return a * d
else:
return b * e
# Test that all combinations of the grid is sampled.
search_space = {
'b': np.arange(-0.1, 0.1, 0.05),
'c': ['x', 'y'],
'd': [-0.5, 0.5],
'e': [0.1],
'a': list(range(0, 100, 20)),
}
n_grids = _n_grids(search_space)
study = optuna.create_study(sampler=samplers.GridSampler(search_space))
study.optimize(objective, n_trials=n_grids)
def sorted_values(d):
# type: (Dict[str, List[GridValueType]]) -> ValuesView[List[GridValueType]]
return OrderedDict(sorted(d.items())).values()
all_grids = itertools.product(*sorted_values(search_space))
all_suggested_values = [tuple([p for p in sorted_values(t.params)]) for t in study.trials]
assert set(all_grids) == set(all_suggested_values)
ids = sorted([t.system_attrs['grid_id'] for t in study.trials])
assert ids == list(range(n_grids))
# Test that an optimization fails if the number of trials is more than that of all grids.
with pytest.raises(ValueError):
study.optimize(objective, n_trials=1)
# Test a non-existing parameter name in the grid.
search_space = {'a': list(range(0, 100, 20))}
study = optuna.create_study(sampler=samplers.GridSampler(search_space))
with pytest.raises(ValueError):
study.optimize(objective)
# Test a value with out of range.
search_space = {
'a': [110], # 110 is out of range specified by the suggest method.
'b': [0],
'c': ['x'],
'd': [0],
'e': [0.1]
}
study = optuna.create_study(sampler=samplers.GridSampler(search_space))
with pytest.raises(ValueError):
study.optimize(objective)
def test_study_optimize_with_multiple_search_spaces():
# type: () -> None
def objective(trial):
# type: (Trial) -> float
a = trial.suggest_int('a', 0, 100)
b = trial.suggest_uniform('b', -100, 100)
return a * b
# Run 3 trials with a search space.
search_space_0 = {
'a': [0, 50],
'b': [-50, 0, 50]
} # type: Dict[str, List[GridValueType]]
sampler_0 = samplers.GridSampler(search_space_0)
study = optuna.create_study(sampler=sampler_0)
study.optimize(objective, n_trials=3)
assert len(study.trials) == 3
for t in study.trials:
assert sampler_0._same_search_space(t.system_attrs['search_space'])
# Run 2 trials with another space.
search_space_1 = {'a': [0, 25], 'b': [-50]} # type: Dict[str, List[GridValueType]]
sampler_1 = samplers.GridSampler(search_space_1)
study.sampler = sampler_1
study.optimize(objective, n_trials=2)
assert not sampler_0._same_search_space(sampler_1._search_space)
assert len(study.trials) == 5
for t in study.trials[:3]:
assert sampler_0._same_search_space(t.system_attrs['search_space'])
for t in study.trials[3:5]:
assert sampler_1._same_search_space(t.system_attrs['search_space'])
# Run 3 trials with the first search space again.
study.sampler = sampler_0
study.optimize(objective, n_trials=3)
assert len(study.trials) == 8
for t in study.trials[:3]:
assert sampler_0._same_search_space(t.system_attrs['search_space'])
for t in study.trials[3:5]:
assert sampler_1._same_search_space(t.system_attrs['search_space'])
for t in study.trials[5:]:
assert sampler_0._same_search_space(t.system_attrs['search_space'])
def test_cast_value():
# type: () -> None
samplers.GridSampler._check_value('x', None)
samplers.GridSampler._check_value('x', True)
samplers.GridSampler._check_value('x', False)
samplers.GridSampler._check_value('x', -1)
samplers.GridSampler._check_value('x', -1.5)
samplers.GridSampler._check_value('x', float('nan'))
samplers.GridSampler._check_value('x', 'foo')
samplers.GridSampler._check_value('x', '')
with pytest.raises(ValueError):
samplers.GridSampler._check_value('x', [1])
def test_has_same_search_space():
# type: () -> None
search_space = {'x': [3, 2, 1], 'y': ['a', 'b', 'c']} # type: Dict[str, List[GridValueType]]
sampler = samplers.GridSampler(search_space)
assert sampler._same_search_space(search_space)
assert sampler._same_search_space({'x': np.array([3, 2, 1]), 'y': ['a', 'b', 'c']})
assert sampler._same_search_space({'y': ['c', 'a', 'b'], 'x': [1, 2, 3]})
assert not sampler._same_search_space({'x': [3, 2, 1, 0], 'y': ['a', 'b', 'c']})
assert not sampler._same_search_space({'x': [3, 2], 'y': ['a', 'b', 'c']})
| [
"sano@preferred.jp"
] | sano@preferred.jp |
80a14f17033899ccf595bdf87f2e69d0d98a674a | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4455861.3.spec | e8931b48c6dd319bb00567b3ab78976b86bbf00e | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,317 | spec | {
"id": "mgm4455861.3",
"metadata": {
"mgm4455861.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 285428,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 4602,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 307,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 448,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1478,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 260990,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 373,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 89858,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 151810,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 243108,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 96042,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 65220,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 612606,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 25114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 10077,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 33168,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 334,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 40472,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 3011464,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 103,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 16,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 1189,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 37,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 3702,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 4758,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 2113,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 540,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 24096,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 80,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 11678,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455861.3/file/999.done.species.stats"
}
},
"id": "mgm4455861.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4455861.3"
}
},
"raw": {
"mgm4455861.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4455861.3"
}
}
} | [
"jared.wilkening@gmail.com"
] | jared.wilkening@gmail.com |
17aedfb1a805c18cd16b8e0bcc4cbcae87f0bcd3 | 81930c01935554f98066c711635d8f9342d19232 | /WebRoot/fckeditor/editor/filemanager/connectors/py/fckutil.py | 8578d65d46eda1990c3536b89568d781cf79b63d | [] | no_license | keets2012/teachingOnLine | c995ca38d06324c80b359afd79b61d9da297e450 | eb64718162310f513c61dc0c220da93eeedef16a | refs/heads/master | 2021-01-10T08:53:04.461883 | 2016-09-12T01:55:15 | 2016-09-12T01:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,666 | py | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
index = fileName.rindex(".")
newFileName = fileName[0:index]
return newFileName
def getExtension(fileName):
index = fileName.rindex(".") + 1
fileExtension = fileName[index:]
return fileExtension
def removeFromStart(string, char):
return string.lstrip(char)
def removeFromEnd(string, char):
return string.rstrip(char)
# Path functions
def combinePaths( basePath, folder ):
return removeFromEnd( basePath, '/' ) + '/' + removeFromStart( folder, '/' )
def getFileName(filename):
" Purpose: helper function to extrapolate the filename "
for splitChar in ["/", "\\"]:
array = filename.split(splitChar)
if (len(array) > 1):
filename = array[-1]
return filename
def sanitizeFolderName( newFolderName ):
"Do a cleanup of the folder name to avoid possible problems"
# Remove . \ / | : ? * " < > and control characters
return re.sub( '(?u)\\.|\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[^\u0000-\u001f\u007f-\u009f]', '_', newFolderName )
def sanitizeFileName( newFileName ):
"Do a cleanup of the file name to avoid possible problems"
# Replace dots in the name with underscores (only one dot can be there... security issue).
if ( Config.ForceSingleExtension ): # remove dots
newFileName = re.sub ( '/\\.(?![^.]*$)/', '_', newFileName ) ;
newFileName = newFileName.replace('\\','/') # convert windows to unix path
newFileName = os.path.basename (newFileName) # strip directories
# Remove \ / | : ? *
return re.sub ( '(?u)/\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[^\u0000-\u001f\u007f-\u009f]/', '_', newFileName )
def getCurrentFolder(currentFolder):
if not currentFolder:
currentFolder = '/'
# Check the current folder syntax (must begin and end with a slash).
if (currentFolder[-1] <> "/"):
currentFolder += "/"
if (currentFolder[0] <> "/"):
currentFolder = "/" + currentFolder
# Ensure the folder path has no double-slashes
while '//' in currentFolder:
currentFolder = currentFolder.replace('//','/')
# Check for invalid folder paths (..)
if '..' in currentFolder or '\\' in currentFolder:
return None
return currentFolder
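# Example behaviour of getCurrentFolder (a sketch, consistent with the checks above):
#   getCurrentFolder('foo//bar') -> '/foo/bar/'
#   getCurrentFolder('../etc')   -> None  (rejected as an invalid path)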
def mapServerPath( environ, url):
" Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to "
# This isn't correct but for the moment there's no other solution
# If this script is under a virtual directory or symlink it will detect the problem and stop
return combinePaths( getRootPath(environ), url )
def mapServerFolder(resourceTypePath, folderPath):
return combinePaths ( resourceTypePath , folderPath )
def getRootPath(environ):
"Purpose: returns the root path on the server"
# WARNING: this may not be thread safe, and doesn't work w/ VirtualServer/mod_python
# Use Config.UserFilesAbsolutePath instead
if environ.has_key('DOCUMENT_ROOT'):
return environ['DOCUMENT_ROOT']
else:
realPath = os.path.realpath( './' )
selfPath = environ['SCRIPT_FILENAME']
selfPath = selfPath [ : selfPath.rfind( '/' ) ]
selfPath = selfPath.replace( '/', os.path.sep)
position = realPath.find(selfPath)
# This can check only that this script isn't run from a virtual dir
# But it avoids the problems that arise if it isn't checked
if ( position < 0 or position <> len(realPath) - len(selfPath) or realPath[ : position ]==''):
raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
return realPath[ : position ]
| [
"928420597@qq.com"
] | 928420597@qq.com |
f36a0117e05b4bddbc624741aec311a233dc86b1 | 06bdb0bcfb92dccf798dcc5fee90ee07e0b8e340 | /Assignment 1/opencl.py | bff4994695864017faae9e7cb2c0648472f8673b | [] | no_license | peter-xxxxx/Hetero-Computing-Homework | ae007019cf39026f4bcc91835cbe35696ed87adc | 76e763021eb5dd7adf7fad7ca82b4c0ceb962cfb | refs/heads/master | 2020-04-02T05:08:58.978875 | 2018-10-21T21:49:53 | 2018-10-21T21:49:53 | 154,054,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,264 | py | #!/usr/bin/env python
import time
import pyopencl as cl
import pyopencl.array
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class openclModule:
def __init__(self, idata):
# idata: an array of lowercase characters.
# Get platform and device
NAME = 'NVIDIA CUDA'
platforms = cl.get_platforms()
devs = None
for platform in platforms:
if platform.name == NAME:
devs = platform.get_devices()
# TODO:
# Set up a command queue:
# host variables
# device memory allocation
# kernel code
self.a_cpu = idata
self.L = len(idata)
# Set up a command queue; we need to enable profiling to time GPU operations:
ctx = cl.Context(devs)
self.queue = cl.CommandQueue(ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
kernel = """
__kernel void func(__global char* a, __global char* b) {
unsigned int i = get_global_id(0);
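            // ASCII uppercase letters sit exactly 32 below their lowercase
            // counterparts, so subtracting 32 capitalizes each character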
b[i] = a[i] - 32;
}
"""
self.a_gpu = cl.array.to_device(self.queue, idata)
self.b_gpu = cl.array.empty(self.queue, idata.shape, idata.dtype)
self.prg = cl.Program(ctx, kernel).build()
def runAdd_parallel(self):
# return: an array containing capitalized characters from idata and running time.
# TODO:
# function call
# memory copy to host
# Return output and measured time
start = time.time()
        self.prg.func(self.queue, self.a_cpu.shape, None, self.a_gpu.data, self.b_gpu.data)
        # wait for the queued kernel to finish so the measured time is real GPU time
        self.queue.finish()
        end = time.time()
return [self.b_gpu.get(), end-start]
def runAdd_serial(self):
#return: an array containing capitalized characters from idata and running time.
b_cpu = np.empty_like(self.a_cpu)
start = time.time()
for i in range(self.L):
b_cpu[i] = chr(ord(self.a_cpu[i])-32)
end = time.time()
return [b_cpu, end-start]
# generate a char array of 'a' to 'z'
alphabet = np.empty(26, 'S1')
for i in range(26):
alphabet[i] = chr(ord('a')+i)
# concatenate sequence
a_cpu = np.tile(alphabet, 1)
# call a openclModule object
openclmo = openclModule(a_cpu)
# run GPU adding and CPU adding
b_gpu = openclmo.runAdd_parallel()
b_cpu = openclmo.runAdd_serial()
# show result
print 'input=\n', a_cpu
print 'py_output=\n', b_cpu # py_output is the output of your serial function
print 'parallel_output=\n', b_gpu # parallel_output is the output of your parallel function
print 'Code equality:\n', (b_cpu[0]==b_gpu[0])
print '--------------------------------'
# concatenate sequence
a_cpu = np.tile(alphabet, 3)
# call a openclModule object
openclmo = openclModule(a_cpu)
# run GPU adding and CPU adding
b_gpu = openclmo.runAdd_parallel()
b_cpu = openclmo.runAdd_serial()
# show result
print 'input=\n', a_cpu
print 'py_output=\n', b_cpu # py_output is the output of your serial function
print 'parallel_output=\n', b_gpu # parallel_output is the output of your parallel function
print 'Code equality:\n', (b_cpu[0]==b_gpu[0])
print '--------------------------------'
x_axis = []
python_time_list = []
gpu_time_list = []
for repeat_time in range(1,30):
a_cpu = np.tile(alphabet, repeat_time)
openclmo = openclModule(a_cpu)
M = 10
times = []
for i in range(M):
times.append(openclmo.runAdd_serial()[1])
python_time_list.append(np.average(times))
times = []
for i in range(M):
times.append(openclmo.runAdd_parallel()[1])
gpu_time_list.append(np.average(times))
x_axis.append(repeat_time)
print 'string_len=', len(a_cpu), '\tpy_time: ', python_time_list[-1], '\tparallel_time: ', gpu_time_list[-1]
# py_time is the running time of your serial function, parallel_time is the running time of your parallel function.
print '--------------------------------'
print 'python time list:\n', python_time_list
print 'gpu time list:\n', gpu_time_list
plt.plot(x_axis, python_time_list, label='cpu')
plt.plot(x_axis, gpu_time_list, label='gpu')
plt.title("cost time vs L on OpenCL")
plt.xlabel('L')
plt.ylabel('time cost')
plt.legend()
plt.savefig('opencl.png')
| [
"xusuyang@peters-Mac.local"
] | xusuyang@peters-Mac.local |
b7301d1fbf818f6ef127734af94615d410d2a459 | 6c995906999a000ff2de54ad021c8ae7ea27344c | /src/python/driver.py | 00afcd2569183b1d3f02948256b61da939a38646 | [] | no_license | harrygun/ZA_rec | 2427b1e042c383b8513e1c44b43258da92ba3736 | 7d4be8279b631de39820c8bf1b84a4b9c48c6cc4 | refs/heads/master | 2021-01-13T15:59:25.678425 | 2016-12-16T01:21:38 | 2016-12-16T01:21:38 | 76,792,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | import os
import numpy as np
import pylab as pl
import genscript.progcontrol as pc
from genscript.extendclass import *
import genscript.mpiutil as mpi
import cosmology.power as power
def Testing_portal(p):
return
param_dict={
'power_spectrum_fname': '/home/xwang/workspace/general-data/power/fiducial_matterpower.dat',
'a_init': 1e-2,
'smooth_R': 0,
'smooth_type': 'Gauss',
'smooth_R_list_type': 'linear'
}
prog_control={
'do_testing': False,
#-------------------------------#
#-------------------------------#
}
if __name__=='__main__':
# ->> initialization <<- #
init_dict=myDict(prog_control)+myDict(param_dict)
p=pc.prog_init(**init_dict)
root='../../workspace/result/'
# ->> smoothing list <<- #
  p.z = 0.
  # ->> Rmin, Rmax: only integer numbers for convenience of file name <<- #
''' -------------------------------------------------
->> Making some plots <<-
-------------------------------------------------
'''
''' -------------------------------------------------
->> do some testing <<-
-------------------------------------------------
'''
if p.do_testing==True:
Testing_portal(p)
# ->> The End <<- #
p.finalize()
| [
"xwang@cita.utoronto.ca"
] | xwang@cita.utoronto.ca |
18ffbacb67477efa68678d7568f47f2f8a6dbe4c | 25afa63cbb605182238a665695fc49324f48a33d | /source/main/urls.py | 0506ae8a5072d8addfeb640c039d1be28e77f2d8 | [] | no_license | York5/Issue_Tracker | 84a45346161e3e7e2668017e24600efa727a5b32 | 5b3a842fc44fb13316e62bcbc96930961772b716 | refs/heads/master | 2023-05-01T01:45:00.844541 | 2019-10-31T12:24:29 | 2019-10-31T12:24:29 | 211,317,633 | 0 | 0 | null | 2023-04-21T20:38:14 | 2019-09-27T12:45:31 | HTML | UTF-8 | Python | false | false | 886 | py | """main URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('webapp.urls', namespace='webapp')),
path('accounts/', include('accounts.urls', namespace='accounts'))
]
| [
"mikhailvishin2@yandex.com"
] | mikhailvishin2@yandex.com |
24bc41eb2f4f89ca812fcf85604187464eda7aa2 | ac661c9fa744984a3e7bd399bd887b466643023a | /audio_chatbot.py | 258aaaadb3388766401895af005b53b68ef3e893 | [] | no_license | ratulbasak/audio_chatbot | 00e8ad5980dcb684c84d0eb53c3fdebc740ab23a | e77a94ede8b93f66fd8fd92cf9958949c38a0a7d | refs/heads/master | 2021-01-13T03:35:02.275159 | 2016-08-08T21:09:17 | 2016-08-08T21:09:17 | 77,297,527 | 0 | 0 | null | 2016-12-24T19:29:44 | 2016-12-24T19:29:43 | null | UTF-8 | Python | false | false | 2,694 | py | from tkinter import *
import os, html
import pyaudio
import speech_recognition as sr
import tkinter.scrolledtext
class App():
def __init__(self):
self.doss = os.getcwd()
self.com_sign = "Bot >> "
self.user_sign = "You >> "
self.r = sr.Recognizer()
self.window = Tk()
self.window.title("ChatBot / arsho")
screen_width = self.window.winfo_screenwidth()
screen_height = self.window.winfo_screenheight()
window_height = 600
window_width = 600
x_pos = (screen_width - window_width) // 2
y_pos = (screen_height - window_height) // 2
geometry_str = str(window_width)+"x"+\
str(window_height)+"+"+str(x_pos)+"+"+\
str(y_pos)
self.window.geometry(geometry_str)
self.start_btn_str = "Talk with me"
self.start_btn = Button(self.window,text=self.start_btn_str,\
command=self.start_btn_click)
self.start_btn.pack(padx=10, pady=10,fill=BOTH)
self.chat = tkinter.scrolledtext.ScrolledText(self.window)
self.chat.pack(padx=10, pady=10, fill=BOTH, expand=True)
self.chat.insert(INSERT,"Welcome to chat. ")
self.window.mainloop()
def start_btn_click(self):
print("Btn clicked.")
self.chat.insert(INSERT,"\n")
self.chat.insert(INSERT,self.com_sign+"Say something\n")
with sr.Microphone() as source:
            # calibrate the recognizer for ambient noise first (this returns None)
            self.r.adjust_for_ambient_noise(source)
audio = self.r.listen(source)
try:
s = (self.r.recognize_google(audio))
message = (s.lower())
self.chat.insert(INSERT,self.user_sign+message+"\n")
if message == "exit":
self.chat.insert(INSERT,self.com_sign+"Bye bye\n")
elif message == "how are you":
self.chat.insert(INSERT,self.com_sign+"I am fine. What about you?\n")
elif message == "you are good":
self.chat.insert(INSERT,self.com_sign+"Thanks for your compliment.\n")
elif message == "goodbye":
self.chat.insert(INSERT,self.com_sign+"See you soon.\n")
else:
self.chat.insert(INSERT,self.com_sign+"I am listening, ain't I?\n")
except sr.UnknownValueError:
self.chat.insert(INSERT,self.com_sign+"could not understand audio\n")
except sr.RequestError as e:
self.chat.insert(INSERT,self.com_sign+"Could not request results$; {0}".format(e))
app = App()
| [
"shovon.sylhet@gmail.com"
] | shovon.sylhet@gmail.com |
ad2ed771f1e096556714341485d9077e6de44995 | 65b4bb5a4ba3284453654ef96cf5e5073228083d | /floydhub_beta_clean.py | 66e8c523073eeca19bb37ae0265b2a4a692cc3f3 | [] | no_license | amlawson98/Colorblind_Neural_Net | 8ffc720af67271de78fed7e81f1d6315126e4db5 | 98223f06c893f9e314f11bca09f345a49307494c | refs/heads/master | 2020-09-08T12:24:32.181109 | 2019-11-12T04:59:23 | 2019-11-12T04:59:23 | 221,132,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,723 | py | # Get images
# imports for the code below (assumed: Keras 2.x and scikit-image APIs,
# matching the colourization tutorial this script follows)
import os
import numpy as np
from keras.layers import InputLayer, Conv2D, UpSampling2D
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.callbacks import TensorBoard
from skimage.color import rgb2lab, lab2rgb
from skimage.io import imsave
X = []
for filename in os.listdir('../Train/'):
X.append(img_to_array(load_img('../Train/'+filename)))
X = np.array(X, dtype=float)
# Set up training and test data
split = int(0.95*len(X))
Xtrain = X[:split]
Xtrain = 1.0/255*Xtrain
#Design the neural network
model = Sequential()
model.add(InputLayer(input_shape=(256, 256, 1)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', strides=2))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
model.add(UpSampling2D((2, 2)))
# Finish model
model.compile(optimizer='rmsprop', loss='mse')
# Image transformer
datagen = ImageDataGenerator(
shear_range=0.2,
zoom_range=0.2,
rotation_range=20,
horizontal_flip=True)
# Generate training data
batch_size = 50
def image_a_b_gen(batch_size):
for batch in datagen.flow(Xtrain, batch_size=batch_size):
lab_batch = rgb2lab(batch)
X_batch = lab_batch[:,:,:,0]
Y_batch = lab_batch[:,:,:,1:] / 128
yield (X_batch.reshape(X_batch.shape+(1,)), Y_batch)
# Train model
TensorBoard(log_dir='/output')
model.fit_generator(image_a_b_gen(batch_size), steps_per_epoch=10000, epochs=1)
# Test images
Xtest = rgb2lab(1.0/255*X[split:])[:,:,:,0]
Xtest = Xtest.reshape(Xtest.shape+(1,))
Ytest = rgb2lab(1.0/255*X[split:])[:,:,:,1:]
Ytest = Ytest / 128
print model.evaluate(Xtest, Ytest, batch_size=batch_size)
# Load black and white images
color_me = []
for filename in os.listdir('../Test/'):
color_me.append(img_to_array(load_img('../Test/'+filename)))
color_me = np.array(color_me, dtype=float)
color_me = rgb2lab(1.0/255*color_me)[:,:,:,0]
color_me = color_me.reshape(color_me.shape+(1,))
# Test model
output = model.predict(color_me)
output = output * 128
# Output colorizations
for i in range(len(output)):
cur = np.zeros((256, 256, 3))
cur[:,:,0] = color_me[i][:,:,0]
cur[:,:,1:] = output[i]
imsave("result/img_"+str(i)+".png", lab2rgb(cur)) | [
"32847387+amlawson98@users.noreply.github.com"
] | 32847387+amlawson98@users.noreply.github.com |
63d1c5aed5f4bd5277d394d2d3c8809e34ccc391 | 468a7f2728fc4024825f30181cc0324e3edefa16 | /venv/bin/chardetect | 52240009f99ce43850d3aee924e058ed4c8a0194 | [] | no_license | CarusCha/pythonProject | 60492a676cd1cc8629f359fc0528b2a105fd1019 | 422913bef98c9ccc95c62287d36564d7a94795ea | refs/heads/main | 2023-07-27T00:40:20.854879 | 2021-09-06T14:54:54 | 2021-09-06T14:54:54 | 403,646,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | #!/Users/chajonghun/PycharmProjects/pythonProject/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"chajonghun.kor@gmail.com"
] | chajonghun.kor@gmail.com | |
ea3d4ec1463d1e8dec3cc664a53e3f17777103e6 | b1ce53aabfcc1852e013d386272d7d4260226995 | /venv/Test8.py | ef3360f3f23e2fa0f3079b38fb454d585ee32656 | [] | no_license | naren6195/pythonProject | 91e57431a20c8e6c8eab0c7ffc6a264e08051a36 | 10950f0205c30315d171dc1468dd176c7d5efb34 | refs/heads/master | 2023-04-25T18:16:47.438760 | 2021-06-01T18:59:39 | 2021-06-01T18:59:39 | 372,927,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | import os
os.chdir("C:\\Users\\Narendra\\Desktop\\NewPythonTest")
print(os.getcwd())
f1=open('4567.txt','r+')
#f2=f1.read()
f3=f1.readlines()
f4=f1.readline()
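# readline() here returns '' because readlines() above already read to end-of-file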
#print(f3)
#print(f4)
for l in f3:
print(l)
| [
"narendrasai6195@live.com"
] | narendrasai6195@live.com |
934f3a82f33908e41b028a031cfa39c63aa7a5e6 | 1f3ef231c7b907fd2da2636909322b6ce115e8ae | /userbot/plugins/support.py | d6abd1ae4be7effd0551c6f0fe1d433008e90f81 | [
"MIT"
] | permissive | indianSammy07/Wolfuser | 61774799e4d981eaded19d553b2baacbac97f8fc | 9c56dde1f81cec9eb4dd85a369f3a1f8b99f0763 | refs/heads/master | 2022-11-16T18:54:10.286881 | 2020-07-22T03:14:01 | 2020-07-22T03:14:01 | 274,834,329 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | """Emoji
Available Commands:
.wolf
Credits to noone
"""
from telethon import events
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd("wolf"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(0,36)
#input_str = event.pattern_match.group(1)
# if input_str == "support":
await event.edit("for our support group")
animation_chars = [
"Click here",
"[Wolf Support](https://t.me/WOLFUSERBOT_SUPPORT)"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])
| [
"noreply@github.com"
] | indianSammy07.noreply@github.com |
c2c8d845e7acdd265fa76cea7af6223a766cddc9 | 47a3d4959fb84085c30cf1eac45a2de1315fdabe | /rango/admin.py | a46eca8b2c51798eee3fadf4453b7395a39cb6ed | [] | no_license | vikasreddy92/tangowithdjango | 80cc430f6e396bedcc1c4cf467efe02dbc34d683 | ef444eb554fde172ae82812e200ac0a2da1f516e | refs/heads/master | 2020-05-29T09:13:46.698535 | 2016-10-05T04:12:33 | 2016-10-05T04:12:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | from django.contrib import admin
from rango.models import Category, Pages, UserProfile
# Register your models here.
admin.site.register(Category)
class PageAdmin(admin.ModelAdmin):
list_display = ('title', 'category' , 'url')
admin.site.register(Pages, PageAdmin)
admin.site.register(UserProfile) | [
"santhoshboggarapu@gmail.com"
] | santhoshboggarapu@gmail.com |
a6c7eec4770c837eabbcd5463e2e7ec107094b27 | 4fc1a545a317c45b827b79d61af30ecd60bcf5f9 | /engine/game_connector.py | 895578e887ffded5e32c1d6fbf410cbb7612d58b | [] | no_license | surenm/hex | 7fad0b8bf22be888d555c7885f541cb707fea1ee | dfdc688908743782b24872c610cb5efa4e39696f | refs/heads/master | 2020-11-29T15:24:04.589840 | 2014-09-17T15:35:10 | 2014-09-17T15:35:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | import game_api
import time
from optparse import OptionParser
import example_player
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("", "--a_input", dest="a_in",
help="input file for player A", metavar="FILE")
parser.add_option("", "--b_input", dest="a_out",
help="input file for player B", metavar="FILE")
parser.add_option("", "--a_output", dest="b_in",
help="output file for player A", metavar="FILE")
parser.add_option("", "--b_output", dest="b_out",
help="output file for player B", metavar="FILE")
parser.add_option("-t", "--test", dest="test",
help="test the game engine", action="store_true")
(options, args) = parser.parse_args()
if options["test"]:
connA = game_api.MockConnection()
connB = game_api.MockConnection()
else:
        connA = game_api.PlayerConnection(options.a_in, options.a_out)
        connB = game_api.PlayerConnection(options.b_in, options.b_out)
api = game_api.GameApi(connA, connB)
# start a new game
starting_state = api.new_game()
print api.game.board
# wait 3 seconds before first move
time.sleep(3)
while not api.game.game_over:
api.next_move()
print api.game.board
# 0.3 seconds per move
time.sleep(0.3)
# update database with winner/loser/gamestate/etc
print "The game is over and the winner is player {}".format(api.game.winner.id)
| [
"max@recoset.com"
] | max@recoset.com |
307a73a433443dbda7b4f25e04203673d8e9775f | 6b532a243c693739151664e9e9772d9391ab39df | /code/intersection.py | f3832e9f9df1c8ed6cc2a7f496b92419a8949dfe | [] | no_license | slugwarz05/checkpoint | f3c592d91c362f15ede662226ee9174e04ff3555 | ab4d8e57589f4058db010d8e5c56a7a0875887c0 | refs/heads/master | 2021-01-15T23:27:40.999510 | 2016-02-12T17:36:26 | 2016-02-12T17:36:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | class Intersection:
"""
Implements an intersection, used for parametrically limiting pedestrian flow.
Note: the intersection is not yet used in this initial model, but its definition
is provided here.
"""
"""
Creates a new intersection.
Args:
int_id: Integer. Unique numerical identifier for the intersection.
nodes: List. List of Node objects that are part of the intersection.
Returns:
A new Intersection object.
"""
def __init__(self, int_id, nodes=[]):
# Unique identifier.
self.int_id = int_id
# List of nodes that are in the intersection
self.nodes = nodes
# State of whether the intersection is available to pedestrians (default - closed to peds).
self.is_open = False
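    # A minimal usage sketch (hypothetical Node objects; a Node is assumed to
    # expose the `available` attribute used by the methods below):
    #
    #     crossing = Intersection(1, nodes=[node_a, node_b])
    #     crossing.close_me()  # pedestrians can no longer route through these nodes
    #     crossing.open_me()   # the nodes become available again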
# Close all the nodes in the intersection.
    def close_me(self):
# Need to handle case when pedestrian is in the middle of an intersection during closing.
for node in self.nodes:
node.available = False
# Re-open the intersection by setting all included nodes to available.
    def open_me(self):
for node in self.nodes:
node.available = True | [
"matthewbentonmay@gmail.com"
] | matthewbentonmay@gmail.com |
544c68bbb9bcd0defc1e22d8be96424e01d8528a | c73baa3234fdd58f4209fb6960d9afa6a6efc492 | /9.updating/qq_bot.py | 292021d139d8da0bfac89da30ca58e1531878631 | [
"MIT"
] | permissive | hireach/examples-of-web-crawlers | 173f2ef2223bc45d5585c0cee6aeb3b6e9186e96 | 1cd077367981834da34370644a238fbd5194512a | refs/heads/master | 2020-05-25T03:26:00.522076 | 2019-05-19T20:12:00 | 2019-05-19T20:12:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,625 | py | # -*- coding:utf-8 -*-
# Import local helper modules
from decrypt import hash33_token
from decrypt import hash33_bkn
from decrypt import get_sck
from url_request import get_html
from url_request import post_html
from decrypt import get_csrf_token
# Import third-party libraries
import re
import time
from requests import get
from requests import post
from requests.packages import urllib3
from requests.utils import dict_from_cookiejar
from json import loads
import PIL.Image
import PIL.ImageTk
from io import BytesIO
from tkinter_gui import *
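# For reference: the imported hash33_* helpers are assumed to implement QQ's
# well-known "hash33" checksums (a sketch only; the real code lives in decrypt.py):
#
#     def hash33_token(qrsig):               # -> ptqrtoken
#         e = 0
#         for c in qrsig:
#             e += (e << 5) + ord(c)
#         return e & 2147483647
#
#     def hash33_bkn(skey):                  # -> bkn
#         t = 5381
#         for c in skey:
#             t += (t << 5) + ord(c)
#         return t & 2147483647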
class Bot(object):
"""
    A QQ bot object used to fetch the group list and group-member details of the
    logged-in QQ account; the same interface can also fetch all friend groupings,
    but the friend data it returns only contains remark names and QQ numbers
"""
def __init__(self):
self.is_login = False
self.cookies_merge_dict_in_id_qq_com = {}
self.cookies_merge_dict_in_qun_qq_com = {}
self.cookies_merge_dict_in_qzone_qq_com = {}
self.qq_number = ''
self.login_id_qq_com()
self.login_qun_qq_com()
self.login_qzone_qq_com()
        # After logging in, display the QQ avatar in the image box
picture = self.get_profile_picture(self.qq_number,140)
BytesIOObj = BytesIO()
BytesIOObj.write(picture)
qr_code = PIL.Image.open(BytesIOObj)
image = PIL.ImageTk.PhotoImage(qr_code)
image_label['image'] = image
def login_qun_qq_com(self):
        # Log in to qun.qq.com
        # Request the login page to obtain the pt_login_sig parameter
login_url = 'http://ui.ptlogin2.qq.com/cgi-bin/login?appid=549000912&s_url=http://qun.qq.com/member.html'
html = get_html(login_url, '')
        # Convert the returned cookies to a dict for easier handling
cookies_back_dict = dict_from_cookiejar(html.cookies)
pt_login_sig = cookies_back_dict['pt_login_sig']
self.cookies_merge_dict_in_qun_qq_com.update(cookies_back_dict)
        # Request the QR-code image to obtain the ptqrtoken parameter
qrcode_url = 'https://ptlogin2.qq.com/ptqrshow?appid=549000912&e=2&l=M&s=4&d=72&v=4&t=0.39550762134604156'
html = get_html(qrcode_url, '')
        # Convert the returned cookies to a dict for easier handling
cookies_back_dict = dict_from_cookiejar(html.cookies)
qrsig = cookies_back_dict['qrsig']
ptqrtoken = hash33_token(qrsig)
self.cookies_merge_dict_in_qun_qq_com.update(cookies_back_dict)
        # Display the QR code in the image box
BytesIOObj = BytesIO()
BytesIOObj.write(html.content)
qr_code = PIL.Image.open(BytesIOObj)
image = PIL.ImageTk.PhotoImage(qr_code)
image_label['image'] = image
        # Poll the QR-code status in real time
while (True):
            # Target URL
target_url = 'http://ptlogin2.qq.com/ptqrlogin?u1=http://qun.qq.com/member.html&' + 'ptqrtoken=' + str(
ptqrtoken) + '&ptredirect=1&h=1&t=1&g=1&from_ui=1&ptlang=2052&action=0-0-1499652067577&js_ver=10224&js_type=1&login_sig=' + str(
pt_login_sig) + '&pt_uistyle=40&aid=549000912&'
            # Attempt the login; the request must carry the cookies gathered so far
html = get_html(target_url, self.cookies_merge_dict_in_qun_qq_com)
            # A 200 response code means the QR code has not expired yet
if (html.status_code):
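                # the Chinese string literals below match text in the server's
                # response: "QR code not expired", "QR code authentication",
                # "login succeeded", "QR code has expired"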
if ('二维码未失效' in html.text):
                    custom_print(u'(2/3) Logging in to qun.qq.com: QR code still valid, please scan it to log in')
elif ('二维码认证' in html.text):
                    custom_print(u'(2/3) Logging in to qun.qq.com: scan detected, authenticating')
elif ('登录成功' in html.text):
self.is_login = True
                    custom_print(u'(2/3) Logging in to qun.qq.com: login succeeded')
break
if ('二维码已经失效' in html.text):
                custom_print(u'(2/3) Logging in to qun.qq.com: QR code has expired, please restart this program')
exit()
            # Wait before polling again
time.sleep(2)
        # After a successful login, merge the returned cookies in
cookies_back_dict = dict_from_cookiejar(html.cookies)
self.cookies_merge_dict_in_qun_qq_com.update(cookies_back_dict)
        # print(u'current cookies: {}'.format(cookies_merge_dict))
        # Extract the QQ number used for this login
qq_list = re.findall(r'&uin=(.+?)&service', html.text)
self.qq_number = qq_list[0]
        # A successful login returns a URL that must be visited to pick up fresh cookies
startIndex = (html.text).find('http')
endIndex = (html.text).find('pt_3rd_aid=0')
url = (html.text)[startIndex:endIndex] + 'pt_3rd_aid=0'
        # Note: redirects must be disabled here, otherwise the returned cookies cannot be captured correctly
html = get(url, cookies=self.cookies_merge_dict_in_qun_qq_com, allow_redirects=False)
        # Merge the returned cookies in
cookies_back_dict = dict_from_cookiejar(html.cookies)
self.cookies_merge_dict_in_qun_qq_com.update(cookies_back_dict)
def login_qzone_qq_com(self):
        # Log in to qzone.qq.com
        # Request the login page to obtain the pt_login_sig parameter
login_url = 'https://xui.ptlogin2.qq.com/cgi-bin/xlogin?proxy_url=https://qzs.qq.com/qzone/v6/portal/proxy.html&daid=5&&hide_title_bar=1&low_login=0&qlogin_auto_login=1&no_verifyimg=1&link_target=blank&appid=549000912&style=22&target=self&s_url=https://qzs.qq.com/qzone/v5/loginsucc.html?para=izone&pt_qr_app=手机QQ空间&pt_qr_link=https://z.qzone.com/download.html&self_regurl=https://qzs.qq.com/qzone/v6/reg/index.html&pt_qr_help_link=https://z.qzone.com/download.html&pt_no_auth=0'
html = get_html(login_url, '')
        # Convert the returned cookies to a dict for easier handling
cookies_back_dict = dict_from_cookiejar(html.cookies)
pt_login_sig = cookies_back_dict['pt_login_sig']
self.cookies_merge_dict_in_qzone_qq_com.update(cookies_back_dict)
        # Request the QR-code image to obtain the ptqrtoken parameter
qrcode_url = 'https://ssl.ptlogin2.qq.com/ptqrshow?appid=549000912&e=2&l=M&s=4&d=72&v=4&t=0.0010498811219192827&daid=5&pt_3rd_aid=0'
html = get_html(qrcode_url, '')
        # Convert the returned cookies to a dict for easier handling
cookies_back_dict = dict_from_cookiejar(html.cookies)
qrsig = cookies_back_dict['qrsig']
ptqrtoken = hash33_token(qrsig)
self.cookies_merge_dict_in_qzone_qq_com.update(cookies_back_dict)
        # Display the QR code in the image box
BytesIOObj = BytesIO()
BytesIOObj.write(html.content)
qr_code = PIL.Image.open(BytesIOObj)
image = PIL.ImageTk.PhotoImage(qr_code)
image_label['image'] = image
        # Poll the QR-code status in real time
while (True):
            # Target URL
target_url = 'https://ssl.ptlogin2.qq.com/ptqrlogin?u1=https://qzs.qq.com/qzone/v5/loginsucc.html?para=izone&ptqrtoken=' + str(ptqrtoken) + '&ptredirect=0&h=1&t=1&g=1&from_ui=1&ptlang=2052&action=0-0-1558286321351&js_ver=19042519&js_type=1&login_sig=' + str(pt_login_sig) + '&pt_uistyle=40&aid=549000912&daid=5&'
            # Attempt the login; the request must carry the cookies gathered so far
html = get_html(target_url, self.cookies_merge_dict_in_qzone_qq_com)
            # A 200 response code means the QR code has not expired yet
if (html.status_code):
if ('二维码未失效' in html.text):
                    custom_print(u'(3/3) Logging in to qzone.qq.com: QR code still valid, please scan it to log in')
elif ('二维码认证' in html.text):
                    custom_print(u'(3/3) Logging in to qzone.qq.com: scan detected, authenticating')
elif ('登录成功' in html.text):
self.is_login = True
                    custom_print(u'(3/3) Logging in to qzone.qq.com: login succeeded')
break
if ('二维码已经失效' in html.text):
                custom_print(u'(3/3) Logging in to qzone.qq.com: QR code has expired, please restart this program')
exit()
            # Wait before polling again
time.sleep(2)
        # After a successful login, merge the returned cookies in
cookies_back_dict = dict_from_cookiejar(html.cookies)
self.cookies_merge_dict_in_qzone_qq_com.update(cookies_back_dict)
        # Extract the QQ number used for this login
qq_list = re.findall(r'&uin=(.+?)&service', html.text)
self.qq_number = qq_list[0]
        # A successful login returns a URL that must be visited to pick up fresh cookies
startIndex = (html.text).find('http')
endIndex = (html.text).find('pt_3rd_aid=0')
url = (html.text)[startIndex:endIndex] + 'pt_3rd_aid=0'
        # Suppress HTTPS certificate warnings
urllib3.disable_warnings()
        # Note: redirects must be disabled here, otherwise the returned cookies cannot be captured correctly
html = get(url, cookies=self.cookies_merge_dict_in_qzone_qq_com, allow_redirects=False, verify=False)
        # Merge the returned cookies in
cookies_back_dict = dict_from_cookiejar(html.cookies)
self.cookies_merge_dict_in_qzone_qq_com.update(cookies_back_dict)
def login_id_qq_com(self):
        # Log in to id.qq.com
        # Request the login page to obtain the pt_login_sig parameter
login_url = 'https://xui.ptlogin2.qq.com/cgi-bin/xlogin?pt_disable_pwd=1&appid=1006102&daid=1&style=23&hide_border=1&proxy_url=https://id.qq.com/login/proxy.html&s_url=https://id.qq.com/index.html'
html = get_html(login_url, '')
        # Convert the returned cookies to a dict for easier handling
cookies_back_dict = dict_from_cookiejar(html.cookies)
pt_login_sig = cookies_back_dict['pt_login_sig']
self.cookies_merge_dict_in_id_qq_com.update(cookies_back_dict)
        # Request the QR-code image to obtain the ptqrtoken parameter
qrcode_url = 'https://ssl.ptlogin2.qq.com/ptqrshow?appid=1006102&e=2&l=M&s=4&d=72&v=4&t=0.10239549811477189&daid=1&pt_3rd_aid=0'
html = get_html(qrcode_url, '')
        # Convert the returned cookies to a dict for easier handling
cookies_back_dict = dict_from_cookiejar(html.cookies)
qrsig = cookies_back_dict['qrsig']
ptqrtoken = hash33_token(qrsig)
self.cookies_merge_dict_in_id_qq_com.update(cookies_back_dict)
# Render the QR code into the image widget
BytesIOObj = BytesIO()
BytesIOObj.write(html.content)
qr_code = PIL.Image.open(BytesIOObj)
image = PIL.ImageTk.PhotoImage(qr_code)
image_label['image'] = image
# Poll the QR code status in real time
while (True):
# Target URL
target_url = 'https://ssl.ptlogin2.qq.com/ptqrlogin?u1=https://id.qq.com/index.html&ptqrtoken=' + str(ptqrtoken) + '&ptredirect=1&h=1&t=1&g=1&from_ui=1&ptlang=2052&action=0-0-1556812236254&js_ver=19042519&js_type=1&login_sig=' + str(pt_login_sig) + '&pt_uistyle=40&aid=1006102&daid=1&'
# Log in; the request must carry the cookies collected so far
html = get_html(target_url, self.cookies_merge_dict_in_id_qq_com)
# A truthy status code (normally 200) means the QR code has not expired.
# The Chinese literals matched below are the server's response text and must stay as-is.
if (html.status_code):
if ('二维码未失效' in html.text):
custom_print(u'(1/3) Logging in to id.qq.com: the QR code is still valid, please scan it to log in')
elif ('二维码认证' in html.text):
custom_print(u'(1/3) Logging in to id.qq.com: scan detected, authenticating')
elif ('登录成功' in html.text):
self.is_login = True
custom_print(u'(1/3) Logging in to id.qq.com: login succeeded')
break
if ('二维码已经失效' in html.text):
custom_print(u'(1/3) Logging in to id.qq.com: the QR code has expired, please restart this program')
exit()
# Delay between polls
time.sleep(2)
# After a successful login, merge the returned cookies
# (update rather than overwrite, mirroring login_qzone_qq_com above)
cookies_back_dict = dict_from_cookiejar(html.cookies)
self.cookies_merge_dict_in_id_qq_com.update(cookies_back_dict)
# print(u'current cookies: {}'.format(cookies_merge_dict))
# Extract the QQ number used for this login
qq_list = re.findall(r'&uin=(.+?)&service', html.text)
self.qq_number = qq_list[0]
# After login the server returns a URL that must be visited to pick up fresh cookies
startIndex = (html.text).find('http')
endIndex = (html.text).find('pt_3rd_aid=0')
url = (html.text)[startIndex:endIndex] + 'pt_3rd_aid=0'
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# Note: redirects must be disabled here, otherwise the returned cookies are lost
html = get(url, cookies=self.cookies_merge_dict_in_id_qq_com, allow_redirects=False, verify=False)
# Merge the returned cookies
cookies_back_dict = dict_from_cookiejar(html.cookies)
self.cookies_merge_dict_in_id_qq_com.update(cookies_back_dict)
def get_group(self):
# Fetch basic info for all joined groups
# bkn is derived from the skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_qun_qq_com['skey'])
submit_data = {'bkn': bkn}
html = post_html('https://qun.qq.com/cgi-bin/qun_mgr/get_group_list', self.cookies_merge_dict_in_qun_qq_com, submit_data)
group_info = loads(html.text)
print(group_info)
return group_info['join']
def get_members_in_group(self,group_number):
# Fetch the member list of a given group
# bkn is derived from the skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_qun_qq_com['skey'])
url = 'http://qinfo.clt.qq.com/cgi-bin/qun_info/get_members_info_v1?friends=1&name=1&gc=' + str(group_number) + '&bkn=' + str(bkn) + '&src=qinfo_v3'
html = get_html(url, self.cookies_merge_dict_in_qun_qq_com)
group_member = loads(html.text)
return group_member
def get_all_friends_in_qq(self):
# Fetch basic info for all QQ friends
# bkn is derived from the skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_qun_qq_com['skey'])
submit_data = {'bkn': bkn}
html = post_html('https://qun.qq.com/cgi-bin/qun_mgr/get_friend_list', self.cookies_merge_dict_in_qun_qq_com, submit_data)
friend_info = loads(html.text)
# print(friend_info)
return friend_info['result']
def get_info_in_qq_friend(self,qq_number):
# Fetch detailed info about a given QQ friend
# Data to submit
# bkn is derived from the skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_qun_qq_com['skey'])
submit_data = {'keyword':str(qq_number), 'ldw': str(bkn), 'num':'20', 'page':'0', 'sessionid':'0', 'agerg':'0', 'sex':'0', 'firston':'0', 'video':'0', 'country':'1', 'province':'65535', 'city':'0', 'district':'0', 'hcountry':'1', 'hprovince':'0', 'hcity':'0', 'hdistrict':'0', 'online':'0'}
# Cookies to submit
# cookies = {'uin':self.cookies_merge_dict_in_qun_qq_com['uin'], 'skey':self.cookies_merge_dict_in_qun_qq_com['skey'], 'ptisp':self.cookies_merge_dict_in_qun_qq_com['ptisp'], 'RK':self.cookies_merge_dict_in_qun_qq_com['RK'], 'ptcz':self.cookies_merge_dict_in_qun_qq_com['ptcz']}
# Set request headers to mimic a real browser
header = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Origin': 'http://find.qq.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer':'http://find.qq.com/',
}
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# HTTP request, POST
html = post('http://cgi.find.qq.com/qqfind/buddy/search_v3', data=submit_data, cookies=self.cookies_merge_dict_in_qun_qq_com, headers=header, verify=False)
# Parse the friend info into a Python object
friend_info = loads(html.text)
# print(friend_info)
return friend_info['result']['buddy']['info_list'][0]
def get_profile_picture(self, qq_number, size=100):
# Fetch the avatar of the given QQ number; size may be 40, 100 or 140 (default 100)
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# Set request headers to mimic a real browser
header = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer':'http://find.qq.com/'
}
# HTTP request, GET
html = get('http://q1.qlogo.cn/g?b=qq&nk=' + str(qq_number) + '&s=' + str(size), headers=header, verify=False)
return html.content
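# Illustrative usage sketch (added; not part of the original script). The
# instance name and output file below are arbitrary examples:
#   data = spider.get_profile_picture('10001', size=100)
#   with open('avatar_10001.jpg', 'wb') as f:
#       f.write(data)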
def get_quit_of_group(self):
# Fetch the groups quit within the last 30 days
# Data to submit
# bkn is derived from the skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_qun_qq_com['skey'])
submit_data = {'bkn': str(bkn)}
# Set request headers to mimic a real browser
header = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Content-Type': 'text/plain',
'origin': 'https://huifu.qq.com',
'referer' : 'https://huifu.qq.com/recovery/index.html?frag=0'
}
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# HTTP request, POST
html = post('https://huifu.qq.com/cgi-bin/gr_grouplist', data=submit_data, cookies=self.cookies_merge_dict_in_qun_qq_com, headers=header, verify=False)
# Parse the response into a Python object
result = loads(html.text)
return result
def get_delete_friend_in_360day(self):
# Fetch the list of friends deleted within the last year
# Data to submit
# bkn is derived from the skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_qun_qq_com['skey'])
qq_number = str(self.qq_number)
skey = str(self.cookies_merge_dict_in_qun_qq_com['skey'])
url = 'https://proxy.vip.qq.com/cgi-bin/srfentry.fcgi?bkn=' + str(bkn) + '&ts=&g_tk=' + str(bkn) + '&data={"11053":{"iAppId":1,"iKeyType":1,"sClientIp":"","sSessionKey":"' + skey + '","sUin":"' + qq_number + '"}}'
# Set request headers to mimic a real browser
header = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://huifu.qq.com/recovery/index.html?frag=1',
'Origin': 'https://huifu.qq.com',
'Connection': 'close'
}
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# HTTP request, GET
html = get(url, cookies=self.cookies_merge_dict_in_qun_qq_com, headers=header, verify=False)
# Parse the response into a Python object
result = loads(html.text)
# print(result)
# No friends were deleted within the last 364 days
delFriendList = result['11053']['data']['delFriendList']
if(len(delFriendList) == 0):
return []
# Some friends were deleted within the last 364 days
qq_number_list = delFriendList['364']['vecUin']
# Return the list of friends deleted within the last 364 days
return qq_number_list
def is_vip_svip(self):
# Check whether the logged-in QQ is a VIP or SVIP account
# Data to submit
# bkn is derived from the skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_qun_qq_com['skey'])
qq_number = str(self.qq_number)
skey = str(self.cookies_merge_dict_in_qun_qq_com['skey'])
url = 'https://proxy.vip.qq.com/cgi-bin/srfentry.fcgi?bkn=' + str(bkn) + '&ts=&g_tk=' + str(bkn) + '&data={"11053":{"iAppId":1,"iKeyType":1,"sClientIp":"","sSessionKey":"' + skey + '","sUin":"' + qq_number + '"}}'
# Set request headers to mimic a real browser
header = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://huifu.qq.com/recovery/index.html?frag=1',
'Origin': 'https://huifu.qq.com',
'Connection': 'close'
}
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# HTTP request, GET
html = get(url, cookies=self.cookies_merge_dict_in_qun_qq_com, headers=header, verify=False)
# Parse the response into a Python object
result = loads(html.text)
isSvip = result['11053']['data']['isSvip']
isVip = result['11053']['data']['isVip']
return {'isSvip':isSvip, 'isVip':isVip}
def get_qb(self):
# Fetch the account's QB (Q coin) balance
# Data to submit
qq_number = str(self.qq_number)
skey = str(self.cookies_merge_dict_in_qun_qq_com['skey'])
url = 'https://api.unipay.qq.com/v1/r/1450000186/wechat_query?cmd=4&pf=vip_m-pay_html5-html5&pfkey=pfkey&from_h5=1&from_https=1&openid=' + qq_number + '&openkey=' + skey + '&session_id=uin&session_type=skey'
# Set request headers to mimic a real browser
header = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://my.pay.qq.com/account/index.shtml',
'Origin': 'https://my.pay.qq.com',
'Connection': 'close'
}
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# HTTP request, GET
html = get(url, cookies=self.cookies_merge_dict_in_qun_qq_com, headers=header, verify=False)
# Parse the response into a Python object
result = loads(html.text)
qb_value = float(result['qb_balance']) / 10
return qb_value
def get_pay_for_another(self):
# Fetch the payments made on behalf of others
# Data to submit
skey = str(self.cookies_merge_dict_in_qun_qq_com['skey'])
url = 'https://pay.qq.com/cgi-bin/personal/account_msg.cgi?p=0.6796416908412624&cmd=1&sck=' + get_sck(skey) + '&type=100&showitem=2&per=100&pageno=1&r=0.3177912609760205'
# Set request headers to mimic a real browser
header = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://pay.qq.com/infocenter/infocenter.shtml?asktype=100',
'Connection': 'keep-alive'
}
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# HTTP request, GET
html = get(url, cookies=self.cookies_merge_dict_in_qun_qq_com, headers=header, verify=False)
# Parse the response into a Python object
result = loads(html.text)
# print(result)
return result['resultinfo']['list']
def get_detail_information(self):
# Fetch the account's detailed profile
# Container for the aggregated result
result = {}
# Fetch basic info
# bkn is derived from the skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_id_qq_com['skey'])
url = 'https://id.qq.com/cgi-bin/summary?ldw=' + str(bkn)
# Set request headers to mimic a real browser
header = {
'Accept': '*/*',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Accept-Encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Referer': 'https://id.qq.com/home/home.html?ver=10049&',
'Connection': 'keep-alive'
}
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# HTTP request, GET
html = get(url, cookies=self.cookies_merge_dict_in_id_qq_com, headers=header, verify=False)
# Set the expected response encoding
html.encoding = 'utf-8'
# Parse the response into a Python object and merge it into result
result.update(loads(html.text))
# Fetch the online-days / level growth info
skey = str(self.cookies_merge_dict_in_id_qq_com['skey'])
g_tk = str(get_csrf_token(skey))
url = 'https://cgi.vip.qq.com/querygrow/get?r=0.8102122812749504&g_tk=' + g_tk
# Set request headers to mimic a real browser
header = {
'Accept': '*/*',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Accept-Encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Referer': 'https://id.qq.com/level/mylevel.html?ver=10043&',
'Connection': 'keep-alive'
}
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# HTTP request, GET
html = get(url, cookies=self.cookies_merge_dict_in_id_qq_com, headers=header, verify=False)
# Set the expected response encoding
html.encoding = 'utf-8'
# Parse the response into a Python object and merge it into result
result.update(loads(html.text))
# Fetch even more detailed profile data
while(True):
url = 'https://id.qq.com/cgi-bin/userinfo?ldw=' + str(bkn)
# Set request headers to mimic a real browser
header = {
'Accept': '*/*',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'Accept-Encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Referer': 'https://id.qq.com/myself/myself.html?ver=10045&',
'Connection': 'keep-alive'
}
# Suppress HTTPS certificate warnings
urllib3.disable_warnings()
# HTTP request, GET
html = get(url, cookies=self.cookies_merge_dict_in_id_qq_com, headers=header, verify=False)
# Set the expected response encoding
html.encoding = 'utf-8'
# The endpoint sometimes returns an empty body, so retry until it is non-empty
if(html.text != ''):
# Parse the response into a Python object and merge it into result
result.update(loads(html.text))
# Break out of the retry loop
break
# All data fetched; select the fields we want to return
data = {}
data.update({'bind_email':result['bind_email']})
data.update({'nickname': result['nick']})
data.update({'age': result['age']})
data.update({'birthday': str(result['bir_y']) + '/' + str(result['bir_m']) + '/' + str(result['bir_d'])})
data.update({'last_contact_friend_count': result['chat_count']})
data.update({'friend_count': result['friend_count']})
data.update({'group_count': result['group_count']})
data.update({'remark_friend_count': result['remark_count']})
data.update({'odd_friend_count': result['odd_count']})
data.update({'qq_level': result['level']})
data.update({'qq_level_rank': str(result['level_rank']) + '/' + str(result['friend_count'])})
data.update({'qq_age': result['qq_age']})
data.update({'mobile_qq_online_hour': result['iMobileQQOnlineTime']})
data.update({'no_hide_online_hour': result['iNoHideOnlineTime']})
data.update({'total_active_day': result['iTotalActiveDay']})
qq_signature = result['ln'].replace(' ',' ')
# data.update({'qq_signature': qq_signature})
return data
def who_care_about_me(self):
# Qzone intimacy: who cares about me
# bkn is derived from the p_skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_qzone_qq_com['p_skey'])
# Obtain the qzonetoken parameter
urllib3.disable_warnings()
target_url = 'https://user.qzone.qq.com/' + self.qq_number
html = get_html(target_url, self.cookies_merge_dict_in_qzone_qq_com)
qzonetoken = re.findall(r'{ try{return "(.+?)";', html.text)
qzonetoken = qzonetoken[0]
# Fetch the "who cares about me" data
target_url = 'https://rc.qzone.qq.com/proxy/domain/r.qzone.qq.com/cgi-bin/tfriend/friend_ship_manager.cgi?uin=' + self.qq_number + '&do=2&rd=0.32313768189269365&fupdate=1&clean=0&g_tk=' + str(bkn) + '&qzonetoken=' + str(qzonetoken) + '&g_tk=' + str(bkn)
urllib3.disable_warnings()
html = get_html(target_url, self.cookies_merge_dict_in_qzone_qq_com)
# Strip the JSONP wrapper ("_Callback(...)") from the response
result_data = (html.text).replace('_Callback(','')
result_data = result_data[:len(result_data)-2]
# Parse the response into a Python object
result_data = loads(result_data)
result_data = result_data['data']['items_list']
print(result_data)
def i_care_about_who(self):
# Qzone intimacy: who I care about
# bkn is derived from the p_skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_qzone_qq_com['p_skey'])
# Obtain the qzonetoken parameter
urllib3.disable_warnings()
target_url = 'https://user.qzone.qq.com/' + self.qq_number
html = get_html(target_url, self.cookies_merge_dict_in_qzone_qq_com)
qzonetoken = re.findall(r'{ try{return "(.+?)";', html.text)
qzonetoken = qzonetoken[0]
# Fetch the "who I care about" data
target_url = 'https://rc.qzone.qq.com/proxy/domain/r.qzone.qq.com/cgi-bin/tfriend/friend_ship_manager.cgi?uin=' + self.qq_number + '&do=1&rd=0.9680629025032721&fupdate=1&clean=1&g_tk=' + str(bkn) + '&qzonetoken=' + str(qzonetoken) + '&g_tk=' + str(bkn)
urllib3.disable_warnings()
html = get_html(target_url, self.cookies_merge_dict_in_qzone_qq_com)
# Strip the JSONP wrapper ("_Callback(...)") from the response
result_data = (html.text).replace('_Callback(','')
result_data = result_data[:len(result_data)-2]
# Parse the response into a Python object
result_data = loads(result_data)
result_data = result_data['data']['items_list']
print(result_data)
def qzone_friendship(self, number):
# Fetch the number of days of friendship, plus the counts of mutual friends and shared group chats
# bkn is derived from the p_skey cookie via another hash function
bkn = hash33_bkn(self.cookies_merge_dict_in_qzone_qq_com['p_skey'])
# Obtain the qzonetoken parameter
urllib3.disable_warnings()
target_url = 'https://user.qzone.qq.com/' + self.qq_number
html = get_html(target_url, self.cookies_merge_dict_in_qzone_qq_com)
qzonetoken = re.findall(r'{ try{return "(.+?)";', html.text)
qzonetoken = qzonetoken[0]
# Fetch the friendship data
target_url = 'https://user.qzone.qq.com/proxy/domain/r.qzone.qq.com/cgi-bin/friendship/cgi_friendship?activeuin=' + self.qq_number +'&passiveuin=' + str(number) + '&situation=1&isCalendar=1&g_tk=' + str(bkn) + '&qzonetoken=' + str(qzonetoken) + '&g_tk=' + str(bkn)
urllib3.disable_warnings()
html = get_html(target_url, self.cookies_merge_dict_in_qzone_qq_com)
# Strip the JSONP wrapper ("_Callback(...)") from the response
result_data = (html.text).replace('_Callback(','')
result_data = result_data[:len(result_data)-2]
# Parse the response into a Python object
result_data = loads(result_data)
print(result_data)
| [
"3257179914@qq.com"
] | 3257179914@qq.com |
48a3d0e17c6da5f6f7978d6fecf15aab2c14acfb | f4a8ce0c4e37ff85047d7e45a425cf8d2a1aa9d9 | /Random/file.py | eb3fa04edd818ff81ddf0d59f63c7c98da05827c | [] | no_license | oladapo-joseph/rock_paper_scissors | 514b1ffb7d14c395131d3f94c7de670b5dd5ac84 | 799e3b574e0a0a81109a7508423c04c553a4b033 | refs/heads/main | 2023-02-11T14:22:34.552554 | 2021-01-14T01:38:36 | 2021-01-14T01:38:36 | 310,174,204 | 0 | 0 | null | 2021-01-14T01:38:37 | 2020-11-05T02:53:48 | Python | UTF-8 | Python | false | false | 314 | py | def test():
infile = open('test001.txt', 'r', encoding='utf8')
outfile = open('test002.txt', 'w', encoding='utf8')
for line in infile:
lines = ":".join(line)
# s = line.split(".")
outfile.write(lines)
# outfile.write(s)
infile.close()
outfile.close()
test()
| [
"oladapo_joseph@yahoo.com"
] | oladapo_joseph@yahoo.com |
b6855e0a4702ef6be028e101470e0dc92b243364 | 62e9fb33329fbefa89287e5bc343cb9c120306a1 | /tensorflow_probability/python/sts/forecast.py | 9505ff71edfc41823f9f6152b7bc80dc2968fd62 | [
"Apache-2.0"
] | permissive | npfp/probability | 3c103d4b9d7a72d3d16eb79b1e4f648afbaca057 | 3911f4463cdcca6cc118633742430885fb0c88cb | refs/heads/master | 2022-05-01T14:23:40.504258 | 2022-04-07T20:08:45 | 2022-04-07T20:10:58 | 246,853,846 | 0 | 0 | Apache-2.0 | 2020-03-12T14:23:04 | 2020-03-12T14:23:03 | null | UTF-8 | Python | false | false | 24,131 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Methods for forecasting in StructuralTimeSeries models."""
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.experimental import util as tfe_util
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.sts.internal import util as sts_util
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
def _prefer_static_event_ndims(distribution):
if distribution.event_shape.ndims is not None:
return distribution.event_shape.ndims
else:
return tf.size(distribution.event_shape_tensor())
@deprecation.deprecated_arg_values(
'2021-12-31',
'Predictive distributions returned by `tfp.sts.one_step_predictive` will '
'soon compute per-timestep probabilities (treating timesteps as part of '
'the batch shape) instead of a single probability for an entire series '
'(the current approach, in which timesteps are treated as event shape). '
'Please update your code to pass `timesteps_are_event_shape=False` (this '
'will soon be the default) and to explicitly sum over the per-timestep log '
'probabilities if this is required.',
timesteps_are_event_shape=True)
def one_step_predictive(model, observed_time_series, parameter_samples,
timesteps_are_event_shape=True):
"""Compute one-step-ahead predictive distributions for all timesteps.
Given samples from the posterior over parameters, return the predictive
distribution over observations at each time `T`, given observations up
through time `T-1`.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. Any `NaN`s
are interpreted as missing observations; missingness may be also be
explicitly specified by passing a `tfp.sts.MaskedTimeSeries` instance.
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
timesteps_are_event_shape: Deprecated, for backwards compatibility only.
If `False`, the predictive distribution will return per-timestep
probabilities.
Default value: `True`.
Returns:
predictive_dist: a `tfd.MixtureSameFamily` instance with event shape
`[num_timesteps] if timesteps_are_event_shape else []` and
batch shape `concat([sample_shape, model.batch_shape,
[] if timesteps_are_event_shape else [num_timesteps])`, with
`num_posterior_draws` mixture components. The `t`th step represents the
forecast distribution `p(observed_time_series[t] |
observed_time_series[0:t-1], parameter_samples)`.
#### Examples
Suppose we've built a model and fit it to data using HMC:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
Passing the posterior samples into `one_step_predictive`, we construct a
one-step-ahead predictive distribution:
```python
one_step_predictive_dist = tfp.sts.one_step_predictive(
model, observed_time_series, parameter_samples=samples)
predictive_means = one_step_predictive_dist.mean()
predictive_scales = one_step_predictive_dist.stddev()
```
If using variational inference instead of HMC, we'd construct a forecast using
samples from the variational posterior:
```python
surrogate_posterior = tfp.sts.build_factored_surrogate_posterior(
model=model)
loss_curve = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn=model.joint_distribution(observed_time_series).log_prob,
surrogate_posterior=surrogate_posterior,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
num_steps=200)
samples = surrogate_posterior.sample(30)
one_step_predictive_dist = tfp.sts.one_step_predictive(
model, observed_time_series, parameter_samples=samples)
```
We can visualize the forecast by plotting:
```python
from matplotlib import pylab as plt
def plot_one_step_predictive(observed_time_series,
forecast_mean,
forecast_scale):
plt.figure(figsize=(12, 6))
num_timesteps = forecast_mean.shape[-1]
c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05)
plt.plot(observed_time_series, label="observed time series", color=c1)
plt.plot(forecast_mean, label="one-step prediction", color=c2)
plt.fill_between(np.arange(num_timesteps),
forecast_mean - 2 * forecast_scale,
forecast_mean + 2 * forecast_scale,
alpha=0.1, color=c2)
plt.legend()
plot_one_step_predictive(observed_time_series,
forecast_mean=predictive_means,
forecast_scale=predictive_scales)
```
To detect anomalous timesteps, we check whether the observed value at each
step is within a 95% predictive interval, i.e., two standard deviations from
the mean:
```python
z_scores = ((observed_time_series[..., 1:] - predictive_means[..., :-1])
/ predictive_scales[..., :-1])
anomalous_timesteps = tf.boolean_mask(
tf.range(1, num_timesteps),
tf.abs(z_scores) > 2.0)
```
"""
with tf.name_scope('one_step_predictive'):
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run filtering over the training timesteps to extract the
# predictive means and variances.
num_timesteps = dist_util.prefer_static_value(
tf.shape(observed_time_series))[-2]
lgssm = tfe_util.JitPublicMethods(
model.make_state_space_model(num_timesteps=num_timesteps,
param_vals=parameter_samples),
trace_only=True) # Avoid eager overhead w/o introducing XLA dependence.
(_, _, _, _, _, observation_means, observation_covs
) = lgssm.forward_filter(observed_time_series, mask=is_missing)
# Squeeze dims to convert from LGSSM's event shape `[num_timesteps, 1]`
# to a scalar time series.
predictive_dist = sts_util.mix_over_posterior_draws(
means=observation_means[..., 0],
variances=observation_covs[..., 0, 0])
if timesteps_are_event_shape:
predictive_dist = tfd.Independent(
predictive_dist, reinterpreted_batch_ndims=1)
return predictive_dist
def forecast(model,
observed_time_series,
parameter_samples,
num_steps_forecast,
include_observation_noise=True):
"""Construct predictive distribution over future observations.
Given samples from the posterior over parameters, return the predictive
distribution over future observations for num_steps_forecast timesteps.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. Any `NaN`s
are interpreted as missing observations; missingness may be also be
explicitly specified by passing a `tfp.sts.MaskedTimeSeries` instance.
parameter_samples: Python `list` of `Tensors` representing posterior samples
of model parameters, with shapes `[concat([[num_posterior_draws],
param.prior.batch_shape, param.prior.event_shape]) for param in
model.parameters]`. This may optionally also be a map (Python `dict`) of
parameter names to `Tensor` values.
num_steps_forecast: scalar `int` `Tensor` number of steps to forecast.
include_observation_noise: Python `bool` indicating whether the forecast
distribution should include uncertainty from observation noise. If `True`,
the forecast is over future observations, if `False`, the forecast is over
future values of the latent noise-free time series.
Default value: `True`.
Returns:
forecast_dist: a `tfd.MixtureSameFamily` instance with event shape
[num_steps_forecast, 1] and batch shape
`concat([sample_shape, model.batch_shape])`, with `num_posterior_draws`
mixture components.
#### Examples
Suppose we've built a model and fit it to data using HMC:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
Passing the posterior samples into `forecast`, we construct a forecast
distribution:
```python
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=50)
forecast_mean = forecast_dist.mean()[..., 0] # shape: [50]
forecast_scale = forecast_dist.stddev()[..., 0] # shape: [50]
forecast_samples = forecast_dist.sample(10)[..., 0] # shape: [10, 50]
```
If using variational inference instead of HMC, we'd construct a forecast using
samples from the variational posterior:
```python
surrogate_posterior = tfp.sts.build_factored_surrogate_posterior(
model=model)
loss_curve = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn=model.joint_distribution(observed_time_series).log_prob,
surrogate_posterior=surrogate_posterior,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
num_steps=200)
samples = surrogate_posterior.sample(30)
forecast_dist = tfp.sts.forecast(model, observed_time_series,
parameter_samples=samples,
num_steps_forecast=50)
```
We can visualize the forecast by plotting:
```python
from matplotlib import pylab as plt
def plot_forecast(observed_time_series,
forecast_mean,
forecast_scale,
forecast_samples):
plt.figure(figsize=(12, 6))
num_steps = observed_time_series.shape[-1]
num_steps_forecast = forecast_mean.shape[-1]
num_steps_train = num_steps - num_steps_forecast
c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05)
plt.plot(np.arange(num_steps), observed_time_series,
lw=2, color=c1, label='ground truth')
forecast_steps = np.arange(num_steps_train,
num_steps_train+num_steps_forecast)
plt.plot(forecast_steps, forecast_samples.T, lw=1, color=c2, alpha=0.1)
plt.plot(forecast_steps, forecast_mean, lw=2, ls='--', color=c2,
label='forecast')
plt.fill_between(forecast_steps,
forecast_mean - 2 * forecast_scale,
forecast_mean + 2 * forecast_scale, color=c2, alpha=0.2)
plt.xlim([0, num_steps])
plt.legend()
plot_forecast(observed_time_series,
forecast_mean=forecast_mean,
forecast_scale=forecast_scale,
forecast_samples=forecast_samples)
```
"""
with tf.name_scope('forecast'):
[
observed_time_series,
mask
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run filtering over the observed timesteps to extract the
# latent state posterior at timestep T+1 (i.e., the final
# filtering distribution, pushed through the transition model).
# This is the prior for the forecast model ("today's prior
# is yesterday's posterior").
num_observed_steps = dist_util.prefer_static_value(
tf.shape(observed_time_series))[-2]
observed_data_ssm = tfe_util.JitPublicMethods(
model.make_state_space_model(num_timesteps=num_observed_steps,
param_vals=parameter_samples),
trace_only=True) # Avoid eager overhead w/o introducing XLA dependence.
(_, _, _, predictive_mean, predictive_cov, _, _
) = observed_data_ssm.forward_filter(observed_time_series,
mask=mask,
final_step_only=True)
# Build a batch of state-space models over the forecast period. Because
# we'll use MixtureSameFamily to mix over the posterior draws, we need to
# do some shenanigans to move the `[num_posterior_draws]` batch dimension
# from the leftmost to the rightmost side of the model's batch shape.
# TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an
# arbitrary axis, and eliminate `move_dimension` calls here.
parameter_samples = model._canonicalize_param_vals_as_map(parameter_samples) # pylint: disable=protected-access
parameter_samples_with_reordered_batch_dimension = {
param.name: dist_util.move_dimension(
parameter_samples[param.name],
0, -(1 + _prefer_static_event_ndims(param.prior)))
for param in model.parameters}
forecast_prior = tfd.MultivariateNormalTriL(
loc=dist_util.move_dimension(predictive_mean, 0, -2),
scale_tril=tf.linalg.cholesky(
dist_util.move_dimension(predictive_cov, 0, -3)))
# Ugly hack: because we moved `num_posterior_draws` to the trailing (rather
# than leading) dimension of parameters, the parameter batch shapes no
# longer broadcast against the `constant_offset` attribute used in `sts.Sum`
# models. We fix this by manually adding an extra broadcasting dim to
# `constant_offset` if present.
# The root cause of this hack is that we mucked with param dimensions above
# and are now passing params that are 'invalid' in the sense that they don't
# match the shapes of the model's param priors. The fix (as above) will be
# to update MixtureSameFamily so we can avoid changing param dimensions
# altogether.
# TODO(b/120245392): enhance `MixtureSameFamily` to reduce along an
# arbitrary axis, and eliminate this hack.
kwargs = {}
if hasattr(model, 'constant_offset'):
kwargs['constant_offset'] = tf.convert_to_tensor(
value=model.constant_offset,
dtype=forecast_prior.dtype)[..., tf.newaxis, :]
if not include_observation_noise:
parameter_samples_with_reordered_batch_dimension[
'observation_noise_scale'] = tf.zeros_like(
parameter_samples_with_reordered_batch_dimension[
'observation_noise_scale'])
# We assume that any STS model that has a `constant_offset` attribute
# will allow it to be overridden as a kwarg. This is currently just
# `sts.Sum`.
# TODO(b/120245392): when kwargs hack is removed, switch back to calling
# the public version of `_make_state_space_model`.
forecast_ssm = model._make_state_space_model( # pylint: disable=protected-access
num_timesteps=num_steps_forecast,
param_map=parameter_samples_with_reordered_batch_dimension,
initial_state_prior=forecast_prior,
initial_step=num_observed_steps,
**kwargs)
# Avoid eager-mode loops when querying the forecast.
forecast_ssm = tfe_util.JitPublicMethods(forecast_ssm, trace_only=True)
num_posterior_draws = dist_util.prefer_static_value(
forecast_ssm.batch_shape_tensor())[-1]
return tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
logits=tf.zeros([num_posterior_draws], dtype=forecast_ssm.dtype)),
components_distribution=forecast_ssm)
@deprecation.deprecated_arg_values(
'2021-12-31',
'Imputed distributions returned by `tfp.sts.impute_missing_values` will '
'soon compute per-timestep probabilities (treating timesteps as part of '
'the batch shape) instead of a single probability for an entire series '
'(the current approach, in which timesteps are treated as event shape). '
'Please update your code to pass `timesteps_are_event_shape=False` (this '
'will soon be the default) and to explicitly sum over the per-timestep log '
'probabilities if this is required.',
timesteps_are_event_shape=True)
def impute_missing_values(model,
observed_time_series,
parameter_samples,
include_observation_noise=False,
timesteps_are_event_shape=True):
"""Runs posterior inference to impute the missing values in a time series.
This method computes the posterior marginals `p(latent state | observations)`,
given the time series at observed timesteps (a missingness mask should
be specified using `tfp.sts.MaskedTimeSeries`). It pushes this posterior back
through the observation model to impute a predictive distribution on the
observed time series. At unobserved steps, this is an imputed value; at other
steps it is interpreted as the model's estimate of the underlying noise-free
series.
Args:
model: `tfp.sts.Sum` instance defining an additive STS model.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. Any `NaN`s
are interpreted as missing observations; missingness may be also be
explicitly specified by passing a `tfp.sts.MaskedTimeSeries` instance.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
include_observation_noise: If `False`, the imputed uncertainties
represent the model's estimate of the noise-free time series at each
timestep. If `True`, they represent the model's estimate of the range of
values that could be *observed* at each timestep, including any i.i.d.
observation noise.
Default value: `False`.
timesteps_are_event_shape: Deprecated, for backwards compatibility only.
If `False`, the predictive distribution will return per-timestep
probabilities.
Default value: `True`.
Returns:
imputed_series_dist: a `tfd.MixtureSameFamily` instance with event shape
`[num_timesteps] if timesteps_are_event_shape else []` and
batch shape `concat([sample_shape, model.batch_shape,
[] if timesteps_are_event_shape else [num_timesteps])`, with
`num_posterior_draws` mixture components.
#### Example
To specify a time series with missing values, use `tfp.sts.MaskedTimeSeries`:
```python
time_series_with_nans = [-1., 1., np.nan, 2.4, np.nan, 5]
observed_time_series = tfp.sts.MaskedTimeSeries(
time_series=time_series_with_nans,
is_missing=tf.math.is_nan(time_series_with_nans))
```
Masked time series can be passed to `tfp.sts` methods in place of a
`observed_time_series` `Tensor`:
```python
# Build model using observed time series to set heuristic priors.
linear_trend_model = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series)
model = tfp.sts.Sum([linear_trend_model],
observed_time_series=observed_time_series)
# Fit model to data
parameter_samples, _ = tfp.sts.fit_with_hmc(model, observed_time_series)
```
After fitting a model, `impute_missing_values` will return a distribution
```python
# Impute missing values
imputed_series_distribution = tfp.sts.impute_missing_values(
model, observed_time_series, parameter_samples=parameter_samples)
print('imputed means and stddevs: ',
imputed_series_distribution.mean(),
imputed_series_distribution.stddev())
```
"""
with tf.name_scope('impute_missing_values'):
[
observed_time_series,
mask
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run smoothing over the training timesteps to extract the
# predictive means and variances.
num_timesteps = dist_util.prefer_static_value(
tf.shape(observed_time_series))[-2]
lgssm = tfe_util.JitPublicMethods(
model.make_state_space_model(num_timesteps=num_timesteps,
param_vals=parameter_samples),
trace_only=True) # Avoid eager overhead w/o introducing XLA dependence.
posterior_means, posterior_covs = lgssm.posterior_marginals(
observed_time_series, mask=mask)
observation_means, observation_covs = lgssm.latents_to_observations(
latent_means=posterior_means,
latent_covs=posterior_covs)
if not include_observation_noise:
# Extract just the variance of observation noise by pushing forward
# zero-variance latents.
_, observation_noise_covs = lgssm.latents_to_observations(
latent_means=posterior_means,
latent_covs=tf.zeros_like(posterior_covs))
# Subtract out the observation noise that was added in the original
# pushforward. Note that this could cause numerical issues if the
# observation noise is very large. If this becomes an issue we could
# avoid the subtraction by plumbing `include_observation_noise` through
# `lgssm.latents_to_observations`.
observation_covs -= observation_noise_covs
# Squeeze dims to convert from LGSSM's event shape `[num_timesteps, 1]`
# to a scalar time series.
imputed_values_dist = sts_util.mix_over_posterior_draws(
means=observation_means[..., 0],
variances=observation_covs[..., 0, 0])
if timesteps_are_event_shape:
imputed_values_dist = tfd.Independent(
imputed_values_dist, reinterpreted_batch_ndims=1)
return imputed_values_dist
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
1ef93eb81a2c7b76ee8332735487da4f4a17a2aa | 3dcca553db05a113781162eb6f7af6881ad05f93 | /Python Start/Лекция 2-3. Числа/Lection_2_tsk_1.py | 213cb2f696c2f9621d2e934c822e8bcd38347de5 | [] | no_license | YevhenMix/courses | f137864d0f098dd1246ce9191a8890260b90f42a | 628d224a57f07945c60982749b38dfe03e69a6e0 | refs/heads/master | 2023-02-28T08:36:44.643941 | 2021-02-04T21:37:51 | 2021-02-04T21:37:51 | 331,698,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | print('This program splits a 5-digit number into the digits it is made of.')
# Solution 1: read the value directly as an int and peel digits off with // and %
num = int(input('Enter the desired number: '))
first = num // 10000
second = num % 10000 // 1000
third = num % 1000 // 100
fourth = num % 100 // 10
fifth = num % 10
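# Worked example: for num = 12345,
#   12345 // 10000 = 1, 12345 % 10000 // 1000 = 2, 12345 % 1000 // 100 = 3,
#   12345 % 100 // 10 = 4, 12345 % 10 = 5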
print(first)
print(second)
print(third)
print(fourth)
print(fifth)
# String-based variant using the map function
a, b, c, d, e = map(int, input())
print(a)
print(b)
print(c)
print(d)
print(e)
# Via a generator expression
a, b, c, d, e = (int(i) for i in input())
print(a)
print(b)
print(c)
print(d)
print(e)
| [
"jeka.mixaylov58@gmail.com"
] | jeka.mixaylov58@gmail.com |
a25f0dbe9161129f6ddf0034ac21ea1857e7f672 | 7352a0827b27dae0932c0d680bf2e68da4ae3b06 | /handwriting_recognition.py | 3e7fbde585a52864ce5669cc6edeaf37d02b57e1 | [] | no_license | adith-a-danthi/Tensorflow-Programs | 830326fc0795617855c590ea23ef11930696e42d | 3aa6ba9c585c5de560f10176847f227dcc61d47f | refs/heads/master | 2022-11-05T16:21:50.564723 | 2020-06-26T14:48:16 | 2020-06-26T14:48:16 | 267,604,828 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | import tensorflow as tf
# To stop training after 99% accuracy is reached
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if (logs.get('accuracy', 0) > 0.99):  # default 0 avoids comparing None when the key is absent
print("\nReached 99% accuracy so cancelling training!")
self.model.stop_training = True
callback = myCallback()
mnist = tf.keras.datasets.mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images = training_images / 255.0
test_images = test_images / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation = tf.nn.relu),
tf.keras.layers.Dense(10, activation = tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=10, callbacks=[callback])  # callbacks expects a list
op = model.predict(test_images)
print(len(op), "test images")
n = int(input("Enter image number to check prediction: "))
print(op[n])
print(test_labels[n]) | [
"adith.danthi@gmail.com"
] | adith.danthi@gmail.com |
61c28f0a8d8ac1e14fb99000c8f0c3a4653fa6c5 | 177c62658bb3954b867c679eaf7eb2f1793fc5e6 | /ebroker/bin/convert_extract_ebroker_market_data_with_candle_stick.py | c14b156eef1da38803dbdb32aa6b1216a1c6ac81 | [] | no_license | hfyan0/ebrokerdata_cash | d7298bef7bb3cbcfb359f39afdc80b69ee7d210f | 52c7e8663963fa105ea5691b5b38e30b73fd1b1b | refs/heads/master | 2020-04-06T07:13:05.330211 | 2016-09-06T05:40:49 | 2016-09-06T05:40:49 | 62,302,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,724 | py | #!/usr/bin/python
import sys
import time
import re
import candlestick_generator
# Created by DT
# Updated by CCF
# Last update: 20140103, CCF
# Version v0.3
# Change:
# * Parse the line char by char instead of splitting on "|"
# * Data structure change
# * New function: tokenizeLine(line)
# Version v0.2
# Change:
# Added deal src id list (deal_src_id_list). The list will be enabled if OMS (key = 110) is enabled.
######################################################################################################
# Convert RAW EBroker real time log file into the following formats:
# 091500,HSIH3,23330,44,B,23330,2,23306,3,23304,4,23301,1,23300,2,A,23336,3,23338,1,23345,1,23347,4,23350,6
#
# time ,ID ,last price,Accu Size,B,<Price, Size> x 5,A,<Price, Size> x 5
# Example: ./convert_extract_ebroker_market_data.py EBrokerSampleInput.log 080000 145900 No BidAskTradeOMS
#
# The 4th argument is used to toggle follow file
# Example: ./convert_extract_ebroker_market_data.py EBrokerSampleInput.log 080000 145900 Yes BidAskTradeOMS
#
# The 5th argument is used to specify what output we want to generate
# supported types are:
# BidAsk output line when there are Bid Ask changes
# BidAskTrade output line when there are Bid Ask or Trade (keyvalue 17,3) changes
# BidAskTrade_DH_DL_PC_TO output line when there are Bid Ask or Trade (keyvalue 17,3) changes
# e.g.
# 091500,HSIH3,23330,44,B,23330,2,23306,3,23304,4,23301,1,23300,2,A,23336,3,23338,1,23345,1,23347,4,23350,6,PC,23350,DH,24000,DL,23000,TO,0
# BidAskTradeOMS output line when there are Bid Ask or OMS Trade (110) changes
#
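# Illustrative sketch (added; not part of the original converter): parsing one
# output line back into a dict, following the format described above. The
# field names chosen here are assumptions made for readability only.
def _parse_output_line_example(line):
    fields = line.split(",")
    tick = {'time': fields[0], 'id': fields[1],
            'last_price': float(fields[2]), 'acc_size': int(fields[3])}
    bid_start = fields.index('B') + 1
    ask_start = fields.index('A') + 1
    # Five (price, size) depth levels on each side
    tick['bids'] = [(float(fields[i]), int(fields[i + 1])) for i in range(bid_start, bid_start + 10, 2)]
    tick['asks'] = [(float(fields[i]), int(fields[i + 1])) for i in range(ask_start, ask_start + 10, 2)]
    return tick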
######################################################################################
### http://stackoverflow.com/questions/1475950/tail-f-in-python-with-no-time-sleep ###
######################################################################################
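# Deal source ids treated as genuine on-exchange trades when the OMS stream
# (key 110) is enabled; see the v0.2 change note above and updateAccSizeOMS below.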
deal_src_id_list = [1,7,36,43]
candlestick_period_size=2
def follow(thefile):
thefile.seek(0,0) # if change to (0,2) -> Go to the end of the file
sleep = 0.00001
while True:
line = thefile.readline()
if not line:
time.sleep(sleep) # Sleep briefly
if sleep < 1.0:
sleep += 0.00001
continue
sleep = 0.00001
yield line
def testfollow(filename):
logfile = open(filename)
loglines = follow(logfile)
for line in loglines:
print line,
###
def myInt(fieldvalue) :
if (len(fieldvalue)==0) :
return 0
else :
return int(fieldvalue)
def init():
global datafile, currPrices
if logfile == "stdin":
datafile = sys.stdin
else:
datafile = open(logfile, "r")
currPrices = {}
def initlib():
global currPrices
currPrices = {}
def initID(currID):
global currPrices
if (currID + "_" + "bid" in currPrices):
return
currPrices[currID + "_" + "bid"] = {}
currPrices[currID + "_" + "ask"] = {}
currPrices[currID + "_" + "bidSize"] = {}
currPrices[currID + "_" + "askSize"] = {}
currPrices[currID + "_" + "bidQueue"] = {}
currPrices[currID + "_" + "askQueue"] = {}
currPrices[currID] = {}
currPrices[currID]["lastPrice"] = 999999
currPrices[currID]["accSize"] = 0
currPrices[currID]["previousClose"] = 999999
currPrices[currID]["dayHigh"] = 999999
currPrices[currID]["dayLow"] = 999999
currPrices[currID]["turnOver"] = 999999
feedcode=currID.split("_")[2]
mydate=currID.split("_")[1]
starttime="091500"
currPrices[currID]["candlestick_generator"] = candlestick_generator.candlestick_generator(feedcode,candlestick_period_size,mydate,starttime)
for i in range(5):
currPrices[currID + "_bid"][i] = 999999
currPrices[currID + "_bidSize"][i] = 999999
currPrices[currID + "_bidQueue"][i] = 999999
currPrices[currID + "_ask"][i] = 999999
currPrices[currID + "_askSize"][i] = 999999
currPrices[currID + "_askQueue"][i] = 999999
def updateDepthPrice(currID, isBid, depth, price):
global currPrices
if len(price) > 0:
currPrices[currID + "_" + ["ask" , "bid"][isBid]][depth] = float(price)
else:
currPrices[currID + "_" + ["ask" , "bid"][isBid]][depth] = float(999999)
def updateDepthSize(currID, isBid, depth, size):
global currPrices
currPrices[currID + "_" + ["askSize" , "bidSize"][isBid]][depth] = myInt(size)
def updateDepthQueue(currID, isBid, depth, size):
global currPrices
currPrices[currID + "_" + ["askQueue" , "bidQueue"][isBid]][depth] = int(size)
def printDepthInfo(currID, updatetime):
global currPrices
global genType
outstr = "%s,%s,%s,%s,"%(updatetime.replace(":",""), currID.split("_")[2],currPrices[currID]["lastPrice"],currPrices[currID]["accSize"])
bidOutStr = ""
askOutStr = ""
for i in range(5):
bidOutStr = bidOutStr + "%.3lf,%d%s"%(currPrices[currID + "_bid"][i], currPrices[currID + "_bidSize"][i], ["",","][i <4])
askOutStr = askOutStr + "%.3lf,%d%s"%(currPrices[currID + "_ask"][i], currPrices[currID + "_askSize"][i], ["",","][i <4])
pc_dh_dl_toStr = ",PC," + "%.3lf"%(currPrices[currID]["previousClose"])
pc_dh_dl_toStr = pc_dh_dl_toStr + ",DH," + "%.3lf"%(currPrices[currID]["dayHigh"])
pc_dh_dl_toStr = pc_dh_dl_toStr + ",DL," + "%.3lf"%(currPrices[currID]["dayLow"])
pc_dh_dl_toStr = pc_dh_dl_toStr + ",TO," + "%d"%(currPrices[currID]["turnOver"])
if (genType == "BidAskTrade_DH_DL_PC_TO"):
return outstr + "B," + bidOutStr + ",A," + askOutStr + pc_dh_dl_toStr
else:
return outstr + "B," + bidOutStr + ",A," + askOutStr
def updateLastPrice(currID, lastPrice):
global currPrices
currPrices[currID]["lastPrice"] = lastPrice
def updateLastPriceOMS(currID, OMS):
#110| 09:14:00 46 20776 1 20|
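# After collapsing repeated spaces, the fields appear to be (inferred from the
# code below, not from a documented spec): [1]=time, [2]=trade size,
# [3]=trade price, [5]=deal source id.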
global currPrices
myfields = OMS.split(" ")
# print "OMS",myfields
# 20131209 CCF
# if int(myfields[5]) <= 2 or int(myfields[5]) == 43 or int(myfields[5]) == 7:
if int(myfields[5]) in deal_src_id_list:
currPrices[currID]["lastPrice"] = float(myfields[3])
def updateAccSize(currID, accSize):
global currPrices
currPrices[currID]["accSize"] = accSize
def updatePreviousClose(currID, previousClose):
global currPrices
currPrices[currID]["previousClose"] = float(previousClose)
def updateDayHigh(currID, dayHigh):
global currPrices
currPrices[currID]["dayHigh"] = float(dayHigh)
def updateDayLow(currID, dayLow):
global currPrices
currPrices[currID]["dayLow"] = float(dayLow)
def updateTurnOver(currID, turnOver):
global currPrices
# currPrices[currID]["turnOver"] = int(turnOver)
currPrices[currID]["turnOver"] = float(turnOver)
def updateAccSizeOMS(currID, OMS):
#110| 09:14:00 46 20776 1 20|
global currPrices
myfields = OMS.split(" ")
# 20131209 CCF
# if int(myfields[5]) <= 2 or int(myfields[5]) == 43 or int(myfields[5]) == 7:
if int(myfields[5]) in deal_src_id_list:
currPrices[currID]["accSize"] += int(myfields[2])
return True
else:
return False
# Function: tokenizeLine
# input (raw_line): EBroker RAW data line
# input (delim): Delimiter
# output: key-value pair map
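# Example (illustrative): tokenizeLine('image|HSIH3|3|23330|17|44|', '|')
# returns {'header': 'image', 'subscription_id': 'HSIH3', '3': '23330', '17': '44'}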
def tokenizeLine(raw_line, delim):
keyvalue_map = {}
line = raw_line.rstrip("\n")
# print "line =", line
header, next_pos = getNextField(line, delim, 0)
subscription_id, next_pos = getNextField(line, delim, next_pos)
keyvalue_map["header"] = header
keyvalue_map["subscription_id"] = subscription_id
for kv in getNextKeyValue(line, delim, next_pos):
keyvalue_map[kv[0]] = kv[1]
# print keyvalue_map
return keyvalue_map
def getNextField(line, delim, current_pos):
start_index = current_pos
end_index = line.find(delim, start_index)
return line[start_index:end_index],end_index+1
def getNextKeyValue(line, delim, current_pos):
while True:
key, next_pos = getNextField(line, delim, current_pos)
#print "current_pos = ", current_pos, key, next_pos
if (next_pos == 0):
return
value_start_pos = next_pos
value, value_end_pos = getNextField(line, delim,value_start_pos)
#print "value = ", value, value_end_pos
next_key = ""
next_pos = value_end_pos
while (not next_key.isdigit()):
next_key, next_pos = getNextField(line, delim, next_pos)
#print "next_key = ", next_key ,next_pos
if (next_pos == 0):
break
if (not next_key.isdigit()):
value_end_pos = next_pos
value = line[value_start_pos:value_end_pos-1]
current_pos = value_end_pos
yield key, value
# Once the scan reaches the end of the line, value_end_pos wraps to 0;
# return here so the generator does not re-parse the line from the start.
if (current_pos == 0):
    return
def parseLine(line):
global currPrices
global genType
global startTime, endTime
if (len(genType)<=0):
return ""
keyvalue_map = tokenizeLine(line,"|")
fields = line.rstrip("\n").split("|")  # legacy split; only the commented-out lines below still use it
updateInfo=""
updatetime = ""
#currID = fields[1]
# if fields[0] == "image":
currID = keyvalue_map["subscription_id"]
if (keyvalue_map["header"] == "image"):
initID(currID)
hasOMSTrade = False
deal_src_id = -1
# for i in range(2,len(fields)-1,2):
for keyf, value in keyvalue_map.items():
if (keyf.isdigit()):
keyf = int(keyf)
else:
continue
if keyf == 1 or keyf == 81 or keyf == 82 or keyf == 83 or keyf == 84:
updateDepthPrice(currID, True, [keyf-80, 0][keyf < 80], value)
updateInfo = updateInfo + "bid_price %s " %(value)
if keyf == 2 or keyf == 91 or keyf == 92 or keyf == 93 or keyf == 94:
updateDepthPrice(currID, False, [keyf-90, 0][keyf < 90], value)
updateInfo = updateInfo + "ask_price %s " %(value)
if keyf == 16 or keyf == 51 or keyf == 52 or keyf == 53 or keyf == 54:
updateDepthSize(currID, True, [keyf-50, 0][keyf < 50], value)
updateInfo = updateInfo + "bid_size %s " %(value)
if keyf == 19 or keyf == 61 or keyf == 62 or keyf == 63 or keyf == 64:
updateDepthSize(currID, False, [keyf-60, 0][keyf < 60], value)
updateInfo = updateInfo + "ask_size %s " %(value)
if keyf == 17 and (genType == "BidAsk" or genType == "BidAskTrade" or genType == "BidAskTrade_DH_DL_PC_TO"):
updateAccSize(currID,value)
if (genType == "BidAskTrade" or genType == "BidAskTrade_DH_DL_PC_TO"):
updateInfo = updateInfo + "acc_size %s " %(value)
if keyf == 3 and (genType == "BidAsk" or genType == "BidAskTrade_DH_DL_PC_TO" or genType == "BidAskTrade"):
updateLastPrice(currID,value)
if (genType == "BidAskTrade_DH_DL_PC_TO" or genType == "BidAskTrade"):
updateInfo = updateInfo + "last_traded %s " %(value)
# if keyf == 41 and (genType == "BidAskTrade"):
# updateLastPrice(currID,fields[i+1])
# updateInfo = updateInfo + "last_traded %s " %(fields[i+1])
if keyf == 110 and (genType == "BidAskTradeOMS"):
OMS=re.sub(' +',' ',value)
updateLastPriceOMS(currID,OMS)
if not keyvalue_map["header"] == "image":
notoffExchange=updateAccSizeOMS(currID,OMS)
if (notoffExchange):
updateInfo = updateInfo + "OMS %s " %(value)
deal_src_id = OMS.split(" ")[5]
hasOMSTrade = True
if keyf == 33:
updatetime = value
if keyf == 31 and (genType=="BidAskTrade_DH_DL_PC_TO"):
updatePreviousClose(currID,value)
updateInfo = updateInfo + "previous_close %s " %(value)
if keyf == 32 and (genType=="BidAskTrade_DH_DL_PC_TO"):
updateDayLow(currID,value)
updateInfo = updateInfo + "day_low %s " %(value)
if keyf == 37 and (genType=="BidAskTrade_DH_DL_PC_TO"):
updateDayHigh(currID,value)
updateInfo = updateInfo + "day_high %s " %(value)
if keyf == 38 and (genType=="BidAskTrade_DH_DL_PC_TO"):
updateTurnOver(currID,value)
updateInfo = updateInfo + "turnover %s " %(value)
if (len(updateInfo)>0):
currTime = updatetime.replace(":","")
if startTime < currTime and endTime > currTime:
#print "Updated : (%s) - %s - %s" %(updatetime, currID, updateInfo)
resultstr=printDepthInfo(currID, updatetime) # + ","+ str(deal_src_id) + "," + str(hasOMSTrade)
tick=resultstr.split(",")
currPrices[currID]["candlestick_generator"].process_and_gen_period_from_market_data(tick)
return resultstr
else:
return ""
else:
return ""
return ""
def parseLogfile():
global datafile
global currPrices
if (followFile):
loglines = follow(datafile)
for line in loglines:
resultstr=parseLine(line)
if not resultstr == "":
print resultstr
else:
for line in datafile.readlines():
resultstr=parseLine(line)
if not resultstr == "":
print resultstr
#testfollow(sys.argv[1])
if __name__ == "__main__":
global genType
logfile=sys.argv[1]
#"EBrokerSample.log"
startTime=sys.argv[2]
#"080000"
endTime=sys.argv[3]
#"090000"
genType="BidAskTradeOMS"
if len(sys.argv) >= 6:
genType=sys.argv[5]
if len(sys.argv) >=5:
followFile=(sys.argv[4] == "Yes")
else:
followFile = False
init()
parseLogfile()
| [
"hfyan0@gmail.com"
] | hfyan0@gmail.com |
7a6c3780f0af337afc14ae8694d810e09fe88f1e | 659d6e67281480ca7f3c6970444144878e86d6f3 | /graphwave/benchmark_algorithms/roleX.py | 58d6220758f94fa0392be4339e29953400c72264 | [] | no_license | Sephora-M/StateFeaturesLearning | 3b2ff6783fb48708911c674e6c5c0a9a70c3e1d1 | 349b001305195c0971b2486f90d9122e197a8cbe | refs/heads/master | 2022-06-22T09:20:49.223407 | 2020-01-19T22:41:16 | 2020-01-19T22:41:16 | 234,719,173 | 2 | 0 | null | 2022-06-22T00:13:27 | 2020-01-18T10:41:04 | Python | UTF-8 | Python | false | false | 13,518 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 3 09:57:52 2017
@author: Lab41: Github: Circulo/circulo/algorithms/rolx.py
#### https://github.com/Lab41/Circulo/blob/master/circulo/algorithms/rolx.py
Set of functions to compute the RolX featurization
"""
import sys
import math
import igraph
import numpy as np
from numpy.linalg import lstsq
from numpy import dot
from scipy.cluster.vq import kmeans2, vq
from scipy.linalg import norm
from scipy.optimize import minimize
from sklearn.decomposition import NMF
def extract_rolx_roles(G, roles=2):
"""
Top-level function. Extracts node-role matrix and sensemaking role-feature matrix as necessary.
"""
print("Creating Vertex Features matrix")
V = vertex_features(G)
# print("V is a %s by %s matrix." % V.shape)
basis, coef = get_factorization(V, roles)
H = basis
# print("Node-role matrix is of dimensions %s by %s" % H.shape)
# print(H)
K = make_sense(G, H)
# print("Role-feature matrix is of dimensions %s by %s" % K.shape)
# print(K)
return H, K
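# Illustrative usage sketch (added; not part of the original Lab41 code). The
# graph and role count below are arbitrary; assumes python-igraph is installed.
def _rolx_usage_example():
    g = igraph.Graph.Erdos_Renyi(n=50, p=0.1)
    H, K = extract_rolx_roles(g, roles=2)
    # Hard role assignment: the role with the largest loading for each node
    return np.asarray(H).argmax(axis=1)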
def extract_rolx_roles_bis(G, V, roles=2):
"""
Top-level function. Extracts node-role matrix and sensemaking role-feature matrix as necessary.
Takes a precomputed node-feature matrix V instead of building it from G.
"""
basis, coef = get_factorization(V, roles)
H = basis
print("Node-role matrix is of dimensions %s by %s" % H.shape)
# print(H)
K = make_sense(G, H)
print("Role-feature matrix is of dimensions %s by %s" % K.shape)
# print(K)
return H, K
def recursive_feature(G, f, n):
"""
G: iGraph graph with annotations
f: feature function to evaluate (a callable such as degree or egonet)
n: int, recursion level
Computes the given function recursively on each vertex
Current precondition: already have run the computation for G, func, n-1.
"""
return np.matrix(recursive_feature_array(G, f, n))
def recursive_feature_array(G, func, n):
"""
Computes recursive features of the graph G for the provided function of G, returning
the matrix representing the nth level of the recursion.
"""
attr_name = "_rolx_" + func.__name__ + "_" + str(n)
if attr_name in G.vs.attributes():
result = np.array(G.vs[attr_name])
return result
if n == 0:
stats = func(G)
result = np.array([[x] for x in stats])
result = result * 1.0
G.vs[attr_name] = result
return result
prev_stats = recursive_feature_array(G, func, n - 1)
all_neighbor_stats = []
for v in G.vs:
neighbors = G.neighbors(v)
degree = len(neighbors)
if degree == 0:
neighbor_avgs = neighbor_sums = np.zeros(prev_stats[0].size)
else:
prev_neighbor_stats = [prev_stats[x] for x in neighbors]
neighbor_sums_vec = sum(prev_neighbor_stats)
neighbor_avgs_vec = neighbor_sums_vec / degree
v_stats = np.concatenate((neighbor_sums_vec, neighbor_avgs_vec), axis=0)
all_neighbor_stats.append(v_stats)
G.vs[attr_name] = all_neighbor_stats
return all_neighbor_stats
def approx_linear_solution(w, A, threshold=1e-15):
'''
Checks if w is linearly dependent on the columns of A, this is done by solving the least squares problem (LSP)
min || w - Ax ||_2^2
x
and checking if || w - Ax_star || <= threshold, where x_star is the arg_minimizer of the LSP
w: column vector
A: matrix
threshold: int
'''
x0 = np.zeros(A.shape[1])
x_star, residuals, rank, s = lstsq(A, w)
norm_residual = norm(residuals)
result = True if norm_residual <= threshold else False
return (result, norm_residual, x_star)
def degree(G):
""" Auxiliary function to calculate the degree of each element of G. """
return G.degree()
def vertex_egonet(G, v):
""" Computes the number of edges in the ego network of the vertex v. """
ego_network = G.induced_subgraph(G.neighborhood(v))
ego_edges = ego_network.ecount()
return ego_edges
def egonet(G):
""" Computes the ego network for all vertices v in G. """
return [vertex_egonet(G, v) for v in G.vs]
def vertex_egonet_out(G, v):
""" Computes the outgoing edges from the ego network of the vertex v in G. """
neighbors = G.neighborhood(v)
ego_network = G.induced_subgraph(neighbors)
ego_edges = ego_network.ecount()
degree_sum = sum([G.degree(v) for v in neighbors])
    out_edges = degree_sum - 2 * ego_edges # Summing over degrees double-counts every edge within the ego network
return out_edges
def egonet_out(G):
""" Computes the number of outgoing ego network edges for every vertex in G. """
return [vertex_egonet_out(G, v) for v in G.vs]
def vertex_features(g):
"""
Constructs a vertex feature matrix using recursive feature generation, then uses least-squares solving
to eliminate those exhibiting approximate linear dependence.
"""
G = g.copy()
num_rows = G.vcount()
features = [degree, egonet, egonet_out]
V = np.matrix(np.zeros((num_rows, 16 * len(features))))
next_feature_col = 0
for feature in features:
base = recursive_feature(G, feature, 0)
base = base / norm(base)
V = add_col(V, base, next_feature_col)
next_feature_col += 1
level = 1
accepted_features = True
while accepted_features:
accepted_features = False
feature_matrix = recursive_feature(G, feature, level)
rows, cols = feature_matrix.shape
for i in range(cols):
b = feature_matrix[:, i]
b = b / norm(b)
mat = V[:, :next_feature_col]
threshold = 10.0 ** (-15 + level)
(is_approx_soln, _, _) = approx_linear_solution(b, mat, threshold)
if not is_approx_soln:
V = add_col(V, b, next_feature_col)
next_feature_col += 1
accepted_features = True
level += 1
return V[:, :next_feature_col]
def add_col(V, b, insert_col):
""" Add the given column b to the matrix V, enlarging the matrix if necessary. """
rows, cols = V.shape
if insert_col == cols: # need to resize V
zeros = np.matrix(np.zeros((rows, 1)))
V = np.concatenate((V, zeros), axis=1)
V[:, insert_col] = b
return V
def kmeans_quantize(M, bits):
""" Performs k-means quantization on the given matrix. Returns the encoded matrix and the number of bits needed for encoding it. """
k = 2 ** bits
obs = np.asarray(M).reshape(-1)
centroid, label = kmeans2(obs, k)
enc_M = [centroid[v] for v in label]
enc_M = np.matrix(enc_M).reshape(M.shape)
return enc_M, (bits * enc_M.size)
def kl_divergence(A, B):
""" Computes the Kullback-Leibler divergence of the two matrices A and B. """
a = np.asarray(A, dtype=np.float)
b = np.asarray(B, dtype=np.float)
return np.sum(np.where(a != 0, a * np.log(a / b), 0))
def description_length(V, fctr_res, bits=10):
""" Computes the length necessary to describe the given model with the given number of bits. """
W = fctr_res[0]
H = fctr_res[1]
enc_W, enc_W_cost = kmeans_quantize(W, bits)
enc_H, enc_H_cost = kmeans_quantize(H, bits)
enc_cost = enc_W_cost + enc_H_cost
err_cost = kl_divergence(V, enc_W * enc_H)
return enc_W, enc_H, enc_cost, err_cost
def standardize_rows(M):
""" Distribute the rows of the cost matrix normally to allow for accurate comparisons of error and description
cost. """
rv = np.matrix(M)
for i in range(rv.shape[0]):
mean = np.mean(M[i, :])
stdev = np.std(M[i, :])
rv[i, :] = (M[i, :] - mean) / stdev
return rv
# def standardize(M):
# m_flat = np.asarray(M).reshape(-1)
# mean = np.mean(m_flat)
# stdev = np.std(m_flat)
# m_flat = (m_flat - mean)/stdev
#
# return m_flat.reshape(M.shape)
def get_factorization(V, num_roles):
""" Obtains a nonnegative matrix factorization of the matrix V with num_roles intermediate roles. """
model = NMF(n_components=num_roles, init='random', random_state=0)
model.fit(V)
node_roles = model.transform(V)
role_features = model.components_
return np.matrix(node_roles), np.matrix(role_features)
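# Design note: NMF is used (rather than, say, an SVD) so that both factors stay
# nonnegative and each row of the node-role matrix reads as a soft role
# assignment; random_state=0 keeps the factorization reproducible.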
def get_optimal_factorization(V, min_roles=2, max_roles=6, min_bits=1, max_bits=10):
""" Uses grid search to find the optimal parameter number and encoding of the given matrix factorization. """
max_roles = min(max_roles, V.shape[1]) # Can't have more possible roles than features
num_role_options = max_roles - min_roles
num_bit_options = max_bits - min_bits
mat_enc_cost = np.zeros((num_role_options, num_bit_options))
mat_err_cost = np.zeros((num_role_options, num_bit_options))
mat_fctr_res = [[0] * num_bit_options] * num_role_options
# Setup and run the factorization problem
for i in range(num_role_options):
rank = min_roles + i
fctr_res = get_factorization(V, rank)
for j in range(num_bit_options):
bits = min_bits + j
enc_W, enc_H, enc_cost, err_cost = description_length(V, fctr_res, bits)
mat_enc_cost[i, j] = enc_cost
mat_err_cost[i, j] = err_cost
mat_fctr_res[i][j] = (enc_W, enc_H)
mat_std_enc_cost = standardize_rows(mat_enc_cost)
mat_std_err_cost = standardize_rows(mat_err_cost)
mat_total_cost = mat_enc_cost + mat_err_cost
mat_total_std_cost = mat_std_enc_cost + mat_std_err_cost
# print mat_total_cost
print("rank, bits, enc_cost, err_cost, total_cost, std_enc_cost, std_err_cost, std_total_cost")
for i in range(num_role_options):
for j in range(num_bit_options):
rank = min_roles + i
bits = min_bits + j
enc_cost = mat_enc_cost[i, j]
err_cost = mat_err_cost[i, j]
std_enc_cost = mat_std_enc_cost[i, j]
std_err_cost = mat_std_err_cost[i, j]
total_cost = mat_total_cost[i, j]
total_std_cost = mat_total_std_cost[i, j]
print("%s, %s, (%s, %s, %s), (%s, %s, %s)" % (rank, bits,
enc_cost, err_cost, total_cost, std_enc_cost, std_err_cost,
total_std_cost))
    min_idx = mat_total_std_cost.argmin()
    min_coord = np.unravel_index(min_idx, mat_total_std_cost.shape)
    print('min cost @', min_idx, ' or at ', min_coord)
min_role_index, min_bit_index = min_coord
min_role_value = min_role_index + min_roles
min_bit_value = min_bit_index + min_bits
min_std_enc_cost = mat_std_enc_cost[min_coord]
min_std_err_cost = mat_std_err_cost[min_coord]
min_total_std_cost = mat_total_std_cost[min_coord]
print("%s, %s, (%s, %s, %s)" % (
min_role_value, min_bit_value, min_std_enc_cost, min_std_err_cost, min_total_std_cost))
return mat_fctr_res[min_role_index][min_bit_index]
def make_sense(G, H):
""" Given graph G and node-role matrix H, returns a role-feature matrix K for sensemaking analyses of roles. """
features = ['betweenness', 'closeness', 'degree', 'diversity', 'eccentricity', 'pagerank', 'personalized_pagerank',
'strength']
feature_fns = [getattr(G, f) for f in features]
feature_matrix = [func() for func in feature_fns]
feature_matrix = np.matrix(feature_matrix).transpose()
# print(feature_matrix)
M = feature_matrix
for i in range(M.shape[1]):
M[:, i] = M[:, i] / norm(M[:, i])
K = complete_factor(H, M, h_on_left=True)
# print(K)
return K
def sense_residual_left_factor(W, H, M):
W = np.matrix(W).reshape((M.shape[0], H.shape[0]))
return norm(M - W * H)
def sense_residual_right_factor(K, H, M):
K = np.matrix(K).reshape((H.shape[1], M.shape[1]))
# print(M.shape,H.shape,K.shape)
return norm(M - H * K)
def complete_factor(H, M, h_on_left=True):
"""Given nonnegative matrix M and a nonnegative factor H of M, finds the other (nonnegative) factor of M.
H: known factor of matrix M.
M: product matrix.
h_on_left: boolean, true if H is the left factor of M, false if H is the right factor.
If H is left factor, find the matrix K such that HK=M. If H is the right factor, finds W such that WH=M
Result is an appropriately-sized matrix. """
if h_on_left:
shape = (H.shape[1], M.shape[1])
residual = sense_residual_right_factor
else:
shape = (M.shape[0], H.shape[0])
residual = sense_residual_left_factor
size = shape[0] * shape[1]
guess = np.random.rand(size)
bounds = [(0, None)] * size # (all elements of matrix must be nonnegative)
result = minimize(residual, guess, args=(H, M), method='L-BFGS-B', bounds=bounds)
x = result["x"]
G = np.matrix(x).reshape(shape)
return G
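# Quick sanity check (hedged sketch with synthetic matrices):
#     H = np.matrix(np.random.rand(10, 2))
#     K_true = np.matrix(np.random.rand(2, 4))
#     K_est = complete_factor(H, H * K_true, h_on_left=True)  # K_est ~= K_true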
def main(argv):
    G = igraph.Graph.Read_GML(argv[0])
    if len(argv) > 1:
        roles = int(argv[1])
        # A role-based distance between nodes could be derived downstream
        # from the node-role distributions returned here.
        return extract_rolx_roles(G, roles=roles)
    else:
        return extract_rolx_roles(G)
# if __name__ == "__main__":
# main(sys.argv[1:])
| [
"sephora.madjiheurem@gmail.com"
] | sephora.madjiheurem@gmail.com |
b95ad6aaa1502bf5b398071790eda12f55ab22b4 | bcfbf38b1367ba46ea7000afce190ca3e17a0820 | /packages/core.py | f332ff7e327c60fbf7de0f0072fe7c7b712ca488 | [
"MIT"
] | permissive | georgeyjm/Dumpling-Bot | ce5a6ba3fec654a7256c2d8956ded66854fe50ae | 3f875e665a530cd8777f65def8c72b728ab0414e | refs/heads/master | 2021-09-08T18:42:52.175338 | 2017-03-18T08:51:10 | 2017-03-18T08:51:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | def log(infoType, info):
    '''Log any information or error.\nThe infoType parameter only supports INFO or ERROR.'''
if infoType not in ('INFO', 'ERROR'):
log('ERROR', 'Unknown Info Type -> {} ({})'.format(infoType, info))
return -1
print('[{}] {}'.format(infoType, info))
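# Example usage (illustrative):
#     log('INFO', 'Bot started')   # prints "[INFO] Bot started"
#     log('WARN', 'oops')          # unknown type: logs an ERROR and returns -1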
| [
"noreply@github.com"
] | georgeyjm.noreply@github.com |
2874ae6f2c2ba711a43128b0a96e35ae83ae3f40 | 298df017f47c6ad55c0845ceb3359df011d52e2c | /ch7_list_004.py | 973ee83b42a6000a59ee274bbf8cae627542b7c9 | [] | no_license | cmjao/Python-practicespace | 6b8af76d36de3c9afd8acd50601f73c810738723 | 3a69eb3cb593ab1005a112fc498b22711f0c3725 | refs/heads/master | 2022-12-01T20:41:33.750971 | 2020-08-05T08:57:14 | 2020-08-05T08:57:14 | 279,794,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | start1 = ['fee', 'fie', 'foe']
# (first, second)
rhymes = [
('flop', 'get a mop'),
('fope', 'turn the rope'),
('fa', 'get your ma'),
('fudge', 'call the judge'),
('fat', 'pet the cat'),
('fog', 'walk the dog'),
('fun', 'say we are done'),
]
start2 = "Someone better"
# Line 1: start1 (each word capitalized, joined with "! ", ending in "!") + first (capitalized, plus "!")
# Line 2: start2 + space + second + period
for t in rhymes:
print('! '.join(start1).title() + '!', t[0].capitalize() + '!')
print(start2, t[1] + '.') | [
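# First stanza printed by the loop above:
#     Fee! Fie! Foe! Flop!
#     Someone better get a mop.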
"gtmin1118@gmail.com.tw"
] | gtmin1118@gmail.com.tw |
dd7e3a62a5b216c5ebcc9450423021cdcf93c847 | 08870aa021eecd19dd7ff26d9bfa9a05806eb4db | /Compliant_control/Modifications/PILCO/controllers.py | 88bf8f0ca08c0245a0d21fe98f17e0778a33130d | [
"Apache-2.0"
] | permissive | soumayya-plaif/Compliant_control | 03e9c4c024b3c013ae1218832a7428a5516b3b33 | 485f627fa83d59f414f41bd57c5d37528ef5f1ec | refs/heads/main | 2023-07-16T23:38:16.546231 | 2021-08-27T18:01:06 | 2021-08-27T18:01:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,488 | py | import tensorflow as tf
from tensorflow_probability import distributions as tfd
import numpy as np
import gpflow
from gpflow import Parameter
from gpflow import set_trainable
from gpflow.utilities import positive
f64 = gpflow.utilities.to_default_float
from .models import MGPR
float_type = gpflow.config.default_float()
def squash_sin(m, s, max_action=None, realtime = False):
'''
Squashing function, passing the controls mean and variance
through a sinus, as in gSin.m. The output is in [-max_action, max_action].
IN: mean (m) and variance(s) of the control input, max_action
OUT: mean (M) variance (S) and input-output (C) covariance of the squashed
control input
'''
if realtime == False: #tf
k = tf.shape(m)[1]
if max_action is None:
max_action = tf.ones((1,k), dtype=float_type) #squashes in [-1,1] by default
else:
max_action = max_action * tf.ones((1,k), dtype=float_type)
M = max_action * tf.exp(-tf.linalg.diag_part(s) / 2) * tf.sin(m)
lq = -( tf.linalg.diag_part(s)[:, None] + tf.linalg.diag_part(s)[None, :]) / 2
q = tf.exp(lq)
S = (tf.exp(lq + s) - q) * tf.cos(tf.transpose(m) - m) \
- (tf.exp(lq - s) - q) * tf.cos(tf.transpose(m) + m)
S = max_action * tf.transpose(max_action) * S / 2
C = max_action * tf.linalg.diag( tf.exp(-tf.linalg.diag_part(s)/2) * tf.cos(m))
return M, S, tf.reshape(C,shape=[k,k])
if realtime: #np
k = np.shape(m)[1]
if max_action is None:
max_action = np.ones((1,k), dtype=float_type) #squashes in [-1,1] by default
else:
max_action = max_action *np.ones((1,k), dtype=float_type)
M = max_action * np.exp(-np.diag(s) / 2) * np.sin(m)
return M, None, None
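# Shape sanity check (hedged sketch):
#     m = tf.zeros((1, 2), dtype=float_type)
#     s = 0.1 * tf.eye(2, dtype=float_type)
#     M, S, C = squash_sin(m, s)   # M: (1, 2), S: (2, 2), C: (2, 2)
# With m = 0 the squashed mean is exactly 0, since sin(0) = 0.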
class LinearController(gpflow.Module):
def __init__(self, state_dim, control_dim, max_action=1.0):
gpflow.Module.__init__(self)
self.W = Parameter(np.random.rand(control_dim, state_dim))
self.b = Parameter(np.random.rand(1, control_dim))
"""
self.W = np.random.rand(control_dim, state_dim)
self.b = np.random.rand(1, control_dim)
"""
self.max_action = max_action
def compute_action(self, m, s, squash=True, realtime = False):#True!!!
'''
Simple affine action: M <- W(m-t) - b
IN: mean (m) and variance (s) of the state
OUT: mean (M) and variance (S) of the action
'''
#tf (used in policy optimization)
if realtime == False:
M = m @ tf.transpose(self.W) + self.b # mean output
S = self.W @ s @ tf.transpose(self.W) # output variance
V = tf.transpose(self.W) #input output covariance
if squash:
M, S, V2 = squash_sin(M, S, self.max_action, realtime=realtime)
V = V @ V2
return M, S, V
#np (used when running the trained system)
if realtime:
np_W = self.W.numpy()
np_b = self.b.numpy()
M = np.dot(m, np_W.T) + np_b
S = np.linalg.multi_dot([np_W, s,np_W.T])
if squash:
M, _, _ = squash_sin(M, S, self.max_action, realtime=realtime)
return M, None, None
def randomize(self):
mean = 0; sigma = 1
self.W.assign(mean + sigma*np.random.normal(size=self.W.shape))
self.b.assign(mean + sigma*np.random.normal(size=self.b.shape))
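# Minimal usage sketch (dimensions and values are illustrative assumptions):
#     ctrl = LinearController(state_dim=3, control_dim=1, max_action=2.0)
#     m, s = np.zeros((1, 3)), 0.01 * np.eye(3)
#     M, _, _ = ctrl.compute_action(m, s, squash=True, realtime=True)
# The sine squashing keeps M inside [-max_action, max_action].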
class FakeGPR(gpflow.Module):
def __init__(self, data, kernel, X=None, likelihood_variance=1e-4):
gpflow.Module.__init__(self)
if X is None:
self.X = Parameter(data[0], name="DataX", dtype=gpflow.default_float())
else:
self.X = X
self.Y = Parameter(data[1], name="DataY", dtype=gpflow.default_float())
self.data = [self.X, self.Y]
self.kernel = kernel
self.likelihood = gpflow.likelihoods.Gaussian()
self.likelihood.variance.assign(likelihood_variance)
set_trainable(self.likelihood.variance, False)
class RbfController(MGPR):
'''
An RBF Controller implemented as a deterministic GP
See Deisenroth et al 2015: Gaussian Processes for Data-Efficient Learning in Robotics and Control
Section 5.3.2.
'''
def __init__(self, state_dim, control_dim, num_basis_functions, max_action=1.0):
MGPR.__init__(self,
[np.random.randn(num_basis_functions, state_dim),
0.1*np.random.randn(num_basis_functions, control_dim)]
)
for model in self.models:
model.kernel.variance.assign(1.0)
set_trainable(model.kernel.variance, False)
self.max_action = max_action
def create_models(self, data):
self.models = []
for i in range(self.num_outputs):
kernel = gpflow.kernels.SquaredExponential(lengthscales=tf.ones([data[0].shape[1],], dtype=float_type))
transformed_lengthscales = Parameter(kernel.lengthscales, transform=positive(lower=1e-3))
kernel.lengthscales = transformed_lengthscales
kernel.lengthscales.prior = tfd.Gamma(f64(1.1),f64(1/10.0))
if i == 0:
self.models.append(FakeGPR((data[0], data[1][:,i:i+1]), kernel))
else:
self.models.append(FakeGPR((data[0], data[1][:,i:i+1]), kernel, self.models[-1].X))
def compute_action(self, m, s, squash=True, realtime = False): #True
'''
RBF Controller. See Deisenroth's Thesis Section
IN: mean (m) and variance (s) of the state
OUT: mean (M) and variance (S) of the action
'''
with tf.name_scope("controller") as scope:
iK, beta = self.calculate_factorizations()
M, S, V = self.predict_given_factorizations(m, s, 0.0 * iK, beta)
S = S - tf.linalg.diag(self.variance - 1e-6)
if squash:
M, S, V2 = squash_sin(M, S, self.max_action, realtime=False)
V = V @ V2
return M, S, V
def randomize(self):
print("Randomising controller")
for m in self.models:
m.X.assign(np.random.normal(size=m.data[0].shape))
m.Y.assign(self.max_action / 10 * np.random.normal(size=m.data[1].shape))
mean = 1; sigma = 0.1
m.kernel.lengthscales.assign(mean + sigma*np.random.normal(size=m.kernel.lengthscales.shape))
| [
"martihmy@stud.ntnu.no"
] | martihmy@stud.ntnu.no |
dcc354de5bac5131f98794f55fa6a1ebafd31fda | acabd628b26a165d84c325a1d0978b312f03caa2 | /code/factorial_no.py | 3513efa6f9540a8357073d9e400807b961773a27 | [] | no_license | Muthulekshmi-99/guvi | 7e298f44515fbafa373349c547e82e84866425b2 | 2a628cf69bc3beac295d36160e42329499a116af | refs/heads/master | 2020-06-03T00:10:37.702907 | 2019-06-19T17:26:20 | 2019-06-19T17:26:20 | 191,355,090 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | m=int(input())
n=1
for i in range (1,m+1):
n=n*i
print(n)
| [
"noreply@github.com"
] | Muthulekshmi-99.noreply@github.com |
8e8ae9bfed60b43e8a113dae5cd5686a87ef0c24 | 6dd7fb197d43bc15f24b3474c0b0bc1acbb5f489 | /no15_three_sum.py | 99435d38289f1e30d1a59d6c619565d5187d86db | [] | no_license | jackneer/my-leetcode | 27dbf04ce3431fadf98055a3fe432c4420f4c242 | dda5fe0a0ff6a89a7fc4b1a43e4c15ba0c53ac0d | refs/heads/master | 2020-12-01T19:40:55.501124 | 2020-02-15T10:07:53 | 2020-02-15T10:07:53 | 230,745,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | def three_sum(nums):
    seen = set()
    result = []
    for i in range(0, len(nums) - 2):
        for j in range(i + 1, len(nums) - 1):
            for k in range(j + 1, len(nums)):
                if nums[i] + nums[j] + nums[k] == 0:
                    # Deduplicate by the sorted triplet itself; summing absolute
                    # values would wrongly merge e.g. [-1, -1, 2] and [-2, 1, 1].
                    key = tuple(sorted((nums[i], nums[j], nums[k])))
                    if key not in seen:
                        seen.add(key)
                        result.append([nums[i], nums[j], nums[k]])
    return result
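# Note: this brute force is O(n^3); sorting nums first and running the classic
# two-pointer scan per anchor element would bring it down to O(n^2).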
def main():
print(three_sum([-1, 0, 1, 2, -1, -4]))
if __name__ == '__main__':
main() | [
"p500.wu@gmail.com"
] | p500.wu@gmail.com |
a21099da7c91dee4b39c5939673f21b41c5bc5a1 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/notifications/client/controls/notificationSettingList.py | 7b0f568634a511abff787bcc94b24bb894ef9d24 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 8,450 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\notifications\client\controls\notificationSettingList.py
from carbonui.control.scrollContainer import ScrollContainer
from carbonui.primitives.sprite import Sprite
from eve.client.script.ui.control.eveLabel import EveLabelMediumBold
from carbonui.primitives.container import Container
from notifications.client.controls.notificationSettingEntityDeco import NotificationSettingEntityDeco
from notifications.client.notificationSettings.notificationSettingHandler import NotificationSettingHandler
from notifications.client.notificationSettings.notificationSettingConst import ExpandAlignmentConst
import localization
import carbonui.const as uiconst
from carbonui.primitives.line import Line
import eve.common.script.util.notificationconst as notificationConst
from notifications.client.controls.treeViewSettingsItem import TreeViewSettingsItem
from notifications.common.formatters.mailsummary import MailSummaryFormatter
from notifications.common.formatting.notificationFormatMapping import NotificationFormatMapper
class NotificationSettingList(Container):
def ApplyAttributes(self, attributes):
Container.ApplyAttributes(self, attributes)
self.lastVerticalBarEnabledStatus = False
self.notificationSettingHandler = NotificationSettingHandler()
self.notificationSettingData = self.notificationSettingHandler.LoadSettings()
self.isDeveloperMode = attributes.get('developerMode', False)
self._SetupUI()
def _SetupUI(self):
self.settingsDescriptionRowContainer = Container(name='Settings', height=16, align=uiconst.TOTOP, parent=self, padding=(5, 5, 10, 0))
EveLabelMediumBold(name='Settings', align=uiconst.TOLEFT, parent=self.settingsDescriptionRowContainer, text=localization.GetByLabel('Notifications/NotificationSettings/CategorySubscriptions'), bold=True)
        Sprite(name='popupIcon', parent=self.settingsDescriptionRowContainer, align=uiconst.TORIGHT, texturePath='res:/UI/Texture/classes/Notifications/settingsPopupIcon.png', width=16, height=16, hint=localization.GetByLabel('Notifications/NotificationSettings/PopupVisibilityTooltip'))
        Sprite(name='visibilityIcon', parent=self.settingsDescriptionRowContainer, align=uiconst.TORIGHT, texturePath='res:/UI/Texture/classes/Notifications/settingsVisibleIcon.png', width=16, height=16, hint=localization.GetByLabel('Notifications/NotificationSettings/HistoryVisibilityTooltip'), padding=(0, 0, 6, 0))
self._MakeSeperationLine(self)
self.scrollList = ScrollContainer(name='scrollContainer', parent=self, align=uiconst.TOALL, padding=(5, 5, 5, 5))
self.scrollList.OnScrolledVertical = self.VerticalScrollInject
def _MakeSeperationLine(self, parent):
Line(name='topLine', parent=parent, align=uiconst.TOTOP, weight=1, padBottom=2, opacity=0.3)
def VerticalScrollInject(self, scrollTo):
self.AdjustCategoryHeaderForScrollBar()
def AdjustCategoryHeaderForScrollBar(self):
if self.lastVerticalBarEnabledStatus == self.scrollList.verticalScrollBar.display:
return
if self.scrollList.verticalScrollBar.display:
self.settingsDescriptionRowContainer.padRight = 10 + self.scrollList.verticalScrollBar.width
else:
self.settingsDescriptionRowContainer.padRight = 10
self.lastVerticalBarEnabledStatus = self.scrollList.verticalScrollBar.display
def _GetGroupScrollEntries(self):
entries = []
for group, list in notificationConst.groupTypes.iteritems():
groupName = localization.GetByLabel(notificationConst.groupNamePathsNewNotifications[group])
entries.append(self.GetGroupEntry(fakeID=group, groupName=groupName))
return entries
def PopulateScroll(self):
entries = self._GetGroupScrollEntries()
entries.sort(key=lambda entr: entr.data.GetLabel().lower())
for entry in entries:
self.scrollList.children.append(entry)
def ReloadScroll(self):
self.notificationSettingHandler = NotificationSettingHandler()
self.notificationSettingData = self.notificationSettingHandler.LoadSettings()
self.scrollList.Flush()
self.PopulateScroll()
def GetGroupEntry(self, fakeID, groupName):
from eve.client.script.ui.control.treeData import TreeData
rawNotificationList = notificationConst.groupTypes[fakeID]
groupSettings = {}
self.AppendEntryData(data=groupSettings, visibilityChecked=self.notificationSettingHandler.GetVisibilityStatusForGroup(fakeID, self.notificationSettingData), showPopupChecked=self.notificationSettingHandler.GetShowPopupStatusForGroup(fakeID, self.notificationSettingData), isGroup=True, id=fakeID)
childrenData = []
for notification in rawNotificationList:
settingLabel = notificationConst.notificationToSettingDescription.get(notification, None)
settingName = localization.GetByLabel(settingLabel)
params = {}
setting = self.notificationSettingData[notification]
self.AppendEntryData(data=params, visibilityChecked=setting.showAtAll, showPopupChecked=setting.showPopup, isGroup=False, id=notification)
notificationData = TreeData(label=settingName, parent=None, isRemovable=False, settings=params, settingsID=notification)
childrenData.append(notificationData)
childrenData.sort(key=lambda childData: childData.GetLabel().lower())
data = TreeData(label=groupName, parent=None, children=childrenData, icon=None, isRemovable=False, settings=groupSettings)
entry = TreeViewSettingsItem(level=0, eventListener=self, data=data, settingsID=fakeID, defaultExpanded=False)
return entry
def AppendEntryData(self, data, visibilityChecked, showPopupChecked, isGroup, id):
data.update({NotificationSettingEntityDeco.VISIBILITY_CHECKED_KEY: visibilityChecked,
NotificationSettingEntityDeco.POPUP_CHECKED_KEY: showPopupChecked,
NotificationSettingEntityDeco.VISIBILITY_CHANGED_CALLBACK_KEY: self.OnVisibilityEntryChangedNew,
NotificationSettingEntityDeco.POPUP_CHANGED_CALLBACK_KEY: self.OnShowPopupEntryChangedNew,
NotificationSettingEntityDeco.GETMENU_CALLBACK: self.GetMenuForEntry,
'isGroup': isGroup,
'id': id})
def OnVisibilityEntryChangedNew(self, isGroup, id, checked):
if not isGroup:
self._setVisibilitySettingForNotification(id, checked)
def OnShowPopupEntryChangedNew(self, isGroup, id, checked):
if not isGroup:
self._setPopupSettingForNotification(id, checked)
def _setVisibilitySettingForNotification(self, id, on):
notificationData = self.notificationSettingData[id]
notificationData.showAtAll = on
self.SaveAllData()
def _setPopupSettingForNotification(self, id, on):
notificationData = self.notificationSettingData[id]
notificationData.showPopup = on
self.SaveAllData()
def SaveAllData(self):
self.notificationSettingHandler.SaveSettings(self.notificationSettingData)
def GetMenuForEntry(self, isGroup, nodeID):
if isGroup or not self.isDeveloperMode:
return []
else:
return [('spawnNotification %s' % nodeID, self.OnSpawnNotificationClick, [nodeID])]
def OnSpawnNotificationClick(self, notificationID):
mapper = NotificationFormatMapper()
newFormatter = mapper.GetFormatterForType(notificationID)
if newFormatter:
import blue
data = newFormatter.MakeSampleData()
sm.ScatterEvent('OnNotificationReceived', 123, notificationID, 98000001, blue.os.GetWallclockTime(), data=data)
else:
from notifications.client.development.notificationDevUI import FakeNotificationMaker
maker = FakeNotificationMaker()
counter = 1
agentStartID = 3008416
someAgentID = agentStartID + counter
senderID = 98000001
corpStartID = 1000089
someCorp = corpStartID + counter
maker.ScatterSingleNotification(counter, notificationID, senderID, someAgentID, someCorp)
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
2744e132cf91f846554720a928a3317943904bd2 | 40e3ee2f6048c518b5bc8ddb3b128c0f3e5d92a3 | /utils.py | 2c5cbb61173976d8a4fca6bb03cc95b4c4f67a1f | [
"MIT"
] | permissive | shubhampachori12110095/TextSentimentClassification | d1b731ff4ca1dc04aa776ab4087d0753c65d485d | fbc67289f40cf01a737ac4ff8face7d75777a226 | refs/heads/master | 2020-04-07T09:35:54.663798 | 2018-04-24T02:49:53 | 2018-04-24T02:49:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,813 | py | import pandas as pd
import numpy as np
import os
from configs import general_config
from data_helpers.utils import readNewFile,loadDict
import logging
import tensorflow as tf
def get_num_params():
# for v in tf.trainable_variables():
# print(v.name)
# print(np.prod(v.get_shape().as_list()))
return np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
if "embedding_matrix" not in v.name.split(":")[0].split("/")
and "embedding_matrix_" not in v.name.split(":")[0].split("/")])
class PaddedDataIterator(object):
def __init__(self, loadPath,vocab2intPath,sent_len_cut=None):
indices, sentences, labels = readNewFile(file=loadPath, vocab2intPath=vocab2intPath)
num_words = [len(sentence) for sentence in sentences]
if isinstance(sent_len_cut, int):
num_words_=[min(len(sentence),sent_len_cut) for sentence in sentences]
else:
num_words_=num_words[:]
self.df = pd.DataFrame({"id": indices, "sentence": sentences, "label": labels,
"sentence_length": num_words,"sentence_length_":num_words_})
self.total_size=len(self.df)
self.cursor=0
self.loop=0
self.max_len=general_config.max_seq_len
self.shuffle()
def shuffle(self):
self.df=self.df.sample(frac=1).reset_index(drop=True)
self.cursor=0
def next(self,batch_size,need_all=False):
        if need_all:  # iterate over all data exactly once, typically used at test time
if self.cursor>=self.total_size:
self.shuffle()
self.loop+=1
else:
batch_size = min(batch_size, self.total_size - self.cursor)
else:
if self.cursor+batch_size>self.total_size:
self.shuffle()
self.loop += 1
res=self.df.ix[self.cursor:self.cursor+batch_size-1,:]
self.cursor+=batch_size
res_=np.zeros(shape=[batch_size,self.max_len],dtype=np.int32)
for idx,res_r in enumerate(res_):
            # pad the short ones, cut the long ones
tmp_len=min(self.max_len,res["sentence_length"].values[idx])
res_r[:tmp_len]=res["sentence"].values[idx][:tmp_len]
return res["id"].values,res_,res["label"].values,res["sentence_length_"].values
class BucketedDataIterator(object):
def __init__(self, loadPath,vocab2intPath,num_buckets=5):
indices, sentences, labels = readNewFile(file=loadPath, vocab2intPath=vocab2intPath)
num_words = [len(sentence) for sentence in sentences]
self.df = pd.DataFrame({"id": indices, "sentence": sentences, "label": labels,
"sentence_length": num_words})
df=self.df.sort_values("sentence_length").reset_index(drop=True)
self.total_size=len(df)
part_size=self.total_size//num_buckets
self.dfs=[]
for i in range(num_buckets):
self.dfs.append(df.ix[i*part_size:(i+1)*part_size-1])
self.dfs[num_buckets-1].append(df.ix[num_buckets*part_size:self.total_size-1])
self.num_buckets=num_buckets
self.cursor=np.array([0]*num_buckets)
self.p_list=[1/self.num_buckets]*self.num_buckets
self.loop=0
self.max_len=general_config.max_seq_len
self.shuffle()
def shuffle(self):
for i in range(self.num_buckets):
self.dfs[i]=self.dfs[i].sample(frac=1).reset_index(drop=True)
self.cursor[i]=0
def next(self,batch_size,need_all=False):
for i in range(self.num_buckets):
if need_all:
if self.cursor[i]>=len(self.dfs[i]):
self.p_list[i]=0
else:
if self.cursor[i]+batch_size>len(self.dfs[i]):
self.p_list[i] = 0
if sum(self.p_list) == 0:
self.shuffle()
self.loop += 1
self.p_list = [1 / self.num_buckets] * self.num_buckets
else:
times = 1 / sum(self.p_list)
self.p_list = [times * p for p in self.p_list]
selected=np.random.choice(a=np.arange(self.num_buckets),size=1,p=self.p_list)[0]
if need_all:
batch_size=min(batch_size,len(self.dfs[selected])-self.cursor[selected])
res=self.dfs[selected].ix[self.cursor[selected]:self.cursor[selected]+batch_size-1,:]
self.cursor[selected]+=batch_size
tmp_max_len=max(res["sentence_length"].values)
max_len=min(tmp_max_len,self.max_len)
res_=np.zeros(shape=[batch_size,max_len],dtype=np.int32)
for idx,res_r in enumerate(res_):
            # pad the short ones, cut the long ones
tmp_len=min(max_len,res["sentence_length"].values[idx])
res_r[:tmp_len]=res["sentence"].values[idx][:tmp_len]
return res["id"].values,res_,res["label"].values,res["sentence_length"].values
def ensure_dir_exist(dir):
if not os.path.exists(dir):
os.makedirs(dir)
return dir
def WriteToSubmission(res,fileName):
fileDir=os.path.dirname(fileName)
ensure_dir_exist(fileDir)
tmp=pd.DataFrame(res,columns=["id","label"])
tmp=tmp.sort_values(by="id",axis=0,ascending=True)
tmp.to_csv(fileName,index=False)
"""
将单词列表形式的句子转为句子列表形式的文档,
以"."、"?"、"!"为句子分隔符。
"""
def sentence2doc(words,v2i=None):
if v2i is None:
selected=[".","?","!"]
else:
selected=[v2i["."],v2i["?"],v2i["!"]]
doc=[]
sentence=[]
for word in words:
sentence.append(word)
if word in selected:
doc.append(sentence)
sentence=[]
if len(sentence)>0:
doc.append(sentence)
if len(doc)==0:
print(words)
return doc
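# Example (word-level input, no vocabulary mapping):
#     sentence2doc(["i", "agree", ".", "so", "do", "i", "!"])
#     -> [["i", "agree", "."], ["so", "do", "i", "!"]]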
class BucketedDataIteratorForDoc(object):
def __init__(self, loadPath,vocab2intPath,num_buckets=5):
indices, sentences, labels = readNewFile(file=loadPath, vocab2intPath=vocab2intPath)
v2i=loadDict(vocab2intPath)
docs=[]
num_sentences=[]
num_words=[]
num_words_flat=[]
for sentence in sentences:
doc=sentence2doc(sentence,v2i)
docs.append(doc)
num_sentences.append(len(doc))
num_words_=[len(_) for _ in doc]
num_words.append(num_words_)
num_words_flat.extend(num_words_)
# print(max(num_sentences))
# print(max(num_words_flat))
# print(num_words[:5])
self.df = pd.DataFrame({"id": indices, "doc":docs, "label": labels,
"doc_length": num_sentences,"sentence_length":num_words})
df=self.df.sort_values("doc_length").reset_index(drop=True)
self.total_size=len(df)
part_size=self.total_size//num_buckets
self.dfs=[]
for i in range(num_buckets):
self.dfs.append(df.ix[i*part_size:(i+1)*part_size-1])
self.dfs[num_buckets-1].append(df.ix[num_buckets*part_size:self.total_size-1])
self.num_buckets=num_buckets
self.cursor=np.array([0]*num_buckets)
self.p_list=[1/self.num_buckets]*self.num_buckets
self.loop=0
self.shuffle()
def shuffle(self):
for i in range(self.num_buckets):
self.dfs[i]=self.dfs[i].sample(frac=1).reset_index(drop=True)
self.cursor[i]=0
def next(self,batch_size,need_all=False):
for i in range(self.num_buckets):
if need_all:
if self.cursor[i]>=len(self.dfs[i]):
self.p_list[i]=0
else:
if self.cursor[i]+batch_size>len(self.dfs[i]):
self.p_list[i] = 0
if sum(self.p_list) == 0:
self.shuffle()
self.loop += 1
self.p_list = [1 / self.num_buckets] * self.num_buckets
else:
times = 1 / sum(self.p_list)
self.p_list = [times * p for p in self.p_list]
selected=np.random.choice(a=np.arange(self.num_buckets),size=1,p=self.p_list)[0]
if need_all:
batch_size=min(batch_size,len(self.dfs[selected])-self.cursor[selected])
res=self.dfs[selected].ix[self.cursor[selected]:self.cursor[selected]+batch_size-1,:]
self.cursor[selected]+=batch_size
max_doc_len=np.max(res["doc_length"].values)
sentence_length_flat=[]
for l in res["sentence_length"].values:
sentence_length_flat.extend(l)
max_sen_len=np.max(sentence_length_flat)
res_=np.zeros(shape=[batch_size,max_doc_len,max_sen_len],dtype=np.int32)
res_sen_len=np.zeros(shape=[batch_size,max_doc_len],dtype=np.int32)
for b in range(batch_size):
doc_len=res["doc_length"].values[b]
for d in range(doc_len):
sen_len=res["sentence_length"].values[b][d]
                # pad the short ones
res_[b,d,:sen_len]=res["doc"].values[b][d]
res_sen_len[b,d]=res["sentence_length"].values[b][d]
# res_=np.reshape(res_,newshape=(batch_size,-1))
# print(res_.shape)
# print(res_sen_len.shape)
return res["id"].values,res_,res["label"].values,res["doc_length"].values,res_sen_len
def my_logger(logging_path):
    # create the logger
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
logger.handlers = []
assert len(logger.handlers) == 0
handler = logging.FileHandler(logging_path)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# console.setFormatter(formatter)
logger.addHandler(handler)
logger.addHandler(console)
return logger | [
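# Usage sketch (the log path is a placeholder):
#     logger = my_logger("train.log")   # writes to the file and echoes to stdout
#     logger.info("epoch 1 done")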
"wslc1314@gmail.com"
] | wslc1314@gmail.com |
24eca002962dcec2c9b5fda770719d430b59cf20 | 919afc2e687d72b11f51619bd1c05aec7cd48654 | /app.py | dcd7a9434e4d0bb57d2ce95adc3da2e19a8b4ef3 | [] | no_license | himanshu0137/dataPeaceAPI | 6c288a9a25c8878100f3badf531030b268d4a413 | 2dcd701923ab7b94f001da61df40d1b570d2b160 | refs/heads/master | 2021-07-23T04:11:58.113913 | 2020-03-04T19:57:35 | 2020-03-04T19:57:35 | 244,989,142 | 0 | 0 | null | 2021-03-20T03:03:18 | 2020-03-04T19:44:28 | Python | UTF-8 | Python | false | false | 437 | py | from flask_api import FlaskAPI
from controllers import Routes
from dal import initDb
# Initializing Flask API
app = FlaskAPI(__name__, instance_relative_config=True)
app.config.from_pyfile('config.py', silent=True)
# Register route blueprints to keep each module separate from the main file
for route in Routes:
app.register_blueprint(route, url_prefix=f'/api/{route.name}')
# Initialize the database
initDb(app)
if __name__ == "__main__":
app.run() | [
"himanshu.bansal@quovantis.com"
] | himanshu.bansal@quovantis.com |
0a01687df2c089a195511f68aa806930a226fa5e | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /gaussiana/ch3_2019_08_28_17_27_12_718860.py | 40dd7d3f989b5e4d580c39c5026bf8cd7af9b70f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | import math
def calcula_gaussiana(x,mi,sigma):
expo=(-0.5)*((x-mi)/sigma)**2
    f=(1/(sigma*(2*math.pi)**0.5))*math.exp(expo)
return f
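# Example: at the mean (x == mi) the exponent is 0 and f reduces to
# 1 / (sigma * sqrt(2*pi)); with sigma = 1 that is about 0.3989.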
| [
"you@example.com"
] | you@example.com |
445c279e9840d42246cf9d5793b71853026baf52 | f00eb2b355aa5b24a6b931c32ba9736de95d18c3 | /programs/basic_joystick/lib/sqrt.py | c47c71fd61ce64506c36b27f10cf6bfb42b3d772 | [] | no_license | toomone/orb9k_circuitpython | edb9ffd20c620444bcee6a739d0786b97015602d | 4c505a955162614056408b6820afe203eb6934b5 | refs/heads/master | 2023-02-18T14:51:11.679817 | 2021-01-24T20:40:06 | 2021-01-24T20:40:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # from https://codegolf.stackexchange.com/questions/85555/the-fastest-square-root-calculator so we don't have to import math
def sqrt(n):
# 3 iterations of newton's method, hard-coded
# normalize
# find highest bit
highest = 1
sqrt_highest = 1
while highest < n:
highest <<= 2
sqrt_highest <<= 1
n /= highest+0.0
result = (n/4) + 1
result = (result/2) + (n/(result*2))
result = (result/2) + (n/(result*2))
return result*sqrt_highest
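# Example (approximate): sqrt(16) returns ~4.001. The bit-shift loop first maps
# n into (0.25, 1], where the three hard-coded Newton steps stay accurate.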
| [
"vbputz@gmail.com"
] | vbputz@gmail.com |
fc2a32429ee94a2b10f1abdb867af943035ad10e | be0ed66aff9e77785421f7631bea94c7c6994331 | /python/669_TrimaBinarySearchTree.py | 729649148b722d53b16a1c93af9713eff3a17758 | [] | no_license | FLameSunRisE/leetcode | adf91c386686768337c855381274c227862c26ec | 8f7fc41bfaf1c86c19075be83f135449c6d7fe07 | refs/heads/master | 2023-08-16T18:44:42.633045 | 2023-08-07T09:36:06 | 2023-08-07T09:36:06 | 167,931,797 | 0 | 3 | null | 2021-02-13T03:12:16 | 2019-01-28T09:08:55 | Python | UTF-8 | Python | false | false | 2,124 | py | # Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def stringToTreeNode(input):
input = input.strip()
input = input[1:-1]
if not input:
return None
inputValues = [s.strip() for s in input.split(',')]
root = TreeNode(int(inputValues[0]))
nodeQueue = [root]
front = 0
index = 1
while index < len(inputValues):
node = nodeQueue[front]
front = front + 1
item = inputValues[index]
index = index + 1
if item != "null":
leftNumber = int(item)
node.left = TreeNode(leftNumber)
nodeQueue.append(node.left)
if index >= len(inputValues):
break
item = inputValues[index]
index = index + 1
if item != "null":
rightNumber = int(item)
node.right = TreeNode(rightNumber)
nodeQueue.append(node.right)
return root
def treeNodeToString(root):
if not root:
return "[]"
output = ""
queue = [root]
current = 0
while current != len(queue):
node = queue[current]
current = current + 1
if not node:
output += "null, "
continue
output += str(node.val) + ", "
queue.append(node.left)
queue.append(node.right)
return "[" + output[:-2] + "]"
class Solution:
def trimBST(self, root: 'TreeNode', L: 'int', R: 'int') -> 'TreeNode':
if not root:
return root
if root.val < L:
return self.trimBST(root.right, L, R)
if root.val > R:
return self.trimBST(root.left, L, R)
root.left = self.trimBST(root.left, L, R)
root.right = self.trimBST(root.right, L, R)
return root
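# BST ordering lets whole subtrees be pruned at once: a root below L can keep
# in-range nodes only in its right subtree (and symmetrically above R), so the
# trim completes in a single O(n) pass.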
def main():
root = stringToTreeNode("[1,0,2]")
L = 1
R = 2
ret = Solution().trimBST(root, L, R)
out = treeNodeToString(ret)
print(out)
if __name__ == '__main__':
main()
| [
"flamesunrises@gmail.com"
] | flamesunrises@gmail.com |
b52b272677a9e03f4419a4ff839ac72a88d3320a | 0b9c154323ffe73679d28abae0a449140ebea074 | /main.py | 7170ea278c8ac1919bcb1a295b5f31002285dac6 | [] | no_license | AkshachRd/dwh-edu-task | c4b43ffbcf4886431c58d9156ab5d2e7ece057f6 | b3e3995474408b8e43a603b403bb086167402ae7 | refs/heads/master | 2023-06-21T11:39:29.316233 | 2021-08-05T05:44:40 | 2021-08-05T05:44:40 | 381,762,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | from dotenv import load_dotenv
import requests
from os import environ
load_dotenv()
CURRENCY_API_KEY = environ.get("CURRENCY_API_KEY")
url = "https://currency-converter5.p.rapidapi.com/currency/convert"
querystring = {"format": "json", "from": "USD", "to": "RUB, EUR, CNY", "amount": "1"}
headers = {
'x-rapidapi-key': CURRENCY_API_KEY,
'x-rapidapi-host': "currency-converter5.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text) | [
"dr.chashka@gmail.com"
] | dr.chashka@gmail.com |
bfebf9a045f1ac38ea25c1b9ab4fa817a6e389bd | ec4f4aa5e22131bb094e6afc5af35dd37d68d3df | /python-flask/swagger_server/models/egress_mapping_schema.py | 5cf5410b16a8a833feba168862c1df63e77c77e2 | [] | no_license | ajragusa/OpenConfigAPI | e4224212dac3fb125ebff2ebedda930c9c979e71 | 485da3b2b96d568f857ccc931a86d7e7e9f3cab4 | refs/heads/master | 2020-05-29T23:01:05.287841 | 2019-05-30T17:16:39 | 2019-05-30T17:16:39 | 189,425,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,272 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.vlan_logical_egress_mapping_config import VlanLogicalEgressMappingConfig # noqa: F401,E501
from swagger_server import util
class EgressMappingSchema(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, state: VlanLogicalEgressMappingConfig=None, config: VlanLogicalEgressMappingConfig=None): # noqa: E501
"""EgressMappingSchema - a model defined in Swagger
:param state: The state of this EgressMappingSchema. # noqa: E501
:type state: VlanLogicalEgressMappingConfig
:param config: The config of this EgressMappingSchema. # noqa: E501
:type config: VlanLogicalEgressMappingConfig
"""
self.swagger_types = {
'state': VlanLogicalEgressMappingConfig,
'config': VlanLogicalEgressMappingConfig
}
self.attribute_map = {
'state': 'state',
'config': 'config'
}
self._state = state
self._config = config
@classmethod
def from_dict(cls, dikt) -> 'EgressMappingSchema':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The EgressMappingSchema of this EgressMappingSchema. # noqa: E501
:rtype: EgressMappingSchema
"""
return util.deserialize_model(dikt, cls)
@property
def state(self) -> VlanLogicalEgressMappingConfig:
"""Gets the state of this EgressMappingSchema.
        State for egress VLAN stack behaviors for packets that are destined for output via this subinterface. # noqa: E501
:return: The state of this EgressMappingSchema.
:rtype: VlanLogicalEgressMappingConfig
"""
return self._state
@state.setter
def state(self, state: VlanLogicalEgressMappingConfig):
"""Sets the state of this EgressMappingSchema.
        State for egress VLAN stack behaviors for packets that are destined for output via this subinterface. # noqa: E501
:param state: The state of this EgressMappingSchema.
:type state: VlanLogicalEgressMappingConfig
"""
self._state = state
@property
def config(self) -> VlanLogicalEgressMappingConfig:
"""Gets the config of this EgressMappingSchema.
Configuration for egress VLAN stack behaviors for packets that are destined for output via this subinterface. # noqa: E501
:return: The config of this EgressMappingSchema.
:rtype: VlanLogicalEgressMappingConfig
"""
return self._config
@config.setter
def config(self, config: VlanLogicalEgressMappingConfig):
"""Sets the config of this EgressMappingSchema.
Configuration for egress VLAN stack behaviors for packets that are destined for output via this subinterface. # noqa: E501
:param config: The config of this EgressMappingSchema.
:type config: VlanLogicalEgressMappingConfig
"""
self._config = config
| [
"aragusa@globalnoc.iu.edu"
] | aragusa@globalnoc.iu.edu |
57a99dee4b6da30f4e1c4ad270261ca0246f6c77 | fe5ed850257cc8af4df10de5cffe89472eb7ae0b | /小小的Python编程 源代码/b代码/bak_code2-32/Chapter16/16.1queue.py | 3c1c6d17d437d81edd5a203eb016cec33806edb3 | [] | no_license | hujianli94/Python-code | a0e6fe6362868407f31f1daf9704063049042d9e | fe7fbf59f1bdcbb6ad95a199262dd967fb04846c | refs/heads/master | 2020-09-13T01:11:34.480999 | 2019-11-19T05:29:59 | 2019-11-19T05:29:59 | 222,614,662 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | #队列演示
import queue  # import the queue module
line=queue.Queue()  # create a queue
if line.empty():  # check whether the queue is empty
    print("Queue 'line' is empty")
# Put elements into the queue
for person in range(50):
    line.put("Passerby"+str(person))
line.put("小小")
line.put("爸爸")
#get()示例
x=line.get()
print(x)
x=line.get()
print(x)
# Pop and print the remaining queue elements in order
person=[]
for i in range(line.qsize()):
person.append(line.get())
print(person)
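# The queue is FIFO: the two get() calls above return "Passerby0" and
# "Passerby1"; person then collects "Passerby2" ... "Passerby49", "小小", "Dad".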
| [
"1879324764@qq.com"
] | 1879324764@qq.com |
5415155cfc7c687c70d6bc97f0e21bd94815c8e4 | f69030f82976394dbb1a9337658b065f02f4b307 | /roles/copy_directories/files/load_testing_python_scripts/random_data_generator/source/random_data_generator.py | cfb4357c2b8f45a6447710d0a6b452c966f817b5 | [] | no_license | RGirard94/TimescaleDB_Test | 644070d9921de8a4eb53920c4f636f55d649e594 | 0db912032dc4680fc580f5cc347d0b5222d70285 | refs/heads/master | 2020-06-30T15:19:12.584952 | 2019-08-09T12:51:36 | 2019-08-09T12:51:36 | 200,869,589 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,033 | py | # coding: utf-8
import argparse
import configparser
import datetime
from datetime import date
import string
import random
import os
import shutil
import logging
# CREATE LOGGERS #
LOG_FORMAT = '%(asctime)s :: %(levelname)s :: %(message)s'
RR_INTERVAL = 'RrInterval'
MOTION_ACCELEROMETER = 'MotionAccelerometer'
MOTION_GYROSCOPE = 'MotionGyroscope'
TIMESTAMP_PROBABLE_INTERVAL_BY_MEASUREMENT_DICT = {
RR_INTERVAL: [200, 400],
MOTION_ACCELEROMETER: [10, 30],
MOTION_GYROSCOPE: [10, 30]
}
RANDOM_STRING_PARAMETER = {
RR_INTERVAL: [1, '{:.0f}', ''],
MOTION_ACCELEROMETER: [3, '{:.8f}', '2G'],
MOTION_GYROSCOPE: [3, '{:.6f}', '']
}
USER = {
'SIZE': [8, 4, 4, 4, 12],
'CHAR_SET': string.ascii_lowercase + string.digits,
'SEPARATOR': '-'
}
DEVICE = {
'SIZE': [2, 2, 2, 2, 2, 2],
'CHAR_SET': string.ascii_uppercase + string.digits,
'SEPARATOR': ':'
}
def string_generator(size: int, chars: str) -> str:
"""
Function generating random strings
:param size: Output string size
:param chars: set of value from which the value is randomly chosen
:return: random string
"""
return ''.join(random.choice(chars) for _ in range(size))
def timestamp_generator(number_of_data: int, measurement_type: str, logs: bool, year=date.today().year, month=date.today().month,
day=date.today().day, delta=1) -> list:
"""
Function generating a list of timestamp
:param number_of_data: number of generated data
:param measurement_type: type of measurement, can be either RR_INTERVAL, MOTION_ACCELEROMETER or MOTION_GYROSCOPE
:param year: chosen year for date
:param month: chosen month for date
:param day: chosen day for date
:param delta: random time computed in this interval
:return: list of timestamp
"""
# CREATE LOGGER FOR DEBUG ABOUT TIMESTAMP #
logger_timestamp = logging.getLogger('timestamp_debug')
if logs:
logger_timestamp.setLevel(logging.DEBUG)
else:
logger_timestamp.setLevel(logging.INFO)
stream_handler_timestamp = logging.StreamHandler()
stream_handler_timestamp.setLevel(logging.DEBUG)
stream_handler_timestamp.setFormatter(logging.Formatter(LOG_FORMAT))
logger_timestamp.addHandler(stream_handler_timestamp)
if number_of_data > 0:
logger_timestamp.info('list of {} timestamp to create'.format(number_of_data))
start_timestamp = (datetime.datetime(year, month, day) + random.random() * datetime.timedelta(days=delta))
logger_timestamp.debug('timestamp number 1 : {}'.format(start_timestamp))
timestamp_list = list()
timestamp_list.append(start_timestamp)
timestamp = start_timestamp
timedelta_lower_bound = TIMESTAMP_PROBABLE_INTERVAL_BY_MEASUREMENT_DICT[measurement_type][0]
timedelta_upper_bound = TIMESTAMP_PROBABLE_INTERVAL_BY_MEASUREMENT_DICT[measurement_type][1]
for i in range(number_of_data - 1):
timestamp = timestamp + datetime.timedelta(
milliseconds=random.randint(timedelta_lower_bound, timedelta_upper_bound))
timestamp_list.append(timestamp)
logger_timestamp.debug('timestamp number {} : {}'.format(i+2, timestamp))
timestamp_list = [i.strftime('%Y-%m-%dT%H:%M:%S.%f')[0:-3] for i in timestamp_list]
logger_timestamp.removeHandler(stream_handler_timestamp)
return timestamp_list
else:
logger_timestamp.warning("No timestamp needed or erroneous number of data : {}".format(number_of_data))
logger_timestamp.removeHandler(stream_handler_timestamp)
return []
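# Example call (illustrative): five RR-interval timestamps starting somewhere
# on 2019-01-01, spaced 200-400 ms apart per the interval table above:
#     ts = timestamp_generator(5, RR_INTERVAL, logs=False, year=2019, month=1, day=1)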
def persist_file_to_disk(user_id: str, timestamp: str, build_str: str, measure_type: str):
"""
Function storing string in a JSON file
:param user_id: user's id
:param timestamp: generated timestamp for the user
:param build_str: JSON string
:param measure_type: chosen field type (RrInterval, MotionGyroscope, MotionAccelerometer)
:return: JSON file's name. The main goal is to store the string in a JSON file.
"""
json_file_name = user_id + '_' + measure_type + '_' + timestamp.replace(':', '').replace('.', '') + ".json"
json_file_name = json_file_name.replace('"', '')
with open(json_file_name, 'w') as outfile:
outfile.write(build_str)
shutil.move(json_file_name, files_processing_paths["generated_files_directory"] + json_file_name)
logger_process.info("\nfile {} moved in directory {}\n".format(json_file_name, files_processing_paths["generated_files_directory"]))
def generate_random_string(string_type: dict) -> str:
"""
Function generating a random string based on sub sequences whose size are stored in a list
:param string_type: dictionary with following shape
EXAMPLE = {
'SIZE': [2, 2, 2, 2],
'CHAR_SET': string.ascii_lowercase + string.digits,
'SEPARATOR': '-'
}
:return: string with following shape "l5-23-kj-9m"
"""
if string_type['SIZE'] != [] and not all(v == 0 for v in string_type['SIZE']):
random_string = '"'
for value in string_type['SIZE']:
random_string = random_string + string_generator(size=value, chars=string_type['CHAR_SET']) + string_type['SEPARATOR']
random_string = random_string[0:-1]
random_string = random_string + '"'
return random_string
else:
print("{} is empty or only has sub sequences with length 0".format(string_type['SIZE']))
return '""'
def generate_random_data_point(measurement: str, timestamp: str, logs: bool) -> str:
"""
Function generating a random data point function of measurement
:param measurement: RrInterval, MotionGyroscope or MotionAccelerometer
:param timestamp:
:return: a random data point
"""
# CREATE LOGGER FOR DEBUG ABOUT DATA #
logger_data = logging.getLogger('data_debug')
if logs:
logger_data.setLevel(logging.DEBUG)
else:
logger_data.setLevel(logging.INFO)
stream_handler_data = logging.StreamHandler()
stream_handler_data.setLevel(logging.DEBUG)
stream_handler_data.setFormatter(logging.Formatter(LOG_FORMAT))
logger_data.addHandler(stream_handler_data)
if measurement in (RR_INTERVAL, MOTION_ACCELEROMETER, MOTION_GYROSCOPE):
data = '"'
data = data + timestamp + " "
for i in range(RANDOM_STRING_PARAMETER[measurement][0]):
if measurement == RR_INTERVAL:
random_value = random.randint(300, 2000)
else:
random_value = random.uniform(-2, 2)
data = data + str(RANDOM_STRING_PARAMETER[measurement][1].format(random_value)) + " "
if RANDOM_STRING_PARAMETER[measurement][2] != '':
data = data + RANDOM_STRING_PARAMETER[measurement][2] + " "
data = data[0:-1] + '"'
logger_data.debug('{} created'.format(data))
logger_data.removeHandler(stream_handler_data)
return data
else:
raise ValueError("Unknown measurement name : ", measurement)
def build_signal_to_data_points_count_dict(nb_data_rr: int, nb_data_ma: int, nb_data_mg: int) -> dict:
    """
    Function building a dictionary mapping each measurement to its required number of data points
    :param nb_data_rr: Number of data points for RrInterval (e.g. 8000)
    :param nb_data_ma: Number of data points for MotionAccelerometer (e.g. 7000)
    :param nb_data_mg: Number of data points for MotionGyroscope (e.g. 6000)
    :return: dictionary with measurement as key and number of data points as value
    """
signal_to_data_points_count_dict = {
RR_INTERVAL: nb_data_rr,
MOTION_ACCELEROMETER: nb_data_ma,
MOTION_GYROSCOPE: nb_data_mg
}
return signal_to_data_points_count_dict
def generate_data_files(requirement_dict: dict, logs: bool):
"""
Function creating a directory containing JSON files
:param requirement_dict: dictionary with measurement as key and amount of data per measurement as value
:return: directory with JSON files containing data. Each file contains 5000 data
"""
# Creates random user and device
user_id = generate_random_string(USER)
device_address = generate_random_string(DEVICE)
logger_process.info('\n### start process for USER : {} and DEVICE {} ###\n'.format(user_id, device_address))
for measurement in requirement_dict:
if requirement_dict[measurement] > 0:
logger_process.info('\t # measurement {} requires {} data'.format(measurement, requirement_dict[measurement]))
# Creates a list of timestamps for the measurement
starting_date = config["Data Generation Starting Date"]
timestamp_list = timestamp_generator(requirement_dict[measurement], measurement, logs, int(starting_date["year"]),
int(starting_date["month"]), int(starting_date["day"]))
# Variables created to split create a JSON file every 5000 data
nb_data_per_file = config["Number Of Data Per Files"]
quotient, rest = requirement_dict[measurement] // int(nb_data_per_file["nb_data"]), requirement_dict[
measurement] % int(nb_data_per_file["nb_data"])
logger_process.info('\t{} file(s) with {} data and 1 file with {} data'.format(quotient, nb_data_per_file["nb_data"],
rest))
for i in range(quotient):
build_str = '{"user":' + user_id + ',"type":"' + measurement + '","device_address":' + device_address + ',"data":['
for j in range(int(nb_data_per_file["nb_data"])):
build_str = build_str + generate_random_data_point(measurement, timestamp_list[
i * int(nb_data_per_file["nb_data"]) + j], logs) + ','
build_str = build_str[0:-1] + ']}'
persist_file_to_disk(user_id, timestamp_list[i * int(nb_data_per_file["nb_data"])],
build_str, measurement)
# Creates a file containing the remaining data if exist
if rest != 0:
build_str = '{"user":' + user_id + ',"type":"' + measurement + '","device_address":' + device_address + ',"data":['
for j in range(rest):
build_str = build_str + generate_random_data_point(measurement, timestamp_list[
quotient * int(nb_data_per_file["nb_data"]) + j], logs) + ','
build_str = build_str[0:-1] + ']}'
persist_file_to_disk(user_id, timestamp_list[quotient * int(nb_data_per_file["nb_data"])],
build_str, measurement)
else:
print('No data required for measurement {}'.format(measurement))
def convert_hours_to_number_of_data_points(hours: int) -> dict:
    return {RR_INTERVAL: hours*60*70, MOTION_ACCELEROMETER: hours*60*60*50, MOTION_GYROSCOPE: hours*60*60*50}
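# Example: one hour of recording yields 4200 RR intervals (70 per minute) and
# 180000 samples each for the accelerometer and gyroscope (50 Hz).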
if __name__ == "__main__":
RR_INTERVAL_DATA_POINTS_COUNT = 'RrInterval_datapoints_count'
MOTION_ACC_DATA_POINTS_COUNT = 'MotionAcc_datapoints_count'
MOTION_GYR_DATA_POINTS_COUNT = 'MotionGyr_datapoints_count'
NB_HOURS_OF_GENERATED_DATA_POINTS = 'Hours_of_data_points'
NB_OF_USERS = 'Nb_of_users'
ap = argparse.ArgumentParser()
ap.add_argument('-rr', '--' + RR_INTERVAL_DATA_POINTS_COUNT, type=int, required=False, default=0,
help="number of data point(s) for RrInterval")
ap.add_argument('-ma', '--' + MOTION_ACC_DATA_POINTS_COUNT, type=int, required=False, default=0,
help="number of data point(s) for MotionAccelerometer")
ap.add_argument('-mg', '--' + MOTION_GYR_DATA_POINTS_COUNT, type=int, required=False, default=0,
help="number of data point(s) for MotionGyroscope")
ap.add_argument('-hr', '--' + NB_HOURS_OF_GENERATED_DATA_POINTS, type=int, required=False, default=0,
help="number of hour(s) during which data have been generated")
ap.add_argument('-nbu', '--' + NB_OF_USERS, type=int, required=False, default=0,
help="number of users")
args = vars(ap.parse_args())
config = configparser.ConfigParser()
config.read('/opt/docker-data/tests/load_testing_python_scripts/random_data_generator/source/config.conf')
files_processing_paths = config["Paths"]
if os.path.exists(files_processing_paths["generated_files_directory"]):
shutil.rmtree(files_processing_paths["generated_files_directory"])
os.makedirs(files_processing_paths["generated_files_directory"])
else:
os.makedirs(files_processing_paths["generated_files_directory"])
# CREATE LOGGER FOR INFO ABOUT PROCESSING #
logger_process = logging.getLogger('process_info')
logger_process.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger_process.addHandler(stream_handler)
hours_of_generated_data = convert_hours_to_number_of_data_points(args[NB_HOURS_OF_GENERATED_DATA_POINTS])
# PROCESS #
"""data_points_requirement = build_signal_to_data_points_count_dict(args[RR_INTERVAL_DATA_POINTS_COUNT],
args[MOTION_ACC_DATA_POINTS_COUNT],
args[MOTION_GYR_DATA_POINTS_COUNT])"""
for i in range(args[NB_OF_USERS]):
data_points_requirement = build_signal_to_data_points_count_dict(hours_of_generated_data[RR_INTERVAL],
hours_of_generated_data[MOTION_ACCELEROMETER],
hours_of_generated_data[MOTION_GYROSCOPE])
generate_data_files(data_points_requirement, config.getboolean('Logs', 'bool'))
| [
"rgirard@octo.com"
] | rgirard@octo.com |
d3f18aa4bdaa8a91e26996c66b3d19356a8227f1 | fa8fc38ad86ee96810fcf4ca8d5cc873ef5cdf9c | /settings.py | 7bb05380cb0f864ae42d3b545962f053be575386 | [] | no_license | pekkajauhi/alien_invaders | dfa7c4e5bb635485c999f3e9d4012943e43d4abd | f40e268d65734064b168af4a1a34d2c3ed3cd72f | refs/heads/master | 2020-06-22T22:02:10.865968 | 2019-07-23T13:17:03 | 2019-07-23T13:17:03 | 198,410,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | class Settings():
"""A class to store all settings for Alien Invasion"""
def __init__(self):
"""Initialize the game's static settings."""
# Screen settings
self.screen_width = 1200
self.screen_height = 600
#self.bg_color = (230,230,230)
self.bg_color = (0,0,25)
# Ship settings
self.ship_limit = 1
# Bullet settings
self.bullet_width = 3
self.bullet_height = 15
        self.bullet_color = (60, 60, 60)
self.bullets_allowed = 3
# Alien settings
self.fleet_drop_speed = 10
# How quickly the game speeds up
self.speedup_scale = 1.1
# How quickly alien point values increase
self.score_scale = 1.5
self.initialize_dynamic_settings()
def initialize_dynamic_settings(self):
"""Initialize settings that change throughout the game."""
self.ship_speed_factor = 1.5
self.bullet_speed_factor = 3
self.alien_speed_factor = 1
# Scoring
self.alien_points = 50
# fleet direction of 1 represents right; -1 represents left.
self.fleet_direction = 1
def increase_speed(self):
"""Increase speed settings and alien point values."""
self.ship_speed_factor *= self.speedup_scale
self.bullet_speed_factor *= self.speedup_scale
self.alien_speed_factor *= self.speedup_scale
self.alien_points = int(self.alien_points * self.score_scale)
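        # Worked example with the defaults above: after one call the ship speed
        # becomes 1.5 * 1.1 = 1.65 and alien_points becomes int(50 * 1.5) = 75.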
| [
"pekkajauh@gmail.com"
] | pekkajauh@gmail.com |
12d00efdb514f1d4ed0ee3a133482a16b185c430 | fd001df16f819f657526c4824bc77aae8c368f0b | /vip/main.py | 6f6853003391a49cd76ae3c4ff7ea145546a812e | [] | no_license | juntalis/vip | c852d8a2f1688d827d48f8a0b6a304786fdf6a1a | 62318661ac5c1297a01add3b584bffd45ab56891 | refs/heads/master | 2020-12-28T22:22:07.250997 | 2012-12-10T02:23:23 | 2012-12-10T02:23:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,587 | py | # -*- coding: utf-8 -*-
import argparse
import contextlib
import sys
import vip
from vip import core
@contextlib.contextmanager
def protect_from_VipError():
try:
yield
except core.VipError as e:
core.logger.exception("fatal: " + str(e))
def create_argument_parser():
usage = """
%(prog)s command ...
%(prog)s --init [directory]
%(prog)s --locate [directory]
"""
parser = argparse.ArgumentParser(description=vip.__doc__, usage=usage)
# Command execution
parser.add_argument('command', metavar='command', type=str, nargs='?',
help='an executable in .vip/bin directory')
parser.add_argument('arguments', type=str, nargs=argparse.REMAINDER,
help='arguments passed to a given command')
parser.add_argument('-i', '--init', dest="init", metavar="directory",
nargs="?", const=".",
help='initializes a brand new virtualenv in '
'given directory, using "." by default')
parser.add_argument('-l', '--locate', metavar="directory",
nargs="?", const=".",
help='shows where the .vip directory is')
parser.add_argument('-v', '--verbose', action='store_true',
help='verbose error messages')
parser.add_argument('-V', '--version', action='store_true',
help='prints version and exits')
return parser, parser.parse_args()
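# A rough sketch of typical invocations (names are illustrative; any
# executable present in the .vip/bin directory can be used as the command):
#   vip --init myenv        # create a virtualenv under myenv/.vip
#   vip --locate            # print the path of the nearest .vip directory
#   vip python script.py    # run .vip/bin/python with the given arguments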
def main():
parser, args = create_argument_parser()
commands = ["init", "locate", "command"]
# Configure logger using --verbose option
core.logger.verbose = bool(args.verbose)
with protect_from_VipError():
if args.version:
sys.stdout.write("%s\n" % vip.VERSION)
sys.exit(0)
# Check for only one command
used_commands = [int(bool(getattr(args, cmd))) for cmd in commands]
if sum(used_commands) > 1:
parser.print_help()
elif args.init:
directory = core.create_virtualenv(args.init)
core.logger.info("Initialized virtualenv in %s" % directory)
elif args.locate:
sys.stdout.write(core.find_vip_directory(args.locate) + "\n")
elif args.command:
directory = core.find_vip_directory()
return_code = core.execute_virtualenv_command(
directory, args.command, args.arguments)
sys.exit(return_code)
else:
parser.print_help()
if __name__ == "__main__":
main()
| [
"dawid.fatyga@gmail.com"
] | dawid.fatyga@gmail.com |
a6443714e78c514f91288fa7d1228269064a5eb1 | 82a39bab1ce10e01739bc7dbd17d823662b89222 | /w9Main(지하철역 최단거리).py | f124feb9348a8b453ad1e7f93b6cc0288af402ef | [] | no_license | JeonHeeSang/p2_201611103 | 1658499dbc575f09ba95faab2e82167cc303cfc2 | d2fc35bf6da8907ffc868272d1f4c5cba4f0e61a | refs/heads/master | 2021-01-17T04:47:54.436893 | 2016-06-14T09:41:46 | 2016-06-14T09:41:46 | 54,961,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import math
# Reference point and candidate subway station coordinates as (lat, lon) pairs
(x1, y1) = (37.575765, 126.973626)
Locations = [(37.576573, 126.985536), (37.588826, 126.944051), (37.574577, 126.957754), (37.619012, 126.921141), (37.571618, 126.976551)]
mylist = list()
for i in Locations:
    # straight-line (Euclidean) distance from the reference point to station i
    mylist.append(math.sqrt((x1 - i[0])**2 + (y1 - i[1])**2))
print(min(mylist))  # distance to the nearest station
input()  # keeps the console window open until Enter is pressed
| [
"gmltkd1302@naver.com"
] | gmltkd1302@naver.com |
bfd141a0f30ebd17b92bff39da89b6b302f60c69 | e7ed154e504e60a48ba11cbde115c073036e6ffb | /booker/tickets_querier.py | 9fccac81c79176dbd2c692dea5a0804831aed697 | [
"MIT"
] | permissive | kgd1987/12306-auto-book | 1f6412dbf47210f13127b40c3b789ded5c5b3a39 | a2a876016f4826baf5d880fffca7f7260cc717dc | refs/heads/master | 2020-04-10T21:11:36.410275 | 2018-12-03T12:09:50 | 2018-12-03T12:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,943 | py | # -*- coding: utf-8 -*-
import requests
from booker.consts import urls
# An object that implements the central ticket-inquiry functionality
class Querier():
def __init__(self, departure, destination, date, ticket_type = 'ADULT', **kwargs):
self.departure = departure
self.destination = destination
self.date = date
self.ticket_type = ticket_type if ticket_type != 'STUDENT' else '0X00'
self.details = kwargs
self.departure_code, self.destination_code = None, None
# get station-code
def get_sc(self):
res = requests.get(urls['sc_map'])
station_list = res.text.split("'")[1].split('@')
station_list.pop(0)
for s in station_list:
s_info = s.split('|')
if s_info[1] == self.departure:
self.departure_code = s_info[2]
elif s_info[1] == self.destination:
self.destination_code = s_info[2]
if self.destination_code and self.departure_code:
return True
return False
# query tickets
def query_tickets(self, session):
        # the result that will be returned
res = {
'ticket_type': self.ticket_type,
'departure': self.departure,
'departure_code': self.departure_code,
'destination': self.destination,
'destination_code': self.destination_code,
'date': self.date,
'details': self.details,
'items': []
}
params = {
'leftTicketDTO.train_date': self.date,
'leftTicketDTO.from_station': self.departure_code,
'leftTicketDTO.to_station': self.destination_code,
'purpose_codes': self.ticket_type
}
resp = session.get(urls['query_tickets'], params=params)
data = resp.json()['data']
items, maps = data['result'], data['map']
for i in items:
            # i_info is a list of information about one train:
            # 0 -> secret_str (needed for booking), 1 -> booking flag,
            # 2 -> train_uid, 3 -> train_id (the public name, e.g. G40); the
            # Chinese keys below are the seat classes
i_info = i.split('|')
item = {
'train_num': i_info[2],
'train_id': i_info[3],
'schedual': f"from: {maps[i_info[6]]} to: {maps[i_info[7]]}, \
time: {i_info[8]} -- {i_info[9]}, duration: {i_info[10]}",
'depart_time': i_info[8],
'arrive_time': i_info[9],
'商务座': i_info[-5],
'一等座': i_info[-6],
'二等座': i_info[-7],
'硬座': i_info[-8],
'硬卧': i_info[-9],
'无座': i_info[-11],
'软卧': i_info[-14],
'其他': i_info[-15],
'could_buy': i_info[11],
'secret_str': i_info[0]
}
res['items'].append(item)
return res
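# A minimal usage sketch (station names and date are illustrative, and a real
# 12306 session usually needs valid cookies before querying):
# session = requests.Session()
# querier = Querier('北京', '上海', '2019-02-01')
# if querier.get_sc():
#     result = querier.query_tickets(session)
#     for item in result['items']:
#         print(item['train_id'], item['schedual'], item['could_buy'])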
| [
"tomatokillar@gmail.com"
] | tomatokillar@gmail.com |
15646181db241c6d6f3a9e9f418a536aa95263f0 | 433c8104a6a114fe5aa4f28ac3ea05ab21e258c7 | /web_api/yonyou/apis/enum.py | 42a6b7924d10d9393d463cca2e194b060a5babf7 | [
"MIT"
] | permissive | zhanghe06/flask_restful | dda5a77cc8f6d6ad3de4b86c337fded12faa0057 | 6ef54f3f7efbbaff6169e963dcf45ab25e11e593 | refs/heads/master | 2022-12-10T13:50:15.268373 | 2018-08-28T11:57:39 | 2018-08-28T11:57:39 | 139,790,305 | 2 | 2 | MIT | 2022-12-08T02:15:24 | 2018-07-05T03:27:38 | Python | UTF-8 | Python | false | false | 2,754 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: enum.py
@time: 2018-08-23 15:50
"""
from libs.mysql_orm_op import DbInstance
from web_api.databases.yonyou import db
from web_api.models.yonyou import EapEnum
db_instance = DbInstance(db)
def get_enum_row_by_id(enum_id):
"""
    Get a single record by id
:param enum_id:
:return: None/object
"""
return db_instance.get_row_by_id(EapEnum, enum_id)
def get_enum_row(*args, **kwargs):
"""
    Get a single record matching the given conditions
:param args:
:param kwargs:
:return: None/object
"""
return db_instance.get_row(EapEnum, *args, **kwargs)
def get_enum_rows(*args, **kwargs):
"""
    Get a list of records
:param args:
:param kwargs:
:return:
"""
return db_instance.get_rows(EapEnum, *args, **kwargs)
def get_enum_limit_rows_by_last_id(last_pk, limit_num, *args, **kwargs):
"""
    Get the latest records that come after the given last primary key id
:param last_pk:
:param limit_num:
:param args:
:param kwargs:
:return:
"""
return db_instance.get_limit_rows_by_last_id(EapEnum, last_pk, limit_num, *args, **kwargs)
def add_enum(enum_data):
"""
    Add a record
    :param enum_data:
    :return: None / value of the new row's id
:except:
"""
return db_instance.add(EapEnum, enum_data)
def edit_enum(enum_id, enum_data):
"""
    Edit a record
:param enum_id:
:param enum_data:
:return: Number of affected rows (Example: 0/1)
:except:
"""
return db_instance.edit(EapEnum, enum_id, enum_data)
def delete_enum(enum_id):
"""
    Delete a record
:param enum_id:
:return: Number of affected rows (Example: 0/1)
:except:
"""
return db_instance.delete(EapEnum, enum_id)
def get_enum_pagination(page=1, per_page=10, *args, **kwargs):
"""
    Get a list of records (paginated)
    Usage:
        items: the records on the current page
        has_next: True if at least one more page follows this one
        has_prev: True if at least one page precedes this one
        next_num: the page number of the next page
        prev_num: the page number of the previous page
        iter_pages(): the list of page numbers
        iter_pages(left_edge=2, left_current=2, right_current=5, right_edge=2) are the default arguments for the page list
:param page:
:param per_page:
:param args:
:param kwargs:
:return:
"""
rows = db_instance.get_pagination(EapEnum, page, per_page, *args, **kwargs)
return rows
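# A minimal usage sketch of the pagination object returned above (the filter
# keyword argument is illustrative):
# pagination = get_enum_pagination(page=2, per_page=20, status=1)
# for row in pagination.items:
#     print(row.id)
# if pagination.has_next:
#     print('next page:', pagination.next_num)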
def delete_enum_table():
"""
    Empty the table
:return:
"""
return db_instance.delete_table(EapEnum)
def count_enum(*args, **kwargs):
"""
    Count the records
:param args:
:param kwargs:
:return:
"""
return db_instance.count(EapEnum, *args, **kwargs)
| [
"zhang_he06@163.com"
] | zhang_he06@163.com |
1be3d3576a39cb6f1efb3f5ca688fbe06e2125ad | d1835b1fa65adea42b883fa234197b1aa5e2b8e1 | /usb_import.py | 4d90eb076acd732b0d908035556b764c8a83ede1 | [] | no_license | rafaeljegundo/xbowtocitec | 88b103fae1ac1c20993b4d8aab9fcfcd830e711e | a0d2bf834657b2646cfb24a9f5961a39c9dd67bb | refs/heads/master | 2020-05-17T13:06:12.124163 | 2011-07-01T14:51:23 | 2011-07-01T14:51:23 | 1,582,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | # -*- coding: cp1252 -*-
import time
import signal
import sys
import serial
ser = serial.Serial()
ser.port = 3 # May change from system to system. Validate to be sure.
ser.baudrate = 57600
if ser.isOpen():
ser.close()
ser.open()
else:
ser.open()
def decode(code):
deco = []
for a in code:
deco.append(ord(a))
return deco
def listen(b):
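    # Accumulates bytes until the next 0x7E; 0x7E is assumed to be the frame
    # delimiter of the mote's serial protocol (HDLC-style framing, as used by
    # XBee/TinyOS serial links), so it both ends this frame and starts the next.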
msg = b
while 1:
a = ser.read(1)
if a == '\x7E':
msg += a
return msg
else:
msg += a
def main():
    d = open('messages.txt','w')
    msg_array = []  # defined up front so the KeyboardInterrupt handler cannot hit a NameError
    while True:
try:
b = ser.read(1)
if b == '\x7E':
print "7E found"
msg_array = decode(listen(b))
print map(hex,msg_array)
stringtofile = ""
for a in msg_array:
stringtofile += str(hex(a)) + " "
d.write(stringtofile + "\n")
except KeyboardInterrupt:
print "Bye"
print map(hex,msg_array)
ser.close()
d.close()
sys.exit()
if __name__ == '__main__':
main()
| [
"rafael.jegundo@gmail.com"
] | rafael.jegundo@gmail.com |
7802adee41606de0ec1ff1fc15514f63d6937454 | e354add7dd04582cc4227bcf95a34f097b3c5d34 | /coinaddress/networks/bitcoin.py | e57d26021823befb4b64e355a78c5d047c7ab4b3 | [
"MIT"
] | permissive | GDPR-gg/coinaddress | e9dab4cf0cec2a155b623d2e47080000b71b1fc5 | 09f95c8622992da2a3e634be95fc10114276b8eb | refs/heads/master | 2023-07-30T17:49:23.341617 | 2021-09-28T00:00:12 | 2021-09-28T00:00:12 | 411,075,512 | 0 | 0 | MIT | 2021-09-27T23:44:42 | 2021-09-27T23:44:42 | null | UTF-8 | Python | false | false | 137 | py | from .base import BaseNetwork
from .registry import registry
@registry.register('bitcoin', 'BTC')
class Bitcoin(BaseNetwork):
pass
| [
"roman@tolkachyov.name"
] | roman@tolkachyov.name |
04e7c53a7c383222f9c0e97164420b372bd0ed45 | c49196dc86e30512973782937a08501381790141 | /plugins/dice.py | e4aaeec7bfbc22be83ec2badde446593e6220747 | [] | no_license | atsumin/slack-todo | 5d2fa6d9910a236ef423eb90fc70d1676fdb377d | 26b33c6b40b13f9f1d9e09ec34543b4740629faf | refs/heads/master | 2022-11-30T02:20:48.897555 | 2020-08-16T09:54:19 | 2020-08-16T09:54:19 | 278,641,577 | 0 | 0 | null | 2020-08-16T09:54:20 | 2020-07-10T13:33:12 | Python | UTF-8 | Python | false | false | 2,484 | py | import random
from slackbot.bot import respond_to
from slackbot.bot import listen_to
def dice(num,m):
mess = ""
h = 0
for i in range(num):
f = random.randint(1,m)
h = h + f
if num == 1:
mess = "(" + str(f) + ")"
elif i == 0:
mess = "(" + str(f) + ","
elif i == num-1:
mess = mess+ str(f) + ")"
else:
mess = mess + str(f) + ","
mess = str(num) + "d" + str(m) + "=" + str(h) + mess
return mess
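# Example: dice(3, 6) could return '3d6=11(4,2,5)', i.e. the total first and
# then the individual rolls (values here are illustrative).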
def dice_u(num):
    # counts[k] holds how many times the face value k + 1 came up
    counts = [0] * 6
    for i in range(num):
        counts[random.randint(1, 6) - 1] += 1
    messu = str(num) + "d6="
    for face in range(6):
        messu += " `" + str(face + 1) + "`" + str(counts[face])
    listu = messu.split()
    string = "\n".join(listu)
    return string
@respond_to(r'^dice\s(\d+)(d)(\d+)$')
def diceroll(message,roll,d,sty):
message.reply(dice(int(roll),int(sty)))
@respond_to(r'^dice\s(d)(\d+)$')
def diceroll_once(message,d,roll):
message.reply(dice(1,int(roll)))
@respond_to(r'^dice$')
def diceroll_unselected(message):
message.reply(dice(1,100))
@respond_to(r'^dice\s(u)\s(\d+)$')
def diceroll_utakaze(message,comm,roll):
message.reply(dice_u(int(roll)))
@respond_to(r'^dice\s(help)$')
def dice_help(message,comm):
    msg = "\nAvailable commands for the Dice feature\n"\
    "`dice (number of rolls)d(number of faces)`\n"\
    "This is the full form; the dice are rolled exactly as specified. The shortened forms are listed below\n"\
    "`dice d(number of faces)`\n"\
    "If the number of rolls is omitted, one die with the given number of faces is rolled\n"\
    "`dice`\n"\
    "Plain `dice` rolls a single 100-sided die\n"\
    "`dice u (number of rolls)`\n"\
    "Rolls that many six-sided dice and shows the count for each face value; handy for the Utakaze TRPG\n"
message.reply(msg)
| [
"{earthmiran@gmail.com}"
] | {earthmiran@gmail.com} |
bb416b08bb75ad5743c5818b2f526e869a3dc034 | f573bdebd7b3489a90f6031f529b91718d169950 | /crimson_forge/__init__.py | 09085deefb723dc2536bb99fab5029ed2cb3ec62 | [] | no_license | miralayipouya/crimson-forge | 67b6aa4a87815169ae6987ed5c90cde66308b709 | a4a2b5a8f7024aa17b1b5c93a1bcc9f3af7ded21 | refs/heads/master | 2023-04-23T20:50:30.742235 | 2021-05-19T20:11:11 | 2021-05-19T20:11:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# crimson_forge/__init__.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__version__ = '0.4.0'
from .block import BasicBlock, DataBlock
from .instruction import Instruction
from .segment import ExecutableSegment
from .utilities import print_error, print_good, print_status, print_warning
| [
"zeroSteiner@gmail.com"
] | zeroSteiner@gmail.com |
ca105c2e0f4626dfd831c53a2fc9910355fb5b0b | 660c36d33a8d73b2a08d2bbd0b624dee56366843 | /PythonTest/flask_test.py | 2670170b06a1ee8c7914723aee82231d87bf54e0 | [] | no_license | mpp100579/test_case | 78dd68341643c354f744c32d19ed3f54dadb9c0a | 2c8062f73ed16d098770d9f45e08454b451c1c68 | refs/heads/master | 2020-04-13T08:31:11.018112 | 2019-07-05T06:41:44 | 2019-07-05T06:41:44 | 163,083,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,928 | py | # -*- coding:utf-8 -*-
'''
import flask
from flask import request
from flask import jsonify
import tools
import OP_db
import settings
# Create a service object, treating this python file as the service
server = flask.Flask(__name__)
# server.config['JSON_AS_ASCII'] = False
# @server.route() turns a plain function into an endpoint; this sets the login API's path and request method
@server.route('/login', methods=['get'])
def login():
    # Get the username passed as a url query parameter
username = request.values.get('name')
    # Get the plaintext password passed in the url
pwd = request.values.get('pwd')
    # Require both username and password; missing parameters arrive as None
if username and pwd:
        # Hash the password
password = tools.md5_pwd(pwd)
        # Run the sql; a non-empty result means an account with this username and password exists
sql = 'select name,password from test where name= "%s" and password= "%s";' % (username, password)
        # The database query result comes back as a tuple
res = OP_db.getconn(
host=settings.mysql_info['host'],
user=settings.mysql_info['user'],
passwd=settings.mysql_info['pwd'],
db=settings.mysql_info['db'],
port=settings.mysql_info['port'],
sql=sql
)
        if res:  # a non-empty res means a user with this name and a matching password was found
            resu = {'code': 200, 'message': 'login successful'}
            return jsonify(resu)  # convert the dict to a json string
        else:
            resu = {'code': -1, 'message': 'wrong account or password'}
            return jsonify(resu)
    else:
        res = {'code': 999, 'message': 'required parameters are missing'}
return jsonify(res)
if __name__ == '__main__':
    server.run(debug=True, port=8888, host='127.0.0.1')
    # Set the port and host; a host of 0.0.0.0 would accept connections on every network interface
# coding:utf-8
import json
from urlparse import parse_qs
from wsgiref.simple_server import make_server
# Define the handler; its two parameters are part of the WSGI spec, the defaults are fine
def application(environ, start_response):
    # Declare the response content type and the success status code
start_response('200 OK', [('Content-Type', 'text/html')])
    # environ holds everything about the current request (headers, URL, body); only GET is handled here
    # parse_qs parses all GET parameters out of the raw query string
params = parse_qs(environ['QUERY_STRING'])
    # Get the value of the 'name' key from the query string
name = params.get('name', [''])[0]
no = params.get('no', [''])[0]
    # Build a list containing a single dict
dic = {'name': name, 'no': no}
return [json.dumps(dic)]
if __name__ == "__main__":
port = 5088
httpd = make_server("0.0.0.0", port, application)
print "serving http on port {0}...".format(str(port))
httpd.serve_forever()
'''
# coding:utf-8
import json
from wsgiref.simple_server import make_server
# Define the handler; its two parameters are part of the WSGI spec, the defaults are fine
def application(environ, start_response):
    # Declare the response content type and the success status code
start_response('200 OK', [('Content-Type', 'application/json')])
    # environ holds everything about the current request, including headers, URL and body
request_body = environ["wsgi.input"].read(int(environ.get("CONTENT_LENGTH", 0)))
request_body = json.loads(request_body)
name = request_body["name"]
no = request_body["no"]
    # put your own logic here, for instance
    # create/read/update/delete operations against a database
dic = {'myNameIs': name, 'myNoIs': no}
return [json.dumps(dic)]
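# A quick manual test once this server is running (the payload is illustrative):
#   curl -X POST http://127.0.0.1:6088/ -d '{"name": "alice", "no": "42"}'
# which should return {"myNameIs": "alice", "myNoIs": "42"}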
if __name__ == "__main__":
port = 6088
httpd = make_server("0.0.0.0", port, application)
print "serving http on port {0}...".format(str(port))
httpd.serve_forever()
| [
"2906211933@qq.com"
] | 2906211933@qq.com |
1b7cd915223a2064e1a33ecdbe508f602974a7cf | c1973f51924be8fb0a71c3556f4214ad6c769ad3 | /matrix_helper.py | b03db9a996ca4b03c477f35e9db404db030e8b1f | [] | no_license | brandonlogan/python_opengl_learning | ff27c3e94821bb142a510acf15dde0bd262316e3 | 56d403d103496f4e50b836c0e99505364ada859a | refs/heads/master | 2021-01-15T11:48:59.633146 | 2013-07-05T07:46:59 | 2013-07-05T07:46:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | import glm
import math
import numpy
def translate(matrix, x, y, z):
translation = glm.types.mat4x4.identity()
translation.col3_vec4(glm.vec4(x, y, z, 1))
return translation.mul_mat(matrix)
def scale(matrix, x, y, z):
scaling = glm.mat4x4.identity()
scaling.i00 = x
scaling.i11 = y
scaling.i22 = z
scaling.i33 = 1
return scaling.mul_mat(matrix)
def rotate_about_x(matrix, angle):
angle = math.radians(angle)
rotation = glm.types.mat4x4.identity()
rotation.i11 = math.cos(angle)
rotation.i12 = -math.sin(angle)
rotation.i21 = math.sin(angle)
rotation.i22 = math.cos(angle)
return rotation.mul_mat(matrix)
def rotate_about_y(matrix, angle):
angle = math.radians(angle)
rotation = glm.types.mat4x4.identity()
rotation.i00 = math.cos(angle)
rotation.i02 = math.sin(angle)
rotation.i20 = -math.sin(angle)
rotation.i22 = math.cos(angle)
return rotation.mul_mat(matrix)
def rotate_about_z(matrix, angle):
angle = math.radians(angle)
rotation = glm.types.mat4x4.identity()
rotation.i00 = math.cos(angle)
rotation.i01 = -math.sin(angle)
rotation.i10 = math.sin(angle)
rotation.i11 = math.cos(angle)
return rotation.mul_mat(matrix)
def projection(fov, aspect_ratio, z_near, z_far):
fov = math.radians(fov)
f = 1.0 / math.tan(fov / 2.0)
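    # The sixteen values below are in column-major order (the OpenGL
    # convention), so each group of four is one column of the matrix.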
p_matrix = numpy.array([f / aspect_ratio, 0.0, 0.0, 0.0,
0.0, f, 0.0, 0.0,
0.0, 0.0, (z_far + z_near) / (z_near - z_far), -1.0,
0.0, 0.0, 2.0 * z_far * z_near / (z_near - z_far), 0.0], numpy.float32)
return p_matrix
def identity():
return [1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0]
| [
"johnbrandonlogan@gmail.com"
] | johnbrandonlogan@gmail.com |
c282c9be34a54448c3e932b51203545d2d511cc7 | 6a95112805b64322953429270a305d01fef3faea | /dist/weewx-4.6.0b7/bin/weewx/cheetahgenerator.py | 9f827c995c99b80e9986cec85047e89cb9371ce5 | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | tomdotorg/docker-weewx | c6d59dc492a9e53f3bc898f7b9f593717092d72c | 7085654f455d39b06acc688738fde27e1f78ad1e | refs/heads/main | 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 | Apache-2.0 | 2022-10-19T23:46:26 | 2016-03-17T11:39:29 | Dockerfile | UTF-8 | Python | false | false | 33,943 | py | #
# Copyright (c) 2009-2020 Tom Keffer <tkeffer@gmail.com>
#
# Class Gettext is Copyright (C) 2021 Johanna Karen Roedenbeck
#
# See the file LICENSE.txt for your full rights.
#
"""Generate files from templates using the Cheetah template engine.
For more information about Cheetah, see http://www.cheetahtemplate.org
Configuration Options
encoding = (html_entities|utf8|strict_ascii|normalized_ascii)
template = filename.tmpl # must end with .tmpl
stale_age = s # age in seconds
search_list = a, b, c
search_list_extensions = d, e, f
The strings YYYY, MM, DD and WW will be replaced if they appear in the filename.
search_list will override the default search_list
search_list_extensions will be appended to search_list
Both search_list and search_list_extensions must be lists of classes. Each
class in the list must be derived from SearchList.
Generally it is better to extend by using search_list_extensions rather than
search_list, just in case the default search list changes.
Example:
[CheetahGenerator]
# How to specify search list extensions:
search_list_extensions = user.forecast.ForecastVariables, user.extstats.ExtStatsVariables
encoding = html_entities
[[SummaryByMonth]] # period
[[[NOAA_month]]] # report
encoding = normalized_ascii
template = NOAA-YYYY-MM.txt.tmpl
[[SummaryByYear]]
        [[[NOAA_year]]]
encoding = normalized_ascii
template = NOAA-YYYY.txt.tmpl
[[ToDate]]
[[[day]]]
template = index.html.tmpl
[[[week]]]
template = week.html.tmpl
[[wuforecast_details]] # period/report
stale_age = 3600 # how old before regenerating
template = wuforecast.html.tmpl
[[nwsforecast_details]] # period/report
stale_age = 10800 # how old before generating
template = nwsforecast.html.tmpl
"""
from __future__ import absolute_import
import datetime
import json
import logging
import os.path
import time
import unicodedata
import Cheetah.Filters
import Cheetah.Template
import six
import weedb
import weeutil.logger
import weeutil.weeutil
import weewx.almanac
import weewx.reportengine
import weewx.station
import weewx.tags
import weewx.units
from weeutil.config import search_up, accumulateLeaves, deep_copy
from weeutil.weeutil import to_bool, to_int, timestamp_to_string
log = logging.getLogger(__name__)
# The default search list includes standard information sources that should be
# useful in most templates.
default_search_list = [
"weewx.cheetahgenerator.Almanac",
"weewx.cheetahgenerator.Current",
"weewx.cheetahgenerator.DisplayOptions",
"weewx.cheetahgenerator.Extras",
"weewx.cheetahgenerator.Gettext",
"weewx.cheetahgenerator.JSONHelpers",
"weewx.cheetahgenerator.PlotInfo",
"weewx.cheetahgenerator.SkinInfo",
"weewx.cheetahgenerator.Station",
"weewx.cheetahgenerator.Stats",
"weewx.cheetahgenerator.UnitInfo",
]
# =============================================================================
# CheetahGenerator
# =============================================================================
class CheetahGenerator(weewx.reportengine.ReportGenerator):
"""Class for generating files from cheetah templates.
Useful attributes (some inherited from ReportGenerator):
config_dict: The weewx configuration dictionary
skin_dict: The dictionary for this skin
gen_ts: The generation time
first_run: Is this the first time the generator has been run?
stn_info: An instance of weewx.station.StationInfo
record: A copy of the "current" record. May be None.
formatter: An instance of weewx.units.Formatter
converter: An instance of weewx.units.Converter
search_list_objs: A list holding search list extensions
db_binder: An instance of weewx.manager.DBBinder from which the
data should be extracted
"""
generator_dict = {'SummaryByDay' : weeutil.weeutil.genDaySpans,
'SummaryByMonth': weeutil.weeutil.genMonthSpans,
'SummaryByYear' : weeutil.weeutil.genYearSpans}
format_dict = {'SummaryByDay' : "%Y-%m-%d",
'SummaryByMonth': "%Y-%m",
'SummaryByYear' : "%Y"}
def run(self):
"""Main entry point for file generation using Cheetah Templates."""
t1 = time.time()
self.setup()
# Make a deep copy of the skin dictionary (we will be modifying it):
gen_dict = deep_copy(self.skin_dict)
# Look for options in [CheetahGenerator],
section_name = "CheetahGenerator"
# but accept options from [FileGenerator] for backward compatibility.
if "FileGenerator" in gen_dict and "CheetahGenerator" not in gen_dict:
section_name = "FileGenerator"
# The default summary time span is 'None'.
gen_dict[section_name]['summarize_by'] = 'None'
# determine how much logging is desired
log_success = to_bool(search_up(gen_dict[section_name], 'log_success', True))
# configure the search list extensions
self.initExtensions(gen_dict[section_name])
# Generate any templates in the given dictionary:
ngen = self.generate(gen_dict[section_name], section_name, self.gen_ts)
self.teardown()
elapsed_time = time.time() - t1
if log_success:
log.info("Generated %d files for report %s in %.2f seconds",
ngen, self.skin_dict['REPORT_NAME'], elapsed_time)
def setup(self):
# This dictionary will hold the formatted dates of all generated files
self.outputted_dict = {k: [] for k in CheetahGenerator.generator_dict}
self.formatter = weewx.units.Formatter.fromSkinDict(self.skin_dict)
self.converter = weewx.units.Converter.fromSkinDict(self.skin_dict)
def initExtensions(self, gen_dict):
"""Load the search list"""
self.search_list_objs = []
# The option 'search_list' holds a starting search list (usually just the default).
# Because we'll be modifying search_list, make a copy of the default before assignment.
search_list = weeutil.weeutil.option_as_list(gen_dict.get('search_list',
list(default_search_list)))
# Option 'search_list_extensions' holds user extensions.
search_list.extend(weeutil.weeutil.option_as_list(gen_dict.get('search_list_extensions',
[])))
# provide feedback about the requested search list objects
log.debug("Using search list %s", search_list)
# Now go through search_list (which is a list of strings holding the
# names of the extensions):
for c in search_list:
x = c.strip()
if x:
# Get the class
klass = weeutil.weeutil.get_object(x)
# Then instantiate the class, passing self as the sole argument
self.search_list_objs.append(klass(self))
def teardown(self):
"""Delete any extension objects we created to prevent back references
from slowing garbage collection"""
while self.search_list_objs:
self.search_list_objs[-1].finalize()
del self.search_list_objs[-1]
def generate(self, section, section_name, gen_ts):
"""Generate one or more reports for the indicated section. Each
section in a period is a report. A report has one or more templates.
section: A ConfigObj dictionary, holding the templates to be
generated. Any subsections in the dictionary will be recursively
processed as well.
gen_ts: The report will be current to this time.
"""
ngen = 0
# Go through each subsection (if any) of this section,
# generating from any templates they may contain
for subsection in section.sections:
# Sections 'SummaryByMonth' and 'SummaryByYear' imply summarize_by
# certain time spans
if 'summarize_by' not in section[subsection]:
if subsection in CheetahGenerator.generator_dict:
section[subsection]['summarize_by'] = subsection
# Call recursively, to generate any templates in this subsection
ngen += self.generate(section[subsection], subsection, gen_ts)
# We have finished recursively processing any subsections in this
# section. Time to do the section itself. If there is no option
# 'template', then there isn't anything to do. Return.
if 'template' not in section:
return ngen
# Change directory to the skin subdirectory. We use absolute paths
# for cheetah, so the directory change is not necessary for generating
# files. However, changing to the skin directory provides a known
# location so that calls to os.getcwd() in any templates will return
# a predictable result.
os.chdir(os.path.join(self.config_dict['WEEWX_ROOT'],
self.skin_dict['SKIN_ROOT'],
self.skin_dict.get('skin', '')))
report_dict = accumulateLeaves(section)
(template, dest_dir, encoding, default_binding) = self._prepGen(report_dict)
# Get start and stop times
default_archive = self.db_binder.get_manager(default_binding)
start_ts = default_archive.firstGoodStamp()
if not start_ts:
log.info('Skipping template %s: cannot find start time', section['template'])
return ngen
if gen_ts:
record = default_archive.getRecord(gen_ts,
max_delta=to_int(report_dict.get('max_delta')))
if record:
stop_ts = record['dateTime']
else:
log.info('Skipping template %s: generate time %s not in database',
section['template'], timestamp_to_string(gen_ts))
return ngen
else:
stop_ts = default_archive.lastGoodStamp()
# Get an appropriate generator function
summarize_by = report_dict['summarize_by']
if summarize_by in CheetahGenerator.generator_dict:
_spangen = CheetahGenerator.generator_dict[summarize_by]
else:
# Just a single timespan to generate. Use a lambda expression.
_spangen = lambda start_ts, stop_ts: [weeutil.weeutil.TimeSpan(start_ts, stop_ts)]
# Use the generator function
for timespan in _spangen(start_ts, stop_ts):
start_tt = time.localtime(timespan.start)
stop_tt = time.localtime(timespan.stop)
if summarize_by in CheetahGenerator.format_dict:
# This is a "SummaryBy" type generation. If it hasn't been done already, save the
# date as a string, to be used inside the document
date_str = time.strftime(CheetahGenerator.format_dict[summarize_by], start_tt)
if date_str not in self.outputted_dict[summarize_by]:
self.outputted_dict[summarize_by].append(date_str)
# For these "SummaryBy" generations, the file name comes from the start of the timespan:
_filename = self._getFileName(template, start_tt)
else:
# This is a "ToDate" generation. File name comes
# from the stop (i.e., present) time:
_filename = self._getFileName(template, stop_tt)
# Get the absolute path for the target of this template
_fullname = os.path.join(dest_dir, _filename)
# Skip summary files outside the timespan
if report_dict['summarize_by'] in CheetahGenerator.generator_dict \
and os.path.exists(_fullname) \
and not timespan.includesArchiveTime(stop_ts):
continue
# skip files that are fresh, but only if staleness is defined
stale = to_int(report_dict.get('stale_age'))
if stale is not None:
t_now = time.time()
try:
last_mod = os.path.getmtime(_fullname)
if t_now - last_mod < stale:
log.debug("Skip '%s': last_mod=%s age=%s stale=%s",
_filename, last_mod, t_now - last_mod, stale)
continue
except os.error:
pass
searchList = self._getSearchList(encoding, timespan,
default_binding, section_name,
os.path.join(
os.path.dirname(report_dict['template']),
_filename))
# First, compile the template
try:
# TODO: Look into caching the compiled template.
# Under Python 2, Cheetah V2 will crash if given a template file name in Unicode,
# so make sure it's a string first, using six.ensure_str().
compiled_template = Cheetah.Template.Template(
file=six.ensure_str(template),
searchList=searchList,
filter='AssureUnicode',
filtersLib=weewx.cheetahgenerator)
except Exception as e:
log.error("Compilation of template %s failed with exception '%s'", template, type(e))
log.error("**** Ignoring template %s", template)
log.error("**** Reason: %s", e)
weeutil.logger.log_traceback(log.error, "**** ")
continue
# Second, evaluate the compiled template
try:
# We have a compiled template in hand. Evaluate it. The result will be a long
# Unicode string.
unicode_string = compiled_template.respond()
except Cheetah.Parser.ParseError as e:
log.error("Parse error while evaluating file %s", template)
log.error("**** Ignoring template %s", template)
log.error("**** Reason: %s", e)
continue
except Cheetah.NameMapper.NotFound as e:
log.error("Evaluation of template %s failed.", template)
log.error("**** Ignoring template %s", template)
log.error("**** Reason: %s", e)
log.error("**** To debug, try inserting '#errorCatcher Echo' at top of template")
continue
except Exception as e:
log.error("Evaluation of template %s failed with exception '%s'", template, type(e))
log.error("**** Ignoring template %s", template)
log.error("**** Reason: %s", e)
weeutil.logger.log_traceback(log.error, "**** ")
continue
# Third, convert the results to a byte string, using the strategy chosen by the user.
if encoding == 'html_entities':
byte_string = unicode_string.encode('ascii', 'xmlcharrefreplace')
elif encoding == 'strict_ascii':
byte_string = unicode_string.encode('ascii', 'ignore')
elif encoding == 'normalized_ascii':
# Normalize the string, replacing accented characters with non-accented
# equivalents
normalized = unicodedata.normalize('NFD', unicode_string)
byte_string = normalized.encode('ascii', 'ignore')
else:
byte_string = unicode_string.encode(encoding)
# Finally, write the byte string to the target file
try:
# Write to a temporary file first
tmpname = _fullname + '.tmp'
# Open it in binary mode. We are writing a byte-string, not a string
with open(tmpname, mode='wb') as fd:
fd.write(byte_string)
# Now move the temporary file into place
os.rename(tmpname, _fullname)
ngen += 1
finally:
try:
os.unlink(tmpname)
except OSError:
pass
return ngen
def _getSearchList(self, encoding, timespan, default_binding, section_name, file_name):
"""Get the complete search list to be used by Cheetah."""
# Get the basic search list
timespan_start_tt = time.localtime(timespan.start)
search_list = [{'month_name' : time.strftime("%b", timespan_start_tt),
'year_name' : timespan_start_tt[0],
'encoding' : encoding,
'page' : section_name,
'filename' : file_name},
self.outputted_dict]
# Bind to the default_binding:
db_lookup = self.db_binder.bind_default(default_binding)
# Then add the V3.X style search list extensions
for obj in self.search_list_objs:
search_list += obj.get_extension_list(timespan, db_lookup)
return search_list
def _getFileName(self, template, ref_tt):
"""Calculate a destination filename given a template filename.
For backwards compatibility replace 'YYYY' with the year, 'MM' with the
month, 'DD' with the day. Also observe any strftime format strings in
the filename. Finally, strip off any trailing .tmpl."""
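        # For example, a template named 'NOAA-YYYY-MM.txt.tmpl' with ref_tt in
        # March 2021 yields 'NOAA-2021-03.txt'.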
_filename = os.path.basename(template).replace('.tmpl', '')
# If the filename contains YYYY, MM, DD or WW, then do the replacement
if 'YYYY' in _filename or 'MM' in _filename or 'DD' in _filename or 'WW' in _filename:
# Get strings representing year, month, and day
_yr_str = "%4d" % ref_tt[0]
_mo_str = "%02d" % ref_tt[1]
_day_str = "%02d" % ref_tt[2]
            _week_str = "%02d" % datetime.date(ref_tt[0], ref_tt[1], ref_tt[2]).isocalendar()[1]
# Replace any instances of 'YYYY' with the year string
_filename = _filename.replace('YYYY', _yr_str)
# Do the same thing with the month...
_filename = _filename.replace('MM', _mo_str)
# ... the week ...
_filename = _filename.replace('WW', _week_str)
# ... and the day
_filename = _filename.replace('DD', _day_str)
# observe any strftime format strings in the base file name
# first obtain a datetime object from our timetuple
ref_dt = datetime.datetime.fromtimestamp(time.mktime(ref_tt))
# then apply any strftime formatting
_filename = ref_dt.strftime(_filename)
return _filename
def _prepGen(self, report_dict):
"""Get the template, destination directory, encoding, and default
binding."""
# -------- Template ---------
template = os.path.join(self.config_dict['WEEWX_ROOT'],
self.config_dict['StdReport']['SKIN_ROOT'],
report_dict['skin'],
report_dict['template'])
# ------ Destination directory --------
destination_dir = os.path.join(self.config_dict['WEEWX_ROOT'],
report_dict['HTML_ROOT'],
os.path.dirname(report_dict['template']))
try:
# Create the directory that is to receive the generated files. If
# it already exists an exception will be thrown, so be prepared to
# catch it.
os.makedirs(destination_dir)
except OSError:
pass
# ------ Encoding ------
encoding = report_dict.get('encoding', 'html_entities').strip().lower()
# Convert to 'utf8'. This is because 'utf-8' cannot be a class name
if encoding == 'utf-8':
encoding = 'utf8'
# ------ Default binding ---------
default_binding = report_dict['data_binding']
return (template, destination_dir, encoding, default_binding)
# =============================================================================
# Classes used to implement the Search list
# =============================================================================
class SearchList(object):
"""Abstract base class used for search list extensions."""
def __init__(self, generator):
"""Create an instance of SearchList.
generator: The generator that is using this search list
"""
self.generator = generator
def get_extension_list(self, timespan, db_lookup): # @UnusedVariable
"""For weewx V3.x extensions. Should return a list
of objects whose attributes or keys define the extension.
timespan: An instance of weeutil.weeutil.TimeSpan. This will hold the
start and stop times of the domain of valid times.
db_lookup: A function with call signature db_lookup(data_binding),
which returns a database manager and where data_binding is
an optional binding name. If not given, then a default
binding will be used.
"""
return [self]
def finalize(self):
"""Called when the extension is no longer needed"""
class Almanac(SearchList):
"""Class that implements the '$almanac' tag."""
def __init__(self, generator):
SearchList.__init__(self, generator)
celestial_ts = generator.gen_ts
# For better accuracy, the almanac requires the current temperature
# and barometric pressure, so retrieve them from the default archive,
# using celestial_ts as the time
# The default values of temperature and pressure
temperature_C = 15.0
pressure_mbar = 1010.0
# See if we can get more accurate values by looking them up in the
# weather database. The database might not exist, so be prepared for
# a KeyError exception.
try:
binding = self.generator.skin_dict.get('data_binding', 'wx_binding')
archive = self.generator.db_binder.get_manager(binding)
except (KeyError, weewx.UnknownBinding, weedb.NoDatabaseError):
pass
else:
# If a specific time has not been specified, then use the timestamp
# of the last record in the database.
if not celestial_ts:
celestial_ts = archive.lastGoodStamp()
# Check to see whether we have a good time. If so, retrieve the
# record from the database
if celestial_ts:
# Look for the record closest in time. Up to one hour off is
# acceptable:
rec = archive.getRecord(celestial_ts, max_delta=3600)
if rec is not None:
if 'outTemp' in rec:
temperature_C = weewx.units.convert(weewx.units.as_value_tuple(rec, 'outTemp'), "degree_C")[0]
if 'barometer' in rec:
pressure_mbar = weewx.units.convert(weewx.units.as_value_tuple(rec, 'barometer'), "mbar")[0]
self.moonphases = generator.skin_dict.get('Almanac', {}).get('moon_phases', weeutil.Moon.moon_phases)
altitude_vt = weewx.units.convert(generator.stn_info.altitude_vt, "meter")
self.almanac = weewx.almanac.Almanac(celestial_ts,
generator.stn_info.latitude_f,
generator.stn_info.longitude_f,
altitude=altitude_vt[0],
temperature=temperature_C,
pressure=pressure_mbar,
moon_phases=self.moonphases,
formatter=generator.formatter,
converter=generator.converter)
class Station(SearchList):
"""Class that implements the $station tag."""
def __init__(self, generator):
SearchList.__init__(self, generator)
self.station = weewx.station.Station(generator.stn_info,
generator.formatter,
generator.converter,
generator.skin_dict)
class Current(SearchList):
"""Class that implements the $current tag"""
def get_extension_list(self, timespan, db_lookup):
record_binder = weewx.tags.RecordBinder(db_lookup, timespan.stop,
self.generator.formatter, self.generator.converter,
record=self.generator.record)
return [record_binder]
class Stats(SearchList):
"""Class that implements the time-based statistical tags, such
as $day.outTemp.max"""
def get_extension_list(self, timespan, db_lookup):
try:
trend_dict = self.generator.skin_dict['Units']['Trend']
except KeyError:
trend_dict = {'time_delta': 10800,
'time_grace': 300}
stats = weewx.tags.TimeBinder(
db_lookup,
timespan.stop,
formatter=self.generator.formatter,
converter=self.generator.converter,
week_start=self.generator.stn_info.week_start,
rain_year_start=self.generator.stn_info.rain_year_start,
trend=trend_dict,
skin_dict=self.generator.skin_dict)
return [stats]
class UnitInfo(SearchList):
"""Class that implements the $unit and $obs tags."""
def __init__(self, generator):
SearchList.__init__(self, generator)
# This implements the $unit tag:
self.unit = weewx.units.UnitInfoHelper(generator.formatter,
generator.converter)
# This implements the $obs tag:
self.obs = weewx.units.ObsInfoHelper(generator.skin_dict)
if six.PY3:
# Dictionaries in Python 3 no longer have the "has_key()" function.
# This will break a lot of skins. Use a wrapper to provide it
class ExtraDict(dict):
def has_key(self, key):
return key in self
else:
# Not necessary in Python 2
ExtraDict = dict
class Extras(SearchList):
"""Class for exposing the [Extras] section in the skin config dictionary
as tag $Extras."""
def __init__(self, generator):
SearchList.__init__(self, generator)
# If the user has supplied an '[Extras]' section in the skin
# dictionary, include it in the search list. Otherwise, just include
# an empty dictionary.
self.Extras = ExtraDict(generator.skin_dict.get('Extras', {}))
class JSONHelpers(SearchList):
"""Helper functions for formatting JSON"""
@staticmethod
def jsonize(arg):
"""
Format my argument as JSON
Args:
arg (iterable): An iterable, such as a list, or zip structure
Returns:
str: The argument formatted as JSON.
"""
val = list(arg)
return json.dumps(val, cls=weewx.units.ComplexEncoder)
@staticmethod
def rnd(arg, ndigits):
"""Round a number, or sequence of numbers, to a specified number of decimal digits
Args:
arg (None, float, complex, list): The number or sequence of numbers to be rounded.
If the argument is None, then None will be returned.
ndigits (int): The number of decimal digits to retain.
Returns:
None, float, complex, list: Returns the number, or sequence of numbers, with the
requested number of decimal digits
"""
return weeutil.weeutil.rounder(arg, ndigits)
@staticmethod
def to_int(arg):
"""Convert the argument into an integer, honoring 'None'
Args:
arg (None, float, str):
Returns:
int: The argument converted to an integer.
"""
return weeutil.weeutil.to_int(arg)
class Gettext(SearchList):
"""Values provided by $gettext() are found in the [Texts] section of the localization file."""
def gettext(self, key):
try:
v = self.generator.skin_dict['Texts'].get(key, key)
except KeyError:
v = key
return v
def pgettext(self, context, key):
try:
v = self.generator.skin_dict['Texts'][context].get(key, key)
except KeyError:
v = key
return v
# An underscore is a common alias for gettext:
_ = gettext
class PlotInfo(SearchList):
"""Return information about plots, based on what's in the [ImageGenerator] section."""
def getobs(self, plot_name):
"""
Given a plot name, return the set of observations in that plot.
If there is no plot by the indicated name, return an empty set.
"""
obs = set()
# If there is no [ImageGenerator] section, return the empty set.
try:
timespan_names = self.generator.skin_dict['ImageGenerator'].sections
except (KeyError, AttributeError):
return obs
# Scan all the timespans, looking for plot_name
for timespan_name in timespan_names:
if plot_name in self.generator.skin_dict['ImageGenerator'][timespan_name]:
# Found it. To make things manageable, get just the plot dictionary:
plot_dict = self.generator.skin_dict['ImageGenerator'][timespan_name][plot_name]
# Now extract all observation names from it
for obs_name in plot_dict.sections:
# The observation name might be specified directly,
# or it might be specified by the data_type field.
if 'data_type' in plot_dict[obs_name]:
obs.add(plot_dict[obs_name]['data_type'])
else:
obs.add(obs_name)
break
return obs
class DisplayOptions(SearchList):
"""Class for exposing the [DisplayOptions] section in the skin config
dictionary as tag $DisplayOptions."""
def __init__(self, generator):
SearchList.__init__(self, generator)
# If the user has supplied an '[DisplayOptions]' section in the skin
# dictionary, include it in the search list. Otherwise, just include
# an empty dictionary.
display_options = generator.skin_dict.get('DisplayOptions', {})
# Make sure all entries are actually lists.
self.DisplayOptions = {k: weeutil.weeutil.option_as_list(display_options[k])
for k in display_options}
class SkinInfo(SearchList):
"""Class for exposing information about the skin."""
def __init__(self, generator):
SearchList.__init__(self, generator)
for k in ['HTML_ROOT', 'lang', 'REPORT_NAME', 'skin',
'SKIN_NAME', 'SKIN_ROOT', 'SKIN_VERSION', 'unit_system'
]:
setattr(self, k, generator.skin_dict.get(k, 'unknown'))
# =============================================================================
# Filter
# =============================================================================
class AssureUnicode(Cheetah.Filters.Filter):
"""Assures that whatever a search list extension might return, it will be converted into
Unicode. """
def filter(self, val, **kwargs):
"""Convert the expression 'val' to unicode."""
# There is a 2x4 matrix of possibilities:
# input PY2 PY3
# _____ ________ _______
# bytes decode() decode()
# str decode() -done-
# unicode -done- N/A
# object unicode() str()
if val is None:
return u''
# Is it already unicode? This takes care of cells 4 and 5.
if isinstance(val, six.text_type):
filtered = val
# This conditional covers cells 1,2, and 3. That is, val is a byte string
elif isinstance(val, six.binary_type):
filtered = val.decode('utf-8')
# That leaves cells 7 and 8, that is val is an object, such as a ValueHelper
else:
# Must be an object. Convert to unicode string
try:
# For late tag bindings under Python 2, the following forces the invocation of
# __unicode__(). Under Python 3, it invokes __str__(). Either way, it can force
# an XTypes query. For a tag such as $day.foobar.min, where 'foobar' is an unknown
# type, this will cause an attribute error. Be prepared to catch it.
filtered = six.text_type(val)
except AttributeError as e:
# Offer a debug message.
log.debug("Unrecognized: %s", kwargs.get('rawExpr', e))
# Return the raw expression, if available. Otherwise, the exception message
# concatenated with a question mark.
filtered = kwargs.get('rawExpr', str(e) + '?')
return filtered
| [
"tom@tom.org"
] | tom@tom.org |
c579c4b8c1ee1a5a4164749336b8dc4787659c92 | f2220e153715dc47aaf49244c393b662c44dc980 | /blackjack.py | 40ee54a6d7f720c6332f5a681463cd1f9ca5e521 | [
"MIT"
] | permissive | mattwbarry/py_blackjack | 018f52206a4c69920894e10891766ee150877592 | c35fd1492b9f8d50a36e152b83c56740eccc0b21 | refs/heads/master | 2021-01-18T07:44:55.391658 | 2014-09-09T17:41:11 | 2014-09-09T17:41:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,154 | py | from random import shuffle
# Python 2's input() evaluates whatever the player types, so these module
# globals let players answer the hit prompt with a bare y or n.
y = 'y'
n = 'n'
class Blackjack():
def __init__(self, players=4):
        # Standard 52-card deck; face cards are worth 10 and aces 1
        # (add_points also tries each ace as 11).
        numbers = [(str(v), v) for v in range(2, 11)] \
            + [('jack', 10), ('queen', 10), ('king', 10), ('ace', 1)]
        self.deck = [{'number': number, 'suit': suit, 'value': value}
                     for suit in ('hearts', 'diamonds', 'clubs', 'spades')
                     for number, value in numbers]
shuffle(self.deck)
# generate players
self.players = []
for player_id in range(players):
self.players.append(Player(player_id))
# deal hand to each player and start the game
self.deal()
def deal(self):
for player in self.players:
player.hand['face-down'].append(self.deck.pop())
player.hand['face-up'].append(self.deck.pop())
# check for auto winner
if player.check_win_lose() == 1:
return player.add_points(), player.id
return self.play()
def play(self):
# while list of players has not been exhausted
for player in self.players:
# keep asking for hits until denial
hit = self.ask_hit(player)
while hit == 'y':
self.deal_single(player)
# check if won or lost
victory = player.check_win_lose()
if victory == 0:
hit = 'n'
                    print 'you lose. your possible totals are ' + str(player.add_points())
elif victory == 1:
hit = 'n'
return self.check_for_winner()
else:
hit = self.ask_hit(player)
# return player with the highest score
return self.check_for_winner()
def ask_hit(self, player):
print '----------------------------------------------------'
print 'player id ' + str(player.id)
print 'your cards are: ' + player.view_own_cards()
print 'your points are: ' + str(player.add_points())
hit = input('take a hit? (y/n)')
return hit
def deal_single(self, player):
player.hand['face-up'].append(self.deck.pop())
print 'you were dealt a ' + player.hand['face-up'][-1]['number'] + ' of ' + player.hand['face-up'][-1]['suit']
def check_for_winner(self):
max_val = 0
winner = None
for player in self.players:
points = player.add_points()
if 21 in points:
return 21, player.id
else:
for point in points:
if point > max_val and point <= 21:
max_val = point
winner = player
if max_val == 0:
return False
print 'winner is ' + str(winner.id) + ' with ' + str(max_val) + ' points!'
return max_val, winner.id
class Player():
def __init__(self, id=0):
self.id = id
self.hand = {'face-down': [], 'face-up': []}
def add_points(self):
cards = self.hand['face-down'] + self.hand['face-up']
aces = 0
total = 0
totals = []
        for card in cards:
            if card['value'] == 1:
                aces += 1
            total += card['value']
        # one total with every ace as 1, then one more total per ace counted as 11
        totals.append(total)
for ace in range(aces):
total += 10
totals.append(total)
# return list of possible totals
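        # e.g. ace + 9 gives [10, 20]; callers pick whichever total they need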
return totals
def check_win_lose(self):
points = self.add_points()
if min(points) > 21:
# player loses
return 0
elif 21 in points:
# player wins
return 1
else:
return points
def view_own_cards(self):
card_text = ''
cards = self.hand['face-down'] + self.hand['face-up']
for card in cards:
card_text += card['number'] + ' of ' + card['suit'] + '\n'
return card_text
    def view_others_cards(self, players):
        # Takes the game's player list, since a Player object has no reference
        # to the table; shows each opponent's hidden-card count and face-up cards.
        for player in players:
            if player != self:
                cards_down = len(player.hand['face-down'])
                cards_up = ''
                for card in player.hand['face-up']:
                    cards_up += card['number'] + ' of ' + card['suit'] + '\n'
                print 'player ' + str(player.id) + ' has ' + str(cards_down) + ' cards face down'
                print 'player ' + str(player.id) + ' is showing: ' + cards_up | [
"mattwbarry@gmail.com"
] | mattwbarry@gmail.com |
391278daedcf36626a688fd8400b020bcd1efade | f825bea24f4cb5ee44828101611918b75f9ff13e | /meetups/migrations/0004_auto_20210705_2028.py | c9be474a92565870dfea797fce393642d4d696dd | [] | no_license | ethanl267/Django_course | 2735a8a982205b707445ce5fa24f216724be4914 | b08d57a531fcb3e7f37b513267b83075e70fb0f5 | refs/heads/main | 2023-06-12T16:08:59.874068 | 2021-07-06T15:42:08 | 2021-07-06T15:42:08 | 379,626,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | # Generated by Django 2.2.12 on 2021-07-05 20:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meetups', '0003_auto_20210705_1940'),
]
operations = [
migrations.CreateModel(
name='Participant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254, unique=True)),
],
),
migrations.AddField(
model_name='meetup',
name='participants',
field=models.ManyToManyField(blank=True, to='meetups.Participant'),
),
]
| [
"ethanlesch8@gmail.com"
] | ethanlesch8@gmail.com |
61acb92e717a198fb0f9c74aff8b85975263a920 | ee6fc02e8392ff780a4f0d1a5789776e4d0b6a29 | /code/practice/abc/abc078/c.py | bb3317cee6cc38832ee76b4f54c7679216a3610b | [] | no_license | mollinaca/ac | e99bb5d5c07159b3ef98cd7067424fa2751c0256 | 2f40dd4333c2b39573b75b45b06ad52cf36d75c3 | refs/heads/master | 2020-12-22T11:02:13.269855 | 2020-09-18T01:02:29 | 2020-09-18T01:02:29 | 236,757,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
n,m = map(int,input().split())
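# Each of the m hard problems passes with probability 1/2, so the whole set
# is submitted 2**m times in expectation; one pass costs 1900*m + 100*(n-m)
# ms, giving the expected total printed below.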
print (((1900*m)+100*(n-m))*(2**m))
| [
"github@mail.watarinohibi.tokyo"
] | github@mail.watarinohibi.tokyo |
747ad736df23783884812c7c474ad3c6f8c00a34 | 1b2a1f807b98034567e936b9b5c76c2fc89b908a | /adj_tf/benchmark/benchmark.py | 75e77abfdd19ec6f2d0c21eb8821a66ad3a33e6c | [] | no_license | Adreambottle/Transformer2GP | 48c955d8eb155caef4c24a3c03ee3aa9ab0bd3da | 5ba1a5005c2ad21066304cdeb1d7c2587c8191da | refs/heads/main | 2023-07-07T14:17:51.673437 | 2021-08-17T14:14:56 | 2021-08-17T14:14:56 | 397,279,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,611 | py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Benchmarking the library on inference and training in PyTorch.
"""
import timeit
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..file_utils import is_py3nvml_available, is_torch_available
from ..models.auto.modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING
from ..utils import logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_torch_available():
import torch
from .benchmark_args import PyTorchBenchmarkArguments
if is_py3nvml_available():
import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
class PyTorchBenchmark(Benchmark):
args: PyTorchBenchmarkArguments
configs: PretrainedConfig
framework: str = "PyTorch"
@property
def framework_version(self):
return torch.__version__
def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
_inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
return self._measure_speed(_inference)
def _inference_memory(
self, model_name: str, batch_size: int, sequence_length: int
) -> [Memory, Optional[MemorySummary]]:
_inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
return self._measure_memory(_inference)
def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
_train = self._prepare_train_func(model_name, batch_size, sequence_length)
return self._measure_speed(_train)
def _train_memory(
self, model_name: str, batch_size: int, sequence_length: int
) -> [Memory, Optional[MemorySummary]]:
_train = self._prepare_train_func(model_name, batch_size, sequence_length)
return self._measure_memory(_train)
def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
config = self.config_dict[model_name]
if self.args.torchscript:
config.torchscript = True
has_model_class_in_config = (
hasattr(config, "architectures")
and isinstance(config.architectures, list)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
model_class = config.architectures[0]
transformers_module = __import__("adj_tf", fromlist=[model_class])
model_cls = getattr(transformers_module, model_class)
model = model_cls(config)
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
)
else:
model = MODEL_MAPPING[config.__class__](config)
model.eval()
model.to(self.args.device)
# encoder-decoder has vocab size saved differently
vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
if self.args.fp16:
logger.info("Running training in Mixed Precision...")
assert self.args.is_gpu, "Mixed precision is possible only for GPU."
# amp seems to have memory leaks so that memory usage
# is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
model.half()
if self.args.torchscript:
with torch.no_grad():
inference_model = torch.jit.trace(model, input_ids)
else:
inference_model = model
def encoder_decoder_forward():
with torch.no_grad():
outputs = inference_model(input_ids, decoder_input_ids=input_ids)
return outputs
def encoder_forward():
with torch.no_grad():
outputs = inference_model(input_ids)
return outputs
_forward = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _forward
def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
config = self.config_dict[model_name]
has_model_class_in_config = (
hasattr(config, "architectures")
and isinstance(config.architectures, list)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
model_class = config.architectures[0]
transformers_module = __import__("adj_tf", fromlist=[model_class])
model_cls = getattr(transformers_module, model_class)
model = model_cls(config)
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
)
else:
model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
if self.args.torchscript:
raise NotImplementedError("Training for torchscript is currently not implemented")
else:
train_model = model
model.train()
model.to(self.args.device)
# encoder-decoder has vocab size saved differently
vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
if self.args.fp16:
logger.info("Running training in Mixed Precision...")
assert self.args.is_gpu, "Mixed precision is possible only for GPU."
# amp seems to have memory leaks so that memory usage
# is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
model.half()
def compute_loss_and_backprob_encoder():
loss = train_model(input_ids, labels=input_ids)[0]
loss.backward()
return loss
def compute_loss_and_backprob_encoder_decoder():
loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0]
loss.backward()
return loss
_train = (
compute_loss_and_backprob_encoder_decoder
if config.is_encoder_decoder
else compute_loss_and_backprob_encoder
)
return _train
def _measure_speed(self, func) -> float:
try:
if self.args.is_tpu or self.args.torchscript:
                # run the model 5 more times to stabilize compilation for tpu and torchscript
logger.info("Do inference on TPU or torchscript. Running model 5 times to stabilize compilation")
timeit.repeat(
func,
repeat=1,
number=5,
)
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
runtimes = timeit.repeat(
func,
repeat=self.args.repeat,
number=10,
)
if self.args.is_tpu and self.args.torch_xla_tpu_print_metrics:
import torch_xla.debug.metrics as met
self.print_fn(met.metrics_report())
return min(runtimes) / 10.0
except RuntimeError as e:
self.print_fn("Doesn't fit on GPU. {}".format(e))
return "N/A"
def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
try:
if self.args.trace_memory_line_by_line:
trace = start_memory_tracing("adj_tf")
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with `--no-memory` or `args.memory=False`"
)
elif self.args.is_gpu:
if not is_py3nvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU."
)
memory = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes running on the same GPU."
)
# init nvml
nvml.nvmlInit()
func()
handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
max_bytes_in_use = meminfo.used
memory = Memory(max_bytes_in_use)
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
memory_bytes = measure_peak_memory_cpu(func)
memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
if self.args.trace_memory_line_by_line:
summary = stop_memory_tracing(trace)
else:
summary = None
return memory, summary
except RuntimeError as e:
self.print_fn("Doesn't fit on GPU. {}".format(e))
return "N/A", None
| [
"adreambottle@outlook.com"
] | adreambottle@outlook.com |
83cc6528e430a0605e35d04ff62abfd58560ccc2 | 880b6f0b7d92f078ad20b6e3665fd50df280d53b | /specfemio/headers.py | feaf312d88128f0b1e587b679e0f44c1de74cfde | [
"MIT"
] | permissive | code-cullison/pyaspect | 6f704f78708e0bb302c8ff0e20f3927eb9e032ab | def81569d4cde9c2e4c9310b7323b81351dd46f6 | refs/heads/master | 2023-04-17T10:54:09.681644 | 2022-08-15T21:27:34 | 2022-08-15T21:27:34 | 386,649,496 | 0 | 1 | MIT | 2022-08-12T22:15:14 | 2021-07-16T13:38:07 | Jupyter Notebook | UTF-8 | Python | false | false | 43,013 | py | import copy
import importlib
import numpy as np
import pandas as pd
from pyaspect.moment_tensor import MomentTensor
################################################################################
#
# General Header Class
#
################################################################################
class Header(dict):
def __init__(self,name=None):
super(Header,self).__init__()
self['name'] = name
self['comp_val'] = None
def __getattr__(self, key):
if key in self.keys():
return self[key]
else:
raise AttributeError(f'Header: {key} not found' )
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
if key in self.keys():
del self[key]
else:
raise AttributeError(f'Header: {key} not found' )
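    # A Header behaves like a dict with attribute access, e.g. (sketch):
    #   h = Header(name='demo')
    #   h.name == h['name']    # True
    #   h.owner = 'x'          # same as h['owner'] = 'x'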
def __str__(self):
ostr = ''
for key, value in self.items():
ostr += f'{key}: {value}\n'
return ostr
def __hash__(self):
return hash(self.hash_val())
def _is_valid_operand(self, other):
if type(self) != type(other):
raise Exception('wrong types when comparing')
return type(self) == type(other)
def __eq__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return (self.eq_comparator() == other.eq_comparator())
def __ne__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return (self.eq_comparator() != other.eq_comparator())
def __lt__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.comparator() < other.comparator()
def __le__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.comparator() <= other.comparator()
def __gt__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.comparator() > other.comparator()
def __ge__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.comparator() >= other.comparator()
def hash_val(self):
raise NotImplementedError
def comparator(self):
raise NotImplementedError
def eq_comparator(self):
raise NotImplementedError
def copy(self):
return copy.deepcopy(self)
@property
def name(self):
return self['name']
@name.setter
def name(self, value):
self['name'] = value
@property
def comp_val(self):
return self['comp_val']
@comp_val.setter
def comp_val(self, value):
self['comp_val'] = value
################################################################################
#
# General Coordinate Headers for SPECFEM3D files (STATION and SOLUTIONS)
#
################################################################################
class CoordHeader(Header):
def __init__(self,name=None,lat_yc=None,lon_xc=None,depth=None):
super(CoordHeader,self).__init__(name=name)
self['lat_yc'] = lat_yc
self['lon_xc'] = lon_xc
self['depth'] = depth
self['comp_val'] = self.depth
def hash_val(self):
return np.sqrt(self.lon_xc**2 + self.lat_yc**2 + self.depth**2)
def comparator(self):
return self.comp_val
def eq_comparator(self):
return (self.lat_yc, self.lon_xc, self.depth)
@property
def lat_yc(self):
return self['lat_yc']
@lat_yc.setter
def lat_yc(self, value):
self['lat_yc'] = value
@property
def lon_xc(self):
return self['lon_xc']
@lon_xc.setter
def lon_xc(self, value):
self['lon_xc'] = value
@property
def depth(self):
return self['depth']
@depth.setter
def depth(self, value):
self['depth'] = value
################################################################################
#
# Headers for SPECFEM3D files (STATION and SOLUTIONS)
#
################################################################################
class SpecHeader(CoordHeader):
def __init__(self,name=None,lat_yc=None,lon_xc=None,depth=None,proj_id=0,eid=0,sid=0):
super(SpecHeader,self).__init__(name=name,lat_yc=lat_yc,lon_xc=lon_xc,depth=depth)
self['proj_id'] = proj_id
self['eid'] = eid
self['sid'] = sid
def hash_val(self):
sup_hv = super().hash_val()
return np.sqrt(self.proj_id**2 + self.eid**2 + self.sid**2 + sup_hv**2)
#return np.sqrt(self.proj_id**2 + self.eid**2 + self.sid**2 + self.lon_xc**2 + self.lat_yc**2 + self.depth**2)
def eq_comparator(self):
return tuple(list(super().eq_comparator()) + [self.proj_id,self.eid,self.sid])
@property
def proj_id(self):
return self['proj_id']
@proj_id.setter
def proj_id(self, value):
self['proj_id'] = value
    @property
    def eid(self):
        return self['eid']
    @eid.setter
    def eid(self, value):
        self['eid'] = value
@property
def sid(self):
return self['sid']
@sid.setter
def sid(self, value):
self['sid'] = value
################################################################################
#
# STATION Classes: STATION[_ADJOINT] files and headers for SPECFEM3D Cart.
#
################################################################################
class StationHeader(SpecHeader):
def __init__(self,
name=None,
lat_yc=None,
lon_xc=None,
depth=None,
elevation=None,
network=None,
proj_id=0,
eid=0,
sid=0,
trid=0,
gid=0):
super(StationHeader,self).__init__(name=name,lat_yc=lat_yc,lon_xc=lon_xc,depth=depth,proj_id=proj_id,eid=eid,sid=sid)
        if name is not None and 32 < len(name):
raise Exception('Station.name cannot exceed 32 characters')
self['network'] = network
self['elevation'] = elevation
self['trid'] = trid # trace id
self['gid'] = gid # trace group id (trace decomp)
self['data_fqdn'] = None
self.comp_val = self.trid
def hash_val(self):
sup_hv = super().hash_val()
return np.sqrt(self.elevation**2 + self.trid**2 + self.gid**2 + sup_hv**2)
def eq_comparator(self):
return tuple(list(super().eq_comparator()) + [self.elevation])
@classmethod
def from_dict(cls,h_dict):
'''
Alternative constructor StationHeader
This constructor takes a :py:dict: object,
The ``h_dict`` argument must have 'key: value' pairs for:
* ``name``
* ``lat_yc``
* ``lon_xc``
* ``depth``
* ``elevation``
* ``network``
If the following 'key: value' pairs are not specfied, then
they will be set equal to 0:
* ``proj_id``
* ``eid``
* ``sid``
* ``trid``
* ``gid``
All other 'key: value' pairs will be added
'''
if not isinstance(h_dict,dict):
raise TypeError(f'arg: \'h_dict\' must be of type dict')
required_keys = ['name','lat_yc','lon_xc','depth','elevation','network']
# check and get required keys
args_dict = {}
for rkey in required_keys:
if rkey not in h_dict:
raise Exception(f'missing key: \'{rkey}\'')
else:
args_dict[rkey] = h_dict[rkey]
del h_dict[rkey]
# make StationHeader
new_header = StationHeader(**args_dict)
# insert additional key: value pairs
for key in h_dict:
new_header[key] = h_dict[key]
return new_header
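    # Usage sketch (made-up station values; ids default to 0):
    #   sta = StationHeader.from_dict({'name': 'STA01', 'lat_yc': 1000.0,
    #                                  'lon_xc': 2000.0, 'depth': 0.0,
    #                                  'elevation': 10.0, 'network': 'NL'})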
@classmethod
def from_series(cls,h_series):
'''
Alternative constructor StationHeader
This constructor takes a :py:dict: object,
The ``h_series`` argument must have 'column names' for:
* ``name``
* ``lat_yc``
* ``lon_xc``
* ``depth``
* ``elevation``
* ``network``
If the following 'column names' are not specfied, then
their values will be set equal to 0:
* ``proj_id``
* ``eid``
* ``sid``
* ``trid``
* ``gid``
All other 'column names' will also be added
'''
if not isinstance(h_series,pd.Series):
raise TypeError(f'arg: \'h_series\' must be of type panda.Series')
return StationHeader.from_dict(h_series.to_dict())
@property
def network(self):
return self['network']
@network.setter
def network(self, value):
self['network'] = value
@property
def elevation(self):
return self['elevation']
@elevation.setter
def elevation(self, value):
self['elevation'] = value
@property
def trid(self):
return self['trid']
@trid.setter
def trid(self, value):
self['trid'] = value
@property
def gid(self):
return self['gid']
@gid.setter
def gid(self, value):
self['gid'] = value
@property
def data_fqdn(self):
return self['data_fqdn']
@data_fqdn.setter
def data_fqdn(self, value):
self['data_fqdn'] = value
################################################################################
#
# Source Classes: [CMT | FORCE]SOLUTION files and headers for SPECFEM3D Cart.
#
################################################################################
class SolutionHeader(SpecHeader):
def __init__(self,
name=None,
lat_yc=None,
lon_xc=None,
depth=None,
tshift=None,
date=None,
ename=None,
proj_id=0,
eid=0,
sid=0):
super(SolutionHeader,self).__init__(name=name,lat_yc=lat_yc,lon_xc=lon_xc,depth=depth,proj_id=proj_id,eid=eid,sid=sid)
self['tshift'] = tshift
self['date'] = date
self['ename'] = ename
self.comp_val = eid
def hash_val(self):
sup_hv = super().hash_val()
return np.sqrt(self.tshift**2 + sup_hv**2)
def eq_comparator(self):
return tuple(list(super().eq_comparator()) + [self.tshift])
@property
def tshift(self):
return self['tshift']
@tshift.setter
def tshift(self, value):
self['tshift'] = value
@property
def date(self):
return self['date']
@date.setter
def date(self, value):
self['date'] = value
@property
def ename(self):
return self['ename']
@ename.setter
def ename(self, value):
self['ename'] = value
@property
def depth_km(self):
return self['depth']/1000.0
class ForceSolutionHeader(SolutionHeader):
"""
FORCE 001
time shift: 0.0
f0: 0.0
latorUTM: 591000.0
longorUTM: 246000.0
depth: -100
factor force source: 1
component dir vect source E(x): 1
component dir vect source N(y): 0
component dir vect source Z_UP: 0
"""
def __init__(self,
ename=None,
lat_yc=None,
lon_xc=None,
depth=None,
tshift=None,
date=None,
f0=None,
factor_fs=None,
comp_src_EX=None, #FIXME: maybe comp_force_EX?
comp_src_NY=None,
comp_src_Zup=None,
proj_id=0,
eid=0,
sid=0):
#FIXME: make header name (kind of ugley way to do this)
hstr = ForceSolutionHeader.create_header_name(eid=eid,
sid=sid,
date=date,
lat_yc=lat_yc,
lon_xc=lon_xc,
depth=depth)
super(ForceSolutionHeader,self).__init__(name=hstr,
lat_yc=lat_yc,
lon_xc=lon_xc,
depth=depth,
tshift=tshift,
date=date,
ename=ename,
proj_id=proj_id,
eid=eid,
sid=sid)
self['f0'] = f0
self['factor_fs'] = factor_fs
self['comp_src_EX'] = comp_src_EX
self['comp_src_NY'] = comp_src_NY
self['comp_src_Zup'] = comp_src_Zup
def hash_val(self):
return np.sqrt(super().hash_val()**2 + self.f0**2)
def eq_comparator(self):
return tuple([*super().eq_comparator(), self.f0])
@staticmethod
def create_header_name(eid=None,sid=None,date=None,lat_yc=None,lon_xc=None,depth=None):
hstr = f'FORCE ' + str(sid).zfill(3)
hstr += f' {date.year} {date.month} {date.day} {date.hour} {date.minute} {date.second}'
hstr += f' {lat_yc} {lon_xc} {depth/1000} srcid_{eid}'
return hstr
@classmethod
def from_dict(cls,h_dict):
'''
Alternative constructor ForceSolutionHeader
This constructor takes a :py:dict: object,
The ``h_dict`` argument must have 'key: value' pairs for:
* ``ename``
* ``lat_yc``
* ``lon_xc``
* ``depth``
* ``tshift``
* ``date``
* ``f0``
* ``factor_fs``
* ``comp_src_EX``
* ``comp_src_NY``
* ``comp_src_Zup``
If the following 'key: value' pairs are not specfied, then
they will be set equal to 0:
* ``proj_id``
* ``eid``
* ``sid``
All other 'key: value' pairs will be added
'''
if not isinstance(h_dict,dict):
raise TypeError(f'arg: \'h_dict\' must be of type dict')
required_keys = ['ename',
'lat_yc','lon_xc','depth',
'tshift',
'date',
'f0',
'factor_fs',
'comp_src_EX','comp_src_NY','comp_src_Zup']
# check and get required keys
args_dict = {}
for rkey in required_keys:
if rkey not in h_dict:
raise Exception(f'missing key: \'{rkey}\'')
else:
args_dict[rkey] = h_dict[rkey]
del h_dict[rkey]
# make ForceSolutionHeader
new_header = ForceSolutionHeader(**args_dict)
# insert additional key: value pairs
for key in h_dict:
new_header[key] = h_dict[key]
return new_header
@classmethod
def from_series(cls,h_series):
'''
Alternative constructor ForceSolutionHeader
This constructor takes a :py:pandas:Series: object,
The ``h_series`` argument must have 'column names' for:
* ``ename``
* ``lat_yc``
* ``lon_xc``
* ``depth``
* ``tshift``
* ``date``
* ``f0``
* ``factor_fs``
* ``comp_src_EX``
* ``comp_src_NY``
* ``comp_src_Zup``
If the following 'column names' are not specfied, then
their values will be set equal to 0:
* ``proj_id``
* ``eid``
* ``sid``
All other 'column names' will be added
'''
if not isinstance(h_series,pd.Series):
raise TypeError(f'arg: \'h_series\' must be of type pandas.Series')
return ForceSolutionHeader.from_dict(h_series.to_dict())
@property
def f0(self):
return self['f0']
@f0.setter
def f0(self, value):
self['f0'] = value
@property
def factor_fs(self):
return self['factor_fs']
@factor_fs.setter
def factor_fs(self, value):
self['factor_fs'] = value
@property
def comp_src_EX(self):
return self['comp_src_EX']
@comp_src_EX.setter
def comp_src_EX(self, value):
self['comp_src_EX'] = value
@property
def comp_src_NY(self):
return self['comp_src_NY']
@comp_src_NY.setter
def comp_src_NY(self, value):
self['comp_src_NY'] = value
@property
def comp_src_Zup(self):
return self['comp_src_Zup']
@comp_src_Zup.setter
def comp_src_Zup(self, value):
self['comp_src_Zup'] = value
class CMTSolutionHeader(SolutionHeader):
"""
PDE 1999 01 01 00 00 00.00 23000 59000 -25000 4.2 4.2 homog_test
event name: test
time shift: 0.0
half duration: 0.0
latorUTM: 596000.0
longorUTM: 241000.0
depth: -2700
Mrr: 0
Mtt: 0
Mpp: 0
Mrt: 0
Mrp: 0
Mtp: -10000000.0
"""
def __init__(self,
ename=None,
lat_yc=None,
lon_xc=None,
depth=None,
tshift=None,
date=None,
hdur=None,
mt=None,
proj_id=0,
eid=0,
sid=0):
if not isinstance(mt,MomentTensor):
raise TypeError('mt must be of type pyaspect.moment_tensor.MomentTensor')
hstr = f'PDE {date.year} {date.month} {date.day} {date.hour} {date.minute} {date.second}'
hstr += f' {lat_yc} {lon_xc} {depth/1000.0} {mt.magnitude} 0 srcid_{eid}'
super(CMTSolutionHeader,self).__init__(name=hstr,
lat_yc=lat_yc,
lon_xc=lon_xc,
depth=depth,
tshift=tshift,
date=date,
ename=ename,
proj_id=proj_id,
eid=eid,
sid=sid)
self['hdur'] = hdur
self['strike'] = mt.strike
self['dip'] = mt.dip
self['rake'] = mt.rake
self['mw'] = mt.magnitude
self['mrr'] = mt.m6_up_south_east()[0]
self['mtt'] = mt.m6_up_south_east()[1]
self['mpp'] = mt.m6_up_south_east()[2]
self['mrt'] = mt.m6_up_south_east()[3]
self['mrp'] = mt.m6_up_south_east()[4]
self['mtp'] = mt.m6_up_south_east()[5]
'''
self['mrr'] = mt.harvard_dcm_m6()[0]
self['mtt'] = mt.harvard_dcm_m6()[1]
self['mpp'] = mt.harvard_dcm_m6()[2]
self['mrt'] = mt.harvard_dcm_m6()[3]
self['mrp'] = mt.harvard_dcm_m6()[4]
self['mtp'] = mt.harvard_dcm_m6()[5]
'''
def hash_val(self):
hlist = self.mt
hlist.append(self.hdur)
hlist.append(super().hash_val())
        return np.sqrt(np.sum(np.array(hlist)**2))
def eq_comparator(self):
hlist = self.mt
hlist.append(self.hdur)
hlist += [*super().eq_comparator()]
return tuple(hlist)
@classmethod
def from_dict(cls,h_dict):
'''
Alternative constructor CMTSolutionHeader
This constructor takes a :py:dict: object,
The ``h_dict`` argument must have 'key: value' pairs for:
* ``ename``
* ``lat_yc``
* ``lon_xc``
* ``depth``
* ``tshift``
* ``date``
* ``hdur``
* ``mrr``
* ``mtt``
* ``mpp``
* ``mrt``
* ``mrp``
* ``mtp``
If the following 'key: value' pairs are not specfied, then
they will be set equal to 0:
* ``proj_id``
* ``eid``
* ``sid``
All other 'key: value' pairs will be added
'''
if not isinstance(h_dict,dict):
raise TypeError(f'arg: \'h_dict\' must be of type dict')
required_keys = ['ename',
'lat_yc','lon_xc','depth',
'tshift',
'date',
'hdur']
required_mt_keys = ['mrr',
'mtt',
'mpp',
'mrt',
'mrp',
'mtp']
# check and get required keys
args_dict = {}
for rkey in required_keys:
if rkey not in h_dict:
raise Exception(f'missing key: \'{rkey}\'')
else:
args_dict[rkey] = h_dict[rkey]
del h_dict[rkey]
# check and get required moment-tensor keys
mt_args_dict = {}
for rkey in required_mt_keys:
if rkey not in h_dict:
raise Exception(f'missing mt-key: \'{rkey}\'')
else:
mt_args_dict[rkey] = h_dict[rkey]
del h_dict[rkey]
# add moment tensor
'''
args_dict['mt'] = MomentTensor(mw=mt_args_dict['mw'],
strike=mt_args_dict['strike'],
dip=mt_args_dict['dip'],
rake=mt_args_dict['rake'])
'''
mrr = mt_args_dict['mrr']
mtt = mt_args_dict['mtt']
mpp = mt_args_dict['mpp']
mrt = mt_args_dict['mrt']
mrp = mt_args_dict['mrp']
mtp = mt_args_dict['mtp']
h_matrix = np.array([[mrr,mrt,mrp],[mrt,mtt,mtp],[mrp,mtp,mpp]])
args_dict['mt'] = MomentTensor(m_up_south_east=h_matrix)
# make CMTSolutionHeader
new_header = CMTSolutionHeader(**args_dict)
# insert additional key: value pairs
for key in h_dict:
new_header[key] = h_dict[key]
return new_header
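    # Usage sketch (made-up values; 'date' must be a datetime-like object
    # exposing year/month/day/hour/minute/second):
    #   cmt = CMTSolutionHeader.from_dict(dict(ename='ev0', lat_yc=0.0,
    #       lon_xc=0.0, depth=2700.0, tshift=0.0, date=my_datetime, hdur=0.0,
    #       mrr=0, mtt=0, mpp=0, mrt=0, mrp=0, mtp=-1e7))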
@classmethod
def from_series(cls,h_series):
'''
Alternative constructor CMTSolutionHeader
This constructor takes a :py:pandas:Series: object,
The ``h_series`` argument must have 'column names' for:
* ``ename``
* ``lat_yc``
* ``lon_xc``
* ``depth``
* ``tshift``
* ``date``
* ``hdur``
* ``strike``
* ``dip``
* ``rake``
* ``mw``
If the following 'column names' are not specfied, then
their values will be set equal to 0:
* ``proj_id``
* ``eid``
* ``sid``
All other 'column names' will be added
'''
if not isinstance(h_series,pd.Series):
raise TypeError(f'arg: \'h_series\' must be of type pandas.Series')
return CMTSolutionHeader.from_dict(h_series.to_dict())
@property
def date(self):
return self['date']
@date.setter
def date(self, value):
self['date'] = value
@property
def hdur(self):
return self['hdur']
@hdur.setter
def hdur(self, value):
self['hdur'] = value
@property
def mt(self):
return [self.mrr,self.mtt,self.mpp,self.mrt,self.mrp,self.mtp]
@property
def strike(self):
return self['strike']
@strike.setter
def strike(self, value):
self['strike'] = value
@property
def dip(self):
return self['dip']
@dip.setter
def dip(self, value):
self['dip'] = value
@property
def rake(self):
return self['rake']
@rake.setter
def rake(self, value):
self['rake'] = value
#no setter for Mw but there is for mw
@property
def Mw(self):
return self['mw']
@property
def mw(self):
return self['mw']
@mw.setter
def mw(self, value):
self['mw'] = value
@property
def mrr(self):
return self['mrr']
@mrr.setter
def mrr(self, value):
self['mrr'] = value
@property
def mtt(self):
return self['mtt']
@mtt.setter
def mtt(self, value):
self['mtt'] = value
@property
def mpp(self):
return self['mpp']
@mpp.setter
def mpp(self, value):
self['mpp'] = value
@property
def mrt(self):
return self['mrt']
@mrt.setter
def mrt(self, value):
self['mrt'] = value
@property
def mrp(self):
return self['mrp']
@mrp.setter
def mrp(self, value):
self['mrp'] = value
@property
def mtp(self):
return self['mtp']
@mtp.setter
def mtp(self, value):
self['mtp'] = value
################################################################################
#
# Record Header for pairing SPECFEM SOLUTIONS with STATIONS
#
################################################################################
#TODO this is the actual record. I need the header, then make record.py
class RecordHeader(Header):
def __init__(self,name=None,solutions_h=None,stations_h=None,proj_id=0,rid=0,iter_id=0,is_reciprocal=False):
super(RecordHeader,self).__init__(name=name)
l_solutions_h = solutions_h
if not isinstance(solutions_h,list):
l_solutions_h = [solutions_h]
        check_all = all(isinstance(s,SolutionHeader) for s in l_solutions_h)
if not check_all:
raise Exception('elements in arg: \'solutions_h\' must be of type SolutionHeader')
check_s = l_solutions_h[0]
check_all = all(type(s) == type(check_s) for s in l_solutions_h)
if not check_all:
            raise Exception(f'all elements in arg: \'solutions_h\' must be of type {type(check_s)}')
l_stations_h = stations_h
if not isinstance(stations_h,list):
l_stations_h = [stations_h]
check_all = all(isinstance(s,StationHeader) for s in l_stations_h)
if not check_all:
raise Exception('elements in arg: \'stations_h\' must be of type StationHeader')
self.comp_val = rid
self['proj_id'] = proj_id
self['rid'] = rid
self['iter_id'] = iter_id
self['is_reciprocal'] = is_reciprocal
#FIXME: see below. is this a good/safe trick?
self._solution_mod_name = l_solutions_h[0].__module__
self._solution_cls_name = l_solutions_h[0].__class__.__name__
self._station_mod_name = l_stations_h[0].__module__
self._station_cls_name = l_stations_h[0].__class__.__name__
self['default_solu_midx'] = ('eid','sid')
self['default_stat_midx'] = ('eid','sid','trid','gid')
smidx = list(self['default_solu_midx'])
rmidx = list(self['default_stat_midx'])
self['solutions_df'] = pd.DataFrame.from_records(l_solutions_h, index=smidx)
self['stations_df'] = pd.DataFrame.from_records(l_stations_h, index=rmidx)
l_nsrc_idx = []
for idx, df in self.solutions_df.groupby(level='eid'):
nsids = df.index.get_level_values('sid').unique()
l_nsrc_idx.append(nsids)
l_nrec_idx = []
for idx, df in self.stations_df.groupby(level='eid'):
nsids = df.index.get_level_values('sid').unique()
l_nrec_idx.append(nsids)
if len(l_nsrc_idx) != len(l_nrec_idx):
raise Exception('Number of events does not match between Solutions and Stations')
self['nevents'] = len(l_nsrc_idx)
#use different num_sids so that they can be checked
ns_solu = None
ns_stat = None
prev_solu = len(l_nsrc_idx[0])
for ie in range(len(l_nsrc_idx)):
ns_solu = len(l_nsrc_idx[ie])
ns_stat = len(l_nrec_idx[ie])
if ns_solu != ns_stat:
raise Exception(f'For event-{ie}: number of sid\'s differs between Solutions and Stations')
#this check enforces that "batch" src size is the same per event
if ns_solu != prev_solu:
raise Exception(f'Number sid\'s differs between events')
for isrc in range(len(l_nsrc_idx[ie])):
if l_nsrc_idx[ie][isrc] != l_nrec_idx[ie][isrc]:
raise Exception('src-id mismatch between Solutions and Stations')
if not isinstance(ns_solu,int):
raise Exception('class field "ns_solu" must be an int type')
self['nsrc'] = ns_solu
self['added_solution_header_words'] = []
self['added_station_header_words'] = []
def hash_val(self):
return np.sqrt(self.proj_id**2 + self.rid**2 + self.iter_id**2)
def comparator(self):
return self.comp_val
def eq_comparator(self,other):
are_solutions_eq = self.solutions_df.equals(other.solutions_df)
are_stations_eq = self.stations_df.equals(other.stations_df)
are_ids_eq = (self.proj_id == other.proj_id and
self.rid == other.rid and
self.iter_id == other.iter_id)
return are_solutions_eq and are_stations_eq and are_ids_eq
def __eq__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
        return self.eq_comparator(other)
def __ne__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return (not self.eq_comparator(other))
def __str__(self):
out_str = f'Solution Header(s):\n{self.solutions_df}\n\n'
out_str += f'Station Header(s):\n {self.stations_df}'
return out_str
def __repr__(self):
out_str = f'Solution Header(s):\n{self.solutions_df.__repr__()}\n\n'
out_str += f'Station Header(s):\n {self.stations_df.__repr__()}'
return out_str
# Helper function turns a sequence of boolean indices into
# a single boolean index for a dataframe
def _slice_to_bool(self,l_slice):
if not isinstance(l_slice,list):
raise Exception('arugment must be a list type')
if len(l_slice) == 0:
raise Exception('arugment must be a list of at least len=1')
if len(l_slice) == 1:
return l_slice[0]
else:
ibool = l_slice[0]
for i in range(1,len(l_slice)):
ibool = ibool & l_slice[i]
return ibool
# Helper function needed for make dataframe slices that return
# another dataframe instead of a Series even if only one row is
# the result of the slice
def _convert_slice(self,kslice,smax):
start = kslice.start
stop = kslice.stop
step = kslice.step
if kslice.start == None:
start = 0
if kslice.stop == None:
stop = smax
if kslice.step == None:
step = 1
return slice(start,stop,step)
# Helper function for slicing the dataframes
# Assumes that solutions_df has 2D multiindex: order=[eid,sid]
# Assumes that stations_df has 4D multiindex: order=[eid,sid,trid,gid]
def _get_df_slice_index(self,kslice,df,is_stations=False):
_df = df.copy()
nevents = _df.index.get_level_values('eid').nunique()
nsrcs = _df.index.get_level_values('sid').nunique()
ntrs = None
ngids = None
if is_stations:
ntrs = _df.index.get_level_values('trid').nunique()
ngids = _df.index.get_level_values('gid').nunique()
_df.reset_index(inplace=True)
l_slice = []
if isinstance(kslice,tuple):
if isinstance(kslice[0],int):
if kslice[0] < 0:
raise KeyError(kslice[0])
l_slice.append((_df['eid'] == kslice[0]))
else:
s = self._convert_slice(kslice[0],nevents)
l_slice.append( ((_df['eid'] >= s.start) &
(_df['eid'] < s.stop) &
(_df['eid']%s.step == 0)) )
if isinstance(kslice[1],int):
if kslice[1] < 0:
raise KeyError(kslice[1])
l_slice.append((_df['sid'] == kslice[1]))
else:
s = self._convert_slice(kslice[1],nsrcs)
l_slice.append( ((_df['sid'] >= s.start) &
(_df['sid'] < s.stop) &
(_df['sid']%s.step == 0)) )
if 3 <= len(kslice):
if isinstance(kslice[2],int):
if kslice[2] < 0:
raise KeyError(kslice[2])
l_slice.append((_df['trid'] == kslice[2]))
else:
s = self._convert_slice(kslice[2],ntrs)
l_slice.append( ((_df['trid'] >= s.start) &
(_df['trid'] < s.stop) &
(_df['trid']%s.step == 0)) )
if 4 == len(kslice):
if isinstance(kslice[3],int):
if kslice[3] < 0:
raise KeyError(kslice[3])
l_slice.append((_df['gid'] == kslice[3]))
else:
s = self._convert_slice(kslice[3],ngids)
l_slice.append( ((_df['gid'] >= s.start) &
(_df['gid'] < s.stop) &
(_df['gid']%s.step == 0)) )
if 4 < len(kslice):
raise Exception('too many indexers')
elif isinstance(kslice,int):
if kslice < 0:
raise KeyError(kslice)
l_slice.append((_df['eid'] == kslice))
elif isinstance(kslice,slice):
s = self._convert_slice(kslice,nevents)
l_slice.append(((_df['eid'] >= s.start) &
(_df['eid'] < s.stop) &
(_df['eid']%s.step == 0)))
else:
raise TypeError('wrong indexer type')
return self._slice_to_bool(l_slice)
# helper funciton for slicing the solutions_df
def _get_solutions_df_slice_index(self,kslice):
if isinstance(kslice,tuple):
if len(kslice) < 2 or 4 < len(kslice):
raise Exception('too many indexers for solutions_df')
else:
return self._get_df_slice_index(kslice[0:2],self.solutions_df,is_stations=False)
else:
return self._get_df_slice_index(kslice,self.solutions_df,is_stations=False)
# helper funciton for slicing the stations_df
def _get_stations_df_slice_index(self,kslice):
if isinstance(kslice,tuple):
if len(kslice) < 2 or 4 < len(kslice):
raise Exception('incorrect number of indexers for stations_df indexer')
else:
return self._get_df_slice_index(kslice,self.stations_df,is_stations=True)
else:
return self._get_df_slice_index(kslice,self.stations_df,is_stations=True)
# This will act like a dictionary if kslice is a string.
# Otherwise, it will act like slicing and return a NEW
# "sliced" RecordHeader
def __getitem__(self, kslice):
if isinstance(kslice, str):
return super(RecordHeader, self).__getitem__(kslice)
# get sliced dataframes
solu_slice_idx = self._get_solutions_df_slice_index(kslice)
stat_slice_idx = self._get_stations_df_slice_index(kslice)
c_solu_df = self.solutions_df.reset_index()[solu_slice_idx]
c_stat_df = self.stations_df.reset_index()[stat_slice_idx]
# make list of solution headers
#HeaderCls = self._get_header_class(is_stations=False)
HeaderCls = self.solution_cls
'''
print(f'IN RecordHeader: check c_solution_df type:{type(c_solu_df)}')
print(f'IN RecordHeader: _solution_mod_name:{self._solution_mod_name}')
print(f'IN RecordHeader: _solution_cls_name:{self._solution_cls_name}')
print(f'IN RecordHeader: SolHeaderCls:{HeaderCls}')
print(f'IN RecordHeader: isinstance(SolHeaderCls):{isinstance(HeaderCls,CMTSolutionHeader)}')
if self._solution_cls_name == 'CMTSolutionHeader':
print('***IN -- IS-Type')
for index, row in c_solu_df.iterrows():
print(f'index,row["mw"]:\n{index}\n{row["mw"]}')
'''
slice_sol_h = [HeaderCls.from_series(row) for index, row in c_solu_df.iterrows()]
# make list of station headers
#HeaderCls = self._get_header_class(is_stations=True)
HeaderCls = self.station_cls
slice_stat_h = [HeaderCls.from_series(row) for index, row in c_stat_df.iterrows()]
#make new record which is like it had been sliced
return RecordHeader(name=self.name,
solutions_h=slice_sol_h,
stations_h=slice_stat_h,
proj_id=self.proj_id,
rid=self.rid,
iter_id=self.iter_id)
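    # Usage sketch (hypothetical header lists; integer/slice indexers are
    # interpreted as (eid, sid[, trid[, gid]])):
    #   rh = RecordHeader(name='rec0', solutions_h=sols, stations_h=stats)
    #   rh['name']        # plain dict lookup
    #   rh[0, :]          # new RecordHeader: event 0, all sources
    #   rh[0:2, 1, 0:10]  # events 0-1, source 1, first 10 traces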
# Helper function returns a "default-index" dataframe
# i.e. flattens the dataframe index -> multiindex-cols -> data-cols
def _get_reset_df(self,key=None,value=None,is_stations=True):
c_df = None
if is_stations:
c_df = self.stations_df.copy()
else:
c_df = self.solutions_df.copy()
c_df.reset_index(inplace=True)
if key != None:
if key not in c_df.columns:
raise KeyError('key: {key} is not a column in stations_df')
#FIXME: find a way to check dtype of value and those in columns
c_df = c_df.loc[c_df[key] == value]
return c_df
def _get_header_class(self,is_stations=True):
HeaderCls = None
if is_stations:
HeaderCls = getattr(importlib.import_module(self._station_mod_name), self._station_cls_name)
else:
HeaderCls = getattr(importlib.import_module(self._solution_mod_name), self._solution_cls_name)
return HeaderCls
def _get_list_from_df(self, key=None, value=None, is_stations=True):
c_df = self._get_reset_df(key=key,value=value,is_stations=is_stations)
#FIXME: Q. Is this a good trick or an ugly trick?
# A. It's a trick.
HeaderCls = self._get_header_class(is_stations=is_stations)
header_list = [HeaderCls.from_series(row) for index, row in c_df.iterrows()]
del c_df
return header_list
def get_solutions_header_list(self, key=None, value=None):
return self._get_list_from_df(key=key,value=value,is_stations=False)
def get_stations_header_list(self, key=None, value=None):
return self._get_list_from_df(key=key,value=value,is_stations=True)
def _add_header_word(self, key, h_values, is_stations=True):
if not isinstance(key,str):
raise ValueError('arg: \'key\' must be a str type')
if not isinstance(h_values,list):
raise ValueError('arg: \'h_values\' must be a list type')
if is_stations:
if len(h_values) != len(self.stations_df.index):
raise Exception('len(\'h_values\') must equal number of stations')
self.added_station_header_words.append(key)
self.stations_df[key] = h_values
else:
if len(h_values) != len(self.solutions_df.index):
raise Exception('len(\'h_values\') must equal number of solutions')
self.added_solution_header_words.append(key)
self.solutions_df[key] = h_values
def add_station_header_word(self, key, h_values):
self._add_header_word(key=key,h_values=h_values,is_stations=True)
def add_solution_header_word(self, key, h_values):
self._add_header_word(key=key,h_values=h_values,is_stations=False)
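    # Usage sketch: one value per dataframe row, e.g. tagging every solution
    # with a hypothetical peak-frequency column:
    #   rh.add_solution_header_word('f_max', [10.0]*len(rh.solutions_df.index))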
def get_event_nsolutions(self,ievent):
idx = pd.IndexSlice
        ns_solu = self.solutions_df.loc[idx[ievent,:],:].index.get_level_values('sid').nunique()
        ns_stat = self.stations_df.loc[idx[ievent,:,:,:],:].index.get_level_values('sid').nunique()
if ns_stat != ns_solu:
raise Exception(f'For event-{ievent}: number of sid\'s differs between Solutions and Stations')
return ns_stat
def reset_midx(self):
self.reset_solutions_midx()
self.reset_stations_midx()
def reset_solutions_midx(self):
self.solutions_df.reset_index(inplace=True)
def reset_stations_midx(self):
self.stations_df.reset_index(inplace=True)
def set_default_midx(self):
self.set_default_solutions_midx()
self.set_default_stations_midx()
def set_default_solutions_midx(self):
smidx = list(self['default_solu_midx'])
self.solutions_df.set_index(smidx,inplace=True)
def set_default_stations_midx(self):
rmidx = list(self['default_stat_midx'])
self.stations_df.set_index(rmidx,inplace=True)
@property
def proj_id(self):
return self['proj_id']
@property
def rid(self):
return self['rid']
@property
def iter_id(self):
return self['iter_id']
@property
def is_reciprocal(self):
return self['is_reciprocal']
@is_reciprocal.setter
def is_reciprocal(self, value):
self['is_reciprocal'] = value
@property
def solution_cls(self):
return self._get_header_class(is_stations=False)
@property
def station_cls(self):
return self._get_header_class(is_stations=True)
@property
def solutions_df(self):
return self['solutions_df']
@property
def stations_df(self):
return self['stations_df']
@property
def added_solution_header_words(self):
return self['added_solution_header_words']
@property
def added_station_header_words(self):
return self['added_station_header_words']
@property
def nevents(self):
return self['nevents']
@property
def nsrc(self):
return self['nsrc']
| [
"iamagoofymonkey@gmail.com"
] | iamagoofymonkey@gmail.com |
85f269b48f07a744203cc249f5dd3ef1050b583f | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/resources/azure-mgmt-msi/azure/mgmt/msi/_managed_service_identity_client.py | 46a47731fc02b8a9f4b3bc9caa1601be24b04373 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 10,721 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from ._configuration import ManagedServiceIdentityClientConfiguration
from ._serialization import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class ManagedServiceIdentityClient(MultiApiClientMixin, _SDKClient):
"""The Managed Service Identity Client.
This ready contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Id of the Subscription to which the identity belongs. Required.
:type subscription_id: str
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
"""
DEFAULT_API_VERSION = '2023-01-31'
_PROFILE_TAG = "azure.mgmt.msi.ManagedServiceIdentityClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
api_version: Optional[str]=None,
base_url: str = "https://management.azure.com",
profile: KnownProfiles=KnownProfiles.default,
**kwargs: Any
):
self._config = ManagedServiceIdentityClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(ManagedServiceIdentityClient, self).__init__(
api_version=api_version,
profile=profile
)
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2018-11-30: :mod:`v2018_11_30.models<azure.mgmt.msi.v2018_11_30.models>`
* 2021-09-30-preview: :mod:`v2021_09_30_preview.models<azure.mgmt.msi.v2021_09_30_preview.models>`
* 2022-01-31-preview: :mod:`v2022_01_31_preview.models<azure.mgmt.msi.v2022_01_31_preview.models>`
* 2023-01-31: :mod:`v2023_01_31.models<azure.mgmt.msi.v2023_01_31.models>`
"""
if api_version == '2018-11-30':
from .v2018_11_30 import models
return models
elif api_version == '2021-09-30-preview':
from .v2021_09_30_preview import models
return models
elif api_version == '2022-01-31-preview':
from .v2022_01_31_preview import models
return models
elif api_version == '2023-01-31':
from .v2023_01_31 import models
return models
raise ValueError("API version {} is not available".format(api_version))
@property
def federated_identity_credentials(self):
"""Instance depends on the API version:
* 2022-01-31-preview: :class:`FederatedIdentityCredentialsOperations<azure.mgmt.msi.v2022_01_31_preview.operations.FederatedIdentityCredentialsOperations>`
* 2023-01-31: :class:`FederatedIdentityCredentialsOperations<azure.mgmt.msi.v2023_01_31.operations.FederatedIdentityCredentialsOperations>`
"""
api_version = self._get_api_version('federated_identity_credentials')
if api_version == '2022-01-31-preview':
from .v2022_01_31_preview.operations import FederatedIdentityCredentialsOperations as OperationClass
elif api_version == '2023-01-31':
from .v2023_01_31.operations import FederatedIdentityCredentialsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'federated_identity_credentials'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def operations(self):
"""Instance depends on the API version:
* 2018-11-30: :class:`Operations<azure.mgmt.msi.v2018_11_30.operations.Operations>`
* 2021-09-30-preview: :class:`Operations<azure.mgmt.msi.v2021_09_30_preview.operations.Operations>`
* 2022-01-31-preview: :class:`Operations<azure.mgmt.msi.v2022_01_31_preview.operations.Operations>`
* 2023-01-31: :class:`Operations<azure.mgmt.msi.v2023_01_31.operations.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2018-11-30':
from .v2018_11_30.operations import Operations as OperationClass
elif api_version == '2021-09-30-preview':
from .v2021_09_30_preview.operations import Operations as OperationClass
elif api_version == '2022-01-31-preview':
from .v2022_01_31_preview.operations import Operations as OperationClass
elif api_version == '2023-01-31':
from .v2023_01_31.operations import Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operations'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def system_assigned_identities(self):
"""Instance depends on the API version:
* 2018-11-30: :class:`SystemAssignedIdentitiesOperations<azure.mgmt.msi.v2018_11_30.operations.SystemAssignedIdentitiesOperations>`
* 2021-09-30-preview: :class:`SystemAssignedIdentitiesOperations<azure.mgmt.msi.v2021_09_30_preview.operations.SystemAssignedIdentitiesOperations>`
* 2022-01-31-preview: :class:`SystemAssignedIdentitiesOperations<azure.mgmt.msi.v2022_01_31_preview.operations.SystemAssignedIdentitiesOperations>`
* 2023-01-31: :class:`SystemAssignedIdentitiesOperations<azure.mgmt.msi.v2023_01_31.operations.SystemAssignedIdentitiesOperations>`
"""
api_version = self._get_api_version('system_assigned_identities')
if api_version == '2018-11-30':
from .v2018_11_30.operations import SystemAssignedIdentitiesOperations as OperationClass
elif api_version == '2021-09-30-preview':
from .v2021_09_30_preview.operations import SystemAssignedIdentitiesOperations as OperationClass
elif api_version == '2022-01-31-preview':
from .v2022_01_31_preview.operations import SystemAssignedIdentitiesOperations as OperationClass
elif api_version == '2023-01-31':
from .v2023_01_31.operations import SystemAssignedIdentitiesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'system_assigned_identities'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def user_assigned_identities(self):
"""Instance depends on the API version:
* 2018-11-30: :class:`UserAssignedIdentitiesOperations<azure.mgmt.msi.v2018_11_30.operations.UserAssignedIdentitiesOperations>`
* 2021-09-30-preview: :class:`UserAssignedIdentitiesOperations<azure.mgmt.msi.v2021_09_30_preview.operations.UserAssignedIdentitiesOperations>`
* 2022-01-31-preview: :class:`UserAssignedIdentitiesOperations<azure.mgmt.msi.v2022_01_31_preview.operations.UserAssignedIdentitiesOperations>`
* 2023-01-31: :class:`UserAssignedIdentitiesOperations<azure.mgmt.msi.v2023_01_31.operations.UserAssignedIdentitiesOperations>`
"""
api_version = self._get_api_version('user_assigned_identities')
if api_version == '2018-11-30':
from .v2018_11_30.operations import UserAssignedIdentitiesOperations as OperationClass
elif api_version == '2021-09-30-preview':
from .v2021_09_30_preview.operations import UserAssignedIdentitiesOperations as OperationClass
elif api_version == '2022-01-31-preview':
from .v2022_01_31_preview.operations import UserAssignedIdentitiesOperations as OperationClass
elif api_version == '2023-01-31':
from .v2023_01_31.operations import UserAssignedIdentitiesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'user_assigned_identities'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
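# Usage sketch (the credential comes from the separate azure-identity
# package; the subscription id is a placeholder):
#   from azure.identity import DefaultAzureCredential
#   client = ManagedServiceIdentityClient(DefaultAzureCredential(), "<subscription-id>")
#   for identity in client.user_assigned_identities.list_by_subscription():
#       print(identity.name)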
| [
"noreply@github.com"
] | Azure.noreply@github.com |
f7e6341da4bc292e5510cb94accb12cdaf6b6f82 | 9b014785a72075980509c95e1cccd7bef23f677a | /source/MultiAgent/Regular/QLearning.py | 03a9b1cd25dd0cf78b226c21de06021e08d48273 | [] | no_license | camielv/UvA-MasterAI-AA | 65ad4f0d7120407989c1cb28010f6584aee5571f | a1bc1f82f2824055d3adcd0c33105556aa4099a8 | refs/heads/master | 2021-01-21T12:26:43.782401 | 2012-10-21T20:52:42 | 2012-10-21T20:52:42 | 5,686,236 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | # Assignment: MultiAgent Planning/Learning
# Course: Autonomous Agents 2012-2013
# Education: Master Artificial Intelligence
# By: Steven Laan
# Auke Wiggers
# Camiel Verschoor
#
# File: QLearning.py
# Description: Implementation of Q-learning
from itertools import izip
from collections import defaultdict
import numpy
argmax = lambda d: max( izip( d.itervalues(), d.iterkeys() ) )[1]
class QLearning():
'''
Implementation of functions related to Q-learning.
'''
def __init__(self,
Agent,
alpha,
gamma,
epsilon):
'''
        Initialize the Q-table; every (state, action) value defaults to 0.0.
'''
# Set the agent for this QLearning session
self.Agent = Agent
self.alpha = alpha
self.gamma = gamma
self.epsilon = epsilon
default = lambda : dict(zip([action for action in self.Agent.actions],
[numpy.float(0.0) for action in self.Agent.actions]))
self.Q = defaultdict(default)
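        # After construction every Q[s][a] starts at 0.0, e.g. (sketch):
        #   q = QLearning(agent, alpha=0.1, gamma=0.9, epsilon=0.1)
        #   q.Q[some_state]   # {action: 0.0 for each action of the agent}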
def updateQ(self, s, a, s_prime, r):
'''
Perform one step for this agent for a given state s. Action, resulting
state s_prime, and observed reward r are also given.
'''
max_Q = self.Q[s_prime][argmax( self.Q[s_prime] )]
# Update Q. Q[s][a] should already be known to us.
self.Q[s][a] += self.alpha * (r + self.gamma * max_Q - self.Q[s][a]) | [
"wiggers.auke@gmail.com"
] | wiggers.auke@gmail.com |
7d8b4d71d0fd044530ce9b323e6fe0fae7e27d31 | 5f42b679f5a045f58a2434aeb7d28ea424a3e6d9 | /image_manager/manager.py | a84ded99a899e507b485302e94cdb77a01d36ff2 | [] | no_license | AddMob/django-image-manager | 4e623cde149c33e18943e5938dca92edfba528e2 | 43693f7ec7a1ea3524c42ed4c263a563523f6ef4 | refs/heads/master | 2020-04-03T17:51:16.369024 | 2013-07-11T23:04:09 | 2013-07-11T23:04:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | #coding: utf-8
from django.conf import settings
import glob
import os
def add(image, address, name):
try:
path_dir = settings.MEDIA_IMAGES + address
        image_name, image_type = name.rsplit('.', 1)  # tolerate dots in the base name
path = path_dir + '/' + image_name
if not os.path.exists(path_dir):
os.makedirs(path_dir)
files = glob.glob(path + '*')
len_files = len(files)
if (len_files >= 1):
path += '_' + str(len_files + 1) + '.' + image_type
else:
path += '_1.' + image_type
destination = open(path, 'wb+')
for chunk in image.chunks():
destination.write(chunk)
destination.close()
return True
    except Exception:
return False
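# The naming scheme above suffixes a counter per upload, so repeated calls
# such as (sketch, with a Django upload object):
#   add(uploaded_file, 'user42', 'photo.jpg')
# store photo_1.jpg, photo_2.jpg, ... under MEDIA_IMAGES + 'user42'.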
def load(address, name):
path_image = settings.MEDIA_IMAGES + address + '/' + name
if os.path.isfile(path_image):
return file(path_image, 'rb').read()
else:
return None
def list(address, name):
path_image = settings.MEDIA_IMAGES + address + '/' + name
images = glob.glob(path_image + '*')
list_images = []
for image in images:
list_images.append(image.split('/' + address + '/')[1])
return list_images
def delete(address, name):
path_image = settings.MEDIA_IMAGES + address + '/' + name
if os.path.isfile(path_image):
os.remove(path_image)
return True
return False | [
"charles.garrocho@gmail.com"
] | charles.garrocho@gmail.com |
5ee148fc1bf9ebadfcb9c8ca1374d2effa2d2039 | 72540dd6c566b62024995ed15082cf828f613a66 | /getstatus.py | fb76de5856e5247f2f8f3a81627e0bdb722d7753 | [
"MIT"
] | permissive | schwark/smartthings-hunterdouglasplatinum | 61a182206572c43c939dc6b3d97fdf497dfb57e8 | e9826f3bec648ac41c5e18422da127eca2ebbd29 | refs/heads/master | 2021-06-01T16:00:48.892429 | 2020-01-23T20:33:55 | 2020-01-23T20:33:55 | 40,951,612 | 9 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | import socket
import sys
HD_GATEWAY_PORT = 522
TIMEOUT = 10
def create_socket(server):
    sock = None
    try:
        sock = socket.create_connection((server, HD_GATEWAY_PORT), timeout=TIMEOUT)
        # the gateway greets new connections; consume up to the banner text
        helo = recv_until(sock, 'Shade Controller')
    except socket.error:
        if sock:
            sock.close()
        sock = None
    return sock
def socket_com(server, message, sentinel=None, sock=None):
content = None
try:
        if not sock:
            sock = create_socket(server)
        if not sock:
            return None
        sock.sendall(message)
content = recv_until(sock, sentinel)
except socket.error:
pass
finally:
if sock:
sock.close()
return content
def recv_until(sock, sentinel=None):
info = ""
while True:
try:
chunk = sock.recv(1)
except socket.timeout:
break
info += chunk
        if sentinel and info.endswith(sentinel): break
if not chunk: break
return info
def get_status(server):
return socket_com(server, "$dat", "upd01-")
def main():
if(len(sys.argv) < 2):
print "Usage: ",sys.argv[0],"<gateway-ip>"
else:
print get_status(sys.argv[1])
if __name__ == "__main__":
main()
| [
"schwark@alum.rpi.edu"
] | schwark@alum.rpi.edu |
787e695b02a9594ddb9c2cb122ed87808a9100f6 | 5bd46410c2dc4113e4003ea535e65b93da44dc4a | /HW4/test.py | b44858d6192361875544b9ae2677bd24d51e70b9 | [] | no_license | RyanHiatt/Becca_HW | 70fa6a3d8673384ce1a4138ba22bde9caf07f1d6 | 177b5a6a3ca146439de980f7d540c08394653e36 | refs/heads/master | 2023-04-19T22:14:25.018707 | 2021-05-05T01:09:52 | 2021-05-05T01:09:52 | 362,554,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | import numpy as np
power = 1
A = np.zeros((power + 1, power + 1)) # make a square array of zeros
b = np.zeros(power + 1)
print(A)
print(b)
| [
"rhiatt614@gmail.com"
] | rhiatt614@gmail.com |
a298ee76966af957524f81072e00c40d7a0df10e | f6d3a6562f18e1a8ec14d038b228dc43652a3b35 | /早期/learning python.py | 59ef362f99f720a3ad3ae528ac145b5413679e0f | [] | no_license | huangno27/learn | be19a65744b588b296961572e8a5292f1230332f | 93fe784a3127e76995e9ae018605efbe78238385 | refs/heads/master | 2020-04-26T12:52:07.692473 | 2019-03-10T10:31:55 | 2019-03-10T10:31:55 | 173,563,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | #!/use/bin/env python 3.7
def in_fridge(fridge, wanted_food):
    # Return how many of wanted_food are in the fridge dict, 0 if absent.
    # (The original read fridge/wanted_food as undefined globals.)
    try:
        count = fridge[wanted_food]
    except KeyError:
        count = 0
    return count
import random
secret = random.randint(1,10)
temp = input("猜猜看:")
guess = int(temp)
while guess != secret:
temp = input("猜错了,再来一次:")
guess = int(temp)
if guess == secret:
print('对哦')
print('聪明')
else:
if guess >=secret:
print('错错错,大大大!')
else:
print('错错错,小小小!')
print('GAME OVER')
strs = ("1", "2", "3")
def test(name):
if strs[0] == name:
print("First: " + strs[0])
elif strs[1] == name:
print("Second: " + strs[1])
else:
print(strs[2])
| [
"13436918611@163.com"
] | 13436918611@163.com |
ad71d0b4b6f72098b48bf452ac4deb043da2229f | a6b91d57b9f002bd1b75a6ba5ac2987246a2aa07 | /src/AggSilver_Price_mean.py | aa37e3fa0e549f774f694c0f8ceda0f8e5393553 | [] | no_license | teketekere/avito18 | 15badea9d72a2f0dd1e3c4e467e492801e343808 | c427a2ed9b4beaf57a3fa6106254d5f0181cc860 | refs/heads/master | 2020-03-17T14:05:54.149007 | 2018-06-28T00:14:40 | 2018-06-28T00:14:40 | 133,658,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,012 | py | import pandas as pd
import time
import gc
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler as SS
import itertools
import pickle
from copy import deepcopy
from collections import Counter
from tqdm import tqdm
from myutils import timer, reduce_mem_usage
categorical = ["user_id","region","city","parent_category_name","category_name","user_type","param_1","param_2","param_3"]
categorical_ex = categorical + ['param_123', 'weekofday']
aggfeats = ['region', 'city', 'parent_category_name', 'category_name', 'user_type', 'weekofday', 'param_1', 'param_123']
nonaggfeats = list(set(categorical_ex) - set(aggfeats))
lentrain = 1503424
lentest = 508438
lentrainactive = 14129821
lentestactive = 12824068
def count(df, group_cols, suffix='numcount', agg_type='uint32'):
aggname = '_'.join(group_cols) + '_' + suffix
gp = df[group_cols].groupby(group_cols).size().rename(aggname).to_frame().reset_index()
df = df.merge(gp, on=group_cols, how='left')
df[aggname] = df[aggname].astype(agg_type)
del gp; gc.collect()
return df
def scale_standard(df, ignorecols=[]):
for col in df.columns:
if not col in ignorecols:
df[col] = (SS().fit_transform(df[col].values.reshape(-1, 1))).flatten()
return df
def get_smalldiff(df1, df2):
    # Keep only the ~20% of columns whose distributions differ least between
    # the two frames (by standardized |mean|, |var| and |median| gaps) -- a
    # simple guard against train/test drift in the aggregated features.
    assert (df1.columns == df2.columns).all(), 'inputs must have same columns'
    th = np.uint(df1.shape[1] * 0.2)
criteria = {'mean': np.mean, 'var': np.var, 'median': np.median}
difflist = []
tempdiff = pd.DataFrame()
tempdiff['colname'] = [col for col in df1.columns]
for k, c in criteria.items():
tempdiff[k] = [np.abs(c(df1[col]) - c(df2[col])) for col in df1.columns]
tempdiff = scale_standard(tempdiff, ignorecols=['colname'])
sums = np.zeros(tempdiff.shape[0])
for key in criteria.keys():
sums += np.abs(tempdiff[key])
tempdiff['result'] = sums
sortdiff = tempdiff.sort_values(by='result')
return sortdiff['colname'][: th].tolist()
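# Illustrative, never-called sketch of what get_smalldiff does on toy data
# (the column names and the shift value are made up for the demo):
def _demo_get_smalldiff():
    rng = np.random.RandomState(0)
    a = pd.DataFrame({'f%d' % i: rng.randn(100) for i in range(5)})
    b = a.copy()
    b['f0'] += 3.0  # shift one column so it looks "drifted"
    kept = get_smalldiff(a, b)
    print(kept)  # 'f0' should not be among the kept columns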
if __name__ == '__main__':
target = 'price'
train = pd.read_feather('../features/train/categorical_features_train.feather')
test = pd.read_feather('../features/test/categorical_features_test.feather')
trainp = pd.read_csv('../input/train.csv', usecols=[target])
testp = pd.read_csv('../input/test.csv', usecols=[target])
trainp.fillna(trainp.mean(), inplace=True)
testp.fillna(testp.mean(), inplace=True)
train = pd.concat([train, trainp], axis=1)
test = pd.concat([test, testp], axis=1)
del trainp, testp; gc.collect()
train_active = pd.read_feather('../features/train_active/categorical_features_train_active.feather')
test_active = pd.read_feather('../features/test_active/categorical_features_test_active.feather')
trainap = pd.read_csv('../input/train_active.csv', usecols=[target])
testap = pd.read_csv('../input/test_active.csv', usecols=[target])
trainap.fillna(trainap.mean(), inplace=True)
testap.fillna(testap.mean(), inplace=True)
train_active = pd.concat([train_active, trainap], axis=1)
test_active = pd.concat([test_active, testap], axis=1)
del trainap, testap; gc.collect()
print(train.shape)
print(test.shape)
print(train_active.shape)
print(test_active.shape)
df = pd.concat([train, test, train_active, test_active])
df.drop(nonaggfeats, axis=1, inplace=True)
print(df.shape)
for i in range(1, 5):
for comb in tqdm(list(itertools.combinations(aggfeats, i))):
group_cols = list(comb)
print(group_cols)
aggname = '-'.join(group_cols+[target]) + '-mean'
gp = df[group_cols+[target]].groupby(group_cols)[target].mean().rename(aggname).to_frame().reset_index()
df = df.merge(gp, on=group_cols, how='left')
df[aggname] = df[aggname].fillna(df[aggname].mean())
df[aggname] = df[aggname].astype('float32')
del gp; gc.collect()
df.drop(aggfeats+[target], axis=1, inplace=True)
train = df[:lentrain]
test = df[lentrain:lentrain+lentest]
train_active = df[lentrain+lentest: lentrain+lentest+lentrainactive]
test_active = df[lentrain+lentest+lentrainactive: lentrain+lentest+lentrainactive+lentestactive]
train.reset_index(drop=True, inplace=True)
test.reset_index(drop=True, inplace=True)
train_active.reset_index(drop=True, inplace=True)
test_active.reset_index(drop=True, inplace=True)
print(train.shape)
print(test.shape)
print(train_active.shape)
print(test_active.shape)
train = np.log(train+0.001)
train = scale_standard(train)
test = np.log(test+0.001)
test = scale_standard(test)
train_active = np.log(train_active+0.001)
train_active = scale_standard(train_active)
test_active = np.log(test_active+0.001)
test_active = scale_standard(test_active)
print(train.shape)
print(test.shape)
print(train_active.shape)
print(test_active.shape)
res = get_smalldiff(train, test)
train = train[res]
test = test[res]
train_active = train_active[res]
test_active = test_active[res]
train.reset_index(drop=True, inplace=True)
test.reset_index(drop=True, inplace=True)
train_active.reset_index(drop=True, inplace=True)
test_active.reset_index(drop=True, inplace=True)
print(train.shape)
print(test.shape)
print(train_active.shape)
print(test_active.shape)
train.to_feather('../features/train/Agg_Price_mean_Silver_train.feather')
test.to_feather('../features/test/Agg_Price_mean_Silver_test.feather')
train_active.to_feather('../features/train_active/Agg_Price_mean_Silver_train_active.feather')
test_active.to_feather('../features/test_active/Agg_Price_mean_Silver_test_active.feather')
print('done') | [
"noreply@github.com"
] | teketekere.noreply@github.com |
4fbaa46799abae47ff490cf403db45274a503327 | d9d61dc15dc2ad1779969a21d628498b2601cbd7 | /env/bin/easy_install | 1505671b08eef7da49e2323c9759f4a9f721ddf2 | [] | no_license | Munyola/stravagroupride | 49a6c91ba6444a6fc5114a1245a322917aff3429 | e2d2255efcac3b6f34f2dae6ec40e60da27d0ad6 | refs/heads/master | 2020-07-05T10:08:57.047783 | 2017-01-21T22:30:19 | 2017-01-21T22:30:19 | 202,618,770 | 0 | 0 | null | 2019-08-15T22:24:12 | 2019-08-15T22:20:51 | Python | UTF-8 | Python | false | false | 329 | #!/Users/samuelmoseley/Documents/stravagroupride/env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==0.6c11','console_scripts','easy_install'
__requires__ = 'setuptools==0.6c11'
import sys
from pkg_resources import load_entry_point
sys.exit(
load_entry_point('setuptools==0.6c11', 'console_scripts', 'easy_install')()
)
| [
"samuelmoseley@Samuels-MacBook-Pro-2.local"
] | samuelmoseley@Samuels-MacBook-Pro-2.local | |
13102d7fc96476fe1a1aba37f5250693e99fd263 | 7a78225689f4ac346b0706b8ce71a3c4921e0e5d | /one_on_one/schedule.py | 137de4072e8abbecb49f88e0cd9f3a951f9c504a | [] | no_license | thieman/one_on_one | 3287f4a678733869abe01006f85a159dd60af7ec | 6ef1e63363eeaaeea0d5fc9fe6652b273322a725 | refs/heads/master | 2020-12-30T16:42:20.503339 | 2016-08-31T00:35:22 | 2016-08-31T00:35:22 | 91,018,168 | 0 | 0 | null | 2017-05-11T19:56:53 | 2017-05-11T19:56:53 | null | UTF-8 | Python | false | false | 6,680 | py | import httplib2
import datetime
import base64
from email.MIMEText import MIMEText
from oauth2client.client import SignedJwtAssertionCredentials
from apiclient import discovery
class Schedule(object):
def schedule(self, pairs, no_pair=None, meeting_dt=None):
""" This method is made to be overwritten by subclasses.
It should take in a list of pairs, and schedule a meeting between
the pairs of people
no_pair is an optional argument which represents a person who did not have a pair
meeting_dt is an optional datetime at which the meetings will start
"""
raise NotImplementedError
class GCSchedule(Schedule):
name_mappings = {'Jenny from the Lair': 'Jenny Trumbull'}
jenny_email = 'jenny@gc.io'
def get_credentials(self):
client_email = 'one-on-one-account@windy-raceway-118617.iam.gserviceaccount.com'
with open("ConvertedPrivateKey.pem") as f:
private_key = f.read()
credentials = SignedJwtAssertionCredentials(client_email,
private_key,
['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/admin.directory.user.readonly',
'https://mail.google.com/'],
sub=self.jenny_email)
return credentials
def get_real_name(self, full_name):
if full_name in self.name_mappings:
return self.name_mappings[full_name]
return full_name
@staticmethod
def get_gc_email(directory_access, full_name):
split_names = full_name.split(' ')
first_name = split_names[0]
last_name = split_names[1]
#Hope to uniquely identify first
for name in [full_name, last_name, first_name]:
results = directory_access.users().list(customer='my_customer', query="name:{}".format(name)).execute()
if len(results.get('users', [])) == 1:
return results['users'][0]['emails'][0]['address']
# If you cannot uniquely identify just grab the first email for that users name
for name in [full_name, last_name, first_name]:
results = directory_access.users().list(customer='my_customer', query="name:{}".format(name)).execute()
if len(results.get('users', [])) > 1:
return results['users'][0]['emails'][0]['address']
raise KeyError("Cannot identify user from name: {}".format(full_name))
def create_meeting(self, pair, meeting_start, meeting_end, calendar_access, directory_access):
email_1 = self.get_gc_email(directory_access, self.get_real_name(pair[0]))
email_2 = self.get_gc_email(directory_access, self.get_real_name(pair[1]))
start_doc = {'timeZone': 'America/New_York', 'dateTime': meeting_start}
end_doc = {'timeZone': 'America/New_York', 'dateTime': meeting_end}
attendees = [{'email': email_1}, {'email': email_2}]
body = {'attendees': attendees,
'start': start_doc,
'end': end_doc,
'summary': 'Peer One on One: {} and {}'.format(pair[0], pair[1]),
'description': 'This is a chance to meet and talk with someone else at GC. If you are not sure what to talk about, consult this link: http://jasonevanish.com/2014/05/29/101-questions-to-ask-in-1-on-1s/'}
calendar_access.events().insert(calendarId='gamechanger.io_pvrnqe6amftma1ful6vou0ctmo@group.calendar.google.com',
body=body,
sendNotifications=True).execute()
def send_no_meeting_email(self, user_name, mail_access, directory_access):
user_email = self.get_gc_email(directory_access, self.get_real_name(user_name))
message_text = "Hello {},\nThis week we had an odd number of people for peer one on ones. That means one person did not get paired up with someone. You happen to be that person this week. You should be paired up again next time!\n\nLet Jenny or Alex know if you have any questions.".format(user_name)
message = MIMEText(message_text)
message['to'] = user_email
message['from'] = self.jenny_email
message['subject'] = "Peer One on Ones this week"
encodeMessage = {'raw': base64.urlsafe_b64encode(message.as_string())}
mail_access.users().messages().send(userId='me', body=encodeMessage).execute()
def schedule(self, pairs, no_pair=None, meeting_dt=None):
"""
This schedule function is built around Googles Api. Its goal is to schedule
google calendar events for each set of pairs. To do this it uses the following
api resources:
Google Calendar: https://developers.google.com/google-apps/calendar/?hl=en
Google Directory: https://developers.google.com/admin-sdk/directory/
It also makes use of Google Service Accounts: https://developers.google.com/identity/protocols/OAuth2ServiceAccount
If meeting_dt is not passed, assume that the meetings should be schedule a week from today at 10 a.m.
"""
if meeting_dt is None:
now = datetime.datetime.now()
one_week_from_now = now + datetime.timedelta(7)
meeting_start = datetime.datetime(one_week_from_now.year, one_week_from_now.month, one_week_from_now.day, 10, 30, 0).isoformat()
meeting_end = datetime.datetime(one_week_from_now.year, one_week_from_now.month, one_week_from_now.day, 11, 0, 0).isoformat()
else:
dt_start = datetime.datetime(meeting_dt.year, meeting_dt.month, meeting_dt.day, meeting_dt.hour, meeting_dt.minute, meeting_dt.second)
dt_end = datetime.datetime(meeting_dt.year, meeting_dt.month, meeting_dt.day, meeting_dt.hour, meeting_dt.minute, meeting_dt.second)
meeting_start = dt_start.isoformat()
meeting_end = dt_end.isoformat()
credentials = self.get_credentials()
http = credentials.authorize(httplib2.Http())
calendar_access = discovery.build('calendar', 'v3', http=http)
directory_access = discovery.build('admin', 'directory_v1', http=http)
mail_access = discovery.build('gmail', 'v1', http=http)
for pair in pairs:
self.create_meeting(pair, meeting_start, meeting_end, calendar_access, directory_access)
if no_pair:
self.send_no_meeting_email(no_pair, mail_access, directory_access)
| [
"paetling@gmail.com"
] | paetling@gmail.com |
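The Schedule base class above defines the extension point: subclasses override schedule(pairs, no_pair, meeting_dt). A minimal sketch of an alternative backend (hypothetical, not in the repo) that just prints the pairings instead of touching Google's APIs:

class ConsoleSchedule(Schedule):
    """Debug scheduler: prints pairings instead of creating calendar events."""
    def schedule(self, pairs, no_pair=None, meeting_dt=None):
        for a, b in pairs:
            print('1:1 scheduled: {} <-> {} at {}'.format(a, b, meeting_dt))
        if no_pair:
            print('{} sits this round out'.format(no_pair))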
cb616defe7d275bd7447eac9d42bfe18bfcd3b8f | 1212c51860b5277dbf0e8a4c7d6e972ab2a21408 | /limb_manipulation/src/database.py | 045cd779278872623ba8d3c1061e1556189b51b7 | [] | no_license | hcrlab/access_teleop | f514f4d2383e82866dbf00e1ae830d36d66a1501 | 201dccc03c76e0d5189c04e8635f035a78da500c | refs/heads/master | 2021-01-16T18:12:00.922465 | 2020-06-01T05:04:39 | 2020-06-01T05:04:39 | 100,047,231 | 3 | 3 | null | 2017-10-12T00:58:50 | 2017-08-11T15:43:45 | JavaScript | UTF-8 | Python | false | false | 1,443 | py | #!/usr/bin/env python
import rospy
import os
import pickle
class Database(object):
def __init__(self):
# actions and their offsets
self._actions = {}
# file path of the database
script_path = os.path.abspath(__file__)
script_dir = os.path.split(script_path)[0]
self.db_path = os.path.join(script_dir, "action_db.p")
def get(self, name):
if name in self._actions:
return self._actions[name]
else:
return None
    def add(self, name, offset):
        # always overwrite the previous entry
        self._actions[name] = list(offset)
def delete(self, name):
if name in self._actions:
del self._actions[name]
def list(self):
return self._actions.keys()
def load(self):
try:
with open(self.db_path, 'r') as f:
self._actions = pickle.load(f)
except EOFError as eof_e:
rospy.logwarn('Error when reading the file: {}'.format(eof_e))
except IOError as io_e:
rospy.logwarn('No storage information: {}'.format(io_e))
def save(self):
try:
with open(self.db_path, 'w') as f:
pickle.dump(self._actions, f)
except IOError as e:
rospy.logwarn('No storage information: {}'.format(e)) | [
"mecu@cs.washington.edu"
] | mecu@cs.washington.edu |
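Typical lifecycle for the Database class above (a sketch; assumes a ROS environment where rospy is importable, and the action name/offsets are made up):

db = Database()
db.load()                        # pull previously pickled actions, if any
db.add('wave', [0.1, 0.2, 0.0])
print(db.list())                 # ['wave', ...]
db.save()                        # persist back to action_db.p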
c847a80d49912a090cb89a16981cc505f936a115 | 5d4e22ddf6e1ab9fd77cf15f5ed8f98a64d8a471 | /crawler_sys/scheduler/pull_es_register_and_push_to_redis.py | 3b1d06869c89e6e66fda277295fd5b575eaa63bc | [] | no_license | daiyunbin/crawler | 11ef2a2577e41758c1267757d6610983702f5e5e | 39d7c84cdbd177f1eabc8f0f1176dfe78037f8f0 | refs/heads/master | 2022-08-05T10:40:00.570806 | 2020-05-22T19:38:58 | 2020-05-22T19:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 21 10:24:31 2018
@author: fangyucheng
"""
import argparse
from crawler.crawler_sys.utils.connect_with_redis import push_video_url_to_redis_set
from crawler.crawler_sys.utils.connect_with_es import pull_url_from_es
from crawler.crawler_sys.utils.date_calculator import calculator
parser = argparse.ArgumentParser(description='')
parser.add_argument('-p', '--platforms', default=[], action='append',
                    help=('Pass platform names; they will be assembled into a Python list.'))
parser.add_argument('-d', '--days_from_now', default=30, type=int,
help=('Specify days from now as the lower boundary for release_time, '
'default 30.'))
args = parser.parse_args()
platform_Lst = args.platforms
release_time_lower_bdr = calculator(args.days_from_now)
if platform_Lst == []:
print("Please input at least one platform")
else:
for platform in platform_Lst:
download_es_register = pull_url_from_es(platform=platform,
release_time_lower_bdr=release_time_lower_bdr)
print("successfully downloaded data from es, platform: %s" % platform)
push_to_redis = push_video_url_to_redis_set(platform=platform,
url_lst=download_es_register)
print("successfully pushed into redis, count of url: %s" % push_to_redis) | [
"360134299@qq.com"
] | 360134299@qq.com |
edd4fa29759616dc883f8b37ef99b251a6ef3bee | 3bce532523000205b25cef42599ac9337e75ed1e | /project/blog_server/tools/loging_decorator.py | 631b2dd875b5d4dd4ed105cb7d122903b68d911e | [] | no_license | NavasCSDN/python_learn_0701 | 46c4ec893780ca8362dee73838eda46b5e80b880 | 2ffc62c24a954912aac104cf528788d7150163c9 | refs/heads/master | 2020-06-13T19:27:14.859364 | 2019-08-06T08:12:01 | 2019-08-06T08:12:01 | 194,764,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py |
def loging_check(*methods):
    def _loging_check(func):
        def wrapper(request, *args, **kwargs):
            # TODO: check that the user is logged in for the given HTTP
            # methods before delegating; for now call straight through to the
            # wrapped view (the original returned None, swallowing every
            # response).
            return func(request, *args, **kwargs)
        return wrapper
    return _loging_check
"jcmxdd@163.com"
] | jcmxdd@163.com |
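A sketch of how the decorator above would wrap a Django-style view (the view name and body are illustrative):

@loging_check('GET', 'POST')
def profile_view(request):
    return 'profile page'  # placeholder body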
e26dce7cc0cc970d69f7461bafbdc26197a68c05 | fc2447b91cbee82e74e939092ec1903678f3217a | /PythonPractice/hm_py/hm_multitasking/hm_Thread_lock.py | b9dc1ee26649bfc47e5e9227218d4689b6090054 | [] | no_license | yglj/learngit | 0eac654e7c49f2ede064b720e6ee621a702193b4 | 74fb4b93d5726c735b64829cafc99878d8082121 | refs/heads/master | 2022-12-24T10:01:56.705046 | 2019-05-27T21:04:08 | 2019-05-27T21:04:08 | 146,157,116 | 0 | 1 | null | 2022-12-12T07:01:25 | 2018-08-26T06:28:20 | HTML | UTF-8 | Python | false | false | 870 | py |
import threading
import time
gl_num = 0    # shared global; unsynchronized access from two threads is a data race
lock = threading.Lock()   # mutex used to synchronize the two threads
def sum1(n):
    global gl_num
    print('Thread:%s' % threading.current_thread())
    for i in range(n):
        lock.acquire()   # take the lock
        gl_num += 1
        lock.release()  # release the lock
def sum2(n):
global gl_num
print('Thread:%s' % threading.current_thread())
for i in range(n):
lock.acquire()
gl_num -= 1
lock.release()
def main():
n = 1000000
one = threading.Thread(target=sum1, args=(n,), name='tom')
two = threading.Thread(target=sum2, args=(n,))
one.start()
two.start()
one.join()
two.join()
    print('value of gl_num after %s iterations: %s' % (n, gl_num))
if __name__ == '__main__':
main()
| [
"2365952530@qq.com"
] | 2365952530@qq.com |
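The acquire()/release() pairs above can be written more idiomatically with the lock as a context manager, which also releases on exceptions. A sketch of the same sum1 body, reusing the module's lock and gl_num:

def sum1_with(n):
    global gl_num
    for i in range(n):
        with lock:   # acquire on entry, release on exit (even on error)
            gl_num += 1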
0f06df8bd778867cda2cbddc78149ffab730985f | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/modular_tree/grease_pencil.py | 183fdeba8fc3de276497b0dad75e66819a0a7da8 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,075 | py | from collections import deque
import bpy
from bpy.types import Operator
from bpy.props import IntProperty, BoolProperty, FloatProperty
from mathutils.geometry import intersect_point_line
from mathutils import Vector
from math import cos, inf, pi
from random import random
from.geometry import to_array
def distribute_evenly_along_curve(s, p_dist):
new_set = []
new_set.append(s[0])
i = 1
while i < len(s):
n_dist = (s[i] - new_set[-1]).length
if n_dist >= p_dist:
new_set.append(new_set[-1] + p_dist / n_dist * (s[i] - new_set[-1]))
else:
i += 1
new_set.append(s[-1])
return new_set
def smooth_stroke(iterations, smooth, points):
for i in range(iterations):
new_points = list()
new_points.append(points[0])
for j in range(1, len(points) - 1):
new_points.append(smooth / 2 * (points[j - 1] + points[j + 1]) + (1 - smooth) * points[j])
new_points.append(points[-1])
points = new_points
return points
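# Illustrative, never-called example of the two helpers above. It assumes it
# runs inside Blender, where mathutils is importable; the coordinates and
# spacing are made up:
def _demo_resample_and_smooth():
    stroke = [Vector((0, 0, 0)), Vector((1, 0, 0)), Vector((1, 1, 0))]
    resampled = distribute_evenly_along_curve(stroke, 0.25)  # points ~0.25 apart
    return smooth_stroke(iterations=2, smooth=0.5, points=resampled)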
class ConnectStrokes(Operator):
"""move grease pencil strokes so that they are connected to one another"""
bl_idname = "mtree.connect_strokes"
bl_label = "connect strokes"
bl_options = {"REGISTER", "UNDO"}
point_dist : FloatProperty(min=0.001, default=.8)
def execute(self, context):
process_gp_layer(self.point_dist)
return {'FINISHED'}
def find_closest_point(p, s):
min_dist = inf
min_index = 0
for i, p1 in enumerate(s): # finding closest point to p in s
dist = (p1 - p).length_squared
if dist < min_dist:
min_dist = dist
min_index = i
p1 = s[min_index]
    if (s[min_index - 1] - p).length_squared < (s[(min_index + 1)%len(s)] - p).length_squared: # find which neighbour of p1 is closer to p
        p2 = s[min_index - 1]
    else:
        p2 = s[(min_index + 1) % len(s)]  # wrap like the comparison above, avoiding an IndexError at the last point
closest_point, percent = intersect_point_line(p, p1, p2) # returns the point closest to p in the line p1 p2
if not (0 < percent < 1):
closest_point = p1
return closest_point, (closest_point - p).length_squared, min_index
def connect_all_strokes(strokes):
displaced_strokes = [strokes[0]]
splits = [] # list of (parent_stroke, point_index, child_stroke) used for building tree from gp strokes
for child_index, s in enumerate(strokes[1:]): # assuming first stroke is the trunk one
displaced_s = []
min_dist = inf
min_p = None
min_parent_index = 0
min_point_index = 0
for parent_index, s1 in enumerate(displaced_strokes): # find closest stroke to s in displaced strokes
p, dist, point_index = find_closest_point(s[0], s1)
if dist < min_dist:
min_dist = dist
min_p = p
min_parent_index = parent_index
min_point_index = point_index
first_point = s[0]
displaced_s = [p + (min_p-first_point) for p in s]
displaced_strokes.append(displaced_s)
splits.append((min_parent_index, min_point_index, child_index + 1))
return displaced_strokes, splits
def process_gp_layer(p_dist):
gp = bpy.context.scene.grease_pencil
if gp is not None and gp.layers.active is not None and gp.layers.active.active_frame is not None and len(gp.layers.active.active_frame.strokes) > 0 and len(gp.layers.active.active_frame.strokes[0].points) > 1:
strokes = [[i.co for i in j.points] for j in gp.layers.active.active_frame.strokes]
for i in range(len(strokes)):
strokes[i] = distribute_evenly_along_curve(strokes[i], p_dist)
strokes, splits = connect_all_strokes(strokes)
frame = gp.layers.active.active_frame
for stroke, s in zip(strokes, frame.strokes):
for i in range(len(s.points)-len(stroke)):
s.points.pop()
s.points.add(len(stroke) - len(s.points))
for i, p in enumerate(stroke):
s.points[i].co = p
return strokes, splits
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
f795af648856cc7d4fb41d3ff7db01f0228b024e | ffa433aafa09257c718e3931ff36c6b9c88b1722 | /mysite/migrations/0001_initial.py | 3a5e6dd30797187c725f4a62d80386172b019f22 | [] | no_license | RohaSpirit/homework02 | 2ebb1840de5439240d7fe54ab8f798b25391e861 | 7a253a05b2c053a959b1e575269be939c37ebf2a | refs/heads/main | 2023-06-23T01:28:00.948315 | 2021-07-16T13:45:34 | 2021-07-16T13:45:34 | 386,333,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | # Generated by Django 3.2.5 on 2021-07-13 07:30
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('content', models.TextField()),
('pub_date', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'ordering': ('-pub_date',),
},
),
]
| [
"malaysid00@gmail.com"
] | malaysid00@gmail.com |
bba93c7909a69bc1eb7106984fc28a98bc93ca50 | 0eded9a2c1f0bc7a7765cc3829b23a5df956f6fd | /10_regex/regex_christmas.py | 6816dc219f378eef1ce66b7ff6063d95981d6309 | [] | no_license | RickBahague/python-solutions | 6543e036bb4a408d831dacea3b233f4f2ddec519 | 66047d5bd6853dbbb26a1575b5c4f848d35d56bd | refs/heads/master | 2021-01-13T17:13:43.061353 | 2016-01-13T11:37:34 | 2016-01-13T11:37:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,869 | py | # task 19
import re
import json
import urllib.request
def get_data():
# STEP 1
# Get the Json from the URL
data = urllib.request.urlopen('http://fsr.github.io/\
python-lessons/misc/christmas.json')
wishlist = json.loads(data.read().decode('utf-8'))
return wishlist
def do_santas_work(wishlist):
# STEP 2
# Sort out the Spam
no_spam = re.compile(r'^Lieber Weihnachtsmann,')
nice = {}
naughty = []
for child in wishlist:
if no_spam.match(wishlist[child]):
# STEP 3
# If no spam check if child was nice
if nice_or_not(wishlist[child]):
# STEP 4
# Get the wishes
nice[child] = get_wishes(wishlist[child])
else:
naughty.append(child)
else:
naughty.append(child)
nice['naughty'] = naughty
return nice
def nice_or_not(letter):
    # Check if the child was nice. The alternation must be grouped: a pattern
    # like r'immer (lieb)|(artig)|(brav)' matches a bare 'artig' anywhere, so
    # even 'unartig' (naughty) would count as nice.
    nice = re.compile(r'immer (lieb|artig|brav)')
    return nice.search(letter) is not None
def get_wishes(letter):
# Get all wishes out of the letter
wish_pattern = re.compile(r'- (.*)')
wishes = wish_pattern.finditer(letter)
wishlist = []
for wish in wishes:
wishlist.append(wish.group(1))
return wishlist
def show_santa(results):
# STEP 5
# Finally present results to Santa
for child in results['naughty']:
        print('{child}: naughty/no wishes'.format(child=child))
del results['naughty']
for child in results:
        print('{child}: {wishes}'.format(child=child,
                                         wishes=', '.join(results[child])))
def main():
wishlist = get_data()
results = do_santas_work(wishlist)
show_santa(results)
if __name__ == '__main__':
main()
| [
"Doering.Felix@googlemail.com"
] | Doering.Felix@googlemail.com |
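The grouped alternation in nice_or_not matters: an ungrouped (artig) alternative also matches inside 'unartig'. A quick standalone check (illustrative):

import re
assert re.search(r'immer (lieb|artig|brav)', 'ich war immer unartig') is None
assert re.search(r'(artig)', 'ich war immer unartig') is not None  # the false positive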
bfe73c9937420c25b9574728b02e93be6126d53b | f53a9298c372dc0004e58ae6e53079c8afa2ed75 | /docs/source/conf.py | 3b62687c402251be1ac33fe49970605f5f5714b9 | [
"BSD-3-Clause"
] | permissive | aaraney/nbsphinx-multilink | f8654b5c12b48577cd8892bd3ed7886b5f4b4772 | dd0a393c22c4aed25c300fd5ef1a21fe0bb537f1 | refs/heads/master | 2023-03-27T05:41:19.206914 | 2021-04-02T18:09:43 | 2021-04-02T18:17:30 | 341,663,061 | 1 | 0 | BSD-3-Clause | 2021-04-02T18:17:30 | 2021-02-23T19:15:54 | Python | UTF-8 | Python | false | false | 6,221 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'nbsphinx-link'
copyright = '2018, Vidar Tonaas Fauske'
author = 'Vidar Tonaas Fauske'
# The short X.Y version
# get version from python package:
import os
here = os.path.dirname(__file__)
repo = os.path.join(here, '..', '..')
_version_py = os.path.join(repo, 'nbsphinx_link', '_version.py')
version_ns = {}
with open(_version_py) as f:
exec(f.read(), version_ns)
# The short X.Y version.
version = '%i.%i' % version_ns['version_info'][:2]
# The full version, including alpha/beta/rc tags.
release = version_ns['__version__']
import subprocess
try:
git_rev = subprocess.check_output(['git', 'describe', '--exact-match', 'HEAD'], universal_newlines=True)
except subprocess.CalledProcessError:
try:
git_rev = subprocess.check_output(['git', 'rev-parse', 'HEAD'], universal_newlines=True)
except subprocess.CalledProcessError:
git_rev = ''
if git_rev:
git_rev = git_rev.splitlines()[0] + '/'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'nbsphinx',
'nbsphinx_link',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# Read The Docs
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'nbsphinx-linkdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nbsphinx-link.tex', 'nbsphinx-link Documentation',
'Vidar Tonaas Fauske', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nbsphinx-link', 'nbsphinx-link Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nbsphinx-link', 'nbsphinx-link Documentation',
author, 'nbsphinx-link', 'One line description of project.',
'Miscellaneous'),
]
# Ensure env.metadata[env.docname]['nbsphinx-link-target']
# points relative to repo root:
nbsphinx_link_target_root = repo
nbsphinx_prolog = (
r"""
{% if env.metadata[env.docname]['nbsphinx-link-target'] %}
{% set docpath = env.metadata[env.docname]['nbsphinx-link-target'] %}
{% else %}
{% set docpath = env.doc2path(env.docname, base='docs/source/') %}
{% endif %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. nbinfo::
This page was generated from `{{ docpath }}`__.
__ https://github.com/vidartf/nbsphinx-link/blob/
""" +
git_rev + r"{{ docpath }}"
)
| [
"vidartf@gmail.com"
] | vidartf@gmail.com |
2fbd91842be2caeec6ae550d3a7b18aba36a26d9 | 30b31b91b0e7e8155ece40efba025aba9cdad8da | /data/generate_cdf.py | 2422595f68ba5062d54df6437b829bc5b314f98e | [] | no_license | ziy212/HeadlessBrowser | 18d267fddea51f463fa051d3e7efaf7fe432e640 | 7536c98ab2c3a58ec3f1de298448c87672ada199 | refs/heads/master | 2021-01-19T07:20:16.135785 | 2015-11-12T19:04:37 | 2015-11-12T19:04:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,883 | py | import matplotlib.pyplot as plt
import matplotlib
import sys
import numpy as np
def readData(path,scale=1.0,min=0.0, max=30.0):
f = open(path)
data = []
for line in f:
val = float(line.strip())
if val > max or val < min:
print str(val)
else:
data.append(val*scale)
f.close()
return sorted(data)
def drawThreeDelayCDF(data_chicago,data_ec2,data_local,data_chicago2,data_ec22,data_local2,xlim=0,xmax=0,ylabel="CDF RTT",xlabel="Time (ms)"):
font = {'family' : 'normal',\
'weight' : 'bold',\
'size' : 22}
matplotlib.rc('font', **font)
params = {'legend.fontsize': 20, 'axes.labelsize':25}
plt.rcParams.update(params)
fig = plt.figure(1)
ax1 = plt.subplot(121)
[i.set_linewidth(6) for i in ax1.spines.itervalues()]
#fig.suptitle("test",fontsize=15)
#plt.legend([plot1,plot2,plot3], ["Chicago HoneyFarm", 'EC2 HoneyFarm',"Local HoneyFarm"],bbox_to_anchor=(0.31,1))
data_chicago = np.sort(data_chicago)
data_ec2 = np.sort(data_ec2)
data_local = np.sort(data_local)
yvals1=np.arange(len(data_chicago))/float(len(data_chicago))
eighty_index = int(0.8*len(data_chicago))
print "I eighty percent for chicago: %f "%data_chicago[eighty_index]
eighty_index = int(0.8*len(data_ec2))
print "I eighty percent for ec2: %f "%data_ec2[eighty_index]
eighty_index = int(0.8*len(data_local))
print "I eighty percent for local: %f "%data_local[eighty_index]
yvals2=np.arange(len(data_ec2))/float(len(data_ec2))
yvals3=np.arange(len(data_local))/float(len(data_local))
plt.xscale('log')
if xlim != 0 and xmax != 0:
plt.xlim(xlim,xmax)
plot1, = plt.plot(data_chicago, yvals1, 'b',linewidth=6.0)
plot2, = plt.plot(data_ec2, yvals2,'r',linewidth=6.0)
plot3, = plt.plot(data_local, yvals3,'k', linewidth=6.0)
plt.ylabel("CDF")
yticks = np.arange(0, 1.1, 0.2)
plt.yticks(yticks)
plt.xlabel("Time (ms)")
plt.legend([plot1,plot2,plot3], ["Chicago", 'EC2',"Local"],bbox_to_anchor=(1.05,1.12),ncol=3,loc=9,frameon=False)
#figure 2
#fig = plt.figure(2)
ax2 = plt.subplot(122)
[i.set_linewidth(6) for i in ax2.spines.itervalues()]
#fig.suptitle("test",fontsize=15)
data_chicago = np.sort(data_chicago2)
data_ec2 = np.sort(data_ec22)
data_local = np.sort(data_local2)
eighty_index = int(0.8*len(data_chicago))
print "NI eighty percent for chicago: %f "%data_chicago[eighty_index]
eighty_index = int(0.8*len(data_ec2))
print "NI eighty percent for ec2: %f "%data_ec2[eighty_index]
eighty_index = int(0.8*len(data_local))
print "NI eighty percent for local: %f "%data_local[eighty_index]
yvals1=np.arange(len(data_chicago))/float(len(data_chicago))
yvals2=np.arange(len(data_ec2))/float(len(data_ec2))
yvals3=np.arange(len(data_local))/float(len(data_local))
plt.xscale('log')
if xlim != 0 and xmax != 0:
plt.xlim(xlim,xmax)
plot1, = plt.plot(data_chicago, yvals1, 'b',linewidth=6.0)
plot2, = plt.plot(data_ec2, yvals2,'r',linewidth=6.0)
plot3, = plt.plot(data_local, yvals3,'k', linewidth=6.0)
#plt.title('Initial RTT')
#plt.ylabel("Non-initial RTT CDF")
plt.yticks(yticks)
plt.xlabel(xlabel)
plt.show()
def drawCDF2(data1,data2):
font = {'family' : 'normal',\
'weight' : 'bold',\
'size' : 16}
matplotlib.rc('font', **font)
params = {'legend.fontsize': 16, 'axes.labelsize':16}
plt.rcParams.update(params)
fig = plt.figure(1)
ax1 = plt.subplot(111)
[i.set_linewidth(2) for i in ax1.spines.itervalues()]
plt.xlim(0,31)
data1 = np.sort(data1)
data2 = np.sort(data2)
#data3 = np.sort(data3)
yvals1=np.arange(len(data1))/float(len(data1))
yvals2=np.arange(len(data2))/float(len(data2))
eighty_index = int(0.8 * len(data1))
    ninety_index = int(0.9 * len(data1))
#yvals2=np.arange(len(data2))/float(len(data2))
#yvals3=np.arange(len(data3))/float(len(data3))
#plt.xlim(0.05,3)
#plt.xscale('log')
plot1, = plt.plot(data1, yvals1,linewidth=2.0)
plot2, = plt.plot(data2, yvals2,'r--', linewidth=2.0)
#plot3, = plt.plot(data3, yvals3,'y--')
plt.ylabel("Percentage of Pages")
yticks = np.arange(0, 1.1, 0.1)
plt.yticks(yticks)
plt.grid()
plt.xlabel("Loading Time (s)")
plt.legend([plot1,plot2], ["With CSPAutoGen", 'Without CSPAutoGen'],bbox_to_anchor=(0.95,0.25))
print "Eightyth %f "%data1[eighty_index]
print "Nightyth %f "%data1[nighty_index]
plt.show()
def main():
data1 = readData(sys.argv[1],max=300)
data2 = readData(sys.argv[2],max=300)
drawCDF2(data1,data2)
#drawThreeDelayCDF(data1,data2,data3,data11,data22,data33)
#drawCDF2(data)
if __name__=="__main__":
main()
| [
"xiangpan2011@gmail.com"
] | xiangpan2011@gmail.com |
86039b9f96851250bb77722e43982f6be91550cd | cd033c38e985a6735ec90c88f4c8047f20d82064 | /funciones4final.py | dff54cbdc6159d3e4b18a3898e5b29aa02b4afcf | [] | no_license | AnabellaParedes/DEMO_CONTABLE | 8e57a722dbc8d2419ad5bd007f8923c1010b2587 | c619ac1c49737657a5482700dbe2b8638597f057 | refs/heads/master | 2022-11-10T18:43:59.746779 | 2020-07-01T02:25:36 | 2020-07-01T02:25:36 | 274,784,076 | 0 | 0 | null | 2020-06-29T05:08:49 | 2020-06-24T22:48:06 | Python | UTF-8 | Python | false | false | 7,438 | py | #sueldom = sueldo mensual
#sueldoa = sueldo anual
#m = n meses
#g = grati
#mes_i = mes de inicio
#mes_f = mes de salida
#anio_i = anio de inicio
#anio_f = anio de salida
def conteo_meses(iniciomes,finmes,inicioanio,finalanio):
    # Determines the number of months used by the other calculations in the
    # program. It is evaluated from the year and month values and also checks
    # that the range makes sense: e.g. an end year earlier than the start
    # year would be incoherent.
    mes_i=iniciomes
    mes_f=finmes
    anio_i=inicioanio
    anio_f=finalanio
    # total number of months
    if mes_f>=mes_i: # if the end month is not earlier, the years decide the calculation
        if anio_f==anio_i: # same year
            m=(mes_f-mes_i)+1
        elif anio_f>anio_i: # the end year is later
            m=(12-mes_i+mes_f+1)+12*(anio_f-anio_i-1)
        else:
            m=0 # if the start year were later, return 0 months so it can be validated later
    else:
        if anio_f>anio_i: # the end year is later
            m=(12-mes_i+mes_f+1)+12*(anio_f-anio_i-1)
        else:
            m=0 # if both the start year and month were later, return 0 months so it can be validated later
    return m
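# Worked examples (illustrative):
#   conteo_meses(3, 5, 2020, 2020)  -> 3   (Mar, Apr, May)
#   conteo_meses(11, 2, 2020, 2021) -> 4   (Nov, Dec, Jan, Feb)
#   conteo_meses(5, 3, 2021, 2020)  -> 0   (end date before start date)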
def grati(sueldom,m): # for labor cost
    # Computes the full gratification for the whole contract period.
    # It equals one sixth of the monthly salary per month worked.
    g = (sueldom/6)*(m)
    return g
def bono(gratificacion): # for labor cost and severance
    # The statutory bonus is always 9% of the gratification granted.
    bono = gratificacion*0.09
    return bono
def grati_trunca(sueldom,mes_i,mes_f,anio_i,anio_f): # for severance
    # Amount of gratification the employee is still owed after resigning.
    # It is evaluated against the payment dates:
    #   January-June is paid on July 15
    #   July-November is paid on December 15
    # The person may be owed only the months worked since the last payment
    # date, or a full gratification.
    # NOTE (assumed fix): the original decremented both months whenever either
    # one was 12; clamping each December to November independently looks like
    # the intended behavior.
    if mes_f==12:
        mes_f = mes_f-1
    if mes_i==12:
        mes_i = mes_i-1
    a=0
    if anio_i<anio_f: # the end year is later
        if 1<=mes_f<=6 and 6<mes_i<=11: # the numbers stand for months
            a = mes_f # e.g. 1 means January
        elif 1<=mes_f<=6 and 1<=mes_i<=6:
            a = mes_f
        elif 6<=mes_f<=11 and 6<=mes_i<=11:
            a = mes_f-6
        elif 1<=mes_i<=6 and 6<mes_f<=11:
            a = mes_f-6
    elif anio_f==anio_i: # everything happens within the same year
        if 1<=mes_f<=6 and 1<=mes_i<=6:
            a = mes_f-mes_i+1
        elif 6<=mes_f<=11 and 6<=mes_i<=11:
            a = mes_f-mes_i+1
        elif 1<=mes_i<=6 and 6<mes_f<=11:
            a = mes_f-6
        elif 1<=mes_f<=6 and 6<mes_i<=11:
            a = mes_f
    gratitrunca=(sueldom/6)*a # 1/6 of the salary per month still owed
    return gratitrunca
def vaca(sueldom,m): # for labor cost and severance
    # Amount of vacation pay owed.
    # It equals one twelfth of the monthly salary per month worked.
    vt = (sueldom/12)*(m)
    return vt
def CTS(sueldom,m): # for cost
    # Used to estimate the cost of a brand-new hire, so payment dates are not
    # considered; it is computed over the contract length "m".
    # It equals one salary plus one sixth of the gratification, divided by 12.
    cts = ((sueldom+(sueldom+sueldom*0.09)/6)/12)*m
    return cts
def CTS_trunca(sueldom,mes_i,mes_f,anio_i,anio_f): # for severance
    # Amount of CTS the employee is still owed after leaving before term.
    # This compensation for time of service is paid on these dates:
    #   November-April is paid on May 15
    #   May-October is paid on November 15
    # The person may be owed only the months worked since the last payment
    # date, or a full CTS deposit.
    b=0
    if anio_i<anio_f: # the end year is later
        if 1<=mes_f<=4 and 5<=mes_i<=10:
            b = mes_f+2
        elif 11<=mes_i<=12 and 1<=mes_f<=4:
            b = mes_f+2
        elif 11<=mes_i<=12 and 5<=mes_f<=10:
            b = mes_f-4
        elif 1<=mes_f<=4 and 1<=mes_i<=4:
            b = mes_f+2
        elif 5<=mes_i<=10 and 5<=mes_f<=10:
            b = mes_f-4
        elif 11<=mes_f<=12 and 11<=mes_i<=12:
            b = mes_f-10
        elif 11<=mes_f<=12 and 5<=mes_i<=10:
            b = mes_f-10
        elif 1<=mes_i<=4 and 5<=mes_f<=10:
            b = (mes_f-5)+1
        elif 1<=mes_i<=4 and 11<=mes_f<=12:
            b = mes_f-10
    elif anio_f==anio_i: # everything happens within the same year
        if 1<=mes_f<=4 and 1<=mes_i<=4: # the end month cannot be earlier
            b = mes_f-mes_i+1
        elif 5<=mes_i<=10 and 5<=mes_f<=10:
            b = (mes_f-mes_i)+1
        elif 11<=mes_f<=12 and 11<=mes_i<=12:
            b = mes_f-mes_i+1
        elif 11<=mes_f<=12 and 5<=mes_i<=10:
            b = mes_f-10
        elif 1<=mes_i<=4 and 5<=mes_f<=10:
            b = (mes_f-5)+1
        elif 1<=mes_i<=4 and 11<=mes_f<=12:
            b = mes_f-10
    ctstrunca = ((sueldom+(sueldom+sueldom*0.09)/6)/12)*b # one salary plus 1/6 of the gratification, divided by 12,
    return ctstrunca # times the months still owed
def i_5c(sueldom,bono_ord): # for net pay
    if sueldom<2150: # salaries under 2150 owe no 5th-category income tax
        i_5c=0
    else: # project the income over a full year
        s = sueldom*12 # salary over one year (times 12)
        grati = sueldom*2 # two gratifications are earned per year
        bono = grati*0.09 # the statutory bonus is 9% of the gratification
        proyeccion_1anio = s+grati+bono+bono_ord
        renta_neta = proyeccion_1anio-30100 # subtract 7 UIT (1 UIT = 4300 soles)
        # tax brackets
        i = renta_neta/4300
        if i>0 and i<=5: # each bracket pays its own rate;
            i_5c = (renta_neta*0.08)/12 # as you climb brackets, the tax accumulates up to the level reached
        elif i>5 and i<=20:
            a = 21500
            i_5c = (1720+(renta_neta-a)*0.14)/12
        elif i>20 and i<=35:
            a = 86000
            i_5c = (10750+(renta_neta-a)*0.17)/12
        elif i>35 and i<=45:
            a = 150500
            i_5c = (21715+(renta_neta-a)*0.2)/12
        elif i>45:
            a = 193500
            i_5c = (30315+(renta_neta-a)*0.3)/12
    return round(i_5c,2)
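# Worked example (illustrative): sueldom = 3000, bono_ord = 0
#   annual projection = 3000*12 + 3000*2 + 3000*2*0.09 = 42540
#   renta_neta = 42540 - 30100 = 12440 -> 12440/4300 ~= 2.9 (first bracket)
#   i_5c = 12440*0.08/12 ~= 82.93 soles withheld per month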
def AFP(sueldom,porc):
    porc = porc/100.0 # float division so integer percentages also work under Python 2
    AFP = sueldom*porc # AFP (private pension fund) contribution
return AFP
def ESSALUD(sueldom,m): # for labor cost
    # Social health insurance (EsSalud) is 9% of the salary.
porc_estado = 0.09
essalud = (sueldom*m)*porc_estado
return essalud
| [
"66015059+AnabellaParedes@users.noreply.github.com"
] | 66015059+AnabellaParedes@users.noreply.github.com |
583f042acda1f09fdea5324cdb1e140bb2a1ccc9 | 08c8bf9ea15094eee96df1a42d1d222362b5dc7a | /src/dicom_parser/utils/__init__.py | 37bf50c0c7ec3daf0e1791668ce9fc4a845ac3ca | [
"MIT"
] | permissive | open-dicom/dicom_parser | 51801fc9e86379e470308b28f05b9520315c82fc | 8f9224dfb1e12ee52aa17d520f10e169fe400391 | refs/heads/main | 2023-04-07T01:21:31.390019 | 2023-03-15T12:48:23 | 2023-03-15T12:48:23 | 241,392,223 | 12 | 9 | MIT | 2023-03-15T12:48:27 | 2020-02-18T15:09:51 | Python | UTF-8 | Python | false | false | 276 | py | """
Utilities for the *dicom_parser* package.
"""
from dicom_parser.utils.parse_tag import parse_tag
from dicom_parser.utils.path_generator import generate_paths
from dicom_parser.utils.read_file import read_file
from dicom_parser.utils.requires_pandas import requires_pandas
| [
"z.baratz@gmail.com"
] | z.baratz@gmail.com |
b398741b20cab175383fd7a981945f9bbdd411d9 | dc8a337ea1d8a285577d33e5cfd4dbbe846ee1a0 | /src/main/scala/DiameterOfNAryTree.py | d45f17eaccb63fce612d3cd1eafde3455b775d7e | [] | no_license | joestalker1/leetcode | 8a5cdda17abd33c3eef859732f75d7bec77a9d0e | ae392ddbc7eb56cb814b9e9715043c98a89a6314 | refs/heads/master | 2023-04-13T22:09:54.407864 | 2023-04-09T19:22:54 | 2023-04-09T19:22:54 | 131,803,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | """
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children if children is not None else []
"""
class Solution:
def diameter(self, root: 'Node') -> int:
if not root:
return 0
longest_diameter = 0
        def find_longest_diameter(node):
            # Returns the longest downward branch (in edges) from `node`,
            # while tracking the best sum of the two longest child branches
            # seen so far -- that sum is the diameter through the node.
            nonlocal longest_diameter
            if not node:
                return 0
            longest_branch_len1 = 0
            longest_branch_len2 = 0
for child in node.children:
branch_len = find_longest_diameter(child) + 1
if branch_len > longest_branch_len1:
longest_branch_len2 = max(longest_branch_len2, longest_branch_len1)
longest_branch_len1 = branch_len
elif branch_len > longest_branch_len2:
longest_branch_len2 = branch_len
longest_diameter = max(longest_diameter, longest_branch_len1 + longest_branch_len2)
return longest_branch_len1
find_longest_diameter(root)
return longest_diameter | [
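# Illustrative driver (not part of the submission); Node follows the
# commented-out LeetCode definition at the top of the file:
if __name__ == '__main__':
    class Node:
        def __init__(self, val=None, children=None):
            self.val = val
            self.children = children if children is not None else []

    leaf1, leaf2 = Node(3), Node(4)
    root = Node(1, [Node(2, [leaf1]), leaf2])
    print(Solution().diameter(root))  # 3: leaf1 -> 2 -> 1 -> leaf2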
"denys@dasera.com"
] | denys@dasera.com |
3e229daf1c200bb0454cd6367a521b44a361e1d2 | a170eb7fee1ecec17d87625ba6631bce8456ecf7 | /stacks/maximum-element/solution.py | 19f38afcf072591c0a6f3c1c8869c0d95affa9f1 | [] | no_license | takuti/HackerRank | 9c92a58d2ae43ea15069659bf3e9785504bc6a3e | 3a2057ea0527d091e390ea1d609e559c5ff0045a | refs/heads/master | 2023-06-03T12:25:57.452552 | 2021-06-21T22:12:36 | 2021-06-21T22:12:36 | 369,939,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | #!/bin/python
import math
import os
import random
import re
import sys
#
# Complete the 'getMax' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts STRING_ARRAY operations as parameter.
#
def getMax(operations):
stack = []
res = []
max_in_stack = 0
for op in operations:
lst = op.split(' ')
o = lst[0]
if o == '1': # 1 x
val = int(lst[1])
stack.append(val)
if val > max_in_stack:
max_in_stack = val
        elif o == '2': # 2: pop
            val = stack.pop(-1)
            if val == max_in_stack:
                # rescanning is O(n); see the aux-stack variant after this file
                max_in_stack = 0 if len(stack) == 0 else max(stack)
else: # 3
res.append(max_in_stack)
return res
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(raw_input().strip())
ops = []
for _ in xrange(n):
ops_item = raw_input()
ops.append(ops_item)
res = getMax(ops)
fptr.write('\n'.join(map(str, res)))
fptr.write('\n')
fptr.close()
| [
"k.takuti@gmail.com"
] | k.takuti@gmail.com |
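The pop branch above rescans the stack with max(stack) whenever the current maximum leaves, making '2' operations O(n) in the worst case. A common alternative -- a sketch, not part of the submitted solution -- keeps a parallel stack of running maxima for O(1) queries:

def get_max_fast(operations):
    stack, maxes, res = [], [], []
    for op in operations:
        parts = op.split()
        if parts[0] == '1':          # push x
            v = int(parts[1])
            stack.append(v)
            maxes.append(v if not maxes else max(v, maxes[-1]))
        elif parts[0] == '2':        # pop
            stack.pop()
            maxes.pop()
        else:                        # query max
            res.append(maxes[-1])
    return res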
ee82f7737c2b4bb17aecaf8168bcff6011eae8de | 1064038560fd177e1274f9d50bcf4e466700ca79 | /hcseduapp/migrations/0011_multiplechoicea_video.py | 071cac8d384ba902a1f25934d92e102580c21012 | [] | no_license | YingjiaLi1/HcseduApp | a5c546d51b5a22f8554772cc9b0fe41d5369844d | 2ebd3732fd6f41073784f8347a410302b4d3a328 | refs/heads/master | 2020-06-30T05:32:57.340098 | 2019-08-29T00:45:10 | 2019-08-29T00:45:10 | 200,742,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-08-16 00:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hcseduapp', '0010_linkedq_opno'),
]
operations = [
migrations.AddField(
model_name='multiplechoicea',
name='video',
field=models.CharField(blank=True, max_length=50),
),
]
| [
"“2414112l@student.gla.ac.uk”"
] | “2414112l@student.gla.ac.uk” |
208dfd445577442be6e09ca6e927cdf8157d070b | 73bca317047b37eda3cb6f49704a0ba7382f8d98 | /background/network_scan_detector.py | f0c099048749d2b4efc39a14e2edc4694b35d54f | [
"MIT"
] | permissive | Hadhat/flow-inspector | aa0e65f8117367549d88f65dec1792c9adc4875e | b53d5a06046992ae2cb9be8586454da1625f215e | refs/heads/master | 2020-12-25T13:24:03.553525 | 2014-08-12T12:19:43 | 2014-08-12T12:19:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | from analysis_base import AnalysisBase
import config
import common
class NetworkScanDetector(AnalysisBase):
def __init__(self, flowbackend, databackend):
AnalysisBase.__init__(self, flowbackend, databackend)
def analyze(self, startBucket, endBucket):
tableName = common.DB_FLOW_PREFIX + str(self.flowBackend.getBucketSize(startBucket, endBucket, 1000))
#print self.flowBackend.run_query(tableName, "SELECT proto, srcIP, dstIP, dstPort, COUNT(DISTINCT dstIP) AS di from %s WHERE pkts <= 3 AND (proto != 1 OR dstPort = 2048) GROUP BY proto, srcIP, dstPort HAVING di >= 50 ORDER BY di DESC");
| [
"braun@net.in.tum.de"
] | braun@net.in.tum.de |
6a2021d0dc6d0b3741d58fc65b577d713cf89407 | 7c355b29791f4a3a8a98b68b05a6668a0f7d2e9e | /decode.py | 4fd525d50c522d3f49a8e846947475d544944dd6 | [] | no_license | msaspeech/Arabic_ASR | 4b14aba71ef949da81bfae472502ab26f7afc232 | dac9dbfdfb3d66155b6a61bcb3ff17c4a38d827d | refs/heads/master | 2023-04-08T07:56:32.763349 | 2019-07-22T10:01:38 | 2019-07-22T10:01:38 | 191,977,084 | 3 | 2 | null | 2023-03-24T23:08:06 | 2019-06-14T16:46:16 | Python | UTF-8 | Python | false | false | 178 | py | from models.Speech_API.models.speech_recognition import recognize_speech
decoded_sentence = recognize_speech("test.wav", latent_dim=300, architecture=1)
print(decoded_sentence)
| [
"sofiane.mdjk@gmail.com"
] | sofiane.mdjk@gmail.com |
0ba768c26a48b70e9506ac0d5ef54ada17b6814a | c94d11f27b745194b091a16df3e9b4dbf60d3e3a | /process-ontology-flask.py | f3fb55da2ff110322c068cae7415a26a91326fce | [] | no_license | jbgour/OWL-reasoning | 4f008e06d2c0159fafc1a5806ee608a839e4a15b | c821bf46b3a60508e9c65d169bcb5e05b9898ad7 | refs/heads/main | 2023-02-24T06:34:01.557146 | 2021-01-31T17:01:21 | 2021-01-31T17:01:21 | 328,758,855 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,051 | py | # Fichier site_dynamique.py
from owlready2 import *
onto = get_ontology("process-ontology.owl").load()
from flask import Flask, url_for
app = Flask(__name__)
sync_reasoner_pellet([onto], infer_property_values=True, infer_data_property_values=True, debug=True,
keep_tmp_file=True)
@app.route('/')
def page_ontologie():
html = """<html><body>"""
html += """<h2>Ontologie '%s'</h2>""" % onto.base_iri
html += """<h3>Classes racines</h3>"""
for Class in Thing.subclasses():
html += """<p><a href="%s">%s</a></p>""" % (url_for("page_classe", iri=Class.iri), Class.name)
html += """</body></html>"""
return html
@app.route('/classe/<path:iri>')
def page_classe(iri):
Class = IRIS[iri]
html = """<html><body><h2>Classe '%s'</h2>""" % Class.name
html += """<h3>superclasses</h3>"""
for SuperClass in Class.is_a:
if isinstance(SuperClass, ThingClass):
html += """<p><a href="%s">%s</a></p>""" % (url_for("page_classe", iri=SuperClass.iri), SuperClass.name)
else:
html += """<p>%s</p>""" % SuperClass
html += """<h3>Classes équivalentes</h3>"""
for EquivClass in Class.equivalent_to:
html += """<p>%s</p>""" % EquivClass
html += """<h3>Sous-classes</h3>"""
for SousClass in Class.subclasses():
html += """<p><a href="%s">%s</a></p>""" % (url_for("page_classe", iri=SousClass.iri), SousClass.name)
html += """<h3>Individus</h3>"""
for individu in Class.instances():
html += """<p><a href="%s">%s</a></p>""" % (url_for("page_individu", iri=individu.iri), individu.name)
html += """</body></html>"""
return html
@app.route('/individu/<path:iri>')
def page_individu(iri):
individu = IRIS[iri]
html = """<html><body><h2>Individu '%s'</h2>""" % individu.name
html += """<h3>Classes</h3>"""
# TODO: ajouter des liens vers chacune des classes dont l'individu est instance
classGroupAncestors = []
for classgroup in individu.is_a:
print(classgroup)
classGroupAncestors = list(classgroup.ancestors())
isProcess = False
for a in classGroupAncestors:
if a.name == 'Process':
isProcess = True
html += """<p><a href="%s">%s</a></p>""" % (url_for("page_classe", iri=classgroup.iri), classgroup.name)
html += """<h3>Relations</h3>"""
# TODO: pour les individus qui sont instances de Process, donner les propriétés
if isProcess:
for prop in individu.get_properties():
for value in prop[individu]:
print(".%s == %s" % (prop.python_name, value))
def valueToDisplay(value):
                    try:
                        return value.name
                    except AttributeError:  # literals (str/int/...) have no .name
                        return value
html += """<p>.%s == %s</p>""" % (prop.python_name, valueToDisplay(value))
html += """</body></html>"""
return html
import werkzeug.serving
werkzeug.serving.run_simple("localhost", 5000, app)
| [
"jeanbaptiste.gourlet@hotmail.fr"
] | jeanbaptiste.gourlet@hotmail.fr |
d739614e11fd2e45bcceb34f65bad95af7031d9b | c7a3d3d83eafe816a7b9e2daceccec7e9cf95211 | /blogging/argcomplete_patch.py | a898ec8110e60e9e8cef020bdf2e159a7b4f1553 | [
"MIT"
] | permissive | cuyu/blogging | a5c0c844f82e2fcc60e7cfe1ebc300f7081c812e | 51ecf90d0e81c97223c66bda8b18ded0913a45ff | refs/heads/master | 2021-01-02T08:48:19.897829 | 2019-01-25T08:56:55 | 2019-01-25T08:56:55 | 99,064,270 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,933 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A monkey patch to fix auto complete failed when inputs contains unicode words in zsh
See https://github.com/kislyuk/argcomplete/issues/228 for the discussions
"""
import argcomplete
from argcomplete import *
def hacked_call(self, argument_parser, always_complete_options=True, exit_method=os._exit, output_stream=None,
exclude=None, validator=None, print_suppressed=False, append_space=None,
default_completer=FilesCompleter()):
self.__init__(argument_parser, always_complete_options=always_complete_options, exclude=exclude,
validator=validator, print_suppressed=print_suppressed, append_space=append_space,
default_completer=default_completer)
if "_ARGCOMPLETE" not in os.environ:
# not an argument completion invocation
return
global debug_stream
try:
debug_stream = os.fdopen(9, "w")
except:
debug_stream = sys.stderr
if output_stream is None:
try:
output_stream = os.fdopen(8, "wb")
except:
debug("Unable to open fd 8 for writing, quitting")
exit_method(1)
# print("", stream=debug_stream)
# for v in "COMP_CWORD COMP_LINE COMP_POINT COMP_TYPE COMP_KEY _ARGCOMPLETE_COMP_WORDBREAKS COMP_WORDS".split():
# print(v, os.environ[v], stream=debug_stream)
ifs = os.environ.get("_ARGCOMPLETE_IFS", "\013")
if len(ifs) != 1:
debug("Invalid value for IFS, quitting [{v}]".format(v=ifs))
exit_method(1)
comp_line = os.environ["COMP_LINE"]
comp_point = int(os.environ["COMP_POINT"])
# Adjust comp_point for wide chars
if USING_PYTHON2:
comp_point = len(ensure_str(comp_line[:comp_point]))
else:
comp_point = len(ensure_str(ensure_bytes(comp_line)[:comp_point]))
comp_line = ensure_str(comp_line)
cword_prequote, cword_prefix, cword_suffix, comp_words, last_wordbreak_pos = split_line(comp_line, comp_point)
# _ARGCOMPLETE is set by the shell script to tell us where comp_words
# should start, based on what we're completing.
# 1: <script> [args]
# 2: python <script> [args]
# 3: python -m <module> [args]
start = int(os.environ["_ARGCOMPLETE"]) - 1
comp_words = comp_words[start:]
# debug(
# "\nLINE: '{l}'\nPREQUOTE: '{pq}'\nPREFIX: '{p}'".format(l=comp_line, pq=cword_prequote, p=cword_prefix).encode(
# 'utf-8'),
# "\nSUFFIX: '{s}'".format(s=cword_suffix).encode('utf-8'),
# "\nWORDS:", comp_words)
completions = self._get_completions(comp_words, cword_prefix, cword_prequote, last_wordbreak_pos)
debug("\nReturning completions:", completions)
output_stream.write(ifs.join(completions).encode(sys_encoding))
output_stream.flush()
debug_stream.flush()
exit_method(0)
argcomplete.CompletionFinder.__call__ = hacked_call
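# Usage sketch (added for illustration, not part of the original patch): simply
# importing this module applies the fix; argcomplete is then used as usual, e.g.
#   import argparse
#   from blogging import argcomplete_patch  # noqa: F401 (applies the monkey patch)
#   parser = argparse.ArgumentParser()
#   argcomplete.autocomplete(parser)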
| [
"icyarm@qq.com"
] | icyarm@qq.com |
ee55e6039ac4ac62e5acce167fbe3a3938014bbc | 44f791c94194d7001dfd12f913c2a3edf8b2d27d | /utils/Tuples.py | 4e74cf353ad25ddaa0d33023725a84442ff9fa94 | [] | no_license | Josephbakulikira/Ray-Tracing-with-python-from-Scratch | 41fadfa3b8b071ae3161baf0df348824d886b835 | d77bb367e37bd0d1e2c84513906d49586581d4e7 | refs/heads/master | 2023-04-28T10:46:44.519315 | 2021-06-02T22:25:02 | 2021-06-02T22:25:02 | 354,248,435 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | import numpy as np
from math import sqrt, pow
class Tuples:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def add(a, b):
if a.w + b.w == 0:
return Vector3(a.x + b.x, a.y + b.y , a.z + b.z)
return Point(a.x + b.x, a.y + b.y , a.z + b.z)
def sub(a, b):
if a.w - b.w == 0:
return Vector3(a.x - b.x, a.y - b.y , a.z - b.z)
return Point(a.x - b.x, a.y - b.y , a.z - b.z)
def multiply(tuple, s):
if tuple.w == 0:
return Vector3(tuple.x * s, tuple.y * s, tuple.z * s)
return Point(tuple.x * s, tuple.y * s, tuple.z * s)
def divide(tuple, d):
if tuple.w == 0:
return Vector3(tuple.x / d, tuple.y / d, tuple.z / d)
        return Point(tuple.x / d, tuple.y / d, tuple.z / d)
def reflect(vec, normal):
return Tuples.sub(vec, Tuples.multiply(Tuples.multiply(normal, 2), Tuples.dot(vec, normal)))
def NegateTuple(tuple):
if tuple.w == 0:
return Vector3(tuple.x * -1, tuple.y * -1, tuple.z * -1)
return Point(tuple.x * -1, tuple.y * -1, tuple.z * -1)
def dot(a, b):
return a.x * b.x + a.y * b.y + a.z * b.z
def cross(a, b):
return Vector3((a.y * b.z) - (a.z * b.y),
(a.z * b.x) - (a.x * b.z),
(a.x * b.y) - (a.y * b.x))
def toTuple(matrix):
if matrix[3][0] == 0:
return Vector3(matrix[0][0], matrix[1][0], matrix[2][0])
return Point(matrix[0][0], matrix[1][0], matrix[2][0])
def toMatrix(tuple):
return np.array([ [tuple.x], [tuple.y], [tuple.z], [tuple.w] ])
class Vector3(Tuples):
def __init__(self, x, y, z):
super().__init__(x, y, z)
self.w = 0
def zeros():
return Vector3(0, 0, 0)
def units():
return Vector3(1, 1, 1)
def negate(self):
self.x *= -1
self.y *= -1
self.z *= -1
def GetMagnitude(v):
return sqrt(pow(v.x, 2) + pow(v.y, 2) + pow(v.z, 2) + pow(v.w, 2))
def Normalize(v):
mag = sqrt(pow(v.x, 2) + pow(v.y, 2) + pow(v.z, 2) + pow(v.w, 2))
try:
return Vector3(v.x/mag, v.y/mag, v.z/mag)
except ZeroDivisionError:
return Vector3(0, 0, 0)
def __repr__(self):
return f'Vector3 --> ( x: {self.x}, y: {self.y}, z: {self.z})'
class Point(Tuples):
def __init__(self, x, y, z):
super().__init__(x, y, z)
self.w = 1
def zeros():
return Point(0, 0, 0)
def units():
return Point(1, 1, 1)
def __repr__(self):
return f'Point --> ( x: {self.x}, y: {self.y}, z: {self.z})'
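# Minimal usage sketch (added for illustration; not part of the original module):
if __name__ == "__main__":
    v = Vector3(1, 2, 2)
    p = Point(0, 1, 0)
    print(Tuples.add(p, v))                  # Point --> ( x: 1, y: 3, z: 2)
    print(Vector3.GetMagnitude(v))           # 3.0, since sqrt(1 + 4 + 4) is 3
    print(Tuples.dot(v, Vector3(1, 0, 0)))   # 1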
| [
"48150537+Josephbakulikira@users.noreply.github.com"
] | 48150537+Josephbakulikira@users.noreply.github.com |
882b15701109914a0cfce2e6a901b2d32f952f93 | 940755a6b0c475ef0e3fb89564953a2657a59d44 | /04_build_nn_quickly.py | 479f84fc277003273c99e0abe291016fa5695b89 | [] | no_license | LeungHan/pytorch-learn | 5aa62f5f87e1b404a609e1057b21e27e7ee18185 | 56f9e762014bd7f3c8c013774121c53366e7afa9 | refs/heads/master | 2022-04-22T19:59:51.079941 | 2020-04-27T03:26:42 | 2020-04-27T03:31:23 | 259,193,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable
if __name__ == "__main__":
# load data
n_data = torch.ones(100,2)
x0 = torch.normal(2*n_data,1)
    y0 = torch.zeros(100)  # label 0
x1 = torch.normal(-2*n_data,1)
    y1 = torch.ones(100)  # label 1
x = torch.cat((x0,x1),0).type(torch.FloatTensor)
y = torch.cat((y0, y1), 0).type(torch.LongTensor)
x,y = Variable(x),Variable(y)
net = torch.nn.Sequential(
torch.nn.Linear(2,10),
torch.nn.ReLU(),
torch.nn.Linear(10,2)
)
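    # For comparison (added illustration, not in the original script), the same
    # network written as a torch.nn.Module subclass would look like:
    #   class Net(torch.nn.Module):
    #       def __init__(self):
    #           super(Net, self).__init__()
    #           self.hidden = torch.nn.Linear(2, 10)
    #           self.out = torch.nn.Linear(10, 2)
    #       def forward(self, x):
    #           return self.out(torch.relu(self.hidden(x)))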
optimizer = torch.optim.SGD(net.parameters(), lr = 0.02)
loss_func = torch.nn.CrossEntropyLoss()
for i in range(100):
out = net(x)
loss = loss_func(out, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 2 == 0:
plt.cla()
            # torch.max(a, 1) returns the maximum element of each row of a
            # torch.max(...)[1] returns only the index of each maximum
            # dim=0 computes along columns; dim=1 computes along rows
tmp = torch.softmax(out, dim=1)
            predict = torch.max(tmp, 1)[1]  # index of the max value in each row
pred_y = predict.data.numpy()
target_y = y.data.numpy()
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)
plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
plt.pause(0.1)
plt.ioff()
plt.show()
| [
"1844113541@qq.com"
] | 1844113541@qq.com |
d604d05e0d9730fd0585f355762e61fb909d71a0 | c9d5ddaad2197044c60260d43bb7e46f7ad035ef | /model_utils.py | fa66284845f236ee35e0e13c3ae0c13e9c0233cd | [] | no_license | Li-Ming-Fan/my_reader | 938f6e76004f70cf565ed3bf12d774ceb31177a9 | 11950798c7c1f76bfbf7dc6aa48676bccb8ef8b8 | refs/heads/master | 2022-04-10T08:27:49.231768 | 2020-03-15T13:39:55 | 2020-03-15T13:39:55 | 211,350,095 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,942 | py |
import os
import json
from utils import compute_bleu_rouge
from utils import normalize
#
def do_eval(model, batcher, settings, result_dir=None, result_prefix=None, save_full_info=False):
"""
"""
pred_answers, ref_answers = [], []
total_loss, total_num = 0, 0
count = 0
while True:
#
batch = batcher.get_next_batch()
if batch is None: break
#
results = model.run_eval_one_batch(batch)
count += 1
print(count)
#
loss = results["loss_optim"]
idx_passage = results["idx_passage"]
idx_start = results["idx_start"]
idx_end = results["idx_end"]
# pred_prob = results["pred_prob"]
#
batch_size = len(idx_passage)
total_loss += loss * batch_size
total_num += batch_size
#
sidx = 0
for sidx in range(batch_size):
#
sample = batch['data_raw'][sidx]
idx_p_curr = idx_passage[sidx]
idx_s_curr = idx_start[sidx]
idx_e_curr = idx_end[sidx]
# prob_curr = pred_prob[sidx]
#
pred_a = ''.join(sample['passages'][idx_p_curr]['passage_tokens'][idx_s_curr: idx_e_curr + 1])
#
if save_full_info:
sample['pred_answers'] = [pred_a]
pred_answers.append(sample)
else:
pred_answers.append({'question_id': sample['question_id'],
'question_type': sample['question_type'],
'answers': [ pred_a ],
'entity_answers': [[]],
'yesno_answers': []})
if 'answers' in sample:
ref_answers.append({'question_id': sample['question_id'],
'question_type': sample['question_type'],
'answers': sample['answers'],
'entity_answers': [[]],
'yesno_answers': []})
#
# saving
if result_dir is not None and result_prefix is not None:
result_file = os.path.join(result_dir, result_prefix + '.json')
with open(result_file, 'w', encoding="utf-8") as fout:
for pred_answer in pred_answers:
fout.write(json.dumps(pred_answer, ensure_ascii=False) + '\n')
#
model.logger.info('saving {} results to {}'.format(result_prefix, result_file))
#
#
# metric
# this average loss is invalid on test set, since we don't have true start_id and end_id
ave_loss = 1.0 * total_loss / total_num
#
if len(ref_answers) > 0:
pred_dict, ref_dict = {}, {}
for pred, ref in zip(pred_answers, ref_answers):
question_id = ref['question_id']
if len(ref['answers']) > 0:
pred_dict[question_id] = normalize(pred['answers'])
ref_dict[question_id] = normalize(ref['answers'])
#
bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict)
else:
bleu_rouge = None
#
print("ave_loss: %g" % ave_loss)
print("bleu_rouge:")
print(bleu_rouge)
#
model.logger.info('ave_loss: {}'.format(ave_loss))
model.logger.info('bleu_rouge: {}'.format(bleu_rouge))
#
return ave_loss, bleu_rouge
#
def do_train(model, train_batcher, settings):
"""
"""
total_num, total_loss = 0, 0
sect_batch_loss = 0.0
sect_range = settings.check_period_batch
loss_best = 10000
#
# count = 0
while True:
#
batch = train_batcher.get_next_batch()
if batch is None: break
#
results = model.run_train_one_batch(batch)
# count += 1
# print(count)
#
loss = results["loss_optim"]
global_step = results["global_step"]
lr = results["lr"]
#
batch_size = len(batch["batch_questions"])
total_loss += loss * batch_size
total_num += batch_size
#
sect_batch_loss += loss
#
if global_step % sect_range == 0:
print("curr, lr, loss: %d, %g, %g" % (global_step, lr, loss) )
#
sect_mean_loss = sect_batch_loss / sect_range
model.logger.info('average loss from batch {} to {} is {}'.format(
global_step - sect_range + 1, global_step, sect_mean_loss))
sect_batch_loss = 0
#
model.save_ckpt(settings.model_dir, settings.model_tag, global_step)
if sect_mean_loss < loss_best:
model.save_ckpt_best(settings.model_dir_best, settings.model_tag, global_step)
loss_best = sect_mean_loss
#
return 1.0 * total_loss / total_num
#
def do_predict(model, batcher, settings, result_dir=None, result_prefix=None, save_full_info=False):
"""
"""
pred_answers = []
total_num = 0
count = 0
while True:
#
batch = batcher.get_next_batch()
if batch is None: break
#
results = model.predict_with_pb_from_batch(batch)
count += 1
print(count)
#
idx_passage = results["idx_passage"]
idx_start = results["idx_start"]
idx_end = results["idx_end"]
# pred_prob = results["pred_prob"]
#
batch_size = len(idx_passage)
total_num += batch_size
#
sidx = 0
for sidx in range(batch_size):
#
sample = batch['data_raw'][sidx]
idx_p_curr = idx_passage[sidx]
idx_s_curr = idx_start[sidx]
idx_e_curr = idx_end[sidx]
# prob_curr = pred_prob[sidx]
#
pred_a = ''.join(sample['passages'][idx_p_curr]['passage_tokens'][idx_s_curr: idx_e_curr + 1])
#
if save_full_info:
sample['pred_answers'] = [pred_a]
pred_answers.append(sample)
else:
pred_answers.append({'question_id': sample['question_id'],
'question_type': sample['question_type'],
'answers': [ pred_a ],
'entity_answers': [[]],
'yesno_answers': []})
#
# saving
if result_dir is not None and result_prefix is not None:
result_file = os.path.join(result_dir, result_prefix + '.json')
with open(result_file, 'w', encoding="utf-8") as fout:
for pred_answer in pred_answers:
fout.write(json.dumps(pred_answer, ensure_ascii=False) + '\n')
#
model.logger.info('saving {} results to {}'.format(result_prefix, result_file))
#
#
print("prediction finished")
#
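# Hypothetical driver sketch (not part of the original module); the intended
# call order is training first, then evaluation or prediction, e.g.:
#   do_train(model, train_batcher, settings)
#   do_eval(model, eval_batcher, settings, result_dir='results', result_prefix='dev')
# where `model`, the batchers and `settings` are constructed elsewhere in the project.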
| [
"li_m_f@163.com"
] | li_m_f@163.com |
bdbf431600e3d185cef15b8104b5c4a77abad69b | e338694a7f01a24ffcd3a9fb91c5d11ab7b5b2ba | /scripts/word_counts_to_vocab.py | 636df809da3e8154a1ce353d62bb77c0b82fa2c4 | [
"Apache-2.0"
] | permissive | zaffnet/pocolm | c457027b81073e3568939f46d87cd6c90ea28375 | d146c1fe8a05a709e4ddec941dba5e8e8ad67527 | refs/heads/master | 2020-07-01T09:00:31.532192 | 2019-08-07T20:03:55 | 2019-08-07T20:03:55 | 201,118,403 | 0 | 0 | NOASSERTION | 2019-08-07T19:53:13 | 2019-08-07T19:53:13 | null | UTF-8 | Python | false | false | 8,782 | py | #!/usr/bin/env python
# We're using python 3.x style print but want it to work in python 2.x.
from __future__ import print_function
import os
import argparse
import sys
import operator
from collections import defaultdict
parser = argparse.ArgumentParser(description="Creates a vocabulary file from a 'counts' directory "
                                 "as created by get_counts.py and a set of weights as created by "
                                 "get_unigram_weights.py. A vocabulary file has the same format as "
                                 "a 'symbols' file from OpenFST, i.e. each line is 'word integer-symbol'. "
                                 "However, it is necessary that the BOS, EOS and unknown-word (normally "
                                 "<s>, </s> and <unk>), be given symbols 1, 2 and 3 respectively. You "
                                 "may use this script to generate the file, or generate it manually. "
                                 "The vocabulary file is written to the standard output.",
epilog="See also wordlist_to_vocab.py",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-words', type=int,
help='If specified, the maximum number of words to include '
'in the vocabulary. If not specified, all words will be included.')
parser.add_argument('--weights',
help="File with weights for each data-source (except dev), in the "
"same format as from get_unigram_weights.py, i.e. each line has "
"'corpus-name weight'. By default, no weighting is used.")
parser.add_argument('--fold-dev-into', type=str,
help='If supplied, the name of data-source into which to fold the '
'counts of the dev data for purposes of vocabulary estimation '
'(typically the same data source from which the dev data was '
'originally excerpted).')
parser.add_argument('--unk-symbol', type=str, default='<unk>',
help='Written form of the unknown-word symbol, normally <unk> '
'or <UNK>. Will not normally appear in the text data.')
parser.add_argument('--bos-symbol', type=str, default='<s>',
help='Written form of the beginning-of-sentence marker, normally <s> '
'or <S>. Appears in the ARPA file but should not appear in the '
'text data.')
parser.add_argument('--eos-symbol', type=str, default='</s>',
                    help='Written form of the end-of-sentence marker, normally </s> '
                    'or </S>. Appears in the ARPA file but should not appear in the '
'text data.')
parser.add_argument('--epsilon-symbol', type=str, default='<eps>',
help='Written form of label used for word-index zero, normally <eps>, '
'for compatibility with OpenFst. This is never used to represent an '
'actual word, and if this appears in your text data it will be mapped '
'to the unknown-word symbol. Override this at your own risk.')
parser.add_argument('count_dir',
help='Directory in which to look for counts (see get_counts.py)')
args = parser.parse_args()
# read in the weights.
name_to_weight = {}
if args.weights is not None:
f = open(args.weights, 'r')
num_weights_read = 0
for line in f:
try:
[name, weight] = line.split()
weight = float(weight) # check it's a float.
if weight < 1.0e-10: # this will ensure we get the vocab size we
# wanted, even if some weights were estimated
# as zero.
weight = 1.0e-10
print('word_counts_to_vocab.py: warning: flooring weight for {0} to {1}'.format(
name, weight), file=sys.stderr)
name_to_weight[name] = weight
except Exception as e:
print(str(e), file=sys.stderr)
sys.exit('word_counts_to_vocab.py: bad line {0} in weights file {1}'.format(
line[0:-1], args.weights))
num_weights_read += 1
if num_weights_read == 0:
sys.exit('word_counts_to_vocab.py: empty weights file ' + args.weights)
f.close()
# map from word to weighted count.
word_to_weighted_count = defaultdict(float)
saw_counts_with_weight = False
saw_counts_without_weight = False
num_counts_files = 0
for name in os.listdir(args.count_dir):
if name.endswith('.counts'):
num_counts_files += 1
# read the counts.
name_no_suffix = name[:-7]
if name_no_suffix == 'dev':
if args.fold_dev_into is None:
continue # don't include the dev counts unless we're told to
# fold them into some other data source.
else:
name_no_suffix = args.fold_dev_into
if name_no_suffix in name_to_weight:
weight = name_to_weight[name_no_suffix]
saw_counts_with_weight = True
else:
weight = 1.0
saw_counts_without_weight = True
counts_path = args.count_dir + os.sep + name
f = open(counts_path, 'r')
for line in f:
try:
[count, word] = line.split()
count = int(count) # just check that it's an integer.
word_to_weighted_count[word] += count * weight
except Exception as e:
print(str(e), file=sys.stderr)
sys.exit('word_counts_to_vocab.py: bad line in counts file {0}: {1}'.format(
counts_path, line[:-1]))
f.close()
# note: if weights are provided, we expect 1 more counts files than the
# number of weights, due to the 'dev.counts'.
if ((saw_counts_with_weight and saw_counts_without_weight) or
((args.weights is not None) and (num_counts_files != len(name_to_weight) + 1))):
sys.exit('word_counts_to_vocab.py: it looks like the names in the weights file {0} '
'do not match the files in the counts directory {1}'.format(
args.weights, args.count_dir))
# deal with BOS, EOS and UNK, and <eps>; we can
# ensure the correct ordering by adding counts larger than the max.
# this part prints warnings if these were present in the raw counts.
max_weighted_count = max(word_to_weighted_count.values())
if args.epsilon_symbol in word_to_weighted_count:
print('word_counts_to_vocab.py: warning: epsilon symbol {0} appears in the text. '
' It will be replaced by {1} during data preparation.'.format(
args.epsilon_symbol, args.unk_symbol), file=sys.stderr)
word_to_weighted_count[args.epsilon_symbol] = 5.0 * max_weighted_count
if args.bos_symbol in word_to_weighted_count:
print('word_counts_to_vocab.py: severe warning: beginning-of-sentence symbol {0}'
' appears in the text. It will be replaced by {1} during data '
'preparation.'.format(args.bos_symbol, args.unk_symbol), file=sys.stderr)
word_to_weighted_count[args.bos_symbol] = 4.0 * max_weighted_count
if args.eos_symbol in word_to_weighted_count:
print('word_counts_to_vocab.py: severe warning: end-of-sentence symbol {0}'
' appears in the text. It will be replaced by {1} during data '
'preparation.'.format(args.eos_symbol, args.unk_symbol), file=sys.stderr)
word_to_weighted_count[args.eos_symbol] = 3.0 * max_weighted_count
if args.unk_symbol in word_to_weighted_count:
print('word_counts_to_vocab.py: mild warning: unknown-word symbol {0} appears in the text. '
'Make sure you know what you are doing.'.format(args.unk_symbol), file=sys.stderr)
word_to_weighted_count[args.unk_symbol] = 2.0 * max_weighted_count
sorted_list = sorted(word_to_weighted_count.items(),
key=operator.itemgetter(1), reverse=True)
if args.num_words is not None and len(sorted_list) > args.num_words + 1:
print('word_counts_to_vocab.py: you specified --num-words={0} so limiting the '
'vocabulary from {1} to {0} words based on {3}count.'.format(
args.num_words, len(sorted_list) - 1, args.num_words,
("weighted " if args.weights is not None else "")), file=sys.stderr)
sorted_list = sorted_list[0:args.num_words + 1]
# Here is where we produce the output of this program; it goes to the standard
# output.
index = 0
for [word, count] in sorted_list:
print(word, index)
index += 1
print('word_counts_to_vocab.py: created vocabulary with {0} entries'.format(len(sorted_list) - 1),
file=sys.stderr)
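# Example invocation (hypothetical paths; flags follow the argparse help above):
#   word_counts_to_vocab.py --num-words=20000 --weights=exp/unigram_weights \
#       --fold-dev-into=train_corpus data/counts > data/vocab.txt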
| [
"dpovey@gmail.com"
] | dpovey@gmail.com |
71dca2055bdb5ba65a6d62df5a2e08130b4b5940 | bfaa2b4c13343613d951ab288c321506f4b74fa2 | /Scripts_Python/Number_race_1.py | 1ccbd9390f38fbf0064c33f4c4a255bf83091c5d | [
"MIT"
] | permissive | Msarate16/FrameWorks_8A | 88aae4b0a728f023b0b4d05643f05d7db96d585e | 19e3084d59d6866c34988fbab4027d5d062d6bb8 | refs/heads/main | 2023-08-31T18:24:40.824447 | 2021-09-21T19:23:25 | 2021-09-21T19:23:25 | 402,074,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | import os
from random import randint
def dices():
    status = True
    pairs = 0
    while status:
        dice1 = randint(1, 6)
        dice2 = randint(1, 6)
        print("Die 1: ", dice1)
        print("Die 2: ", dice2)
        if dice1 == dice2:
            pairs += 1
            if pairs == 1:
                print("::: That is your first pair :::")
            else:
                status = False
                print("::: That is your second pair, you have won")
        else:
            input("::: Press any key to roll the dice again :::")
dices() | [
"noreply@github.com"
] | Msarate16.noreply@github.com |
dd9fde8de36512ec2ee7d88e4b47c058cd8f2c33 | 173f95e690f0823cd74732630f4fa68460a623c5 | /muver_api/migrations/0010_auto_20160504_0913.py | 4bfe850a428962a9cdec67ea363f31ae472999d3 | [] | no_license | kjmullen/mUver | 7e6d579aa338e0efe952afd428c4c535fcb2ace1 | fb03cb799634daf33d5196bac944a0daea084120 | refs/heads/master | 2016-09-14T10:58:40.826890 | 2016-06-06T22:06:21 | 2016-06-06T22:06:21 | 57,250,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-04 16:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('muver_api', '0009_auto_20160503_1632'),
]
operations = [
migrations.AddField(
model_name='job',
name='confirmation_mover',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='job',
name='confirmation_user',
field=models.BooleanField(default=False),
),
]
| [
"kevinkozzik@gmail.com"
] | kevinkozzik@gmail.com |
1947595924c21a1bf501f2d14a776b5ebfb39f31 | e6f762e3f5cdae05280274dfe90909ab1e95032f | /mlstds/context_processors.py | d3ba3e8ffbddc71079eeb2ccb0a7456e58fee542 | [] | no_license | tooxie/django-mlstds | 54344a6e03c13754f5a9b98131c7347379735785 | cff3ad58b73077e431cf8db545d596132dd655b5 | refs/heads/master | 2016-09-06T19:02:51.097451 | 2010-10-19T04:43:55 | 2010-10-19T04:43:55 | 1,000,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | # -*- coding: utf-8 -*-
from django.conf import settings
def urls(request):
url_dict = {}
for attr in dir(settings):
if attr.endswith('_URL'):
url_dict[attr] = getattr(settings, attr).replace('%s', '')
return url_dict
def language(request):
try:
lang = settings.LANGUAGE_CODE[:settings.LANGUAGE_CODE.index('-')]
    except ValueError:
lang = settings.LANGUAGE_CODE
return {'LANGUAGE': lang}
def site(request):
from django.contrib.sites.models import Site
return {'site': Site.objects.get_current()}
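# To activate these processors (hypothetical settings sketch, not part of this
# module), list them in the Django settings, e.g. on modern Django:
#   TEMPLATES[0]['OPTIONS']['context_processors'] += [
#       'mlstds.context_processors.urls',
#       'mlstds.context_processors.language',
#       'mlstds.context_processors.site',
#   ]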
| [
"alvaro@mourino.net"
] | alvaro@mourino.net |
d10a003b80f6ddccc84ddfa5947ddf51b8cc9e1d | f8e43d1f5e19258ab44bf34dbf06a9ccb4f4422e | /data_structures/s01_array.py | c2ee1fab47cf60fa0363abccb733856ff0d3f969 | [] | no_license | yanshugang/study_data_structures_and_algorithms | bf47a303dab29f067ec9dd4f7d57e53cb114a5a7 | 4aa8a3d9bcbfd169fbe821378ff219b347bda92f | refs/heads/master | 2020-04-13T09:58:59.965788 | 2019-07-26T06:30:33 | 2019-07-26T06:30:33 | 163,126,224 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | """
Linear structure: contiguous memory, elements can be accessed by index.
Types: array and list.
Implementing a fixed-length array on top of a list:
+ = = = = = = = +
| append | O(n) |
+ - - - - - - - +
| remove | O(n) |
+ - - - - - - - +
| access | O(1) |
+ = = = = = = = +
"""
# Python's built-in array stores a single type only (numeric or character); numpy.array is usually preferred.
class Array(object):
def __init__(self, size=32):
self._size = size
self._items = [None] * size
    def __getitem__(self, index):
return self._items[index]
def __setitem__(self, index, value):
self._items[index] = value
def __len__(self):
return self._size
def clear(self, value=None):
for i in range(len(self._items)):
self._items[i] = value
def __iter__(self):
for item in self._items:
yield item
def test_array():
size = 10
a = Array(size)
a[0] = 1
assert a[0] == 1
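# Simple entry point so the smoke test can be run directly (added for illustration):
if __name__ == "__main__":
    test_array()
    print("test_array passed")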
| [
"yanshugang11@163.com"
] | yanshugang11@163.com |
3d84ce90ce3962bc77142682f63e790d812925bb | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_367/ch22_2020_04_20_18_07_39_360782.py | b597fe84dbffca639b7593bf7e98b02e691bfba4 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | p=int(input('Quantos cigarros: '))
o=int(input('How many years: '))
cigarros= (p*360) * o
tempo= (cigarros *10)/1440  # 10 minutes per cigarette; 1440 minutes in a day
print('days lost: {0}'.format(tempo)) | [
"you@example.com"
] | you@example.com |
98ca94745ddff3a473f8e09f14c52f30b4d8cb72 | c63ec7aef9fa908a1bde776039b57a586a6f42ca | /helpdesk/cc.py | 117a61470c2309d38acf464cfc4bfc7480812334 | [] | no_license | terkpeh1990/gasinfosys | b7741342859cd11556804be55de4fc27f8e96aff | 1a4cac96fedf0374b9db8a330def342dab3a4b25 | refs/heads/main | 2023-07-10T01:11:40.282421 | 2021-08-13T08:26:46 | 2021-08-13T08:26:46 | 395,572,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,956 | py | # from django.db import models
# from django.contrib.auth.models import User
# from django.conf import settings
# from datetime import datetime,date
# from django.contrib.sessions.models import Session
# from django.core.validators import MaxValueValidator
# from django.db.models.signals import post_save,pre_save
# from django.dispatch import receiver
# from amis.settings import EMAIL_HOST_USER
# from django.core.mail import send_mail,EmailMessage
# import uuid
# from .utils import id_generator,incrementor
#
# # Create your models here.
# User = settings.AUTH_USER_MODEL
#
# class Grade(models.Model):
# grade_name = models.CharField(max_length=100)
#
# def __str__(self):
# return self.grade_name
#
# class Prority(models.Model):
# level_name = models.CharField(max_length=100)
#
# def __str__(self):
# return self.level_name
#
# class Region(models.Model):
# region_name = models.CharField(max_length=100)
#
# def __str__(self):
# return self.region_name
#
# class District(models.Model):
# districtname = models.CharField(max_length=100)
# region = models.ForeignKey(Region,blank=True,null=True, on_delete= models.CASCADE)
#
# def __str__(self):
# return self.districtname
#
# class Status(models.Model):
# status_name = models.CharField(max_length= 100)
#
# def __str__(self):
# return self.status_name
#
# class agent_Status(models.Model):
# astatus_name = models.CharField(max_length= 100)
#
# def __str__(self):
# return self.astatus_name
#
# class Category(models.Model):
# category_name = models.CharField(max_length=100)
#
# def __str__(self):
# return self.category_name
#
# class UserSession(models.Model):
# user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
# session = models.OneToOneField(Session, on_delete=models.CASCADE)
#
# class Profile(models.Model):
# user = models.OneToOneField(User,blank=True,null=True, on_delete= models.CASCADE)
# name = models.CharField(max_length=200,null=True,blank=True)
# email = models.CharField(max_length=200,null=True,blank=True)
# telephone = models.CharField(max_length=20)
# grade = models.ForeignKey(Grade,null=True,on_delete= models.CASCADE)
# region = models.ForeignKey(Region,blank=True,null=True, on_delete= models.CASCADE)
# district = models.ForeignKey(District,blank=True,null=True, on_delete= models.CASCADE)
# is_staff = models.BooleanField(default=False)
# is_agent = models.BooleanField(default=False)
# is_admin = models.BooleanField(default=False)
# is_director = models.BooleanField(default=False)
# is_new = models.BooleanField(default=False)
#
# def __str__(self):
# return self.name
#
#
# class Team(models.Model):
# team_name = models.CharField(max_length= 100)
# def __str__(self):
# return self.team_name
#
#
# class Technician(models.Model):
# technician = models.ForeignKey(Profile, on_delete=models.CASCADE)
# team = models.ForeignKey(Team, on_delete=models.CASCADE)
#
# def __str__(self):
# return self.technician.name
#
# class Ticket(models.Model):
# ess =(
# ('Escalate','Escalate'),
# )
#
# name = models.ForeignKey(Profile,null=True,blank=True,on_delete= models.CASCADE)
# subject = models.CharField(max_length=200,null=True,blank=True)
# description = models.CharField(max_length=200,null=True,blank=True)
# region = models.CharField(max_length=200,null=True,blank=True)
# district = models.CharField(max_length=200,null=True,blank=True)
# category = models.ForeignKey(Category,null=True,blank=True,on_delete= models.CASCADE)
# status = models.ForeignKey(Status,null=True,blank=True,on_delete= models.CASCADE)
# astatus = models.ForeignKey(agent_Status,null=True,blank=True,on_delete= models.CASCADE)
# prority = models.ForeignKey(Prority,null=True,blank=True,on_delete= models.CASCADE)
# agent = models.ForeignKey(Technician,null=True,blank=True,on_delete= models.CASCADE)
# agent_team = models.ForeignKey(Team,null=True,blank=True,on_delete= models.CASCADE)
# ticket_date = models.DateField()
# expected_date = models.DateField(null=True,blank=True)
# escalated = models.CharField(max_length = 60, choices = ess )
# remarks = models.CharField(max_length=200,null=True,blank=True)
# close_date =models.DateField(null=True,blank=True)
#
# def __str__(self):
# return self.subject
#
# class Meta():
# ordering = ["-id"]
#
#
# # def save(self):
# # if not self.id:
# # number = incrementor()
# # self.id = "AS" +"-" + "TN" +"-"+ str(number())
# # while Ticket.objects.filter(id=self.id).exists():
# # self.id = "AS" +"-" + "TN" +"-"+ str(number())
# # super(Ticket, self).save()
#
#
# # @receiver(post_save, sender=Ticket)
# # def send_mail(sender, instance,created, **kwargs):
# # user_email = instance.name.email
# # ticket_id = instance.id
# # subjects = instance.subject
# # description = instance.description
# # status = instance.status
# # to =user_email
# # # from_email =EMAIL_HOST_USER
# # html_content = "You have succesfully generated a ticket.<br>Details:<br>TIcket ID : %s <br> Describtion : %s <br> Ticket Status : %s <br><br><br>Thank You <br>IT HELPDESK"
# # body= html_content %(ticket_id,description,status)
# # subject =subjects
# # # message=EmailMessage(subject=subject',body,EMAIL_HOST_USER,[to])
# # message=EmailMessage(subject,body,EMAIL_HOST_USER,[to])
# # message.content_subtype='html'
# # message.send()
#
#
# class AssignedTicket(models.Model):
# ticketid= models.CharField(max_length=20, unique=True, primary_key=True,editable=False)
# name = models.ForeignKey(Profile,null=True,blank=True,on_delete= models.CASCADE)
# subject = models.CharField(max_length=200,null=True,blank=True)
# description = models.CharField(max_length=200,null=True,blank=True)
# prority = models.ForeignKey(Prority,null=True,blank=True,on_delete= models.CASCADE)
# ticket_date = models.DateField()
# expected_date = models.DateField(null=True,blank=True)
# status = models.ForeignKey(Status,null=True,blank=True,on_delete= models.CASCADE)
# astatus = models.ForeignKey(agent_Status,null=True,blank=True,on_delete= models.CASCADE)
#
# def __str__(self):
# return self.subject
#
# # @receiver(pre_save, sender=AssignedTicket)
# # def send_mail(sender, instance, **kwargs):
# # user_email = instance.name.email
# # ticket_id = instance.ticketid
# # subjects = instance.subject
# # description = instance.description
# # status = instance.status
# # to =user_email
# #
# # html_content = "Ticket with the details below has been assigned to you.<br>Details:<br>TIcket ID : %s <br> Describtion : %s <br> Ticket Status : %s <br><br><br>Thank You <br>IT HELPDESK"
# # body= html_content %(ticket_id,description,status)
# # subject =subjects
# #
# # message=EmailMessage(subject,body,EMAIL_HOST_USER,[to])
# # message.content_subtype='html'
# # message.send()
#
#
# class EscalatedTicket(models.Model):
# ticketid= models.CharField(max_length=20, unique=True, primary_key=True,editable=False)
# name = models.ForeignKey(Profile,null=True,blank=True,on_delete= models.CASCADE)
# subject = models.CharField(max_length=200,null=True,blank=True)
# description = models.CharField(max_length=200,null=True,blank=True)
# ticket_date = models.DateField(null=True,blank=True)
# expected_date = models.DateField(null=True,blank=True)
# status = models.ForeignKey(Status,null=True,blank=True,on_delete= models.CASCADE)
# astatus = models.ForeignKey(agent_Status,null=True,blank=True,on_delete= models.CASCADE)
#
#
# def __str__(self):
# return self.subject
#
# # @receiver(pre_save, sender=EscalatedTicket)
# # def send_mail(sender, instance, **kwargs):
# # user_email = instance.name.email
# # ticket_id = instance.ticketid
# # subjects = instance.subject
# # description = instance.description
# # status = instance.status
# # to =user_email
# #
# # html_content = "Ticket with the details below has been escalated to you.<br>Details:<br>TIcket ID : %s <br> Describtion : %s <br> Ticket Status : %s <br><br><br>Thank You <br>IT HELPDESK"
# # body= html_content %(ticket_id,description,status)
# # subject =subjects
# #
# # message=EmailMessage(subject,body,EMAIL_HOST_USER,[to])
# # message.content_subtype='html'
# # message.send()
#
# class Ticket_Comments(models.Model):
# ticket_id = models.ForeignKey(Ticket,on_delete= models.CASCADE)
# content = models.CharField(max_length=700)
# agent = models.ForeignKey(Technician,on_delete= models.CASCADE)
# creation_date = models.DateField(validators=[MaxValueValidator(limit_value=date.today)])
# # last_updated = models.DateTimeField()
#
# def __str__(self):
# return self.content
#
# class Escalate(models.Model):
# ticket_id = models.ForeignKey(Ticket,on_delete= models.CASCADE)
# agent = models.ForeignKey(Technician,on_delete= models.CASCADE)
# agent_team = models.ForeignKey(Team,null=True,blank=True,on_delete= models.CASCADE)
# escalated_date = models.DateField()
# reason = models.CharField(max_length=700)
#
# def __str__(self):
# return self.agent.technician.name
#
# class History(models.Model):
# ticket_id = models.ForeignKey(Ticket,on_delete= models.CASCADE)
# agent = models.ForeignKey(Technician,on_delete= models.CASCADE)
# creation_date = models.DateField(validators=[MaxValueValidator(limit_value=date.today)])
# last_updated = models.DateField()
# solved_date = models.DateField()
# staff = models.ForeignKey(Profile,on_delete= models.CASCADE)
| [
"68113347+ITtechnical@users.noreply.github.com"
] | 68113347+ITtechnical@users.noreply.github.com |
d35cb59ff4dce7e4ec40f93acc7ef6f00e528744 | 7c4db177c9bd28c65a0792d77e64d3121a10030e | /Software Fix/workaround/views.py | ea10d0cd16b5535d20dc68fa9e6c7a82e4f2f26b | [] | no_license | jsomiya/Smart-Store-Replenishment-System | 9ca8aa6d1339b4aabb260458becd7996c9acd7d3 | 53626de603c31d51f2b2862f79f0d5fce329a94f | refs/heads/master | 2022-11-10T21:44:35.378564 | 2020-06-18T07:21:13 | 2020-06-18T07:21:13 | 273,176,642 | 1 | 0 | null | 2020-06-18T07:56:33 | 2020-06-18T07:56:32 | null | UTF-8 | Python | false | false | 1,310 | py | from django.shortcuts import render
from pymongo import MongoClient
import random
client = MongoClient(
"mongodb+srv://ushita:ushita26@cluster0-s5zz8.mongodb.net/test?retryWrites=true&w=majority")
db = client.test
collection = db.items
# Home Page with Start Button
def button(request):
return render(request, 'home.html')
# Initial state of shelf
def output(request):
res = collection.find_one({"machine_no": 1})
return render(request, 'home.html', {"data": res, "data1": "initial"})
# Reduce stock below threshold
def lower(request):
res = collection.find_one({"machine_no": 1})
num1 = random.randrange(120, 1200, 120)
result = collection.update_one(
{"machine_no": 1}, {"$set": {"current_weight": num1}})
doc = collection.find_one({"machine_no": 1})
print(doc)
return render(request, 'home.html', {"data2": doc, "data": res, "data1": "low"})
# Increase stock above threshold
def increase(request):
res = collection.find_one({"machine_no": 1})
num2 = random.randrange(1200, 6000, 120)
result = collection.update_one(
{"machine_no": 1}, {"$set": {"current_weight": num2}})
doc = collection.find_one({"machine_no": 1})
print(doc)
return render(request, 'home.html', {"data2": doc, "data": res, "data1": "high"})
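# Hypothetical URLconf sketch (not part of this file) wiring up these views:
#   from django.urls import path
#   from workaround import views
#   urlpatterns = [
#       path('', views.button),
#       path('output/', views.output),
#       path('lower/', views.lower),
#       path('increase/', views.increase),
#   ]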
| [
"ushitag@gmail.com"
] | ushitag@gmail.com |
11a18ee577d876d1a1a56f94a0d171b9c39c9932 | 82caa7c74a95b7ffdf578d0942e1a2b7fac1edff | /wider resnet38/run/train.py | 7e6f1a9fbe8220e2c799e13db68fc452187fae02 | [] | no_license | yudiz97/11785-project | 50e0c5e7bded7b056db9772ff05a6c394814ac73 | e160bf5abf09cd61fadb7c45337868ecf2389ae7 | refs/heads/master | 2023-01-09T16:31:32.940628 | 2020-11-11T05:09:27 | 2020-11-11T05:09:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,232 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os,sys
sys.path.append("..")
import numpy as np
import tensorflow as tf
import data_utils as dt
from core import resnet38
os.environ['CUDA_VISIBLE_DEVICES'] = ''
config_gpu = tf.ConfigProto()
config_gpu.gpu_options.per_process_gpu_memory_fraction = 1.0
train_data_params = {'data_dir': '../data/CityDatabase',
'dataset': 'train',
'batch_size': 1,
'pred_save_path': '../data/pred_trainIDs',
'colored_save_path': '../data/pred_colored',
'labelIDs_save_path': '../data/pred_labelIDs'}
dataset = dt.CityDataSet(train_data_params)
params = {'batch_size': 128,
'decay_rate': 0.0002,
# 'feed_path': 'data/trained_weights/empty.npy',
'feed_path': 'data/saved_weights/modelA_40.npy',
'save_path': 'data/saved_weights/',
'tsboard_save_path': 'data/tsboard/'}
train_ep = 150
# val_step_iter = 100
save_ep = 10
with tf.Session() as sess:
#with tf.Session(config=config_gpu) as sess:
res38 = resnet38.ResNet38(params['feed_path'])
save_path = params['save_path']
batch_size = params['batch_size']
train_img = tf.placeholder(tf.float32, shape=[batch_size, 32, 32,
3])
train_label = tf.placeholder(tf.int64, shape=[batch_size])
[train_op, total_loss, train_acc, correct_preds] = res38.train(image=train_img, label=train_label, params=params)
save_dict_op = res38._var_dict
TrainLoss_sum = tf.summary.scalar('train_loss', total_loss)
TrainAcc_sum = tf.summary.scalar('train_acc', train_acc)
# ValLoss_sum = tf.summary.scalar('val_loss', total_loss)
# ValAcc_sum = tf.summary.scalar('val_acc', train_acc)
Train_summary = tf.summary.merge_all()
# Val_summary = tf.summary.merge([ValLoss_sum, ValAcc_sum])
writer = tf.summary.FileWriter(params['tsboard_save_path']+'modelA_40e3', sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
num_iters = np.int32(50000 / batch_size) + 1
print('Start training...')
for epoch in range(train_ep):
print('Eopch %d'%epoch)
for iters in range(num_iters):
next_images, next_labels = dataset.next_batch()
train_feed_dict = {train_img: next_images, train_label: next_labels}
[train_op_, total_loss_, train_acc_, Train_summary_] = sess.run([train_op, total_loss, train_acc, Train_summary], train_feed_dict)
writer.add_summary(Train_summary_, iters)
if iters % 50 == 0 and iters !=0:
print('Iter %d loss: %f'%(iters, total_loss_))
if epoch % save_ep == 0 and epoch !=0:
print('Save trained weight after epoch: %d'%epoch)
save_npy = sess.run(save_dict_op)
save_path = params['save_path']
if len(save_npy.keys()) != 0:
save_name = 'modelA_40e3_%d.npy'%(epoch)
save_path = save_path + save_name
np.save(save_path, save_npy)
# Shuffle and flip dataset
dataset.shuffle()
# dataset.flip()
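# Note (assumption, not stated in this file): training expects the weights file
# named in params['feed_path'] and the CityDatabase data directory to already
# exist; both paths above are project-specific.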
| [
"596835723@qq.com"
] | 596835723@qq.com |
e7be0c7059dccaf889b64e2159db7d2d79347d2a | 39b35326534d6efa8a60344ef59eac3d8cea562f | /recyclepj/login/migrations/0001_initial.py | a60ce3cc7a0f6134b3dd6af63e3c9f7fde4e2aed | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | Hyo-gyeong/Django_review | 8635e8311111cab56066c6b87429c7f57c5e42c3 | 8b59d717c0c8c4404230c8eaa42e6074cacdd712 | refs/heads/master | 2021-01-03T08:32:06.706689 | 2020-08-31T04:55:59 | 2020-08-31T04:55:59 | 240,000,924 | 0 | 0 | null | 2020-08-17T19:21:30 | 2020-02-12T11:53:19 | Python | UTF-8 | Python | false | false | 492 | py | # Generated by Django 3.0.5 on 2020-07-15 17:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Login',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
],
),
]
| [
"cdkrcd8@gmail.com"
] | cdkrcd8@gmail.com |
7dac21dec591b462ebe6dc791a459ab9a3d9c604 | 60b1f668808de2b82c2fcb62b07b45bb165219f2 | /test/test_operation_action_response_error.py | a5bbc78cccc9e8f90f8214c160336f2601ff00f0 | [] | no_license | andersonmiguel/Egoi | 6d37bf7a3a7555e764f7a6e792b3ef1c68fe8e20 | b5f59f9b33ea94e170f4e7e26c6a37a78d2874c2 | refs/heads/master | 2022-06-21T07:18:44.920786 | 2020-05-04T17:29:02 | 2020-05-04T17:29:02 | 261,250,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,437 | py | # coding: utf-8
"""
APIv3 (Beta)
# Introduction Just a quick peek!!! This is our new version of API. Remember, it is not stable yet!!! But we invite you play with it and give us your feedback ;) # Getting Started E-goi can be integrated with many environments and programming languages via our REST API. We've created a developer focused portal to give your organization a clear and quick overview of how to integrate with E-goi. The developer portal focuses on scenarios for integration and flow of events. We recommend familiarizing yourself with all of the content in the developer portal, before start using our rest API. The E-goi APIv3 is served over HTTPS. To ensure data privacy, unencrypted HTTP is not supported. Request data is passed to the API by POSTing JSON objects to the API endpoints with the appropriate parameters. BaseURL = api.egoiapp.com # RESTful Services This API supports 5 HTTP methods: * <b>GET</b>: The HTTP GET method is used to **read** (or retrieve) a representation of a resource. * <b>POST</b>: The POST verb is most-often utilized to **create** new resources. * <b>PATCH</b>: PATCH is used for **modify** capabilities. The PATCH request only needs to contain the changes to the resource, not the complete resource * <b>PUT</b>: PUT is most-often utilized for **update** capabilities, PUT-ing to a known resource URI with the request body containing the newly-updated representation of the original resource. * <b>DELETE</b>: DELETE is pretty easy to understand. It is used to **delete** a resource identified by a URI. # Authentication We use a custom authentication method, you will need a apikey that you can find in your account settings. Below you will see a curl example to get your account information: #!/bin/bash curl -X GET 'https://api.egoiapp.com/my-account' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' Here you can see a curl Post example with authentication: #!/bin/bash curl -X POST 'http://api.egoiapp.com/tags' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' \\ -H 'Content-Type: application/json' \\ -d '{`name`:`Your custom tag`,`color`:`#FFFFFF`}' # SDK Get started quickly with E-goi with our integration tools. Our SDK is a modern open source library that makes it easy to integrate your application with E-goi services. * <b><a href='https://github.com/E-goi/sdk-java'>Java</a></b> * <b><a href='https://github.com/E-goi/sdk-php'>PHP</a></b> * <b><a href='https://github.com/E-goi/sdk-python'>Python</a></b> <security-definitions/> # noqa: E501
The version of the OpenAPI document: 3.0.0-beta
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import egoi_api
from egoi_api.models.operation_action_response_error import OperationActionResponseError  # noqa: E501
from egoi_api.rest import ApiException
class TestOperationActionResponseError(unittest.TestCase):
"""OperationActionResponseError unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testOperationActionResponseError(self):
"""Test OperationActionResponseError"""
# FIXME: construct object with mandatory attributes with example values
        # model = egoi_api.models.operation_action_response_error.OperationActionResponseError()  # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"integrations@e-goi.com"
] | integrations@e-goi.com |
46b6301a28be145eb50c67c791673b511d885716 | 9056b5a416adc2b5c4d4b26f576ddb4fb0b1d22c | /transformacion.py | e073345902dcb4549283b9edbbf708ff6040e84b | [] | no_license | GarbiSebastian/TeLeng-TP | 9128dd82afe9977bc42ed75942134070553a5c7d | a90adda58979c2d33e7c8cc02329da49528869fb | refs/heads/master | 2021-01-01T18:29:51.311295 | 2014-11-27T04:48:26 | 2014-11-27T04:48:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | py | import sys
import numpy as np
from math import *
from copy import deepcopy
class Transformacion(object):
def __init__(self):
self.space = np.identity(4)
self.color = np.array([1,1,1])
self.depth = 100
def transformar(self,trans):
self.space = np.dot(self.space,trans.space)
self.color = self.color * trans.color
self.depth = min(self.depth, trans.depth)
def debug(self):
print 'space'
print self.space
print 'color'
print self.color
print 'depth'
print self.depth
#------------------------------------------------------------------------
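# Hypothetical usage sketch (added for illustration; Python 2 syntax to match
# this module): compose a translation with a rotation about Z:
#   t = Transformacion()
#   t.transformar(TransT(1, 2, 3))
#   t.transformar(TransRZ(90))
#   t.debug()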
class TransRX(Transformacion):
def __init__(self,num):
super(TransRX,self).__init__()
x = radians(num)
self.space = np.array([[1, 0, 0, 0],[0, cos(x), sin(x), 0],[0, -sin(x), cos(x), 0],[0, 0, 0, 1]])
class TransRY(Transformacion):
def __init__(self,num):
super(TransRY,self).__init__()
y = radians(num)
self.space = np.array([[cos(y), 0, -sin(y), 0],[0, 1, 0, 0],[sin(y), 0, cos(y), 0],[0, 0, 0, 1]])
class TransRZ(Transformacion):
def __init__(self,num):
super(TransRZ,self).__init__()
z = radians(num)
self.space = np.array([[cos(z), sin(z), 0, 0],[-sin(z), cos(z), 0, 0],[0, 0, 1, 0],[0, 0, 0, 1]])
class TransT(Transformacion):
def __init__(self,tx=0,ty=0,tz=0):
super(TransT,self).__init__()
self.space = np.array([[1, 0, 0, 0],[0, 1, 0, 0],[0, 0, 1, 0],[tx, ty, tz, 1]])
class TransS(Transformacion):
def __init__(self,sx=1,sy=1,sz=1):
super(TransS,self).__init__()
self.space = np.array([[sx, 0, 0, 0],[0, sy, 0, 0],[0, 0, sz, 0],[0, 0, 0, 1]])
class TransC(Transformacion):
def __init__(self,cr=1,cg=1,cb=1):
super(TransC,self).__init__()
self.color = np.array([cr, cg, cb])
class TransD(Transformacion):
def __init__(self,d=100):
super(TransD,self).__init__()
self.depth = d | [
"garbyseba@gmail.com"
] | garbyseba@gmail.com |
d52c3f74e131fb8123e565a6fa2173817f2da8cd | 50c2bb79de4d4ecc7a0b6b835d611627fccd82cc | /datatest/__past__/api08.py | f62a436ef48063a6b382853b9356c3ea0c6018d6 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | chansonZ/datatest | 84d493623e218988acf1e533914b71db110f839f | 2e200e2bb7d9a8016bcb6e908d4a8b12db8e2568 | refs/heads/master | 2021-07-07T07:58:40.766960 | 2017-09-29T15:07:20 | 2017-09-29T15:07:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,347 | py | """Backward compatibility for version 0.8 API."""
from __future__ import absolute_import
import datatest
from datatest.utils import collections
from datatest.utils import itertools
from datatest.errors import NOTFOUND
def _columns(self, type=list): # Removed in datatest 0.8.2
return type(self.fieldnames)
datatest.DataSource.columns = _columns
def _require_sequence(data, sequence): # New behavior in datatest 0.8.3
"""Compare *data* against a *sequence* of values. Stops at the
first difference found and returns an AssertionError. If no
differences are found, returns None.
"""
if isinstance(data, str):
raise ValueError("uncomparable types: 'str' and sequence type")
data_type = getattr(data, 'evaluation_type', data.__class__)
if not issubclass(data_type, collections.Sequence):
type_name = data_type.__name__
msg = "expected sequence type, but got " + repr(type_name)
raise ValueError(msg)
message_prefix = None
previous_element = NOTFOUND
zipped = itertools.zip_longest(data, sequence, fillvalue=NOTFOUND)
for index, (actual, expected) in enumerate(zipped):
if actual == expected:
previous_element = actual
continue
if actual == NOTFOUND:
message_prefix = ('Data sequence is missing '
'elements starting with index {0}').format(index)
message_suffix = 'Expected {0!r}'.format(expected)
elif expected == NOTFOUND:
message_prefix = ('Data sequence contains extra '
'elements starting with index {0}').format(index)
message_suffix = 'Found {0!r}'.format(actual)
else:
message_prefix = \
'Data sequence differs starting at index {0}'.format(index)
message_suffix = \
'Found {0!r}, expected {1!r}'.format(actual, expected)
break
else: # <- NOBREAK!
return None # <- EXIT!
leading_elements = []
if index > 1:
leading_elements.append('...')
if previous_element != NOTFOUND:
leading_elements.append(repr(previous_element))
actual_repr = repr(actual) if actual != NOTFOUND else '?????'
caret_underline = '^' * len(actual_repr)
trailing_elements = []
next_tuple = next(zipped, NOTFOUND)
if next_tuple != NOTFOUND:
trailing_elements.append(repr(next_tuple[0]))
if next(zipped, NOTFOUND) != NOTFOUND:
trailing_elements.append('...')
if leading_elements:
leading_string = ', '.join(leading_elements) + ', '
else:
leading_string = ''
leading_whitespace = ' ' * len(leading_string)
if trailing_elements:
trailing_string = ', ' + ', '.join(trailing_elements)
else:
trailing_string = ''
sequence_string = leading_string + actual_repr + trailing_string
message = '{0}:\n\n {1}\n {2}{3}\n{4}'.format(message_prefix,
sequence_string,
leading_whitespace,
caret_underline,
message_suffix)
return AssertionError(message)
datatest.require._require_sequence = _require_sequence
| [
"shawnbrown@users.noreply.github.com"
] | shawnbrown@users.noreply.github.com |
b396995015c218b5e511b8ef7c840db5cff1965b | 9c0b9766ade4272096f8d067b3ceac57bfad10d9 | /NeuralOCR/NeuralOCR/wsgi.py | b270114492822006b43175c09e19559fa4001812 | [
"MIT"
] | permissive | rbalda/neural_ocr | 68828996e1b0e34768f8de9b5604ccf253b1f079 | 140585a7e99f1d49e52b142811273b02c08c0675 | refs/heads/master | 2021-01-10T04:59:54.072242 | 2016-01-25T14:16:02 | 2016-01-25T14:16:02 | 49,538,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for NeuralOCR project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NeuralOCR.settings")
application = get_wsgi_application()
| [
"rbalda@espol.edu.ec"
] | rbalda@espol.edu.ec |
c41b07f87b3c1d8b47a5ea6374d5e8c5edc50ec4 | 6ab5d6d9f69346ad1af17b7c96e8c20834ec5ac6 | /functionaltests/api/v2/test_blacklist.py | c9729c8f00bfaea157458264cca0f05e959e68db | [
"Apache-2.0"
] | permissive | ISCAS-VDI/designate-base | eec2f6bf0d71fcf8c1f975be0e94357e41e1f5ca | bd945607e3345fbef8645c3441e96b032b70b098 | refs/heads/master | 2020-04-03T00:07:03.726411 | 2016-06-07T06:53:04 | 2016-06-07T06:53:04 | 60,588,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib import exceptions
from functionaltests.common import datagen
from functionaltests.api.v2.base import DesignateV2Test
from functionaltests.api.v2.clients.blacklist_client import BlacklistClient
class BlacklistTest(DesignateV2Test):
def test_get_blacklist_404(self):
client = BlacklistClient.as_user('admin')
self._assert_exception(
exceptions.NotFound,
'blacklist_not_found',
404, client.get_blacklist,
str(uuid.uuid4()))
def test_update_blacklist_404(self):
model = datagen.random_blacklist_data()
client = BlacklistClient.as_user('admin')
self._assert_exception(
exceptions.NotFound,
'blacklist_not_found',
404,
client.patch_blacklist,
str(uuid.uuid4()), model)
def test_delete_blacklist_404(self):
client = BlacklistClient.as_user('admin')
self._assert_exception(
exceptions.NotFound,
'blacklist_not_found',
404,
client.delete_blacklist,
str(uuid.uuid4()))
def test_get_blacklist_invalid_uuid(self):
client = BlacklistClient.as_user('admin')
self._assert_invalid_uuid(client.get_blacklist, 'fooo')
def test_update_blacklist_invalid_uuid(self):
model = datagen.random_blacklist_data()
client = BlacklistClient.as_user('admin')
self._assert_invalid_uuid(client.patch_blacklist, 'fooo', model)
def test_delete_blacklist_invalid_uuid(self):
client = BlacklistClient.as_user('admin')
        self._assert_invalid_uuid(client.delete_blacklist, 'fooo')
| [
"wangfeng@nfs.iscas.ac.cn"
] | wangfeng@nfs.iscas.ac.cn |