hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31fbb57a08cc16c164f1e1499b813f433099b29f | 1,041 | py | Python | core/criterion.py | PietrOz/arboreal | 3fda601a9af21a11bef3284a11d0bffe804cb3cc | [
"Apache-2.0"
] | null | null | null | core/criterion.py | PietrOz/arboreal | 3fda601a9af21a11bef3284a11d0bffe804cb3cc | [
"Apache-2.0"
] | null | null | null | core/criterion.py | PietrOz/arboreal | 3fda601a9af21a11bef3284a11d0bffe804cb3cc | [
"Apache-2.0"
] | 3 | 2019-10-12T04:13:08.000Z | 2022-02-04T07:06:01.000Z | from core.datatype import Datatype
| 26.025 | 67 | 0.677233 | from core.datatype import Datatype
def gini_impurity(labels):
    """Return the Gini impurity of a sequence of class labels.

    For each distinct label with relative frequency p, adds p * (1 - p);
    the total is 0.0 for a pure (single-class) node and grows as labels
    spread over more classes.

    Args:
        labels: sequence of hashable class labels, e.g.
            ['apple', 'apple', 'orange', 'banana', 'banana'].

    Returns:
        float Gini impurity; 0.0 for an empty sequence (the original
        raised ZeroDivisionError on empty input).
    """
    from collections import Counter  # local import keeps this fix self-contained

    count = len(labels)
    if count == 0:
        return 0.0
    # Counter tallies every label in one pass; the original called
    # labels.count(label) once per unique label, which is O(n * k).
    return sum((n / count) * (1.0 - n / count)
               for n in Counter(labels).values())
def mean_squared_error(targets):
    """Sum of squared deviations of *targets* from their mean.

    NOTE: despite the name, the result is not divided by len(targets) —
    it is the summed squared error, e.g. for [0.1, 0.3, 0.9]. Callers
    (split-criterion code) rely on the summed value, so it is preserved.
    """
    center = sum(targets) / len(targets)
    return sum((value - center) ** 2 for value in targets)
def get_criterion(target_counter, target_type):
    """Compute the split criterion for a counter of targets.

    Expands the {target: count} mapping back into a flat list and
    delegates to gini_impurity (categorical) or mean_squared_error
    (numerical). Still the unoptimized pass-through; TODO: optimize.

    Raises:
        ValueError: if target_type is neither categorical nor numerical.
    """
    targets = [
        target
        for target in target_counter
        for _ in range(target_counter[target])
    ]
    if target_type == Datatype.categorical:
        return gini_impurity(targets)
    if target_type == Datatype.numerical:
        return mean_squared_error(targets)
    raise ValueError("Unrecognized target_type")
| 934 | 0 | 69 |
7ff7122e196b282d35fe222c084c4aab06123611 | 4,074 | py | Python | spacy_lookups_data/tests/conftest.py | kadarakos/spacy-lookups-data | 2f98c24775218da5ac272957f376d5b580d85571 | [
"MIT"
] | 67 | 2019-09-30T21:31:18.000Z | 2022-03-26T11:05:02.000Z | spacy_lookups_data/tests/conftest.py | kadarakos/spacy-lookups-data | 2f98c24775218da5ac272957f376d5b580d85571 | [
"MIT"
] | 13 | 2019-10-23T14:11:49.000Z | 2021-09-27T14:29:38.000Z | spacy_lookups_data/tests/conftest.py | kadarakos/spacy-lookups-data | 2f98c24775218da5ac272957f376d5b580d85571 | [
"MIT"
] | 47 | 2019-10-22T11:20:09.000Z | 2022-01-08T20:34:45.000Z | import spacy
import pytest
INIT_LOOKUPS_CONFIG = {
"@misc": "spacy.LookupsDataLoader.v1",
"lang": "${nlp.lang}",
"tables": ["lexeme_norm"],
}
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
| 21.108808 | 61 | 0.651448 | import spacy
import pytest
INIT_LOOKUPS_CONFIG = {
"@misc": "spacy.LookupsDataLoader.v1",
"lang": "${nlp.lang}",
"tables": ["lexeme_norm"],
}
@pytest.fixture(scope="session")
def ca_lookup_nlp():
    """Session-scoped blank Catalan pipeline with a lookup-mode lemmatizer."""
    nlp = spacy.blank("ca")
    nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    nlp.initialize()
    return nlp
@pytest.fixture(scope="session")
def cs_nlp():
    """Session-scoped blank Czech pipeline with a default-mode lemmatizer.

    Preloads the lexeme_norm lookup table via INIT_LOOKUPS_CONFIG before
    initialization.
    """
    nlp = spacy.blank("cs")
    nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp
@pytest.fixture(scope="session")
def da_nlp():
nlp = spacy.blank("da")
nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
nlp.add_pipe("lemmatizer")
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def de_nlp():
nlp = spacy.blank("de")
nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
nlp.add_pipe("lemmatizer")
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def en_nlp():
    """Session-scoped blank English pipeline with a default-mode lemmatizer.

    Preloads the lexeme_norm lookup table via INIT_LOOKUPS_CONFIG before
    initialization.
    """
    nlp = spacy.blank("en")
    nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp
@pytest.fixture(scope="session")
def en_lookup_nlp():
    """Session-scoped blank English pipeline with a lookup-mode lemmatizer.

    Also preloads the lexeme_norm lookup table via INIT_LOOKUPS_CONFIG.
    """
    nlp = spacy.blank("en")
    nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
    nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    nlp.initialize()
    return nlp
@pytest.fixture(scope="session")
def fr_lookup_nlp():
nlp = spacy.blank("fr")
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def grc_nlp():
nlp = spacy.blank("grc")
nlp.add_pipe("lemmatizer")
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def hr_nlp():
nlp = spacy.blank("hr")
nlp.add_pipe("lemmatizer")
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def it_lookup_nlp():
nlp = spacy.blank("it")
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def ga_pos_lookup_nlp():
nlp = spacy.blank("ga")
nlp.add_pipe("lemmatizer", config={"mode": "pos_lookup"})
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def lb_nlp():
nlp = spacy.blank("lb")
nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
nlp.add_pipe("lemmatizer")
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def lt_nlp():
nlp = spacy.blank("lt")
nlp.add_pipe("lemmatizer")
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def mk_lookup_nlp():
nlp = spacy.blank("mk")
nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def nl_nlp():
nlp = spacy.blank("nl")
nlp.add_pipe("lemmatizer")
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def nl_lookup_nlp():
nlp = spacy.blank("nl")
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def ro_nlp():
nlp = spacy.blank("ro")
nlp.add_pipe("lemmatizer")
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def ru_lookup_nlp():
nlp = spacy.blank("ru")
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def sr_nlp():
nlp = spacy.blank("sr")
nlp.add_pipe("lemmatizer")
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def sv_nlp():
nlp = spacy.blank("sv")
nlp.add_pipe("lemmatizer")
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def sv_lookup_nlp():
nlp = spacy.blank("sv")
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
nlp.initialize()
return nlp
@pytest.fixture(scope="session")
def tr_nlp():
    """Session-scoped blank Turkish pipeline with a default-mode lemmatizer."""
    nlp = spacy.blank("tr")
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp
| 2,664 | 0 | 484 |
1eae46ca13f3b40aa55892ce9376c6666af5b131 | 1,829 | py | Python | django1703zz/day5/stumanage/models.py | z991/Stuent_Manage_Systerm | 23ec1595968a87a311ac3b718e2e9d7ea6a65b58 | [
"MIT"
] | 2 | 2019-01-21T03:43:39.000Z | 2019-08-18T04:18:37.000Z | django1703zz/day5/stumanage/models.py | z991/Stuent_Manage_Systerm | 23ec1595968a87a311ac3b718e2e9d7ea6a65b58 | [
"MIT"
] | 11 | 2020-02-12T03:27:54.000Z | 2022-03-12T00:10:50.000Z | django1703zz/day5/stumanage/models.py | z991/Stuent_Manage_Systerm | 23ec1595968a87a311ac3b718e2e9d7ea6a65b58 | [
"MIT"
] | null | null | null | #coding:utf8
from django.db import models
from django.contrib.auth.models import User
from pytz import timezone
from django.conf import settings
TIME_ZONE = settings.TIME_ZONE if settings.TIME_ZONE else 'Asia/Shanghai'
# Create your models here.
| 33.87037 | 99 | 0.72936 | #coding:utf8
from django.db import models
from django.contrib.auth.models import User
from pytz import timezone
from django.conf import settings
TIME_ZONE = settings.TIME_ZONE if settings.TIME_ZONE else 'Asia/Shanghai'
# Create your models here.
class Student(models.Model):
    """Student record; each student belongs to one Class (FK `cls`)."""
    name=models.CharField(verbose_name='学生姓名',max_length=20)
    age=models.IntegerField(verbose_name='学生年龄')
    score=models.DecimalField(verbose_name='分数',max_digits=5,decimal_places=2,null=True,blank=True)
    email=models.EmailField(verbose_name='学生邮箱',null=True,blank=True)
    # Set once when the row is created (auto_now_add).
    add_date=models.DateTimeField(verbose_name='添加时间',auto_now_add=True)
    cls=models.ForeignKey('Class')
    avatar=models.ImageField(verbose_name='头像',upload_to='avatar/',default='avatar/default.jpg')
    # Display hook (Python 2-era Django) and inner Meta class for the table.
    def __unicode__(self):
        return self.name
    class Meta:
        verbose_name=verbose_name_plural='学生'
class Class(models.Model):
    """School class (group of students); referenced by Student.cls."""
    name=models.CharField(verbose_name='班级名称',max_length=10)
    # Display hook (Python 2-era Django).
    def __unicode__(self):
        return self.name
    class Meta:
        verbose_name=verbose_name_plural='班级'
class UserProfile(models.Model):
    """Extra per-user info attached 1:1 to Django's auth User."""
    phone = models.CharField(verbose_name='手机', max_length=20)
    nick = models.CharField(max_length=30)
    user = models.OneToOneField(User)
    class Meta:
        verbose_name = verbose_name_plural = '用户信息'
    # Display hook (Python 2-era Django).
    def __unicode__(self):
        return self.nick
class Movie(models.Model):
name = models.CharField(verbose_name="电影名称", max_length=64)
actor = models.CharField(verbose_name="演员", max_length=128)
up_time = models.DateField(verbose_name="上映时间")
score = models.CharField(verbose_name="评分", max_length=8)
img = models.TextField(verbose_name="图片")
class Meta:
verbose_name = verbose_name_plural = '猫眼电影'
def __unicode__(self):
return self.name | 104 | 1,501 | 90 |
6fea67b75bc7c251966a525b93cf5e63ba13f7c3 | 2,371 | py | Python | tests/make.py | simomarsili/mmdemux | 80edee6b98ff85208edac2e45d0edf0ee922b1ed | [
"MIT"
] | null | null | null | tests/make.py | simomarsili/mmdemux | 80edee6b98ff85208edac2e45d0edf0ee922b1ed | [
"MIT"
] | null | null | null | tests/make.py | simomarsili/mmdemux | 80edee6b98ff85208edac2e45d0edf0ee922b1ed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Prepare tests files."""
# pylint: disable=no-member
import mdtraj
import simtk.openmm as mm
from openmmtools import mcmc, states, testsystems
from openmmtools.multistate import MultiStateReporter, ParallelTemperingSampler
from simtk import unit
def energy_function(test, topology=None, system=None, positions=None):
    """Compute the potential energy of a TestSystem configuration.

    Args:
        test: openmmtools TestSystem supplying defaults.
        topology: optional topology overriding ``test.topology``.
        system: optional OpenMM System overriding ``test.system``.
        positions: optional positions overriding ``test.positions``.

    Returns:
        Potential energy as a float in kilojoules per mole.
    """
    topology = topology or test.topology
    system = system or test.system
    positions = positions or test.positions
    platform = mm.Platform.getPlatformByName('CPU')
    properties = {}
    integrator = mm.VerletIntegrator(0.002 * unit.picoseconds)
    # Bug fix: the original passed the module-level `top` here instead of the
    # `topology` argument, silently ignoring any caller-supplied topology.
    simulation = mm.app.Simulation(topology, system, integrator, platform,
                                   properties)
    simulation.context.setPositions(positions)
    state = simulation.context.getState(getEnergy=True)
    ene = state.getPotentialEnergy().value_in_unit(unit.kilojoule_per_mole)
    return ene
stem = 'text_repex'
testsystem = testsystems.AlanineDipeptideImplicit()
# testsystem = testsystems.SrcImplicit()
# testsystem = testsystems.HostGuestVacuum()
# save topology as .pdb
top = mdtraj.Topology.from_openmm(testsystem.topology)
trj = mdtraj.Trajectory([testsystem.positions / unit.nanometers], top)
trj.save(stem + '.pdb')
# save system as .xml
serialized_system = mm.openmm.XmlSerializer.serialize(testsystem.system)
with open(stem + '.xml', 'w') as fp:
print(serialized_system, file=fp)
n_replicas = 3 # Number of temperature replicas.
T_min = 298.0 * unit.kelvin # Minimum temperature.
T_max = 600.0 * unit.kelvin # Maximum temperature.
reference_state = states.ThermodynamicState(system=testsystem.system,
temperature=T_min)
move = mcmc.GHMCMove(timestep=2.0 * unit.femtoseconds, n_steps=50)
sampler = ParallelTemperingSampler(mcmc_moves=move,
number_of_iterations=float('inf'),
online_analysis_interval=None)
storage_path = stem + '.nc'
reporter = MultiStateReporter(storage_path, checkpoint_interval=1)
sampler.create(reference_state,
states.SamplerState(testsystem.positions),
reporter,
min_temperature=T_min,
max_temperature=T_max,
n_temperatures=n_replicas)
sampler.run(n_iterations=10)
| 37.046875 | 79 | 0.701814 | # -*- coding: utf-8 -*-
"""Prepare tests files."""
# pylint: disable=no-member
import mdtraj
import simtk.openmm as mm
from openmmtools import mcmc, states, testsystems
from openmmtools.multistate import MultiStateReporter, ParallelTemperingSampler
from simtk import unit
def energy_function(test, topology=None, system=None, positions=None):
    """Compute the potential energy of a TestSystem configuration.

    Args:
        test: openmmtools TestSystem supplying defaults.
        topology: optional topology overriding ``test.topology``.
        system: optional OpenMM System overriding ``test.system``.
        positions: optional positions overriding ``test.positions``.

    Returns:
        Potential energy as a float in kilojoules per mole.
    """
    topology = topology or test.topology
    system = system or test.system
    positions = positions or test.positions
    platform = mm.Platform.getPlatformByName('CPU')
    properties = {}
    integrator = mm.VerletIntegrator(0.002 * unit.picoseconds)
    # Bug fix: the original passed the module-level `top` here instead of the
    # `topology` argument, silently ignoring any caller-supplied topology.
    simulation = mm.app.Simulation(topology, system, integrator, platform,
                                   properties)
    simulation.context.setPositions(positions)
    state = simulation.context.getState(getEnergy=True)
    ene = state.getPotentialEnergy().value_in_unit(unit.kilojoule_per_mole)
    return ene
stem = 'text_repex'
testsystem = testsystems.AlanineDipeptideImplicit()
# testsystem = testsystems.SrcImplicit()
# testsystem = testsystems.HostGuestVacuum()
# save topology as .pdb
top = mdtraj.Topology.from_openmm(testsystem.topology)
trj = mdtraj.Trajectory([testsystem.positions / unit.nanometers], top)
trj.save(stem + '.pdb')
# save system as .xml
serialized_system = mm.openmm.XmlSerializer.serialize(testsystem.system)
with open(stem + '.xml', 'w') as fp:
print(serialized_system, file=fp)
n_replicas = 3 # Number of temperature replicas.
T_min = 298.0 * unit.kelvin # Minimum temperature.
T_max = 600.0 * unit.kelvin # Maximum temperature.
reference_state = states.ThermodynamicState(system=testsystem.system,
temperature=T_min)
move = mcmc.GHMCMove(timestep=2.0 * unit.femtoseconds, n_steps=50)
sampler = ParallelTemperingSampler(mcmc_moves=move,
number_of_iterations=float('inf'),
online_analysis_interval=None)
storage_path = stem + '.nc'
reporter = MultiStateReporter(storage_path, checkpoint_interval=1)
sampler.create(reference_state,
states.SamplerState(testsystem.positions),
reporter,
min_temperature=T_min,
max_temperature=T_max,
n_temperatures=n_replicas)
sampler.run(n_iterations=10)
| 0 | 0 | 0 |
3aa30d764262b42736d1bb2220cfe44d5c41484f | 652 | py | Python | 2019-12-19_introductionInSeminar/code/manualAi.py | TrackerSB/MasterThesis | 2792203d28d6c7b62f54545344ee6772d2ec5b64 | [
"MIT"
] | null | null | null | 2019-12-19_introductionInSeminar/code/manualAi.py | TrackerSB/MasterThesis | 2792203d28d6c7b62f54545344ee6772d2ec5b64 | [
"MIT"
] | null | null | null | 2019-12-19_introductionInSeminar/code/manualAi.py | TrackerSB/MasterThesis | 2792203d28d6c7b62f54545344ee6772d2ec5b64 | [
"MIT"
] | null | null | null | if __name__ == "__main__":
    from pathlib import Path
    from drivebuildclient.AIExchangeService import AIExchangeService
    from drivebuildclient.aiExchangeMessages_pb2 import VehicleID
    # Connect to a DriveBuild service assumed to be running locally on port 8383.
    service = AIExchangeService("localhost", 8383)
    # Send tests
    submission_result = service.run_tests("test", "test", Path("criteriaA.dbc.xml"), Path("environmentA.dbe.xml"))
    # Interact with a simulation
    if submission_result and submission_result.submissions:
        for test_name, sid in submission_result.submissions.items():
            vid = VehicleID()
            # Placeholder — replace with the participant vehicle id from the criteria file.
            vid.vid = "<vehicleID>"
            # NOTE(review): MyFancyAI is neither defined nor imported in this file,
            # so this line raises NameError as written; confirm where it comes from.
            MyFancyAI(service).start(sid, vid)
| 38.352941 | 114 | 0.707055 | if __name__ == "__main__":
from pathlib import Path
from drivebuildclient.AIExchangeService import AIExchangeService
from drivebuildclient.aiExchangeMessages_pb2 import VehicleID
service = AIExchangeService("localhost", 8383)
# Send tests
submission_result = service.run_tests("test", "test", Path("criteriaA.dbc.xml"), Path("environmentA.dbe.xml"))
# Interact with a simulation
if submission_result and submission_result.submissions:
for test_name, sid in submission_result.submissions.items():
vid = VehicleID()
vid.vid = "<vehicleID>"
MyFancyAI(service).start(sid, vid)
| 0 | 0 | 0 |
bd8dbda9931a4233177ea7c32144a912644de904 | 1,100 | py | Python | configurer/parsers/__init__.py | jamielennox/configurer | 83edafc711aa15da534e90159aaca783ddbfbe56 | [
"Apache-2.0"
] | null | null | null | configurer/parsers/__init__.py | jamielennox/configurer | 83edafc711aa15da534e90159aaca783ddbfbe56 | [
"Apache-2.0"
] | 3 | 2019-05-09T10:58:59.000Z | 2019-05-09T10:59:56.000Z | configurer/parsers/__init__.py | jamielennox/configurer | 83edafc711aa15da534e90159aaca783ddbfbe56 | [
"Apache-2.0"
] | null | null | null |
__all__ = [
'Boolean',
'List',
'Or',
]
| 20.37037 | 68 | 0.528182 |
__all__ = [
'Boolean',
'List',
'Or',
]
def Boolean(value):
    """Parse *value* into a bool.

    Accepts real bools, ints (non-zero is True), and the usual string
    spellings ('true'/'1'/'on'/'yes' and 'false'/'0'/'off'/'no'),
    case-insensitively and ignoring surrounding whitespace.

    Raises:
        ValueError: if the value cannot be interpreted as a boolean.
    """
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return value != 0
    try:
        s = str(value).strip().lower()
    except Exception:
        # Bug fix: the original `pass`ed here and then referenced the
        # unbound local `s` in the final raise, producing a NameError
        # instead of the intended ValueError.
        raise ValueError('Invalid Boolean Value: %r' % (value,))
    if s in ('true', '1', 'on', 'yes'):
        return True
    if s in ('false', '0', 'off', 'no'):
        return False
    raise ValueError('Invalid Boolean Value: %s' % s)
def Or(*parsers):
    """Combine parsers: return a parser where the first success wins.

    The combined parser tries each argument parser in order, swallowing
    ValueError, and raises ValueError itself if none accept the value.
    """
    def _combined(value):
        for candidate in parsers:
            try:
                result = candidate(value)
            except ValueError:
                continue
            else:
                return result
        raise ValueError("Can't parse or parser option: %s" % value)
    return _combined
def List(parser, delimiter=','):
    """Build a parser that applies *parser* to every element of a list.

    The returned parser accepts a list/tuple directly, or a string which
    is split on *delimiter* first; anything else raises ValueError.
    """
    def _parse_list(value):
        if isinstance(value, str):
            items = value.split(delimiter)
        elif isinstance(value, (list, tuple)):
            items = value
        else:
            raise ValueError('Invalid List Value: %s' % value)
        return [parser(item) for item in items]
    return _parse_list
| 976 | 0 | 69 |
6b91c7ee53c19631334375a5d5d99360933063d1 | 756 | py | Python | core/telegram/decorators.py | mpyrev/lunchegram | 14a29da74e14a959be607e4fc76bbc2f181c5278 | [
"MIT"
] | 1 | 2019-11-26T13:49:02.000Z | 2019-11-26T13:49:02.000Z | core/telegram/decorators.py | mpyrev/lunchegram | 14a29da74e14a959be607e4fc76bbc2f181c5278 | [
"MIT"
] | 7 | 2019-12-24T13:17:23.000Z | 2022-02-10T12:35:14.000Z | core/telegram/decorators.py | mpyrev/lunchegram | 14a29da74e14a959be607e4fc76bbc2f181c5278 | [
"MIT"
] | null | null | null | from functools import wraps
from django.db import transaction
from accounts.models import User
from core.models import TelegramChat
def infuse_user():
"""
Adds user instance to args if possible.
Also creates
"""
return decorator
| 23.625 | 103 | 0.60582 | from functools import wraps
from django.db import transaction
from accounts.models import User
from core.models import TelegramChat
def infuse_user():
    """
    Decorator factory: prepends the ``User`` matching the incoming Telegram
    message's sender to the wrapped handler's positional arguments.

    Also ensures a ``TelegramChat`` row exists for the message's chat
    (``get_or_create`` inside a transaction) when the update carries one.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Handlers receive the Telegram message/update as the first positional arg.
            message = args[0]
            uid = message.from_user.id
            user = User.objects.get_from_telegram_uid(uid)
            if hasattr(message, 'chat'):
                # Record the chat id so the bot can message this user later.
                with transaction.atomic():
                    TelegramChat.objects.get_or_create(uid=uid, defaults=dict(chat_id=message.chat.id))
            # "Infuse": the resolved user becomes the new first positional argument.
            args = (user,) + args
            return func(*args, **kwargs)
        return wrapper
    return decorator
| 476 | 0 | 26 |
525ce6e0660223c0a5ffd309b3e48b4d0898096d | 874 | py | Python | truck_telemetry/truck_telemetry.py | Dreagonmon/truck_telemetry | e353585940ff40949e836bb8d27671aab014dbcd | [
"MIT"
] | null | null | null | truck_telemetry/truck_telemetry.py | Dreagonmon/truck_telemetry | e353585940ff40949e836bb8d27671aab014dbcd | [
"MIT"
] | null | null | null | truck_telemetry/truck_telemetry.py | Dreagonmon/truck_telemetry | e353585940ff40949e836bb8d27671aab014dbcd | [
"MIT"
] | null | null | null | from multiprocessing.shared_memory import SharedMemory
from .telemetry_version import v1_10
_mem = None
_telemetry_sdk_version = None
| 27.3125 | 65 | 0.704805 | from multiprocessing.shared_memory import SharedMemory
from .telemetry_version import v1_10
_mem = None
_telemetry_sdk_version = None
def init():
    """Attach to the truck telemetry shared-memory block and detect its SDK version.

    Opens the existing "Local\\SCSTelemetry" shared memory (written by the
    in-game telemetry plugin) and probes the supported version modules.

    Raises:
        FileNotFoundError: if the shared memory segment does not exist
            (game or plugin not running).
        RuntimeError: if the data matches no supported SDK version.
    """
    global _mem, _telemetry_sdk_version
    _mem = SharedMemory(name="Local\\SCSTelemetry", create=False)
    for version in (v1_10,):
        if version.is_same_version(_mem.buf):
            _telemetry_sdk_version = version
            break
    if _telemetry_sdk_version is None:
        # Close the segment on failure so the handle is not leaked
        # (the original left it open and raised a bare Exception).
        _mem.close()
        _mem = None
        raise RuntimeError("Not support this telemetry sdk version")
def get_data():
    """Return the current telemetry snapshot parsed from shared memory.

    Requires a successful init() first; otherwise _telemetry_sdk_version
    is None and this raises AttributeError.
    """
    return _telemetry_sdk_version.parse_data(_mem.buf)
def get_version_number():
    """Return the detected SDK version number, or 0 before init()."""
    version = _telemetry_sdk_version
    if version is None:
        return 0
    return version.get_version_number()
def deinit():
    """Detach from the shared memory and reset the module-level state."""
    global _mem, _telemetry_sdk_version
    if _mem is not None:
        _mem.close()
    _mem = None
    _telemetry_sdk_version = None
| 647 | 0 | 92 |
3b7a585d722da59a13235a5532d627f887808d57 | 18,940 | py | Python | baselines/jft/batchensemble.py | pyun-ram/uncertainty-baselines | 5de6a93973c0989dc7eff48d126af57cd7918e20 | [
"Apache-2.0"
] | null | null | null | baselines/jft/batchensemble.py | pyun-ram/uncertainty-baselines | 5de6a93973c0989dc7eff48d126af57cd7918e20 | [
"Apache-2.0"
] | null | null | null | baselines/jft/batchensemble.py | pyun-ram/uncertainty-baselines | 5de6a93973c0989dc7eff48d126af57cd7918e20 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BatchEnsemble Vision Transformer."""
import copy
import functools
import itertools
import multiprocessing
import os
import time
from absl import app
from absl import flags
from absl import logging
from clu import metric_writers
from clu import preprocess_spec
import flax
import flax.jax_utils
import flax.struct
import jax
import jax.config
import jax.nn
import jax.numpy as jnp
import ml_collections
import numpy as np
import robustness_metrics as rm
import tensorflow as tf
from tensorflow.io import gfile
import uncertainty_baselines as ub
import batchensemble_utils # local file import
import checkpoint_utils # local file import
import input_utils # local file import
import preprocess_utils # local file import
import train_utils # local file import
# TODO(dusenberrymw): Open-source remaining imports.
ensemble = None
train = None
xprof = None
core = None
xm = None
xm_api = None
BIG_VISION_DIR = None
ml_collections.config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string('output_dir', default=None, help='Work unit directory.')
flags.DEFINE_integer(
'num_cores', default=None, help='Unused. How many devices being used.')
flags.DEFINE_boolean(
'use_gpu', default=None, help='Unused. Whether or not running on GPU.')
flags.DEFINE_string('tpu', None,
'Unused. Name of the TPU. Only used if use_gpu is False.')
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
if __name__ == '__main__':
app.run(main)
| 41.810155 | 80 | 0.673231 | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BatchEnsemble Vision Transformer."""
import copy
import functools
import itertools
import multiprocessing
import os
import time
from absl import app
from absl import flags
from absl import logging
from clu import metric_writers
from clu import preprocess_spec
import flax
import flax.jax_utils
import flax.struct
import jax
import jax.config
import jax.nn
import jax.numpy as jnp
import ml_collections
import numpy as np
import robustness_metrics as rm
import tensorflow as tf
from tensorflow.io import gfile
import uncertainty_baselines as ub
import batchensemble_utils # local file import
import checkpoint_utils # local file import
import input_utils # local file import
import preprocess_utils # local file import
import train_utils # local file import
# TODO(dusenberrymw): Open-source remaining imports.
ensemble = None
train = None
xprof = None
core = None
xm = None
xm_api = None
BIG_VISION_DIR = None
ml_collections.config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string('output_dir', default=None, help='Work unit directory.')
flags.DEFINE_integer(
'num_cores', default=None, help='Unused. How many devices being used.')
flags.DEFINE_boolean(
'use_gpu', default=None, help='Unused. Whether or not running on GPU.')
flags.DEFINE_string('tpu', None,
'Unused. Name of the TPU. Only used if use_gpu is False.')
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
def main(_):
config = flags.FLAGS.config
output_dir = flags.FLAGS.output_dir
tf.io.gfile.makedirs(output_dir)
seed = config.get('seed', 0)
rng = jax.random.PRNGKey(seed)
tf.random.set_seed(seed)
save_checkpoint_path = None
if config.get('checkpoint_steps'):
gfile.makedirs(output_dir)
save_checkpoint_path = os.path.join(output_dir, 'checkpoint.npz')
# Create an asynchronous multi-metric writer.
writer = metric_writers.create_default_writer(
output_dir, just_logging=jax.process_index() > 0)
checkpoint_writer = None
# Loss to apply.
loss_to_apply = getattr(core, config.get('loss_to_apply', 'softmax_xent'))
compute_ece = config.get('compute_ece', False)
is_sigmoid = config.get('loss_to_apply', 'softmax_xent') == 'sigmoid_xent'
if compute_ece and is_sigmoid:
error_msg = 'Inconsistent config: ECE can only be used with "softmax_xent".'
raise ValueError(error_msg)
ens_size = config.get('model.transformer.ens_size', 1)
# The pool is used to perform misc operations such as logging in async way.
pool = multiprocessing.pool.ThreadPool()
# Ideally, this should make code deterministic, but for many reasons we are
# not there yet. For instance, tf.data.map is not determisntic.
rng_generator = tf.random.Generator.from_seed(config.get('seed', 0))
tf.random.set_global_generator(
rng_generator.split(jax.process_count())[jax.process_index()])
logging.info('Number of devices: %s (process index: %s)', jax.device_count(),
jax.process_index())
logging.info('Config:\n%s', str(config))
def write_note(note):
if jax.host_id() == 0:
logging.info('NOTE: %s', note)
write_note('Initializing...')
# Verify settings to make sure no checkpoints are accidentally missed.
if config.get('keep_checkpoint_steps'):
assert config.get('checkpoint_steps'), 'Specify `checkpoint_steps`.'
assert config.keep_checkpoint_steps % config.checkpoint_steps == 0, (
f'`keep_checkpoint_steps` ({config.checkpoint_steps}) should be'
f'divisible by `checkpoint_steps ({config.checkpoint_steps}).`')
if (config.batch_size % jax.device_count() != 0 or
config.batch_size_eval % jax.device_count() != 0):
raise ValueError(f'Batch sizes ({config.batch_size} and '
f'{config.batch_size_eval}) must be divisible by '
f'the number of devices ({jax.device_count()})')
batch_size_per_host = config.batch_size // jax.process_count()
batch_size_per_core = config.batch_size // jax.device_count()
batch_size_per_host_eval = config.batch_size_eval // jax.process_count()
rng, train_ds_rng = jax.random.split(rng)
train_ds_rng = jax.random.fold_in(train_ds_rng, jax.process_index())
train_ds = input_utils.get_data(
dataset=config.dataset,
split=config.train_split,
rng=train_ds_rng,
host_batch_size=batch_size_per_host,
preprocess_fn=preprocess_spec.parse(
spec=config.pp_train, available_ops=preprocess_utils.all_ops()),
shuffle_buffer_size=config.shuffle_buffer_size,
prefetch_size=config.get('prefetch_to_host', 2),
data_dir=config.get('dataset_dir'))
train_iter = input_utils.start_input_pipeline(
train_ds, config.get('prefetch_to_device', 1))
ntrain_img = input_utils.get_num_examples(
config.dataset,
split=config.train_split,
host_batch_size=batch_size_per_host,
data_dir=config.get('dataset_dir'))
steps_per_epoch = ntrain_img / config.batch_size
if config.get('num_epochs'):
total_steps = int(config.num_epochs * steps_per_epoch)
assert not config.get('total_steps'), 'Set either num_epochs or total_steps'
else:
total_steps = config.total_steps
logging.info(
'Running for %d steps, that means %f epochs and %f steps per epoch',
total_steps, total_steps * config.batch_size / ntrain_img,
steps_per_epoch)
def _get_val_split(dataset, split, pp_eval, data_dir=None):
# We do ceil rounding such that we include the last incomplete batch.
nval_img = input_utils.get_num_examples(
dataset,
split=split,
host_batch_size=batch_size_per_host_eval,
drop_remainder=False,
data_dir=data_dir)
val_steps = int(np.ceil(nval_img / config.batch_size_eval))
logging.info('Running validation for %d steps for %s, %s', val_steps,
dataset, split)
if isinstance(pp_eval, str):
pp_eval = preprocess_spec.parse(
spec=pp_eval, available_ops=preprocess_utils.all_ops())
val_ds = input_utils.get_data(
dataset=dataset,
split=split,
rng=None,
host_batch_size=batch_size_per_host_eval,
preprocess_fn=pp_eval,
cache=config.get('val_cache', 'batched'),
repeat_after_batching=True,
shuffle=False,
prefetch_size=config.get('prefetch_to_host', 2),
drop_remainder=False,
data_dir=data_dir)
val_iter = input_utils.start_input_pipeline(
val_ds, config.get('prefetch_to_device', 1))
return (val_iter, val_steps)
val_iter_splits = {
'val':
_get_val_split(
config.dataset,
split=config.val_split,
pp_eval=config.pp_eval,
data_dir=config.get('data_dir'))
}
# Note: we return the train loss and val loss for use in reproducibility unit
# tests.
train_loss = -jnp.inf
val_loss = {val_name: -jnp.inf for val_name, _ in val_iter_splits.items()}
# TODO(zmariet): Add fewshot evaluation.
fewshot_results = {'dummy': {(0, 1): -jnp.inf}}
opt_def = train.get_optimizer_from_config(config, f'{BIG_VISION_DIR}.optims')
eval_config = copy.deepcopy(config)
if config.get('eval_overrides'):
with eval_config.unlocked():
eval_config.update(config.eval_overrides)
model = getattr(ub.models, config.model_name)
model_train = model(
num_classes=config.num_classes, train=True, **config.model)
model_eval = model(
num_classes=config.num_classes, train=False, **config.model)
image_size = tuple(train_ds.element_spec['image'].shape[2:])
logging.info('Model initialization: Starting.')
opt_init, rngs = train.model_and_optim_init(
model_train.init, opt_def, (batch_size_per_core,) + image_size,
config.get('init_head_bias'), config.get('seed', 0),
config.get('extra_rngs', []))
logging.info('Model initialization: Done.')
# TODO(jpuigcerver): Support logging parameter count with new sharding.
weight_decay_fn = train.get_weight_decay_function_from_config(config)
batch_loss_fn = ensemble.wrap_ensemble_module_with_auxiliary_loss_fn(
module=model_train,
loss_fn=loss_to_apply,
auxiliary_loss_weight=config.get('auxiliary_loss_weight', 0.0),
ens_size=ens_size)
update_fn = functools.partial(
batchensemble_utils.update_fn_be,
weight_decay_fn=weight_decay_fn,
plot_grad_norm_name_fn=None,
plot_grads_nan_inf=config.get('plot_grads_nan_inf', True),
max_grad_norm_global=config.get('clip_grad_norm', None),
frozen_vars_patterns=config.get('frozen_var_patterns', None),
fast_weight_lr_multiplier=config.get('fast_weight_lr_multiplier', None))
pmap_update_fn = core.pmap_sorted(
update_fn, axis_name='batch', donate_argnums=(0, 1),
static_broadcasted_argnums=(5,))
# One pmapped evaluation step: runs on every device with `axis_name='batch'`
# so per-example metrics can be summed across all devices with psum.
# Closes over `model_eval`, `ens_size`, `ensemble` from the enclosing scope.
@functools.partial(jax.pmap, axis_name='batch')
def evaluation_fn(params, images, labels, mask):
# Ignore the entries with all zero labels for evaluation.
mask *= labels.max(axis=1)
tiled_logits, _ = model_eval.apply({'params': flax.core.freeze(params)},
images)
# The model tiles the ensemble along the batch dimension; split it back
# into a leading ensemble axis: (ens_size, per_member_batch, num_classes).
ens_logits = jnp.asarray(jnp.split(tiled_logits, ens_size))
losses = ensemble.ensemble_softmax_xent(
logits=ens_logits, labels=labels)
# Masked sums reduced over every device participating in the batch axis.
loss = jax.lax.psum(losses * mask, axis_name='batch')
ncorrect = ensemble.ensemble_softmax_correct_multilabel(
ens_logits, labels, mask, psum_axis_name='batch')
n = jax.lax.psum(mask, axis_name='batch')
# Ensemble predictive distribution: log of the mean member probability.
logits = jnp.log(jnp.mean(jax.nn.softmax(ens_logits), axis=0))
# Gather raw predictions/labels/masks from all devices so the host can
# compute calibration metrics (ECE, calibration AUC) after the step.
metric_args = jax.lax.all_gather([logits, labels, mask],
axis_name='batch')
return ncorrect, loss, n, metric_args
checkpoint_data = checkpoint_utils.maybe_load_checkpoint(
train_loop_rngs=rng,
save_checkpoint_path=save_checkpoint_path,
init_optimizer=opt_init,
init_params=opt_init.target,
init_fixed_model_states=None,
default_reinit_params=['head/bias', 'head/kernel'],
config=config)
opt = checkpoint_data.optimizer
opt = opt.replace(target=flax.core.freeze(opt.target))
opt = flax.jax_utils.replicate(opt)
accumulated_train_time = checkpoint_data.accumulated_train_time
train_loop_rngs = jax.tree_map(
train.rng_jax_fold_host_if_needed_and_shard, rngs)
first_step = int(opt.state.step)
start_time = time.time()
# Prepare the learning-rate and pre-fetch it to device to avoid delays.
lr_fn = train_utils.create_learning_rate_schedule(total_steps,
**config.get('lr', {}))
lr_iter = train_utils.prefetch_scalar(
map(lr_fn, range(first_step, total_steps)),
config.get('prefetch_to_device', 1))
if first_step > 0:
write_note('Advancing iterators after resuming from a checkpoint...')
lr_iter = itertools.islice(lr_iter, first_step, None)
train_iter = itertools.islice(train_iter, first_step, None)
# NOTE: Validation eval is only run on certain steps, so determine how many
# times it was run previously.
num_val_runs = sum(
map(
lambda i: train_utils.itstime(i, config.log_eval_steps, total_steps
), range(1, first_step + 1)))
for val_name, (val_iter, val_steps) in val_iter_splits.items():
val_iter = itertools.islice(val_iter, num_val_runs * val_steps, None)
val_iter_splits[val_name] = (val_iter, val_steps)
log_training_first_n_steps = config.get('log_training_first_n_steps', -1)
with metric_writers.ensure_flushes(writer):
callback_fn = lambda x: x # Do nothing.
xprof_session = xprof.MultiStepXprofSession(
profile_steps=20, # For how many steps to profile after warmup.
warmup_steps=170, # For how many steps to wait before profiling.
stop_callback_fn=callback_fn)
for step, train_batch, lr_repl in zip(
range(first_step + 1, total_steps + 1), train_iter, lr_iter):
with xprof_session:
with jax.profiler.StepTraceAnnotation(name='train', step_num=step):
opt, train_loop_rngs, loss_value, _ = pmap_update_fn(
opt,
train_loop_rngs,
lr_repl,
train_batch['image'],
train_batch['labels'],
batch_loss_fn)
# Checkpoint saving.
if train_utils.itstime(
step=step,
every_n_steps=config.get('checkpoint_steps'),
total_steps=total_steps, host=0, first=False):
train_utils.checkpointing_timeout(checkpoint_writer,
config.get('checkpoint_timeout', 1))
train.sync_all_hosts()
time_since_last_start = float(time.time() - start_time)
accumulated_train_time = accumulated_train_time + time_since_last_start
opt_cpu = jax.tree_util.tree_map(lambda x: np.array(x[0]), opt)
copy_step = None
if train_utils.itstime(step, config.get('keep_checkpoint_steps'),
total_steps):
write_note('Keeping a checkpoint copy...')
copy_step = step
checkpoint_data = checkpoint_utils.CheckpointData(
train_loop_rngs=train_loop_rngs,
optimizer=opt_cpu,
accumulated_train_time=accumulated_train_time)
checkpoint_writer = pool.apply_async(
checkpoint_utils.checkpoint_trained_model,
(checkpoint_data, save_checkpoint_path, copy_step))
# Report training progress
if (jax.process_index() == 0 and config.log_training_every_n_steps > 0 and
(step % config.log_training_every_n_steps == 0 or
step == total_steps or step < log_training_first_n_steps)):
train_loss = loss_value[0]
time_elapsed = time.time() - start_time + accumulated_train_time
img_sec_core = (
config.batch_size * step / time_elapsed / jax.device_count())
writer.write_scalars(step, {'learning_rate': lr_repl[0],
'training_loss': np.mean(loss_value),
'img/sec/core': img_sec_core,
'epoch': step / steps_per_epoch})
# Evaluate the model.
if train_utils.itstime(step, config.log_eval_steps, total_steps):
for val_name, (val_iter, val_steps) in val_iter_splits.items():
# Sets up evaluation metrics.
ece_num_bins = config.get('ece_num_bins', 15)
auc_num_bins = config.get('auc_num_bins', 1000)
ece = rm.metrics.ExpectedCalibrationError(num_bins=ece_num_bins)
calib_auc = rm.metrics.CalibrationAUC(correct_pred_as_pos_label=False)
oc_auc_0_5 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.005,
num_bins=auc_num_bins)
oc_auc_1 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.01,
num_bins=auc_num_bins)
oc_auc_2 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.02,
num_bins=auc_num_bins)
oc_auc_5 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.05,
num_bins=auc_num_bins)
# Runs evaluation loop.
ncorrect, loss, nseen = 0, 0, 0
for _, batch in zip(range(val_steps), val_iter):
batch_ncorrect, batch_losses, batch_n, batch_metric_args = (
evaluation_fn(opt.target, batch['image'],
batch['labels'], batch['mask']))
# All results are a replicated array shaped as follows:
# (local_devices, per_device_batch_size, elem_shape...)
# with each local device's entry being identical as they got psum'd.
# So let's just take the first one to the host as numpy.
ncorrect += np.sum(np.array(batch_ncorrect[0]))
loss += np.sum(np.array(batch_losses[0]))
nseen += np.sum(np.array(batch_n[0]))
# Here we parse batch_metric_args to compute uncertainty metrics.
# (e.g., ECE or Calibration AUC).
logits, labels, masks = batch_metric_args
masks = np.array(masks[0], dtype=np.bool)
logits = np.array(logits[0])
probs = jax.nn.softmax(logits)
# From one-hot to integer labels, as required by ECE.
int_labels = np.argmax(np.array(labels[0]), axis=-1)
int_preds = np.argmax(logits, axis=-1)
confidence = np.max(probs, axis=-1)
for p, c, l, d, m in zip(probs, confidence, int_labels,
int_preds, masks):
ece.add_batch(p[m, :], label=l[m])
calib_auc.add_batch(d[m], label=l[m], confidence=c[m])
# TODO(jereliu): Extend to support soft multi-class probabilities.
oc_auc_0_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])
oc_auc_1.add_batch(d[m], label=l[m], custom_binning_score=c[m])
oc_auc_2.add_batch(d[m], label=l[m], custom_binning_score=c[m])
oc_auc_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])
val_loss[val_name] = loss / nseen # Keep for reproducibility tests.
val_measurements = {
f'{val_name}_prec@1': ncorrect / nseen,
f'{val_name}_loss': val_loss[val_name],
f'{val_name}_ece': ece.result()['ece'],
f'{val_name}_calib_auc': calib_auc.result()['calibration_auc'],
f'{val_name}_oc_auc_0.5%': oc_auc_0_5.result()[
'collaborative_auc'],
f'{val_name}_oc_auc_1%': oc_auc_1.result()['collaborative_auc'],
f'{val_name}_oc_auc_2%': oc_auc_2.result()['collaborative_auc'],
f'{val_name}_oc_auc_5%': oc_auc_5.result()['collaborative_auc'],
}
writer.write_scalars(step, val_measurements)
pool.close()
pool.join()
# Return final training loss, validation loss, and fewshot results for
# reproducibility test cases.
return train_loss, val_loss, fewshot_results
if __name__ == '__main__':
app.run(main)
| 16,768 | 0 | 23 |
5cb926ee99ffde1b4ff7b06c86957f872b8bcb09 | 926 | py | Python | libs/const.py | Gianuzzi/turbulent-cloud | 0e226b6d7f0328677e153df64bbb7c12a606a8c2 | [
"MIT"
] | 4 | 2019-09-13T15:14:50.000Z | 2020-10-16T20:31:10.000Z | libs/const.py | Gianuzzi/turbulent-cloud | 0e226b6d7f0328677e153df64bbb7c12a606a8c2 | [
"MIT"
] | 1 | 2021-06-05T10:06:35.000Z | 2021-06-06T07:00:29.000Z | libs/const.py | Gianuzzi/turbulent-cloud | 0e226b6d7f0328677e153df64bbb7c12a606a8c2 | [
"MIT"
] | 9 | 2020-05-04T19:35:46.000Z | 2021-06-23T12:36:41.000Z | from __future__ import print_function
from numpy import sqrt
# all constants in cgs
# gravitational constant
G = 6.674e-8 # [ cm^3 g^-1 s^-2 ]
# avogadro constant
NA = 6.0221418e23 # [ mol^-1 ]
# boltzmann constant
KB = 1.3806504e-16 # [ erg K^-1 ]
# planck constant
H = 6.62606896e-27 # [ erg s ]
# speed of light in vacuum
c = 2.99792458e10 # [ cm s^-1 ]
# solar mass
msol = 1.989e33 # [ g ]
# solar radius
rsol = 6.955e10 # [ cm ]
# solar luminosity
lsol = 3.839e33 # [ erg s^-1 ]
# electron charge
qe = 4.80320427e-10 # [ esu ]
# atomic mass unit
amu = 1.6605390401e-24 # [ g ]
# ev2erg: electron-volt to erg conversion
ev2erg = 1.602177e-12 # [ erg eV^-1 ]
# parsec in cm
parsec = 3.08568025e18 # [ cm ]
# conversion factor for cosmological magnetic field
# (presumably converts simulation code units [1e10 msol, Mpc, km/s] to
# Gauss -- verify against the simulation's unit system)
bfac = sqrt(1e10 * msol) / sqrt(1e6 * parsec) * 1e5 / (1e6 * parsec)
# golden ratio for image heights
golden_ratio = (sqrt(5)-1)/2
| 18.897959 | 68 | 0.609071 | from __future__ import print_function
from numpy import sqrt
# all constants in cgs
# gravitational constant
G = 6.674e-8 # [ cm^3 g^-1 s^-1 ]
# avogadro constant
NA = 6.0221418e23 # [ ]
# boltzmann constant
KB = 1.3806504e-16 # [ erg K^-1 ]
# planck constant
H = 6.62606896e-27 # [ erg s ]
# speed of light in vacuum
c = 2.99792458e10 # [ cm s^-1 ]
# solar mass
msol = 1.989e33 # [ g ]
# solar radius
rsol = 6.955e10 # [ cm ]
# solar luminosity
lsol = 3.839e33 # [ erg s^-1 ]
# electron charge
qe = 4.80320427e-10 # [ esu ]
# atomic mass unit
amu = 1.6605390401e-24 # [ g ]
# ev2erg
ev2erg = 1.602177e-12 # [ erg eV^-1 ]
# parsec in cm
parsec = 3.08568025e18 # [ cm ]
# conversion factor for cosmological magnetic field
bfac = sqrt(1e10 * msol) / sqrt(1e6 * parsec) * 1e5 / (1e6 * parsec)
# golden ratio for image heights
golden_ratio = (sqrt(5)-1)/2
| 0 | 0 | 0 |
54f5493b4ec27c7e8f7513a1758d49ec0341f411 | 258 | py | Python | offthedialbot/env.py | DJam98/bot | 366a46bcca55098e1030a4f05d63e8872a791bf8 | [
"MIT"
] | 2 | 2020-08-31T15:45:07.000Z | 2021-09-26T22:15:43.000Z | offthedialbot/env.py | DJam98/bot | 366a46bcca55098e1030a4f05d63e8872a791bf8 | [
"MIT"
] | 17 | 2020-06-02T02:29:48.000Z | 2021-10-13T23:47:44.000Z | offthedialbot/env.py | DJam98/bot | 366a46bcca55098e1030a4f05d63e8872a791bf8 | [
"MIT"
] | 3 | 2020-05-31T23:17:10.000Z | 2022-03-09T22:23:22.000Z | """Get enviroment from the bot."""
import yaml

# Read the bot settings from config.yml in the working directory and expose
# the parsed mapping module-wide as `env`. Runs once at import time.
try:
    with open('config.yml') as config_file:
        env = yaml.load(config_file, Loader=yaml.FullLoader)
except FileNotFoundError:
    raise EnvironmentError("Cannot find 'config.yml' in root, have you created one?")
| 23.454545 | 85 | 0.697674 | """Get enviroment from the bot."""
import yaml
try:
with open('config.yml') as file:
env = yaml.load(file, Loader=yaml.FullLoader)
except FileNotFoundError:
raise EnvironmentError("Cannot find 'config.yml' in root, have you created one?")
| 0 | 0 | 0 |
5578f72d21e2fc9f70049986321e00b467f84d31 | 4,864 | py | Python | tests/forecast/forecast_unittest.py | AlertingAvian/python-meteorologist | 904089f4062e875148cbe5abc8fe4f7ff1d6a524 | [
"MIT"
] | null | null | null | tests/forecast/forecast_unittest.py | AlertingAvian/python-meteorologist | 904089f4062e875148cbe5abc8fe4f7ff1d6a524 | [
"MIT"
] | null | null | null | tests/forecast/forecast_unittest.py | AlertingAvian/python-meteorologist | 904089f4062e875148cbe5abc8fe4f7ff1d6a524 | [
"MIT"
] | null | null | null | """
Copyright (C) 2021 Patrick Maloney
"""
import unittest
from python_meteorologist import forecast as fc
if __name__ == '__main__':
unittest.main()
| 54.651685 | 119 | 0.584498 | """
Copyright (C) 2021 Patrick Maloney
"""
import unittest
from python_meteorologist import forecast as fc
class ForecastTest(unittest.TestCase):
"""Integration tests for the forecast module.

NOTE(review): every test constructs fc.Forecaster and (except the first)
calls get_forecast(20017), which appears to query a live service -- these
tests are network-dependent and may be slow or flaky offline. Confirm.
The repeated Forecaster/forecast setup could move into setUp().
"""
def test_missing_user_agent(self):
# Constructing a Forecaster without a user agent must raise TypeError.
with self.assertRaises(TypeError) as context:
fc.Forecaster()
self.assertTrue('User Agent is required.' in str(context.exception))
def test_forecast_properties(self):
forecaster = fc.Forecaster('Test Application, alertingavian@vivaldi.net')
forecast = forecaster.get_forecast(20017)
# properties
# make sure all are of the correct type
self.assertEqual(type(forecast.properties.updated), str)
self.assertEqual(type(forecast.properties.generated_at), str)
self.assertEqual(type(forecast.properties.update_time), str)
self.assertEqual(type(forecast.properties.valid_times), str)
self.assertEqual(type(forecast.properties.elevation), float)
# make sure all are > 0
self.assertGreater(len(forecast.properties.updated), 0)
self.assertGreater(len(forecast.properties.generated_at), 0)
self.assertGreater(len(forecast.properties.update_time), 0)
self.assertGreater(len(forecast.properties.valid_times), 0)
def test_forecast_periods(self): # do i really have to do it again for the hourly test. plz no
forecaster = fc.Forecaster('Test Application, alertingavian@vivaldi.net')
forecast = forecaster.get_forecast(20017)
# periods list
for period in forecast.periods:
# check type
self.assertEqual(type(period), fc.Period)
# individual periods
for period in forecast.periods:
# number
self.assertEqual(type(period.number), int, msg=f'Expected type number: int, actual: {type(period.number)}')
self.assertGreater(period.number, 0)
# name
self.assertEqual(type(period.name), str, msg=f'Expected type name: str, actual: {type(period.name)}')
self.assertGreater(len(period.name), 0)
# start_time
self.assertEqual(type(period.start_time), str, msg=f'Expected type start_time: str, actual: '
f'{type(period.start_time)}')
self.assertGreater(len(period.start_time), 0)
# end_time
self.assertEqual(type(period.end_time), str, msg=f'Expected type end_time: str, actual: '
f'{type(period.end_time)}')
self.assertGreater(len(period.end_time), 0)
# is_day_time
# NOTE(review): the msg below says 'int' and interpolates
# type(period.number) -- copy-paste slip; the assertion checks bool.
self.assertEqual(type(period.is_daytime), bool, msg=f'Expected type is_daytime: int, actual: '
f'{type(period.number)}')
# temperature
self.assertEqual(type(period.temperature), int, msg=f'Expected type temperature: int, actual: '
f'{type(period.temperature)}')
# temp_unit
self.assertEqual(type(period.temp_unit), str, msg=f'Expected type temp_unit: str, actual: '
f'{type(period.temp_unit)}')
self.assertEqual(len(period.temp_unit), 1)
# wind_speed
self.assertEqual(type(period.wind_speed), str, msg=f'Expected type wind_speed: str, actual: '
f'{type(period.wind_speed)}')
self.assertGreater(len(period.wind_speed), 0)
# wind_direction
self.assertEqual(type(period.wind_direction), str, msg=f'Expected type wind_direction: str, actual: '
f'{type(period.wind_direction)}')
self.assertGreater(len(period.wind_direction), 0) # what happens if wind speed is 0 and there is no dir
# icon
self.assertEqual(type(period.icon), str, msg=f'Expected type icon: str, actual: {type(period.icon)}')
self.assertTrue('http' in period.icon)
self.assertGreater(len(period.icon), 0)
# short_forecast
self.assertEqual(type(period.short_forecast), str, msg=f'Expected type short_forecast: str, actual: '
f'{type(period.short_forecast)}')
self.assertGreater(len(period.short_forecast), 0)
# long_forecast
self.assertEqual(type(period.long_forecast), str, msg=f'Expected type long_forecast: str, actual: '
f'{type(period.long_forecast)}')
self.assertGreater(len(period.long_forecast), 0)
if __name__ == '__main__':
unittest.main()
| 4,586 | 17 | 103 |
d6080f9ca4efb622c78bc0ddd5fbfee6c2237b41 | 1,831 | py | Python | pipeline.py | KseniaEremeeva/2021-2-level-ctlr | 53dc2d652a25bd94d3c0392768300b1e13a9dac9 | [
"MIT"
] | null | null | null | pipeline.py | KseniaEremeeva/2021-2-level-ctlr | 53dc2d652a25bd94d3c0392768300b1e13a9dac9 | [
"MIT"
] | null | null | null | pipeline.py | KseniaEremeeva/2021-2-level-ctlr | 53dc2d652a25bd94d3c0392768300b1e13a9dac9 | [
"MIT"
] | null | null | null | """
Pipeline for text processing implementation
"""
class EmptyDirectoryError(Exception):
"""
Raised when the directory to process exists but contains no data
"""
class InconsistentDatasetError(Exception):
"""
Raised when the dataset is corrupt:
- numeration is expected to start from 1 and to be continuous
- a number of text files must be equal to the number of meta files
- text files must not be empty
"""
class MorphologicalToken:
"""
Stores language params for each processed token
"""
# NOTE: all methods below are unimplemented stubs -- each currently
# returns None via `pass`.
def get_cleaned(self):
"""
Returns lowercased original form of a token
"""
pass
def get_single_tagged(self):
"""
Returns normalized lemma with MyStem tags
"""
pass
def get_multiple_tagged(self):
"""
Returns normalized lemma with PyMorphy tags
"""
pass
class CorpusManager:
"""
Works with articles and stores them
"""
# NOTE: the methods below are unimplemented stubs (each just `pass`).
def _scan_dataset(self):
"""
Register each dataset entry
"""
pass
def get_articles(self):
"""
Returns storage params
"""
pass
class TextProcessingPipeline:
"""
Process articles from corpus manager
"""
# NOTE: the methods below are unimplemented stubs (each just `pass`).
def run(self):
"""
Runs pipeline process scenario
"""
pass
def _process(self, raw_text: str):
"""
Processes each token and creates MorphToken class instance
"""
pass
def validate_dataset(path_to_validate):
"""
Validates folder with assets
"""
# NOTE: stub -- performs no validation yet and always returns None.
pass
if __name__ == "__main__":
main() | 17.776699 | 74 | 0.580557 | """
Pipeline for text processing implementation
"""
class EmptyDirectoryError(Exception):
"""
No data to process
"""
class InconsistentDatasetError(Exception):
"""
Corrupt data:
- numeration is expected to start from 1 and to be continuous
- a number of text files must be equal to the number of meta files
- text files must not be empty
"""
class MorphologicalToken:
"""
Stores language params for each processed token
"""
def __init__(self, original_word):
pass
def get_cleaned(self):
"""
Returns lowercased original form of a token
"""
pass
def get_single_tagged(self):
"""
Returns normalized lemma with MyStem tags
"""
pass
def get_multiple_tagged(self):
"""
Returns normalized lemma with PyMorphy tags
"""
pass
class CorpusManager:
"""
Works with articles and stores them
"""
def __init__(self, path_to_raw_txt_data: str):
pass
def _scan_dataset(self):
"""
Register each dataset entry
"""
pass
def get_articles(self):
"""
Returns storage params
"""
pass
class TextProcessingPipeline:
"""
Process articles from corpus manager
"""
def __init__(self, corpus_manager: CorpusManager):
pass
def run(self):
"""
Runs pipeline process scenario
"""
pass
def _process(self, raw_text: str):
"""
Processes each token and creates MorphToken class instance
"""
pass
def validate_dataset(path_to_validate):
"""
Validates folder with assets
"""
pass
def main():
# YOUR CODE HERE
pass
if __name__ == "__main__":
main() | 126 | 0 | 104 |
188b739dc12cc0c64c5e44a947e2482837869d07 | 802 | py | Python | bro-scripts/adversaries/hurricane-panda/rogue-dns/dynamic/scrape-alexa.py | kingtuna/cs-bro | 496b7aa4d469d722ff7e16971cd76de7b37b7e6d | [
"BSD-2-Clause"
] | 131 | 2015-04-25T02:38:17.000Z | 2022-01-20T19:38:04.000Z | bro-scripts/adversaries/hurricane-panda/rogue-dns/dynamic/scrape-alexa.py | CrowdStrike/NetworkDetection | fc4f66294e8cbbfb00460e5b1bb136f218293d39 | [
"BSD-2-Clause"
] | 3 | 2015-08-10T18:53:04.000Z | 2017-04-25T22:50:02.000Z | bro-scripts/adversaries/hurricane-panda/rogue-dns/dynamic/scrape-alexa.py | LaudateCorpus1/cs-bro | 47e9bb3627220946f1aaa76f3099d81b3dce6de3 | [
"BSD-2-Clause"
] | 46 | 2015-04-21T00:27:18.000Z | 2022-02-26T03:11:52.000Z | # Rudimentary script to collect domains in the Alexa top 500
# This script can be run as often as needed to refresh the list of domains
# CrowdStrike 2015
# josh.liburdi@crowdstrike.com
import requests
import bs4
# File containing Alexa top 500 domains
# This file name and path is referenced in the Bro script and can be modified
# File containing Alexa top 500 domains; a `with` block guarantees the
# handle is flushed and closed even if a request fails (the original
# opened the file and never closed it).
with open('alexa_domains.txt', 'w') as f:
    f.write('#fields\talexa\n')
    # Alexa's top 500 domains are spread across 20 pages
    # To change the number of domains collected (top 50, top 250), modify the range
    for num in range(0, 20):
        site = "http://www.alexa.com/topsites/global;" + str(num)
        page = requests.get(site)
        soup = bs4.BeautifulSoup(page.text)
        # Ranked sites link to /siteinfo/<domain>; write out the domain part.
        for link in soup.find_all('a'):
            if 'siteinfo' in str(link):
                f.write((link.get('href')).split("/")[2] + "\n")
| 33.416667 | 79 | 0.713217 | # Rudimentary script to collect domains in the Alexa top 500
# This script can be run as often as needed to refresh the list of domains
# CrowdStrike 2015
# josh.liburdi@crowdstrike.com
import requests
import bs4
# File containing Alexa top 500 domains
# This file name and path is referenced in the Bro script and can be modified
f = open('alexa_domains.txt','w')
f.write('#fields\talexa\n')
# Alexa's top 500 domains are spread across 20 pages
# To change the number of domains collected (top 50, top 250), modify the range
for num in range(0,20):
site = "http://www.alexa.com/topsites/global;" + str(num)
page = requests.get(site)
soup = bs4.BeautifulSoup(page.text)
for link in soup.find_all('a'):
if 'siteinfo' in str(link):
f.write((link.get('href')).split("/")[2] + "\n" )
| 0 | 0 | 0 |
07f293fce141c0a95ea48d7d46ec2d683d7f8b5a | 839 | py | Python | rnn_space1/genotypes.py | Sette/CoNAS | 19b061fb7177fb2d2b0eca94f16e8d4b57178859 | [
"Apache-2.0"
] | null | null | null | rnn_space1/genotypes.py | Sette/CoNAS | 19b061fb7177fb2d2b0eca94f16e8d4b57178859 | [
"Apache-2.0"
] | null | null | null | rnn_space1/genotypes.py | Sette/CoNAS | 19b061fb7177fb2d2b0eca94f16e8d4b57178859 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
# A discovered recurrent-cell architecture:
#   recurrent -- list of (activation name, input node index) pairs
#   concat    -- indices of the nodes concatenated to form the cell output
# (interpretation inferred from the presets below -- confirm against the
# model-building code that consumes Genotype)
Genotype = namedtuple('Genotype', 'recurrent concat')
# Candidate activations for the recurrent-cell search space.
# NOTE(review): the ENAS/DARTS presets below also use 'tanh', which is not
# listed here -- confirm the op factory still accepts it.
PRIMITIVES = [
'none',
'relu',
'sigmoid',
'identity'
]
# Number of intermediate nodes in a cell / number of outputs concatenated.
STEPS = 8
CONCAT = 8
# Hand-transcribed reference architectures.
ENAS = Genotype(
recurrent = [
('tanh', 0),
('tanh', 1),
('relu', 1),
('tanh', 3),
('tanh', 3),
('relu', 3),
('relu', 4),
('relu', 7),
('relu', 8),
('relu', 8),
('relu', 8),
],
concat = [2, 5, 6, 9, 10, 11]
)
DARTS_V1 = Genotype(recurrent=[('relu', 0), ('relu', 1), ('tanh', 2), ('relu', 3), ('relu', 4), ('identity', 1), ('relu', 5), ('relu', 1)], concat=range(1, 9))
DARTS_V2 = Genotype(recurrent=[('sigmoid', 0), ('relu', 1), ('relu', 1), ('identity', 1), ('tanh', 2), ('sigmoid', 5), ('tanh', 3), ('relu', 5)], concat=range(1, 9))
# Default architecture used when none is specified explicitly.
DARTS = DARTS_V2
| 23.305556 | 165 | 0.466031 | from collections import namedtuple
Genotype = namedtuple('Genotype', 'recurrent concat')
PRIMITIVES = [
'none',
'relu',
'sigmoid',
'identity'
]
STEPS = 8
CONCAT = 8
ENAS = Genotype(
recurrent = [
('tanh', 0),
('tanh', 1),
('relu', 1),
('tanh', 3),
('tanh', 3),
('relu', 3),
('relu', 4),
('relu', 7),
('relu', 8),
('relu', 8),
('relu', 8),
],
concat = [2, 5, 6, 9, 10, 11]
)
DARTS_V1 = Genotype(recurrent=[('relu', 0), ('relu', 1), ('tanh', 2), ('relu', 3), ('relu', 4), ('identity', 1), ('relu', 5), ('relu', 1)], concat=range(1, 9))
DARTS_V2 = Genotype(recurrent=[('sigmoid', 0), ('relu', 1), ('relu', 1), ('identity', 1), ('tanh', 2), ('sigmoid', 5), ('tanh', 3), ('relu', 5)], concat=range(1, 9))
DARTS = DARTS_V2
| 0 | 0 | 0 |
5a6f449a39d5bd1dfc324b5744c4c9bd209e735e | 4,757 | py | Python | cgt_calc/model.py | quentin389/capital-gains-calculator | 63641e19d5479499e1de1e28d3c9672b35f7e2dd | [
"MIT"
] | 13 | 2021-04-21T21:08:35.000Z | 2022-02-28T22:58:38.000Z | cgt_calc/model.py | quentin389/capital-gains-calculator | 63641e19d5479499e1de1e28d3c9672b35f7e2dd | [
"MIT"
] | 19 | 2021-05-09T10:20:25.000Z | 2022-03-29T16:11:46.000Z | cgt_calc/model.py | quentin389/capital-gains-calculator | 63641e19d5479499e1de1e28d3c9672b35f7e2dd | [
"MIT"
] | 9 | 2021-05-03T16:52:50.000Z | 2022-02-28T21:22:59.000Z | """Model classes."""
from __future__ import annotations
from dataclasses import dataclass
import datetime
from decimal import Decimal
from enum import Enum
from typing import Dict, List
from .util import round_decimal
@dataclass
class HmrcTransactionData:
    """Hmrc transaction figures."""

    # Number of shares involved.
    quantity: Decimal
    # Total consideration for those shares.
    amount: Decimal
    # Total fees paid.
    fees: Decimal


# Per-day transaction log: date -> symbol -> aggregated figures.
HmrcTransactionLog = Dict[datetime.date, Dict[str, HmrcTransactionData]]
class ActionType(Enum):
    """Type of transaction action.

    The action recorded on a BrokerTransaction row.
    """

    BUY = 1
    SELL = 2
    TRANSFER = 3
    STOCK_ACTIVITY = 4
    DIVIDEND = 5
    TAX = 6
    FEE = 7
    ADJUSTMENT = 8
    CAPITAL_GAIN = 9
    SPIN_OFF = 10
    INTEREST = 11
    REINVEST_SHARES = 12
    REINVEST_DIVIDENDS = 13
    WIRE_FUNDS_RECEIVED = 14
@dataclass
class BrokerTransaction:
    """Broker transaction data (one row of a broker export)."""

    date: datetime.date
    action: ActionType
    # Ticker symbol; None for rows that are not tied to a security.
    symbol: str | None
    description: str
    # Share count and unit price; None where not applicable.
    quantity: Decimal | None
    price: Decimal | None
    fees: Decimal
    # Total cash movement; None where not applicable.
    amount: Decimal | None
    currency: str
    broker: str
class RuleType(Enum):
    """HMRC rule type.

    Names follow HMRC share-identification rules (Section 104 holding,
    same-day matching, bed & breakfast matching).
    """

    SECTION_104 = 1
    SAME_DAY = 2
    BED_AND_BREAKFAST = 3
class CalculationEntry:  # noqa: SIM119 # this has non-trivial constructor
    """Calculation entry for final report."""

    def __init__(
        self,
        rule_type: RuleType,
        quantity: Decimal,
        amount: Decimal,
        fees: Decimal,
        new_quantity: Decimal,
        new_pool_cost: Decimal,
        gain: Decimal | None = None,
        allowable_cost: Decimal | None = None,
        bed_and_breakfast_date_index: datetime.date | None = None,
    ):
        """Create calculation entry."""
        self.rule_type = rule_type
        self.quantity = quantity
        self.amount = amount
        self.fees = fees
        self.new_quantity = new_quantity
        self.new_pool_cost = new_pool_cost
        self.bed_and_breakfast_date_index = bed_and_breakfast_date_index
        # Optional figures default to zero when not supplied.
        self.allowable_cost = Decimal(0) if allowable_cost is None else allowable_cost
        self.gain = Decimal(0) if gain is None else gain
        if self.amount >= 0:
            # Sanity check: when proceeds are non-negative the gain must
            # reconcile with proceeds minus allowable cost.
            assert self.gain == self.amount - self.allowable_cost

    def __repr__(self) -> str:
        """Return print representation."""
        return f"<CalculationEntry {str(self)}>"

    def __str__(self) -> str:
        """Return string representation."""
        pieces = [
            self.rule_type.name.replace("_", " "),
            f"quantity: {self.quantity}",
            f"disposal proceeds: {self.amount}",
            f"allowable cost: {self.allowable_cost}",
            f"fees: {self.fees}",
            f"gain: {self.gain}",
        ]
        return ", ".join(pieces)


# Per-day calculation log: date -> symbol -> entries produced that day.
CalculationLog = Dict[datetime.date, Dict[str, List[CalculationEntry]]]
@dataclass
class CapitalGainsReport:
    """Store calculated report."""

    tax_year: int
    # symbol -> (quantity held, pool cost) at the end of the tax year.
    portfolio: dict[str, tuple[Decimal, Decimal]]
    disposal_count: int
    disposal_proceeds: Decimal
    allowable_costs: Decimal
    capital_gain: Decimal
    capital_loss: Decimal
    capital_gain_allowance: Decimal | None
    calculation_log: CalculationLog

    def total_gain(self) -> Decimal:
        """Total capital gain."""
        return self.capital_gain + self.capital_loss

    def taxable_gain(self) -> Decimal:
        """Taxable gain with current allowance."""
        assert self.capital_gain_allowance is not None
        return max(Decimal(0), self.total_gain() - self.capital_gain_allowance)

    def __repr__(self) -> str:
        """Return string representation."""
        # NOTE(review): the label says 'CalculationEntry' -- looks
        # copy-pasted from that class; confirm before changing output.
        return f"<CalculationEntry: {str(self)}>"

    def __str__(self) -> str:
        """Return string representation."""
        lines = [f"Portfolio at the end of {self.tax_year}/{self.tax_year + 1} tax year:"]
        for symbol, (quantity, amount) in self.portfolio.items():
            if quantity > 0:
                lines.append(
                    f" {symbol}: {round_decimal(quantity, 2)}, "
                    f"£{round_decimal(amount, 2)}"
                )
        lines.append(f"For tax year {self.tax_year}/{self.tax_year + 1}:")
        lines.append(f"Number of disposals: {self.disposal_count}")
        lines.append(f"Disposal proceeds: £{self.disposal_proceeds}")
        lines.append(f"Allowable costs: £{self.allowable_costs}")
        lines.append(f"Capital gain: £{self.capital_gain}")
        lines.append(f"Capital loss: £{-self.capital_loss}")
        lines.append(f"Total capital gain: £{self.total_gain()}")
        if self.capital_gain_allowance is not None:
            lines.append(f"Taxable capital gain: £{self.taxable_gain()}")
        else:
            lines.append("WARNING: Missing allowance for this tax year")
        return "\n".join(lines) + "\n"
| 28.656627 | 88 | 0.6214 | """Model classes."""
from __future__ import annotations
from dataclasses import dataclass
import datetime
from decimal import Decimal
from enum import Enum
from typing import Dict, List
from .util import round_decimal
@dataclass
class HmrcTransactionData:
"""Hmrc transaction figures."""
quantity: Decimal
amount: Decimal
fees: Decimal
# For mapping of dates to int
HmrcTransactionLog = Dict[datetime.date, Dict[str, HmrcTransactionData]]
class ActionType(Enum):
"""Type of transaction action."""
BUY = 1
SELL = 2
TRANSFER = 3
STOCK_ACTIVITY = 4
DIVIDEND = 5
TAX = 6
FEE = 7
ADJUSTMENT = 8
CAPITAL_GAIN = 9
SPIN_OFF = 10
INTEREST = 11
REINVEST_SHARES = 12
REINVEST_DIVIDENDS = 13
WIRE_FUNDS_RECEIVED = 14
@dataclass
class BrokerTransaction:
"""Broken transaction data."""
date: datetime.date
action: ActionType
symbol: str | None
description: str
quantity: Decimal | None
price: Decimal | None
fees: Decimal
amount: Decimal | None
currency: str
broker: str
class RuleType(Enum):
"""HMRC rule type."""
SECTION_104 = 1
SAME_DAY = 2
BED_AND_BREAKFAST = 3
class CalculationEntry: # noqa: SIM119 # this has non-trivial constructor
"""Calculation entry for final report."""
def __init__(
self,
rule_type: RuleType,
quantity: Decimal,
amount: Decimal,
fees: Decimal,
new_quantity: Decimal,
new_pool_cost: Decimal,
gain: Decimal | None = None,
allowable_cost: Decimal | None = None,
bed_and_breakfast_date_index: datetime.date | None = None,
):
"""Create calculation entry."""
self.rule_type = rule_type
self.quantity = quantity
self.amount = amount
self.allowable_cost = (
allowable_cost if allowable_cost is not None else Decimal(0)
)
self.fees = fees
self.gain = gain if gain is not None else Decimal(0)
self.new_quantity = new_quantity
self.new_pool_cost = new_pool_cost
self.bed_and_breakfast_date_index = bed_and_breakfast_date_index
if self.amount >= 0:
assert self.gain == self.amount - self.allowable_cost
def __repr__(self) -> str:
"""Return print representation."""
return f"<CalculationEntry {str(self)}>"
def __str__(self) -> str:
"""Return string representation."""
return (
f"{self.rule_type.name.replace('_', ' ')}, "
f"quantity: {self.quantity}, "
f"disposal proceeds: {self.amount}, "
f"allowable cost: {self.allowable_cost}, "
f"fees: {self.fees}, "
f"gain: {self.gain}"
)
CalculationLog = Dict[datetime.date, Dict[str, List[CalculationEntry]]]
@dataclass
class CapitalGainsReport:
"""Store calculated report."""
tax_year: int
portfolio: dict[str, tuple[Decimal, Decimal]]
disposal_count: int
disposal_proceeds: Decimal
allowable_costs: Decimal
capital_gain: Decimal
capital_loss: Decimal
capital_gain_allowance: Decimal | None
calculation_log: CalculationLog
def total_gain(self) -> Decimal:
"""Total capital gain."""
return self.capital_gain + self.capital_loss
def taxable_gain(self) -> Decimal:
"""Taxable gain with current allowance."""
assert self.capital_gain_allowance is not None
return max(Decimal(0), self.total_gain() - self.capital_gain_allowance)
def __repr__(self) -> str:
"""Return string representation."""
return f"<CalculationEntry: {str(self)}>"
def __str__(self) -> str:
"""Return string representation."""
out = f"Portfolio at the end of {self.tax_year}/{self.tax_year + 1} tax year:\n"
for symbol, (quantity, amount) in self.portfolio.items():
if quantity > 0:
out += (
f" {symbol}: {round_decimal(quantity, 2)}, "
f"£{round_decimal(amount, 2)}\n"
)
out += f"For tax year {self.tax_year}/{self.tax_year + 1}:\n"
out += f"Number of disposals: {self.disposal_count}\n"
out += f"Disposal proceeds: £{self.disposal_proceeds}\n"
out += f"Allowable costs: £{self.allowable_costs}\n"
out += f"Capital gain: £{self.capital_gain}\n"
out += f"Capital loss: £{-self.capital_loss}\n"
out += f"Total capital gain: £{self.total_gain()}\n"
if self.capital_gain_allowance is not None:
out += f"Taxable capital gain: £{self.taxable_gain()}\n"
else:
out += "WARNING: Missing allowance for this tax year\n"
return out
| 0 | 0 | 0 |
063c484f9eddf70dfded77b0c89313a086c04b30 | 218 | py | Python | coop_bar/apps.py | BenjaminCherpas/coop-bar | 6e5583e08ff6f651b663720e85b0b44cbccc7aab | [
"BSD-3-Clause"
] | null | null | null | coop_bar/apps.py | BenjaminCherpas/coop-bar | 6e5583e08ff6f651b663720e85b0b44cbccc7aab | [
"BSD-3-Clause"
] | null | null | null | coop_bar/apps.py | BenjaminCherpas/coop-bar | 6e5583e08ff6f651b663720e85b0b44cbccc7aab | [
"BSD-3-Clause"
] | 1 | 2019-09-09T11:40:06.000Z | 2019-09-09T11:40:06.000Z | # -*- coding: utf-8 -*-
"""
Customizable tool bar
"""
from __future__ import unicode_literals
from django.apps import AppConfig
| 16.769231 | 39 | 0.711009 | # -*- coding: utf-8 -*-
"""
Customizable tool bar
"""
from __future__ import unicode_literals
from django.apps import AppConfig
class CoopBarAppConfig(AppConfig):
    """Django application configuration for the coop_bar (customizable tool bar) package."""
    # Dotted app label Django uses to locate the application.
    name = 'coop_bar'
    # Human-readable name shown e.g. in the Django admin.
    verbose_name = "coop Bar"
| 0 | 65 | 23 |
0dde81c70adbf29bc3794f9e088aa4f58943621a | 963 | py | Python | pdfs/Commands/Attach.py | tmearnest/sbd | 92e59ed6286ff7b6a036688db086e47951f07cdd | [
"MIT"
] | null | null | null | pdfs/Commands/Attach.py | tmearnest/sbd | 92e59ed6286ff7b6a036688db086e47951f07cdd | [
"MIT"
] | null | null | null | pdfs/Commands/Attach.py | tmearnest/sbd | 92e59ed6286ff7b6a036688db086e47951f07cdd | [
"MIT"
] | null | null | null | from .Command import Command
from .Completers import citekeyCompleter
from argcomplete.completers import FilesCompleter
| 35.666667 | 117 | 0.661475 | from .Command import Command
from .Completers import citekeyCompleter
from argcomplete.completers import FilesCompleter
class Attach(Command):
    """CLI subcommand intended to attach a supplementary file to a database entry."""

    command = 'attach'
    help = "Attach a supplementary file"

    def set_args(self, subparser):
        """Register arguments: citation key, attachment file path and an optional display name."""
        subparser.add_argument('key', metavar='CITE_KEY', type=str).completer = citekeyCompleter
        subparser.add_argument('file', metavar='ATTACHMENT', type=str).completer = FilesCompleter(directories=False)
        subparser.add_argument('--name', '-n', metavar='NAME', type=str)

    def run(self, args):
        """Find the entry matching args.key, update its tags and persist the database."""
        # Imported lazily so the CLI can build its parser without touching the database layer.
        from ..Database import Database
        from ..Exceptions import UserException
        db = Database(dataDir=args.data_dir)
        try:
            e = next(x for x in db.works if x.key() == args.key)
        except StopIteration:
            raise UserException("Key {} not found".format(args.key))
        # NOTE(review): this mutates tags via args.add/args.remove, neither of which
        # set_args declares (it declares key/file/name), and args.file is never used.
        # Looks copy-pasted from a tag command rather than attaching a file -- confirm.
        e.tags = sorted((set(e.tags) | set(args.add)) - set(args.remove))
        db.save()
| 701 | 119 | 23 |
afd967c3188fbb324693e3925d0624ce1f560347 | 1,044 | py | Python | ichnaea/alembic/versions/cad2875fd8cb_extend_api_keys.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | 1 | 2018-01-18T16:02:43.000Z | 2018-01-18T16:02:43.000Z | ichnaea/alembic/versions/cad2875fd8cb_extend_api_keys.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | null | null | null | ichnaea/alembic/versions/cad2875fd8cb_extend_api_keys.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | 1 | 2018-01-19T17:56:48.000Z | 2018-01-19T17:56:48.000Z | """Extend api keys with sample_store columns.
Revision ID: cad2875fd8cb
Revises: 385f842b2526
Create Date: 2017-02-22 11:52:47.837989
"""
import logging
from alembic import op
import sqlalchemy as sa
log = logging.getLogger('alembic.migration')
revision = 'cad2875fd8cb'
down_revision = '385f842b2526'
| 24.27907 | 63 | 0.672414 | """Extend api keys with sample_store columns.
Revision ID: cad2875fd8cb
Revises: 385f842b2526
Create Date: 2017-02-22 11:52:47.837989
"""
import logging
from alembic import op
import sqlalchemy as sa
log = logging.getLogger('alembic.migration')
revision = 'cad2875fd8cb'
down_revision = '385f842b2526'
def upgrade():
    """Add the store_sample_locate/store_sample_submit columns to api_key and backfill them."""
    log.info('Add store_sample_* columns to api_key table.')
    # Add both columns in a single ALTER to avoid rebuilding the table twice.
    op.execute(sa.text(
        'ALTER TABLE api_key '
        'ADD COLUMN `store_sample_locate` TINYINT(4) '
        'AFTER `fallback_cache_expire`, '
        'ADD COLUMN `store_sample_submit` TINYINT(4) '
        'AFTER `store_sample_locate`'
    ))
    # Backfill existing rows; 100 presumably means a 100% sampling rate -- see model defaults.
    op.execute(sa.text(
        'UPDATE api_key SET store_sample_locate = 100'
    ))
    op.execute(sa.text(
        'UPDATE api_key SET store_sample_submit = 100'
    ))
def downgrade():
    """Revert the upgrade by dropping both store_sample_* columns from api_key."""
    log.info('Drop store_sample_* columns from api_key table.')
    drop_columns = sa.text(
        'ALTER TABLE api_key '
        'DROP COLUMN `store_sample_locate`, '
        'DROP COLUMN `store_sample_submit`'
    )
    op.execute(drop_columns)
| 689 | 0 | 46 |
501b7c9a2ec7eabd0acaa2501e9da9fd088e3bb7 | 342 | py | Python | hello/home/migrations/0008_remove_category_slug.py | pratikd2124/Online-Library | e93ef1f42476d9f1b4b814f0a8254fd8cfbee178 | [
"MIT"
] | null | null | null | hello/home/migrations/0008_remove_category_slug.py | pratikd2124/Online-Library | e93ef1f42476d9f1b4b814f0a8254fd8cfbee178 | [
"MIT"
] | null | null | null | hello/home/migrations/0008_remove_category_slug.py | pratikd2124/Online-Library | e93ef1f42476d9f1b4b814f0a8254fd8cfbee178 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-12-04 11:44
from django.db import migrations
| 19 | 48 | 0.55848 | # Generated by Django 3.1.3 on 2020-12-04 11:44
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration removing the `slug` field from the Category model."""

    dependencies = [
        ('home', '0007_auto_20201204_1711'),
    ]

    operations = [
        # Drops home.Category.slug; any data stored in the column is lost on apply.
        migrations.RemoveField(
            model_name='category',
            name='slug',
        ),
    ]
| 0 | 230 | 25 |
0276380d0e3e8ed8b8c414b76de322738320d63d | 281 | py | Python | glitter/tests/urls.py | dhamaniasad/django-glitter | b9b0a3d8b49d5d9b840656f84564ba0a6e016f98 | [
"BSD-3-Clause"
] | 3 | 2017-06-01T16:22:18.000Z | 2018-08-22T21:45:55.000Z | glitter/tests/urls.py | blancltd/django-glitter | b9b0a3d8b49d5d9b840656f84564ba0a6e016f98 | [
"BSD-3-Clause"
] | 85 | 2016-02-25T10:34:03.000Z | 2017-04-03T11:07:59.000Z | glitter/tests/urls.py | blancltd/django-glitter | b9b0a3d8b49d5d9b840656f84564ba0a6e016f98 | [
"BSD-3-Clause"
] | 1 | 2016-08-02T08:21:19.000Z | 2016-08-02T08:21:19.000Z | from django.conf.urls import include, url
from django.contrib import admin
from glitter.blockadmin import blocks
# Test-suite URL configuration: mounts the stock Django admin and the
# glitter block admin side by side.
urlpatterns = [
    # Django admin
    url(r'^admin/', include(admin.site.urls)),
    # Glitter block admin
    url(r'^blockadmin/', include(blocks.site.urls)),
]
| 18.733333 | 52 | 0.704626 | from django.conf.urls import include, url
from django.contrib import admin
from glitter.blockadmin import blocks
# Test-suite URL configuration: mounts the stock Django admin and the
# glitter block admin side by side.
urlpatterns = [
    # Django admin
    url(r'^admin/', include(admin.site.urls)),
    # Glitter block admin
    url(r'^blockadmin/', include(blocks.site.urls)),
]
| 0 | 0 | 0 |
a84ce8da40082bc47dcc1ec10e32fb650ba76ef4 | 637 | py | Python | plot_rectangles.py | bmtgoncalves/EABDA17 | c1e7579018da2b26c109967a0c1b3cc82bb26456 | [
"MIT"
] | 6 | 2017-07-05T17:17:22.000Z | 2017-08-02T18:49:16.000Z | plot_rectangles.py | bmtgoncalves/EABDA17 | c1e7579018da2b26c109967a0c1b3cc82bb26456 | [
"MIT"
] | null | null | null | plot_rectangles.py | bmtgoncalves/EABDA17 | c1e7579018da2b26c109967a0c1b3cc82bb26456 | [
"MIT"
] | 2 | 2017-07-05T20:40:55.000Z | 2020-06-08T10:47:02.000Z | import json
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Load the previously saved reverse-geocoding results, e.g. from
# gmaps.reverse_geocode((40.6413111, -73.77813909999999)).
# Fix: use a context manager so the file handle is closed instead of leaked.
with open('reverse.json') as fp:
    reverse_result = json.load(fp)
plt.figure()
# Rough bounding box of the continental US (lon, lat).
plt.xlim(-130, -65)
plt.ylim(20, 50)
currentAxis = plt.gca()
for result in reverse_result:
    viewport = result["geometry"]["viewport"]
    # Draw each viewport as a translucent rectangle anchored at its SW corner.
    xy = (viewport['southwest']['lng'], viewport['southwest']['lat'])
    width = viewport['northeast']['lng'] - viewport['southwest']['lng']
    height = viewport['northeast']['lat'] - viewport['southwest']['lat']
    currentAxis.add_patch(patches.Rectangle(xy, width, height, alpha=.1))
plt.savefig('reverse.png') | 30.333333 | 104 | 0.726845 | import json
import matplotlib.pyplot as plt
import matplotlib.patches as patches
reverse_result = json.load(open('reverse.json')) #gmaps.reverse_geocode((40.6413111,-73.77813909999999))
plt.figure()
plt.xlim(-130,-65)
plt.ylim(20,50)
currentAxis = plt.gca()
for result in reverse_result:
viewport = result["geometry"]["viewport"]
xy = (viewport['southwest']['lng'], viewport['southwest']['lat'])
width = viewport['northeast']['lng']-viewport['southwest']['lng']
height = viewport['northeast']['lat']-viewport['southwest']['lat']
currentAxis.add_patch(patches.Rectangle(xy, width, height, alpha=.1))
plt.savefig('reverse.png') | 0 | 0 | 0 |
f245bea1215ea6a232a2ccd3c3b270ff9f9eb316 | 21,199 | py | Python | EventDetection/event_detection/postprocessing.py | tomaskala/bachelor-thesis | fd412fba6ecd2838b034423c8f8db48c50c1a558 | [
"Unlicense"
] | null | null | null | EventDetection/event_detection/postprocessing.py | tomaskala/bachelor-thesis | fd412fba6ecd2838b034423c8f8db48c50c1a558 | [
"Unlicense"
] | null | null | null | EventDetection/event_detection/postprocessing.py | tomaskala/bachelor-thesis | fd412fba6ecd2838b034423c8f8db48c50c1a558 | [
"Unlicense"
] | 1 | 2022-03-14T15:56:55.000Z | 2022-03-14T15:56:55.000Z | import logging
import math
from time import time
import gensim
import numpy as np
import sklearn.mixture as gmm
from scipy.optimize import curve_fit
from scipy.signal import periodogram
from scipy.stats import norm
WINDOW = 7 # Length of the window to use when computing the moving average.
def _moving_average(vector, window):
    """
    Smooth `vector` with an unweighted moving average of the given window length.

    :param vector: the vector whose moving average to compute
    :param window: length of the averaging window
    :return: moving average of length len(vector) - window + 1
    """
    # Uniform kernel; 'valid' keeps only fully-covered positions.
    kernel = np.full(window, 1.0 / window)
    return np.convolve(vector, kernel, 'valid')
def spectral_analysis(vectors):
    """
    Run a periodogram over each row of `vectors` and extract, per row, the dominant
    power spectrum (DPS) and the corresponding dominant period (DP).

    :param vectors: matrix whose rows to analyze
    :return: DPS, DP (one entry per row)
    """
    started = time()
    n_features = vectors.shape[0]
    freqs, pgram = periodogram(vectors)
    # Frequency 0 maps to an infinite period; silence the division warning.
    with np.errstate(divide='ignore'):
        periods = np.tile(1 / freqs, (n_features, 1))
    row_ids = np.arange(n_features)
    peak_cols = np.argmax(pgram, axis=1)
    dps = pgram[row_ids, peak_cols]
    dp = periods[row_ids, peak_cols].astype(int)
    logging.info('Performed spectral analysis of %d trajectories in %fs.', n_features, time() - started)
    logging.info('Frequencies: %s, %s', str(freqs.shape), str(freqs.dtype))
    logging.info('Periodogram: %s, %s', str(pgram.shape), str(pgram.dtype))
    logging.info('DPS: %s, %s', str(dps.shape), str(dps.dtype))
    logging.info('DP: %s, %s', str(dp.shape), str(dp.dtype))
    return dps, dp
def estimate_distribution_aperiodic(event_trajectory):
    """
    Model the event trajectory by a Gaussian curve. The parameters (mean and standard deviation) are estimated
    using non-linear least squares.

    :param event_trajectory: trajectory of the event
    :return: mean and standard deviation of the model
    """
    def gaussian_curve(value, loc, scale):
        # Model function handed to curve_fit: a Gaussian PDF parameterized by (loc, scale).
        # Fix: this helper was missing, leaving curve_fit with an undefined name (NameError).
        return norm.pdf(value, loc=loc, scale=scale)

    n_days = len(event_trajectory)
    ma = _moving_average(event_trajectory, WINDOW)
    ma_mean = np.mean(ma)
    ma_std = np.std(ma)
    # Days whose value exceeds mean + std of the smoothed signal count as bursty.
    cutoff = ma_mean + ma_std
    peak_indices = np.where(event_trajectory > cutoff)
    peak_days = peak_indices[0]
    peaks = event_trajectory[peak_indices].reshape(-1)
    peaks /= np.sum(peaks)  # Normalize the trajectory so it can be interpreted as probability.
    # Initial guess for the parameters is mu ~ center of the peak period, sigma ~ quarter of the peak period length.
    popt, pcov = curve_fit(gaussian_curve, peak_days, peaks, p0=(peak_days[len(peak_days) // 2], len(peak_days) / 4),
                           bounds=(0.0, n_days))
    return popt  # Mean, Std
def estimate_distribution_periodic(event_trajectory, event_period):
    """
    Fit a diagonal-covariance GMM to the bursty days of a periodic event and convert
    each component to Cauchy-style parameters: location (the GMM mean) and half width
    at half maximum, HWHM = sqrt(2 * ln(2)) * sigma.

    :param event_trajectory: trajectory of the event
    :param event_period: dominant period of the event
    :return: list of (loc, hwhm) pairs, one per burst -- length = stream_length / dominant_period
    """
    n_days = len(event_trajectory)
    day_column = np.arange(n_days).reshape(-1, 1)
    smoothed = _moving_average(event_trajectory.reshape(-1), WINDOW)
    cutoff = np.mean(smoothed) + np.std(smoothed)
    # Keep only (day, value) pairs whose value exceeds the bursty cutoff.
    observations = np.hstack((day_column, event_trajectory.reshape(-1, 1)))
    observations = observations[observations[:, 1] > cutoff, :]
    # The cutoff can leave fewer observations than expected bursts; cap the component
    # count so the mixture fit remains well-posed.
    n_components = min(math.floor(n_days / event_period), len(observations))
    mixture = gmm.GaussianMixture(n_components=int(n_components), covariance_type='diag',
                                  init_params='kmeans', random_state=1)
    mixture.fit(observations)
    # Location from the day coordinate of each mean, HWHM from its variance.
    return [(mean_[0], np.sqrt(2 * np.log(2)) * np.sqrt(cov_[0]))
            for mean_, cov_ in zip(mixture.means_, mixture.covariances_)]
def create_events_trajectories(events, feature_trajectories, dps):
    """
    Build one trajectory per event as the DPS-weighted average of its feature
    trajectories, and compute each event's dominant period via spectral analysis.

    :param events: detected events (list of arrays of their feature indices)
    :param feature_trajectories: matrix of feature trajectories as row vectors
    :param dps: dominant power spectra of the processed features
    :return: trajectories of the given events and their dominant periods
    """
    event_trajectories = np.empty((len(events), feature_trajectories.shape[1]), dtype=float)
    for row, keyword_ids in enumerate(events):
        member_trajectories = feature_trajectories[keyword_ids]
        weights = dps[keyword_ids]
        # Weighted average of member trajectories, weights being their DPS values.
        event_trajectories[row] = (member_trajectories.T @ weights) / np.sum(weights)
    _, dominant_periods = spectral_analysis(event_trajectories)
    return event_trajectories, dominant_periods
def keywords2docids_simple(events, feature_trajectories, dps, dtd_matrix, bow_matrix):
    """
    Convert the keyword representation of events to document representation. Do this in a simple manner by using all
    documents published in an event bursty period containing all its keywords. Although this punishes having too many
    distinct keywords, it may have some information value, e.g. events with an empty document set are likely garbage.
    Would work only on lemmatized texts, obviously.

    :param events: list of events which in turn are lists of their keyword indices
    :param feature_trajectories: matrix of feature trajectories as row vectors
    :param dps: dominant power spectra of the processed features
    :param dtd_matrix: document-to-day matrix
    :param bow_matrix: bag-of-words matrix
    :return: list of tuples (burst_start, burst_end, burst_documents) for all bursts of each event (that is, each inner
        list represents an event and contains 1 tuple for every aperiodic event and T tuples for every periodic event
        with T = stream_length / event_period
    """
    def process_burst(loc, scale, aperiodic):
        # Fix: this helper was missing, so every call below raised NameError. It resolves
        # one burst (location, scale) of the current `event` to its document set.
        # If an event burst starts right at day 0, this would get negative.
        start = max(math.floor(loc - scale), 0)
        # If an event burst ends at stream length, this would exceed the boundary.
        end = min(math.ceil(loc + scale), n_days - 1)
        # All documents published on burst days. There is exactly one '1' in every row.
        burst_docs_all, _ = dtd_matrix[:, start:end + 1].nonzero()
        # Documents containing at least one of the event word features.
        docs_either_words = bow_matrix[:, event]
        # Documents containing all of the event word features.
        docs_words = np.where(docs_either_words.getnnz(axis=1) == len(event))[0]
        # Documents both published on burst days and containing all event word features. Do not
        # assume unique for periodic events, as some bursty periods may overlap.
        docs_both = np.intersect1d(burst_docs_all, docs_words, assume_unique=aperiodic)
        return start, end, docs_both

    n_days = feature_trajectories.shape[1]
    n_days_half = math.ceil(n_days / 2)
    documents = []
    event_trajectories, event_periods = create_events_trajectories(events, feature_trajectories, dps)
    for i, (event, event_trajectory, event_period) in enumerate(zip(events, event_trajectories, event_periods)):
        is_aperiodic = event_period > n_days_half
        if is_aperiodic:
            burst_loc, burst_scale = estimate_distribution_aperiodic(event_trajectory)
            burst_start, burst_end, burst_docs = process_burst(burst_loc, burst_scale, is_aperiodic)
            # Cap the document count so a single event cannot dominate the output.
            if len(burst_docs) > 300:
                burst_docs = burst_docs[:300]
            documents.append([(burst_start, burst_end, burst_docs)])
            logging.info('Processed aperiodic event %d consisting of %d documents.', i, len(burst_docs))
        else:
            event_parameters = estimate_distribution_periodic(event_trajectory, event_period)
            event_bursts = []
            num_docs = 0
            # Sort the bursts by their location from stream start to end.
            for burst_loc, burst_scale in sorted(event_parameters, key=lambda item: item[0]):
                burst_start, burst_end, burst_docs = process_burst(burst_loc, burst_scale, is_aperiodic)
                if len(burst_docs) > 150:
                    burst_docs = burst_docs[:150]
                event_bursts.append((burst_start, burst_end, burst_docs))
                num_docs += len(burst_docs)
            documents.append(event_bursts)
            logging.info('Processed periodic event %d consisting of %d documents.', i, num_docs)
    return documents
def keywords2docids_wmd(doc_fetcher, events, feature_trajectories, dps, dtd_matrix, w2v_model, id2word, k=None):
    """
    Convert keyword events into document events using Word Mover's similarity.

    For every event, the documents published within its bursty period(s) are
    collected and then queried with the event keywords; the `k` documents most
    similar to the query are kept.

    :param doc_fetcher: document fetcher to use for document streaming
    :param events: list of events which in turn are lists of their keyword indices
    :param feature_trajectories: matrix of feature trajectories
    :param dps: dominant power spectra of the processed features
    :param dtd_matrix: document-to-day matrix
    :param w2v_model: trained Word2Vec model (or Doc2Vec model with learned word embeddings)
    :param id2word: mapping of word IDs to the actual words
    :param k: documents kept per event, or `None` for the square root of the burst size
    :return: per event, a list of (burst_start, burst_end, burst_documents) tuples
        (1 tuple for an aperiodic event, stream_length / event_period tuples for a
        periodic one); each document is a (document_id, wm_similarity) pair and the
        documents of every burst are sorted by similarity in descending order
    """
    total_timer = time()
    # Step 1: Assemble a list of event bursty periods and IDs of all documents within each period.
    step_timer = time()
    logging.info('Assembling documents of all bursty periods.')
    event_bursts_docids = _describe_event_bursts(events, feature_trajectories, dps, dtd_matrix)
    logging.info('Documents assembled in %fs.', time() - step_timer)
    # Step 2: Convert the document IDs to actual documents.
    step_timer = time()
    logging.info('Converting document IDs to documents.')
    event_bursts_documents = _docids2headlines(event_bursts_docids, doc_fetcher)
    logging.info('Documents converted in %fs.', time() - step_timer)
    # Step 3: Get the documents concerning each event using WM distance.
    step_timer = time()
    logging.info('Calculating document similarities.')
    event_bursts_out = _get_relevant_documents(events, event_bursts_documents, w2v_model, id2word, k)
    logging.info('Similarities computed in %fs.', time() - step_timer)
    logging.info('Document representation computed in %fs total.', time() - total_timer)
    return event_bursts_out
def _get_burst_docids(dtd_matrix, burst_loc, burst_scale):
    """
    Collect every document published during one burst window, regardless of whether
    it actually concerns any event.

    :param dtd_matrix: document-to-day matrix
    :param burst_loc: location (center day) of the burst
    :param burst_scale: scale (half-width) of the burst
    :return: start day of the burst, end day of the burst, indices of documents within it
    """
    last_day = dtd_matrix.shape[1] - 1
    # Clamp the window to the observed stream: it must not leave [0, last_day].
    burst_start = max(math.floor(burst_loc - burst_scale), 0)
    burst_end = min(math.ceil(burst_loc + burst_scale), last_day)
    # Every document row has exactly one nonzero column (its publication day).
    burst_docs = dtd_matrix[:, burst_start:burst_end + 1].nonzero()[0]
    return burst_start, burst_end, burst_docs
def _describe_event_bursts(events, feature_trajectories, dps, dtd_matrix):
    """
    Describe every event by its burst(s): one burst for an aperiodic event,
    `stream_length / periodicity` bursts for a periodic one. Each burst is a
    (burst_start, burst_end, burst_document_ids) tuple.

    :param events: list of events which in turn are lists of their keyword indices
    :param feature_trajectories: matrix of feature trajectories
    :param dps: dominant power spectra of the processed features
    :param dtd_matrix: document-to-day matrix
    :return: burst description of the events
    """
    aperiodic_threshold = math.ceil(feature_trajectories.shape[1] / 2)
    trajectories, periods = create_events_trajectories(events, feature_trajectories, dps)
    events_out = []
    for trajectory, period in zip(trajectories, periods):
        if period > aperiodic_threshold:
            # Aperiodic event: a single Gaussian burst.
            loc, scale = estimate_distribution_aperiodic(trajectory)
            events_out.append([_get_burst_docids(dtd_matrix, loc, scale)])
        else:
            # Periodic event: one burst per mixture component, ordered from
            # stream start to end.
            components = estimate_distribution_periodic(trajectory, period)
            bursts = [_get_burst_docids(dtd_matrix, loc, scale)
                      for loc, scale in sorted(components, key=lambda item: item[0])]
            events_out.append(bursts)
    return events_out
def _docids2headlines(event_bursts_docids, fetcher):
    """
    Replace document IDs in a burst description with (ID, headline) pairs.

    All IDs across all events are gathered first so the document collection is
    streamed only once, then redistributed back to the individual bursts.

    :param event_bursts_docids: events in the burst description format with document IDs
    :param fetcher: data fetcher to load the document headlines with
    :return: burst description with documents given as (document ID, headline) pairs
    """
    started = time()
    logging.info('Retrieving documents for %d events.', len(event_bursts_docids))
    # Flatten all document IDs of all events into one list for a single pass.
    all_ids = [doc_id
               for event in event_bursts_docids
               for _, _, burst_docs in event
               for doc_id in burst_docs]
    headline_of = _load_headlines(all_ids, fetcher)
    # Rebuild the same nested structure, attaching the headline to each ID.
    events_out = [[(start, end, [(doc_id, headline_of[doc_id]) for doc_id in burst_docs])
                   for start, end, burst_docs in event]
                  for event in event_bursts_docids]
    logging.info('Retrieved event documents in %fs.', time() - started)
    return events_out
def _load_headlines(docids, fetcher):
    """
    Map the given document IDs to the headlines streamed from `fetcher`.

    The fetcher is switched to names-only mode for the duration of the scan and
    restored afterwards.

    :param docids: IDs of the documents to load (must be non-empty)
    :param fetcher: data fetcher to load the document headlines with
    :return: dictionary mapping each given document ID to its headline
    """
    if len(docids) == 0:
        raise ValueError('No document IDs given.')
    previous_mode = fetcher.names_only
    fetcher.names_only = True
    wanted = sorted(set(docids))
    headlines = []
    cursor = 0
    for current_id, document in enumerate(fetcher):
        if current_id != wanted[cursor]:
            continue
        headlines.append(document.name)
        cursor += 1
        if cursor == len(wanted):
            # Stop streaming as soon as every requested ID has been seen.
            break
    fetcher.names_only = previous_mode
    return dict(zip(wanted, headlines))
def _query_corpus_wmd(corpus, keywords, w2v_model, k):
    """
    Rank the corpus against an event keyword query using WMD-based similarity.

    :param corpus: list of (document ID, headline) pairs, each headline a list of strings
    :param keywords: keyword representation of an event, a list of strings
    :param w2v_model: trained Word2Vec model
    :param k: number of hits to return; `None` means `round(sqrt(len(corpus)))`
    :return: list of (document ID, similarity) tuples of length `k`
    """
    num_best = round(math.sqrt(len(corpus))) if k is None else k
    # Corpus entries are (doc_id, doc_headline) pairs; index only the headlines.
    headlines = [headline for _, headline in corpus]
    index = gensim.similarities.WmdSimilarity(headlines, w2v_model=w2v_model, num_best=num_best)
    return index[keywords]
def _get_relevant_documents(events, event_bursts_headlines, w2v_model, id2word, k):
    """
    Retrieve the IDs of documents relevant to the given events. This is the function employing a measure of semantic
    similarity to the corpus of documents published within the bursty periods retrieved previously.

    :param events: list of events which in turn are lists of their keyword indices
    :param event_bursts_headlines: burst description of the events with document headlines
    :param w2v_model: trained Word2Vec model
    :param id2word: mapping of word IDs to the actual words
    :param k: number of most similar documents to return; if None, it will be set to `round(sqrt(len(corpus)))`
    :return: burst description of the events with document IDs, only those documents relevant to the events will be here
    """
    event_bursts_out = []
    for event_id, (event, bursts) in enumerate(zip(events, event_bursts_headlines)):
        bursts_out = []
        event_keywords = [id2word[keyword_id] for keyword_id in event]
        num_docs = 0
        # Track the best/worst match over all bursts of this event, for logging only.
        most_similar_headline, top_similarity = None, -math.inf
        least_similar_headline, bot_similarity = None, math.inf
        logging.disable(logging.INFO)  # Gensim loggers are super chatty.
        for burst in bursts:
            burst_start, burst_end, burst_headlines = burst
            # Local IDs with respect to the burst.
            event_burst_docids_local = _query_corpus_wmd(burst_headlines, event_keywords, w2v_model, k)
            # Global IDs with respect to the whole document collection.
            event_burst_docids = [(burst_headlines[doc_id][0], doc_sim) for doc_id, doc_sim in event_burst_docids_local]
            bursts_out.append((burst_start, burst_end, event_burst_docids))
            num_docs += len(event_burst_docids)
            # Query results are ordered by similarity, so the extremes sit at the ends.
            if event_burst_docids_local[0][1] > top_similarity:
                top_id, top_similarity = event_burst_docids_local[0]
                most_similar_headline = burst_headlines[top_id][1]
            if event_burst_docids_local[-1][1] < bot_similarity:
                bot_id, bot_similarity = event_burst_docids_local[-1]
                least_similar_headline = burst_headlines[bot_id][1]
        event_bursts_out.append(bursts_out)
        logging.disable(logging.NOTSET)  # Re-enable logging.
        # Truncate long keyword lists in the log line for readability.
        event_desc = ', '.join(event_keywords) if len(event_keywords) <= 6 else ', '.join(event_keywords[:6]) + '...'
        logging.info('Processed event %d [%s] consisting of %d documents.', event_id, event_desc, num_docs)
        logging.info('Most similar headline: "%s" (sim: %f), least similar headline: "%s" (sim: %f)',
                     ', '.join(most_similar_headline), top_similarity, ', '.join(least_similar_headline),
                     bot_similarity)
    return event_bursts_out
| 45.394004 | 120 | 0.707958 | import logging
import math
from time import time
import gensim
import numpy as np
import sklearn.mixture as gmm
from scipy.optimize import curve_fit
from scipy.signal import periodogram
from scipy.stats import norm
WINDOW = 7 # Length of the window to use when computing the moving average.
def _moving_average(vector, window):
    """
    Compute the moving average along the given vector using a window of the given length.

    :param vector: the vector whose moving average to compute
    :param window: length of the window to use in the computation
    :return: moving average of length len(vector) - window + 1
    """
    # Uniform kernel; 'valid' mode keeps only positions fully covered by the window.
    weights = np.ones(window) / window
    return np.convolve(vector, weights, 'valid')
def spectral_analysis(vectors):
    """
    Compute the periodogram, dominant power spectra (DPS) and dominant periods (DP) of the given feature trajectories.

    :param vectors: matrix whose rows to analyze
    :return: DPS, DP
    """
    t = time()
    n_features, n_days = vectors.shape
    freqs, pgram = periodogram(vectors)
    # Frequency 0 maps to an infinite period; the division warning is deliberately silenced.
    with np.errstate(divide='ignore'):
        periods = np.tile(1 / freqs, (n_features, 1))
    # Per row, the strongest spectral component gives the DPS; its period gives the DP.
    dps_indices = np.argmax(pgram, axis=1)
    feature_indices = np.arange(n_features)
    dps = pgram[feature_indices, dps_indices]
    dp = periods[feature_indices, dps_indices].astype(int)
    logging.info('Performed spectral analysis of %d trajectories in %fs.', n_features, time() - t)
    logging.info('Frequencies: %s, %s', str(freqs.shape), str(freqs.dtype))
    logging.info('Periodogram: %s, %s', str(pgram.shape), str(pgram.dtype))
    logging.info('DPS: %s, %s', str(dps.shape), str(dps.dtype))
    logging.info('DP: %s, %s', str(dp.shape), str(dp.dtype))
    return dps, dp
def estimate_distribution_aperiodic(event_trajectory):
    """
    Model the event trajectory by a Gaussian curve. The parameters (mean and standard deviation) are estimated
    using non-linear least squares.

    :param event_trajectory: trajectory of the event
    :return: mean and standard deviation of the model
    """
    def gaussian_curve(value, loc, scale):
        # Model function handed to curve_fit: a Gaussian PDF parameterized by (loc, scale).
        return norm.pdf(value, loc=loc, scale=scale)

    n_days = len(event_trajectory)
    ma = _moving_average(event_trajectory, WINDOW)
    ma_mean = np.mean(ma)
    ma_std = np.std(ma)
    # Days whose value exceeds mean + std of the smoothed trajectory count as bursty.
    cutoff = ma_mean + ma_std
    peak_indices = np.where(event_trajectory > cutoff)
    peak_days = peak_indices[0]
    peaks = event_trajectory[peak_indices].reshape(-1)
    peaks /= np.sum(peaks)  # Normalize the trajectory so it can be interpreted as probability.
    # Initial guess for the parameters is mu ~ center of the peak period, sigma ~ quarter of the peak period length.
    popt, pcov = curve_fit(gaussian_curve, peak_days, peaks, p0=(peak_days[len(peak_days) // 2], len(peak_days) / 4),
                           bounds=(0.0, n_days))
    return popt  # Mean, Std
def estimate_distribution_periodic(event_trajectory, event_period):
    """
    Model the event trajectory by a mixture of (stream_length / dominant_period) Cauchy distributions, whose
    shape tends to represent the peaks more closely than Gaussians due to steeper peaks and fatter tails.
    Cauchy distribution parameters are the location (GMM means are used) and half width at half maximum, which
    is computed from GMM standard deviations as HWHM = sqrt(2 * ln(2)) * sigma.

    :param event_trajectory: trajectory of the event
    :param event_period: dominant period of the event
    :return: [(loc, hwhm)] for each burst in the event -- length = stream_length / dominant_period
    """
    n_days = len(event_trajectory)
    days = np.arange(n_days).reshape(-1, 1)
    ma = _moving_average(event_trajectory.reshape(-1), WINDOW)
    ma_mean = np.mean(ma)
    ma_std = np.std(ma)
    # Only days whose value exceeds mean + std of the smoothed signal count as bursty.
    cutoff = ma_mean + ma_std
    observations = np.hstack((days, event_trajectory.reshape(-1, 1)))
    observations = observations[observations[:, 1] > cutoff, :]
    # Sometimes the cutoff is too harsh and we end up with less observations than components. In that case,
    # reduce the number of components to the number of features, since not all peaks were bursty enough.
    n_components = min(math.floor(n_days / event_period), len(observations))
    g = gmm.GaussianMixture(n_components=int(n_components), covariance_type='diag', init_params='kmeans',
                            random_state=1)
    g.fit(observations)
    e_parameters = []
    # Extract parameters.
    for mean_, cov_ in zip(g.means_, g.covariances_):
        loc = mean_[0]
        # HWHM of a Cauchy matched to the Gaussian component: sqrt(2 ln 2) * sigma.
        hwhm = np.sqrt(2 * np.log(2)) * np.sqrt(cov_[0])
        e_parameters.append((loc, hwhm))
    return e_parameters
def create_events_trajectories(events, feature_trajectories, dps):
    """
    Create a trajectory for each given event as the average of trajectories of its features weighted by their DPS.
    Also return the dominant period of the event, calculated using spectral analysis.

    :param events: detected events (list of arrays of their feature indices)
    :param feature_trajectories: matrix of feature trajectories as row vectors
    :param dps: dominant power spectra of the processed features
    :return: trajectories of the given events and their dominant periods
    """
    event_trajectories = np.empty((len(events), feature_trajectories.shape[1]), dtype=float)
    for i, event in enumerate(events):
        e_feature_trajectories = feature_trajectories[event]
        e_power_spectra = dps[event]
        # Weighted average of member trajectories, weights being the member DPS values.
        e_trajectory = (e_feature_trajectories.T @ e_power_spectra) / np.sum(e_power_spectra)
        event_trajectories[i] = e_trajectory
    _, event_dominant_periods = spectral_analysis(event_trajectories)
    return event_trajectories, event_dominant_periods
def keywords2docids_simple(events, feature_trajectories, dps, dtd_matrix, bow_matrix):
    """
    Convert the keyword representation of events to document representation. Do this in a simple manner by using all
    documents published in an event bursty period containing all its keywords. Although this punishes having too many
    distinct keywords, it may have some information value, e.g. events with an empty document set are likely garbage.
    Would work only on lemmatized texts, obviously.
    :param events: list of events which in turn are lists of their keyword indices
    :param feature_trajectories: matrix of feature trajectories as row vectors
    :param dps: dominant power spectra of the processed features
    :param dtd_matrix: document-to-day matrix
    :param bow_matrix: bag-of-words matrix; sparse (`getnnz` is called on its column slices)
    :return: list of tuples (burst_start, burst_end, burst_documents) for all bursts of each event (that is, each inner
        list represents an event and contains 1 tuple for every aperiodic event and T tuples for every periodic event
        with T = stream_length / event_period
    """
    def process_burst(loc, scale, aperiodic):
        # Closure over `n_days` and the current `event` bound by the loop below.
        # If an event burst starts right at day 0, this would get negative.
        start = max(math.floor(loc - scale), 0)
        # If an event burst ends at stream length, this would exceed the boundary.
        end = min(math.ceil(loc + scale), n_days - 1)
        # All documents published on burst days. There is exactly one '1' in every row.
        burst_docs_all, _ = dtd_matrix[:, start:end + 1].nonzero()
        # Documents containing at least one of the event word features.
        docs_either_words = bow_matrix[:, event]
        # Documents containing all of the event word features.
        docs_words = np.where(docs_either_words.getnnz(axis=1) == len(event))[0]
        # Documents both published on burst days and containing all event word features. Do not assume unique for
        # periodic events, as some bursty periods may overlap.
        docs_both = np.intersect1d(burst_docs_all, docs_words, assume_unique=aperiodic)
        return start, end, docs_both
    n_days = feature_trajectories.shape[1]
    # An event is aperiodic when its dominant period exceeds half the stream length.
    n_days_half = math.ceil(n_days / 2)
    documents = []
    event_trajectories, event_periods = create_events_trajectories(events, feature_trajectories, dps)
    for i, (event, event_trajectory, event_period) in enumerate(zip(events, event_trajectories, event_periods)):
        is_aperiodic = event_period > n_days_half
        if is_aperiodic:
            # Aperiodic events have a single burst; cap its document set at 300 documents.
            burst_loc, burst_scale = estimate_distribution_aperiodic(event_trajectory)
            burst_start, burst_end, burst_docs = process_burst(burst_loc, burst_scale, is_aperiodic)
            if len(burst_docs) > 300:
                burst_docs = burst_docs[:300]
            documents.append([(burst_start, burst_end, burst_docs)])
            logging.info('Processed aperiodic event %d consisting of %d documents.', i, len(burst_docs))
        else:
            # Periodic events have one burst per period; cap each burst at 150 documents.
            event_parameters = estimate_distribution_periodic(event_trajectory, event_period)
            event_bursts = []
            num_docs = 0
            # Process the bursts ordered by their location from stream start to end.
            for burst_loc, burst_scale in sorted(event_parameters, key=lambda item: item[0]):
                burst_start, burst_end, burst_docs = process_burst(burst_loc, burst_scale, is_aperiodic)
                if len(burst_docs) > 150:
                    burst_docs = burst_docs[:150]
                event_bursts.append((burst_start, burst_end, burst_docs))
                num_docs += len(burst_docs)
            documents.append(event_bursts)
            logging.info('Processed periodic event %d consisting of %d documents.', i, num_docs)
    return documents
def keywords2docids_wmd(doc_fetcher, events, feature_trajectories, dps, dtd_matrix, w2v_model, id2word, k=None):
    """
    Convert the keyword representation of events into a document representation using Word Mover's
    Distance. For every event: (1) collect the IDs of all documents published within its bursty
    period(s), (2) load the corresponding document headlines, and (3) rank those documents against
    the event keywords by Word Mover's similarity, keeping the `k` best per burst.
    :param doc_fetcher: document fetcher to use for document streaming
    :param events: list of events which in turn are lists of their keyword indices
    :param feature_trajectories: matrix of feature trajectories
    :param dps: dominant power spectra of the processed features
    :param dtd_matrix: document-to-day matrix
    :param w2v_model: trained Word2Vec model (or Doc2Vec model with learned word embeddings)
    :param id2word: mapping of word IDs to the actual words
    :param k: number of most similar documents to keep per burst, or `None` to use the square root
        of the number of documents within the bursty period
    :return: list with one entry per event; each entry is a list of tuples
        (burst_start, burst_end, burst_documents) — one tuple for an aperiodic event and T tuples
        for a periodic event with T = stream_length / event_period. Each document is a pair
        (document_id, document_wm_similarity), sorted by similarity in descending order, so that
        further event cleaning can be performed based on the similarities.
    """
    overall_start = time()
    # Step 1: Assemble a list of event bursty periods and IDs of all documents within each period.
    stage_start = time()
    logging.info('Assembling documents of all bursty periods.')
    bursts_with_docids = _describe_event_bursts(events, feature_trajectories, dps, dtd_matrix)
    logging.info('Documents assembled in %fs.', time() - stage_start)
    # Step 2: Convert the document IDs to actual documents.
    stage_start = time()
    logging.info('Converting document IDs to documents.')
    bursts_with_headlines = _docids2headlines(bursts_with_docids, doc_fetcher)
    logging.info('Documents converted in %fs.', time() - stage_start)
    # Step 3: Get the documents concerning each event using WM distance.
    stage_start = time()
    logging.info('Calculating document similarities.')
    relevant_documents = _get_relevant_documents(events, bursts_with_headlines, w2v_model, id2word, k)
    logging.info('Similarities computed in %fs.', time() - stage_start)
    logging.info('Document representation computed in %fs total.', time() - overall_start)
    return relevant_documents
def _get_burst_docids(dtd_matrix, burst_loc, burst_scale):
"""
Given a burst and width of an event burst, retrieve all documents published within that burst, regardless of
whether they actually concern any event.
:param dtd_matrix: document-to-day matrix
:param burst_loc: location of the burst
:param burst_scale: scale of the burst
:return: start day of the burst, end day of the burst, indices of documents within the burst
"""
n_days = dtd_matrix.shape[1]
# If an event burst starts right at day 0, this would get negative.
burst_start = max(math.floor(burst_loc - burst_scale), 0)
# If an event burst ends at stream length, this would exceed the boundary.
burst_end = min(math.ceil(burst_loc + burst_scale), n_days - 1)
# All documents published on burst days. There is exactly one '1' in every row.
burst_docs, _ = dtd_matrix[:, burst_start:burst_end + 1].nonzero()
return burst_start, burst_end, burst_docs
def _describe_event_bursts(events, feature_trajectories, dps, dtd_matrix):
    """
    Retrieve the burst information of the given events. Each event is represented by a list of its
    burst descriptions: 1 burst for an aperiodic event, `stream_length / periodicity` bursts for a
    periodic one. Each burst is a tuple (burst_start, burst_end, burst_document_ids).
    :param events: list of events which in turn are lists of their keyword indices
    :param feature_trajectories: matrix of feature trajectories
    :param dps: dominant power spectra of the processed features
    :param dtd_matrix: document-to-day matrix
    :return: burst description of the events
    """
    # Periods longer than half the stream mark an event as aperiodic.
    half_stream = math.ceil(feature_trajectories.shape[1] / 2)
    event_trajectories, event_periods = create_events_trajectories(events, feature_trajectories, dps)
    descriptions = []
    for trajectory, period in zip(event_trajectories, event_periods):
        if period > half_stream:
            # Aperiodic event: a single burst.
            loc, scale = estimate_distribution_aperiodic(trajectory)
            descriptions.append([_get_burst_docids(dtd_matrix, loc, scale)])
        else:
            # Periodic event: one burst per period, ordered from stream start to end.
            params = estimate_distribution_periodic(trajectory, period)
            descriptions.append([_get_burst_docids(dtd_matrix, loc, scale)
                                 for loc, scale in sorted(params, key=lambda p: p[0])])
    return descriptions
def _docids2headlines(event_bursts_docids, fetcher):
    """
    Given the burst description of events with documents represented by their IDs, return the same
    structure with every document ID replaced by a tuple (document ID, document headline).
    :param event_bursts_docids: events in the burst description format with document IDs
    :param fetcher: data fetcher to load the document headlines with
    :return: burst description of the events with document IDs replaced by document IDs and headlines
    """
    start_time = time()
    logging.info('Retrieving documents for %d events.', len(event_bursts_docids))
    # Gather every document ID up front and load them in one pass, so the
    # underlying collection is streamed only once.
    all_ids = [doc_id
               for event in event_bursts_docids
               for _, _, burst_docs in event
               for doc_id in burst_docs]
    id2headline = _load_headlines(all_ids, fetcher)
    # Redistribute the loaded headlines back to the individual events and bursts.
    events_with_headlines = []
    for event in event_bursts_docids:
        rebuilt_bursts = []
        for burst_start, burst_end, burst_docs in event:
            docs_with_headlines = [(doc_id, id2headline[doc_id]) for doc_id in burst_docs]
            rebuilt_bursts.append((burst_start, burst_end, docs_with_headlines))
        events_with_headlines.append(rebuilt_bursts)
    logging.info('Retrieved event documents in %fs.', time() - start_time)
    return events_with_headlines
def _load_headlines(docids, fetcher):
"""
Load the headlines of documents from the `fetcher` with the given IDs.
:param docids: IDs of the documents to load
:param fetcher: data fetcher to load the document headlines with
:return: a dictionary mapping the given document IDs to their respective headlines
"""
if len(docids) == 0:
raise ValueError('No document IDs given.')
old_names_only = fetcher.names_only
fetcher.names_only = True
docids = list(sorted(set(docids)))
headlines = []
doc_pos = 0
for doc_id, document in enumerate(fetcher):
if doc_id == docids[doc_pos]:
headlines.append(document.name)
doc_pos += 1
if doc_pos == len(docids):
break
fetcher.names_only = old_names_only
return dict(zip(docids, headlines))
def _query_corpus_wmd(corpus, keywords, w2v_model, k):
    """
    Query the given corpus with an event's keywords and return the `k` documents most similar to
    the query under WMD-based similarity.
    :param corpus: corpus of documents represented by a list of tuples (document ID, document
        headline), each headline being a list of strings
    :param keywords: keywords representation of an event, a list of strings
    :param w2v_model: trained Word2Vec model
    :param k: number of most similar documents to return; if None, it will be set to
        `round(sqrt(len(corpus)))`
    :return: list of tuples (document ID, document similarity) of length `k`
    """
    num_best = round(math.sqrt(len(corpus))) if k is None else k
    # Only the headline part of each (doc_id, doc_headline) pair is indexed.
    headlines = [headline for _, headline in corpus]
    similarity_index = gensim.similarities.WmdSimilarity(headlines, w2v_model=w2v_model, num_best=num_best)
    return similarity_index[keywords]
def _get_relevant_documents(events, event_bursts_headlines, w2v_model, id2word, k):
    """
    Retrieve the IDs of documents relevant to the given events. This is the function employing a measure of semantic
    similarity to the corpus of documents published within the bursty periods retrieved previously.
    :param events: list of events which in turn are lists of their keyword indices
    :param event_bursts_headlines: burst description of the events with document headlines
    :param w2v_model: trained Word2Vec model
    :param id2word: mapping of word IDs to the actual words
    :param k: number of most similar documents to return; if None, it will be set to `round(sqrt(len(corpus)))`
    :return: burst description of the events with document IDs, only those documents relevant to the events will be here
    """
    event_bursts_out = []
    for event_id, (event, bursts) in enumerate(zip(events, event_bursts_headlines)):
        bursts_out = []
        # Translate the event's keyword IDs into words, forming the textual query.
        event_keywords = [id2word[keyword_id] for keyword_id in event]
        num_docs = 0
        # Extreme similarities across all bursts of this event; used for logging only.
        most_similar_headline, top_similarity = None, -math.inf
        least_similar_headline, bot_similarity = None, math.inf
        # NOTE(review): logging.disable mutes ALL loggers at INFO level process-wide,
        # not only Gensim's, until it is reset below.
        logging.disable(logging.INFO)  # Gensim loggers are super chatty.
        for burst in bursts:
            burst_start, burst_end, burst_headlines = burst
            # Local IDs with respect to the burst.
            event_burst_docids_local = _query_corpus_wmd(burst_headlines, event_keywords, w2v_model, k)
            # Global IDs with respect to the whole document collection.
            event_burst_docids = [(burst_headlines[doc_id][0], doc_sim) for doc_id, doc_sim in event_burst_docids_local]
            bursts_out.append((burst_start, burst_end, event_burst_docids))
            num_docs += len(event_burst_docids)
            # Results are ordered by similarity, so [0] is the best hit and [-1] the worst.
            # NOTE(review): assumes the query returns at least one hit; an empty result
            # would raise IndexError here — confirm _query_corpus_wmd cannot return [].
            if event_burst_docids_local[0][1] > top_similarity:
                top_id, top_similarity = event_burst_docids_local[0]
                most_similar_headline = burst_headlines[top_id][1]
            if event_burst_docids_local[-1][1] < bot_similarity:
                bot_id, bot_similarity = event_burst_docids_local[-1]
                least_similar_headline = burst_headlines[bot_id][1]
        event_bursts_out.append(bursts_out)
        logging.disable(logging.NOTSET)  # Re-enable logging.
        # Abbreviate long keyword lists in the log output.
        event_desc = ', '.join(event_keywords) if len(event_keywords) <= 6 else ', '.join(event_keywords[:6]) + '...'
        logging.info('Processed event %d [%s] consisting of %d documents.', event_id, event_desc, num_docs)
        # NOTE(review): if `bursts` is empty, the headlines are still None here and
        # ', '.join(None) would raise TypeError — verify every event has >= 1 burst.
        logging.info('Most similar headline: "%s" (sim: %f), least similar headline: "%s" (sim: %f)',
                     ', '.join(most_similar_headline), top_similarity, ', '.join(least_similar_headline),
                     bot_similarity)
    return event_bursts_out
| 1,078 | 0 | 54 |
a3abbd327c4401a70daef385d5cd04ab4f5d4d1d | 1,954 | py | Python | test/resources/catalog/test_lookup.py | shuichiro-makigaki/yandeley-python-sdk | 2c15145d11ddfdf33a94da6c846afdd13f310b54 | [
"Apache-2.0"
] | null | null | null | test/resources/catalog/test_lookup.py | shuichiro-makigaki/yandeley-python-sdk | 2c15145d11ddfdf33a94da6c846afdd13f310b54 | [
"Apache-2.0"
] | null | null | null | test/resources/catalog/test_lookup.py | shuichiro-makigaki/yandeley-python-sdk | 2c15145d11ddfdf33a94da6c846afdd13f310b54 | [
"Apache-2.0"
] | null | null | null | import pytest
from yandeley.exception import MendeleyApiException
from test import get_client_credentials_session, cassette
from test.resources.catalog import assert_core_view, assert_all_view
| 31.015873 | 100 | 0.681679 | import pytest
from yandeley.exception import MendeleyApiException
from test import get_client_credentials_session, cassette
from test.resources.catalog import assert_core_view, assert_all_view
def test_should_lookup_by_metadata():
    """Catalog lookup by title/year/source returns a scored document in the core view."""
    session = get_client_credentials_session()
    # Replay the recorded HTTP interaction instead of hitting the live API.
    with cassette('fixtures/resources/catalog/lookup/lookup_by_metadata.yaml'):
        doc = session.catalog.lookup(
            title='Changes in tree reproductive traits reduce functional diversity in a fragmented '
                  'Atlantic forest landscape',
            year=2007,
            source='PLoS ONE'
        )
        # Metadata lookup is fuzzy, hence a match score below 100.
        assert doc.score == 91
        assert_core_view(doc)
def test_should_lookup_by_metadata_all_view():
    """Catalog lookup with view='all' returns the full set of document fields."""
    session = get_client_credentials_session()
    # Replay the recorded HTTP interaction instead of hitting the live API.
    with cassette('fixtures/resources/catalog/lookup/lookup_by_metadata_all_view.yaml'):
        doc = session.catalog.lookup(
            title='Changes in tree reproductive traits reduce functional diversity in a fragmented '
                  'Atlantic forest landscape',
            year=2007,
            source='PLoS ONE',
            view='all'
        )
        # Metadata lookup is fuzzy, hence a match score below 100.
        assert doc.score == 91
        assert_all_view(doc)
def test_should_lookup_by_doi():
    """DOI lookup is an exact match (score 100) and returns the core view."""
    session = get_client_credentials_session()
    # Replay the recorded HTTP interaction instead of hitting the live API.
    with cassette('fixtures/resources/catalog/lookup/lookup_by_doi.yaml'):
        doc = session.catalog.lookup(doi='10.1371/journal.pone.0000908')
        assert doc.score == 100
        assert_core_view(doc)
def test_should_raise_on_not_found():
    """Looking up a nonexistent document raises MendeleyApiException with HTTP 404."""
    session = get_client_credentials_session()
    # Replay the recorded HTTP interaction; the lookup is expected to raise.
    with cassette('fixtures/resources/catalog/lookup/not_found.yaml'), \
            pytest.raises(MendeleyApiException) as ex_info:
        # The assertions that previously followed this call inside the `raises`
        # block were dead code: if lookup() raises (as required for the test to
        # pass) they never run, and if it doesn't, pytest.raises fails first.
        session.catalog.lookup(
            title='Underwater basket weaving',
            authors='Piers Bursill-Hall'
        )
    ex = ex_info.value
    assert ex.status == 404
| 1,665 | 0 | 92 |
c49f82e9562bea586c02fe5798f0c1cd2f88b4fe | 674 | py | Python | nudgebot/thirdparty/github/organization.py | gshefer/Nudgebot | 4cf6cf1fe975a9002299f0460873c0f21bc8d414 | [
"MIT"
] | 3 | 2018-04-03T14:34:39.000Z | 2018-04-26T12:18:49.000Z | nudgebot/thirdparty/github/organization.py | gshefer/Nudgebot | 4cf6cf1fe975a9002299f0460873c0f21bc8d414 | [
"MIT"
] | null | null | null | nudgebot/thirdparty/github/organization.py | gshefer/Nudgebot | 4cf6cf1fe975a9002299f0460873c0f21bc8d414 | [
"MIT"
] | null | null | null | from cached_property import cached_property
from github.Organization import Organization as PyGithubOrganization
from nudgebot.thirdparty.github.base import PyGithubObjectWrapper, GithubScope
| 26.96 | 78 | 0.728487 | from cached_property import cached_property
from github.Organization import Organization as PyGithubOrganization
from nudgebot.thirdparty.github.base import PyGithubObjectWrapper, GithubScope
class Organization(PyGithubObjectWrapper, GithubScope):
    """Wrapper around PyGithub's Organization object that also serves as a Github scope."""
    # The PyGithub class this wrapper delegates to.
    PyGithubClass = PyGithubOrganization
    # Query keys that uniquely identify this scope.
    primary_keys = ['organization']
    @classmethod
    def instantiate(cls, name):
        """Fetch the organization called `name` via the API client and wrap it."""
        return cls(cls.Endpoint.client.get_organization(name))
    @classmethod
    def init_by_keys(cls, **query):
        """Build an instance from a primary-key query (expects an 'organization' key)."""
        return cls.instantiate(name=query.get('organization'))
    @cached_property
    def query(self)->dict:
        """Primary-key query identifying this organization.

        NOTE(review): `self.name` is presumably delegated to the wrapped PyGithub
        object by PyGithubObjectWrapper — confirm in the base class.
        """
        return {
            'organization': self.name,
        }
| 209 | 247 | 23 |
56999679bfc622fa996318420e273153a071ad88 | 2,578 | py | Python | people/migrations/0002_interpersonalrelationship.py | SNFernandes24/church-ims | 944b7e65e926276adfe376ace01cf0adf135b954 | [
"MIT"
] | 1 | 2021-09-11T17:22:37.000Z | 2021-09-11T17:22:37.000Z | people/migrations/0002_interpersonalrelationship.py | SNFernandes24/church-ims | 944b7e65e926276adfe376ace01cf0adf135b954 | [
"MIT"
] | 39 | 2021-06-26T02:01:37.000Z | 2021-07-14T17:11:53.000Z | people/migrations/0002_interpersonalrelationship.py | SNFernandes24/church-ims | 944b7e65e926276adfe376ace01cf0adf135b954 | [
"MIT"
] | 2 | 2021-07-19T08:00:58.000Z | 2022-02-05T16:38:02.000Z | # Generated by Django 3.2.7 on 2021-10-15 12:35
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
| 33.051282 | 84 | 0.396043 | # Generated by Django 3.2.7 on 2021-10-15 12:35
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("people", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="InterpersonalRelationship",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"relation",
models.CharField(
choices=[
("R", "Romantic"),
("M", "Marital"),
("PC", "Parent-child"),
("S", "Sibling"),
],
help_text="How the person and the relative are associated.",
max_length=2,
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("last_modified", models.DateTimeField(auto_now=True)),
(
"created_by",
models.ForeignKey(
help_text="The user who created this record.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
(
"person",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="relationships",
to="people.person",
),
),
(
"relative",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="reverse_relationships",
to="people.person",
),
),
],
options={
"db_table": "people_relationship",
"ordering": ["person__username"],
},
),
]
| 0 | 2,385 | 23 |
2ccd0612fa94ede1dc1cddb23fd698b50e0cc848 | 1,084 | py | Python | pipe/src/api/auth.py | dfrnks/power-bi-publish | b57ddae47d717cc48f40ff5142280386aef7dab6 | [
"Apache-2.0"
] | null | null | null | pipe/src/api/auth.py | dfrnks/power-bi-publish | b57ddae47d717cc48f40ff5142280386aef7dab6 | [
"Apache-2.0"
] | null | null | null | pipe/src/api/auth.py | dfrnks/power-bi-publish | b57ddae47d717cc48f40ff5142280386aef7dab6 | [
"Apache-2.0"
] | null | null | null | import requests
from bitbucket_pipes_toolkit import get_logger
logger = get_logger()
def getToken(client_id: str, client_secret: str, username: str, password: str) -> str:
    """
    Obtain an Azure AD access token for the Power BI REST API using the
    resource-owner password-credentials (ROPC) grant.

    :param client_id: Client ID of Auth Azure App
    :param client_secret: Client Secret of Auth Azure App
    :param username: Username for authenticator into Power BI Service
    :param password: Password of the username
    :return: The OAuth2 access token string.
    :raises Exception: If the token endpoint rejects the credentials (400/401)
        or returns any other non-success status.
    """
    url = "https://login.microsoftonline.com/common/oauth2/token"
    payload = {
        'grant_type': 'password',
        'scope': 'openid',
        'resource': 'https://analysis.windows.net/powerbi/api',
        'client_id': client_id,
        'client_secret': client_secret,
        'username': username,
        'password': password,
    }
    # Always set a timeout: requests.post without one can hang the pipeline
    # indefinitely if the endpoint is unreachable.
    response = requests.post(url, data=payload, timeout=60)
    if response.status_code == 200:
        return response.json()['access_token']
    elif response.status_code in (400, 401):
        # Azure AD puts a human-readable reason in 'error_description'.
        raise Exception(response.json()["error_description"])
    logger.error(response.text)
    raise Exception("Authentication failed!")
| 28.526316 | 86 | 0.670664 | import requests
from bitbucket_pipes_toolkit import get_logger
logger = get_logger()
def getToken(client_id: str, client_secret: str, username: str, password: str) -> str:
    """
    Obtain an Azure AD access token for the Power BI REST API using the
    resource-owner password-credentials (ROPC) grant.

    :param client_id: Client ID of Auth Azure App
    :param client_secret: Client Secret of Auth Azure App
    :param username: Username for authenticator into Power BI Service
    :param password: Password of the username
    :return: The OAuth2 access token string.
    :raises Exception: If the token endpoint rejects the credentials (400/401)
        or returns any other non-success status.
    """
    url = "https://login.microsoftonline.com/common/oauth2/token"
    payload = {
        'grant_type': 'password',
        'scope': 'openid',
        'resource': 'https://analysis.windows.net/powerbi/api',
        'client_id': client_id,
        'client_secret': client_secret,
        'username': username,
        'password': password,
    }
    # Always set a timeout: requests.post without one can hang the pipeline
    # indefinitely if the endpoint is unreachable.
    response = requests.post(url, data=payload, timeout=60)
    if response.status_code == 200:
        return response.json()['access_token']
    elif response.status_code in (400, 401):
        # Azure AD puts a human-readable reason in 'error_description'.
        raise Exception(response.json()["error_description"])
    logger.error(response.text)
    raise Exception("Authentication failed!")
| 0 | 0 | 0 |
c12cbbf3fb13f2b2f3f167615c7dc80e8c594a0e | 435 | py | Python | tf_keras_vis/utils/losses.py | luizsantos-1/tf-keras-vis | 4d26dc9c65270f738987b653d5956ea082c35c2b | [
"MIT"
] | 232 | 2019-12-20T13:03:03.000Z | 2022-03-30T11:56:22.000Z | tf_keras_vis/utils/losses.py | luizsantos-1/tf-keras-vis | 4d26dc9c65270f738987b653d5956ea082c35c2b | [
"MIT"
] | 63 | 2020-01-31T20:57:20.000Z | 2022-03-15T09:45:47.000Z | tf_keras_vis/utils/losses.py | luizsantos-1/tf-keras-vis | 4d26dc9c65270f738987b653d5956ea082c35c2b | [
"MIT"
] | 32 | 2020-01-31T07:55:08.000Z | 2022-03-01T15:46:26.000Z | import warnings
warnings.warn(('`tf_keras_vis.utils.losses` module is deprecated. '
'Please use `tf_keras_vis.utils.scores` instead.'), DeprecationWarning)
from .scores import BinaryScore as BinaryLoss # noqa: F401 E402
from .scores import CategoricalScore as CategoricalLoss # noqa: F401 E402
from .scores import InactiveScore as InactiveLoss # noqa: F401 E402
from .scores import Score as Loss # noqa: F401 E402
| 43.5 | 86 | 0.758621 | import warnings
warnings.warn(('`tf_keras_vis.utils.losses` module is deprecated. '
'Please use `tf_keras_vis.utils.scores` instead.'), DeprecationWarning)
from .scores import BinaryScore as BinaryLoss # noqa: F401 E402
from .scores import CategoricalScore as CategoricalLoss # noqa: F401 E402
from .scores import InactiveScore as InactiveLoss # noqa: F401 E402
from .scores import Score as Loss # noqa: F401 E402
| 0 | 0 | 0 |
365f60c075470937679c80b9a931ae24ff4d4ea9 | 2,107 | py | Python | tests/functional/test_home.py | tomaszkyc/timely | 3fc2953a4dc250b885683dea38892ade68a63cf2 | [
"MIT"
] | null | null | null | tests/functional/test_home.py | tomaszkyc/timely | 3fc2953a4dc250b885683dea38892ade68a63cf2 | [
"MIT"
] | null | null | null | tests/functional/test_home.py | tomaszkyc/timely | 3fc2953a4dc250b885683dea38892ade68a63cf2 | [
"MIT"
] | null | null | null | from flask import Flask
from flask.testing import FlaskClient
from app.auth.views import current_user
from app.models import User, db
from tests.conftest import login, logout
| 29.676056 | 82 | 0.695776 | from flask import Flask
from flask.testing import FlaskClient
from app.auth.views import current_user
from app.models import User, db
from tests.conftest import login, logout
def test_home_page(test_client: FlaskClient):
    """An anonymous request to the root URL renders the countdown landing page."""
    response = test_client.get('/')
    # The request succeeds and the page carries the expected headline.
    assert response
    assert response.status_code == 200
    assert b'Let\'s do some countdown' in response.data
def test_home_page_without_logging(test_client: FlaskClient):
    """Anonymous visitors get a sign-in link but no account/logout navigation."""
    response = test_client.get('/')
    assert response
    assert response.status_code == 200
    # Authenticated-only navigation must be absent for anonymous visitors...
    assert b'Logout' not in response.data
    assert b'Your account' not in response.data
    # ...while the sign-in entry point is offered.
    assert b'Sign in' in response.data
def test_home_page_with_logging(test_client: FlaskClient, user: User, app: Flask):
    """A logged-in user sees the account/logout navigation on the landing page."""
    # Persist the user so the login below can authenticate against it.
    with app.app_context():
        db.session.add(user)
        db.session.commit()
    response = login(test_client, user.email, 'password')
    assert response.status_code == 200
    # Authenticated-only navigation is rendered and the session user matches.
    assert b'Logout' in response.data
    assert b'Your account' in response.data
    assert current_user.email == user.email
def test_home_page_after_logout(user: User, app: Flask):
    """After logging out, authenticated-only content disappears again."""
    # Persist the user so the login below can authenticate against it.
    with app.app_context():
        db.session.add(user)
        db.session.commit()
    with app.test_client() as test_client:
        # Log in and immediately log out again.
        login(test_client, user.email, 'password')
        logout_response = logout(test_client)
        # The logout succeeds and authenticated-only content is gone.
        assert logout_response.status_code == 200
        assert b'Your account' not in logout_response.data
        assert current_user.email != user.email
| 1,835 | 0 | 92 |
7a0bde60bc197d7f0851b6e558656030d3db2816 | 5,254 | py | Python | foodwebviz/normalization.py | lpawluczuk/foodwebviz | 11dc6d49e33634ca074f597b9eef0f146c350bcf | [
"BSD-3-Clause"
] | 1 | 2022-03-08T13:53:26.000Z | 2022-03-08T13:53:26.000Z | foodwebviz/normalization.py | lpawluczuk/foodwebviz | 11dc6d49e33634ca074f597b9eef0f146c350bcf | [
"BSD-3-Clause"
] | 2 | 2021-11-15T14:05:09.000Z | 2022-02-19T10:27:57.000Z | foodwebviz/normalization.py | lpawluczuk/foodwebviz | 11dc6d49e33634ca074f597b9eef0f146c350bcf | [
"BSD-3-Clause"
] | null | null | null | '''Methods for foodweb's flow normalization.'''
import numpy as np
import networkx as nx
__all__ = [
'diet_normalization',
'log_normalization',
'donor_control_normalization',
'predator_control_normalization',
'mixed_control_normalization',
'tst_normalization'
]
def diet_normalization(foodweb_graph_view):
    '''In this normalization method, each weight is divided by node's diet.

    Diet is sum of all input weights, including external import.

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    # This helper was referenced below but missing from this copy of the code,
    # which made the function raise NameError; restored here.
    def get_node_diet(node):
        # Diet = total inflow into the node (sum of its incoming edge weights).
        return sum([x[2] for x in foodweb_graph_view.in_edges(node, data='weight')])

    nx.set_edge_attributes(foodweb_graph_view, {(e[0], e[1]): {'weight': e[2] / get_node_diet(e[1])}
                                                for e in foodweb_graph_view.edges(data='weight')})
    return foodweb_graph_view
def log_normalization(foodweb_graph_view):
    '''Replace every flow weight with its base-10 logarithm.

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    log_weights = {(source, target): {'weight': np.log10(weight)}
                   for source, target, weight in foodweb_graph_view.edges(data='weight')}
    nx.set_edge_attributes(foodweb_graph_view, log_weights)
    return foodweb_graph_view
def donor_control_normalization(foodweb_graph_view):
    '''Divide every flow weight by the biomass of its source ("from") node.

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    biomass = nx.get_node_attributes(foodweb_graph_view, "Biomass")
    normalized = {(source, target): {'weight': weight / biomass[source]}
                  for source, target, weight in foodweb_graph_view.edges(data='weight')}
    nx.set_edge_attributes(foodweb_graph_view, normalized)
    return foodweb_graph_view
def predator_control_normalization(foodweb_graph_view):
    '''Divide every flow weight by the biomass of its target ("to") node.

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    biomass = nx.get_node_attributes(foodweb_graph_view, "Biomass")
    normalized = {(source, target): {'weight': weight / biomass[target]}
                  for source, target, weight in foodweb_graph_view.edges(data='weight')}
    nx.set_edge_attributes(foodweb_graph_view, normalized)
    return foodweb_graph_view
def mixed_control_normalization(foodweb_graph_view):
    '''Set every weight to donor_control * predator_control.

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    biomass = nx.get_node_attributes(foodweb_graph_view, "Biomass")
    # Product of the donor-control and predator-control normalized weights.
    normalized = {(source, target): {'weight': (weight / biomass[source]) * (weight / biomass[target])}
                  for source, target, weight in foodweb_graph_view.edges(data='weight')}
    nx.set_edge_attributes(foodweb_graph_view, normalized)
    return foodweb_graph_view
def tst_normalization(foodweb_graph_view):
    '''Normalize the internal flows to the Total System Throughput (TST).

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    # TST = sum of all flow weights in the (sub)web.
    total_throughput = sum(weight for _, _, weight in foodweb_graph_view.edges(data='weight'))
    scaled = {(source, target): {'weight': weight / total_throughput}
              for source, target, weight in foodweb_graph_view.edges(data='weight')}
    nx.set_edge_attributes(foodweb_graph_view, scaled)
    return foodweb_graph_view
def normalization_factory(foodweb_graph_view, norm_type):
    '''Applies appropriate normalization method according to norm_type argument.

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb
    norm_type : string
        Represents normalization type to use.
        Available options are: 'donor_control', 'predator_control',
        'mixed_control', 'log', 'diet', 'TST', and 'linear' (no-op).
        Any unrecognized value also leaves the weights untouched.

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    # Dispatch table mapping a normalization name to its implementation.
    normalization_methods = {
        'donor_control': donor_control_normalization,
        'predator_control': predator_control_normalization,
        'mixed_control': mixed_control_normalization,
        'log': log_normalization,
        'diet': diet_normalization,
        'TST': tst_normalization
    }
    # 'linear' explicitly means "no normalization".
    if norm_type == 'linear':
        return foodweb_graph_view
    if norm_type in normalization_methods:
        return normalization_methods[norm_type](foodweb_graph_view)
return foodweb_graph_view | 32.036585 | 107 | 0.643319 | '''Methods for foodweb's flow normalization.'''
import numpy as np
import networkx as nx
__all__ = [
'diet_normalization',
'log_normalization',
'donor_control_normalization',
'predator_control_normalization',
'mixed_control_normalization',
'tst_normalization'
]
def diet_normalization(foodweb_graph_view):
'''In this normalization method, each weight is divided by node's diet.
Diet is sum of all input weights, inlcuding external import.
Parameters
----------
foodweb_graph_view : networkx.SubGraph
Graph View representing foodweb
Returns
-------
subgraph : networkx.SubGraph
Graph View representing normalized foodweb
'''
def get_node_diet(node):
return sum([x[2] for x in foodweb_graph_view.in_edges(node, data='weight')])
nx.set_edge_attributes(foodweb_graph_view, {(e[0], e[1]): {'weight': e[2] / get_node_diet(e[1])}
for e in foodweb_graph_view.edges(data='weight')})
return foodweb_graph_view
def log_normalization(foodweb_graph_view):
'''Normalized weigth is a logarithm of original weight.
Parameters
----------
foodweb_graph_view : networkx.SubGraph
Graph View representing foodweb
Returns
-------
subgraph : networkx.SubGraph
Graph View representing normalized foodweb
'''
nx.set_edge_attributes(foodweb_graph_view, {(e[0], e[1]): {'weight': np.log10(e[2])}
for e in foodweb_graph_view.edges(data='weight')})
return foodweb_graph_view
def donor_control_normalization(foodweb_graph_view):
'''Each weight is divided by biomass of the "from" node.
Parameters
----------
foodweb_graph_view : networkx.SubGraph
Graph View representing foodweb
Returns
-------
subgraph : networkx.SubGraph
Graph View representing normalized foodweb
'''
biomass = nx.get_node_attributes(foodweb_graph_view, "Biomass")
nx.set_edge_attributes(foodweb_graph_view, {(e[0], e[1]): {'weight': e[2] / biomass[e[0]]}
for e in foodweb_graph_view.edges(data='weight')})
return foodweb_graph_view
def predator_control_normalization(foodweb_graph_view):
    '''Each weight is divided by biomass of the "to" node.

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    biomass = nx.get_node_attributes(foodweb_graph_view, "Biomass")
    # Scale each flow by the biomass of its target (predator) node.
    normalized = {
        (src, dst): {'weight': weight / biomass[dst]}
        for src, dst, weight in foodweb_graph_view.edges(data='weight')
    }
    nx.set_edge_attributes(foodweb_graph_view, normalized)
    return foodweb_graph_view
def mixed_control_normalization(foodweb_graph_view):
    '''Each weight is equal to donor_control * predator_control.

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    biomass = nx.get_node_attributes(foodweb_graph_view, "Biomass")
    normalized = {}
    for src, dst, weight in foodweb_graph_view.edges(data='weight'):
        # Product of the donor-control and predator-control factors.
        donor_factor = weight / biomass[src]
        predator_factor = weight / biomass[dst]
        normalized[(src, dst)] = {'weight': donor_factor * predator_factor}
    nx.set_edge_attributes(foodweb_graph_view, normalized)
    return foodweb_graph_view
def tst_normalization(foodweb_graph_view):
    '''Function returning a list of internal flows normalized to TST.

    TST (Total System Throughput) is the sum of all edge weights.

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    # Generator expression: no need to materialize a list just to sum it.
    tst = sum(weight for _, _, weight in foodweb_graph_view.edges(data='weight'))
    nx.set_edge_attributes(
        foodweb_graph_view,
        {(src, dst): {'weight': weight / tst}
         for src, dst, weight in foodweb_graph_view.edges(data='weight')})
    return foodweb_graph_view
def normalization_factory(foodweb_graph_view, norm_type):
    '''Applies the appropriate normalization method according to norm_type.

    Parameters
    ----------
    foodweb_graph_view : networkx.SubGraph
        Graph View representing foodweb
    norm_type : string
        Represents normalization type to use.
        Available options are: 'donor_control', 'predator_control',
        'mixed_control', 'log', 'diet', 'TST', and 'linear' (no-op).
        Any other value leaves the graph unchanged.

    Returns
    -------
    subgraph : networkx.SubGraph
        Graph View representing normalized foodweb
    '''
    # Dispatch table: norm_type -> normalization function.
    normalization_methods = {
        'donor_control': donor_control_normalization,
        'predator_control': predator_control_normalization,
        'mixed_control': mixed_control_normalization,
        'log': log_normalization,
        'diet': diet_normalization,
        'TST': tst_normalization
    }
    # 'linear' is an explicit no-op; unknown types also fall through unchanged.
    if norm_type == 'linear':
        return foodweb_graph_view
    if norm_type in normalization_methods:
        return normalization_methods[norm_type](foodweb_graph_view)
    return foodweb_graph_view
7d2450975e46e1a7e021b1ac10446a7a4e4e5951 | 58,131 | py | Python | tests/pytests/unit/modules/file/test_file_line.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | tests/pytests/unit/modules/file/test_file_line.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | tests/pytests/unit/modules/file/test_file_line.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | """
Unit tests for file.line
"""
import logging
import os
import shutil
import pytest
import salt.config
import salt.loader
import salt.modules.cmdmod as cmdmod
import salt.modules.config as configmod
import salt.modules.file as filemod
import salt.utils.data
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from tests.support.mock import DEFAULT, MagicMock, mock_open, patch
log = logging.getLogger(__name__)
# NOTE(review): four bare ``@pytest.fixture`` decorator lines used to sit
# here, residue of fixtures whose bodies were removed.  Applied to a test
# function they would turn it into a (broken) fixture, so they are dropped.
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
def test_delete_line_in_empty_file(anyattr):
    """
    Tests that when calling file.line with ``mode=delete``,
    the function doesn't stack trace if the file is empty.
    Should return ``False``.
    See Issue #38438.
    """
    for mode in ["delete", "replace"]:
        _log = MagicMock()
        with patch("salt.utils.files.fopen", mock_open(read_data="")), patch(
            "os.stat", anyattr
        ), patch("salt.modules.file.log", _log):
            assert not filemod.line(
                "/dummy/path", content="foo", match="bar", mode=mode
            )
        # The warning is logged lazily (%-style); rebuild the message to
        # assert on its final text.
        warning_call = _log.warning.call_args_list[0][0]
        warning_log_msg = warning_call[0] % warning_call[1:]
        assert "Cannot find text to {}".format(mode) in warning_log_msg
@patch("os.path.realpath", MagicMock())
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_delete_no_match():
"""
Tests that when calling file.line with ``mode=delete``,
with not matching pattern to delete returns False
:return:
"""
file_content = os.linesep.join(
["file_roots:", " base:", " - /srv/salt", " - /srv/custom"]
)
match = "not matching"
for mode in ["delete", "replace"]:
files_fopen = mock_open(read_data=file_content)
with patch("salt.utils.files.fopen", files_fopen):
atomic_opener = mock_open()
with patch("salt.utils.atomicfile.atomic_open", atomic_opener):
assert not filemod.line("foo", content="foo", match=match, mode=mode)
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
def test_line_modecheck_failure():
"""
Test for file.line for empty or wrong mode.
Calls unknown or empty mode and expects failure.
:return:
"""
for mode, err_msg in [
(None, "How to process the file"),
("nonsense", "Unknown mode"),
]:
with pytest.raises(CommandExecutionError) as exc_info:
filemod.line("foo", mode=mode)
assert err_msg in str(exc_info.value)
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
def test_line_no_content():
"""
Test for file.line for an empty content when not deleting anything.
:return:
"""
for mode in ["insert", "ensure", "replace"]:
with pytest.raises(CommandExecutionError) as exc_info:
filemod.line("foo", mode=mode)
assert 'Content can only be empty if mode is "delete"' in str(exc_info.value)
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_insert_no_location_no_before_no_after():
"""
Test for file.line for insertion but define no location/before/after.
:return:
"""
files_fopen = mock_open(read_data="test data")
with patch("salt.utils.files.fopen", files_fopen):
with pytest.raises(CommandExecutionError) as exc_info:
filemod.line("foo", content="test content", mode="insert")
assert '"location" or "before/after"' in str(exc_info.value)
def test_line_insert_after_no_pattern(tempfile_name, get_body):
    """
    Insert mode with ``after`` given as a literal line (no regex pattern)
    must write the new content directly after that line.
    See issue #38670
    """
    file_content = os.linesep.join(["file_roots:", " base:", " - /srv/salt"])
    file_modified = os.linesep.join(
        ["file_roots:", " base:", " - /srv/salt", " - /srv/custom"]
    )
    cfg_content = "- /srv/custom"
    # Only the temp file is reported as existing; other paths fall back to
    # the mock's DEFAULT behavior.
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(
            tempfile_name, content=cfg_content, after="- /srv/salt", mode="insert"
        )
        handles = atomic_open_mock.filehandles[tempfile_name]
        # We should only have opened the file once
        open_count = len(handles)
        assert open_count == 1, open_count
        # We should only have invoked .writelines() once...
        writelines_content = handles[0].writelines_calls
        writelines_count = len(writelines_content)
        assert writelines_count == 1, writelines_count
        # ... with the updated content
        expected = get_body(file_modified)
        assert writelines_content[0] == expected, (writelines_content[0], expected)
def test_line_insert_after_pattern(tempfile_name, get_body):
    """
    Insert mode with ``after`` given as a regex pattern must write the new
    (multi-line) content after the first matching line, honoring
    ``indent=False``.
    See issue #38670
    """
    file_content = os.linesep.join(
        [
            "file_boots:",
            " - /rusty",
            "file_roots:",
            " base:",
            " - /srv/salt",
            " - /srv/sugar",
        ]
    )
    file_modified = os.linesep.join(
        [
            "file_boots:",
            " - /rusty",
            "file_roots:",
            " custom:",
            " - /srv/custom",
            " base:",
            " - /srv/salt",
            " - /srv/sugar",
        ]
    )
    cfg_content = os.linesep.join([" custom:", " - /srv/custom"])
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    # Exercise both an anchored and an unanchored regex form of `after`.
    for after_line in ["file_r.*", ".*roots"]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(
                tempfile_name,
                content=cfg_content,
                after=after_line,
                mode="insert",
                indent=False,
            )
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content
            expected = get_body(file_modified)
            # We passed cfg_content with a newline in the middle, so it
            # will be written as two lines in the same element of the list
            # passed to .writelines()
            expected[3] = expected[3] + expected.pop(4)
            assert writelines_content[0] == expected, (
                writelines_content[0],
                expected,
            )
def test_line_insert_multi_line_content_after_unicode(tempfile_name, get_body):
    """
    Insert mode must handle content containing non-ASCII (Unicode)
    characters when inserting after a given line.
    See issue #48113
    """
    file_content = "This is a line{}This is another line".format(os.linesep)
    file_modified = salt.utils.stringutils.to_str(
        "This is a line{}"
        "This is another line{}"
        "This is a line with unicode Ŷ".format(os.linesep, os.linesep)
    )
    cfg_content = "This is a line with unicode Ŷ"
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    for after_line in ["This is another line"]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(
                tempfile_name,
                content=cfg_content,
                after=after_line,
                mode="insert",
                indent=False,
            )
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content
            expected = get_body(file_modified)
            assert writelines_content[0] == expected, (
                writelines_content[0],
                expected,
            )
def test_line_insert_before(tempfile_name, get_body):
    """
    Insert mode with ``before`` given either as a literal line or a regex
    pattern must write the new content just before the matching line.
    See issue #38670
    """
    file_content = os.linesep.join(
        ["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
    )
    file_modified = os.linesep.join(
        [
            "file_roots:",
            " base:",
            " - /srv/custom",
            " - /srv/salt",
            " - /srv/sugar",
        ]
    )
    cfg_content = "- /srv/custom"
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    # Literal and regex forms of `before` must both work.
    for before_line in ["/srv/salt", "/srv/sa.*t"]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(
                tempfile_name, content=cfg_content, before=before_line, mode="insert"
            )
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content
            expected = get_body(file_modified)
            assert writelines_content[0] == expected
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_assert_exception_pattern():
"""
Test for file.line for exception on insert with too general pattern.
:return:
"""
file_content = os.linesep.join(
["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
)
cfg_content = "- /srv/custom"
for before_line in ["/sr.*"]:
files_fopen = mock_open(read_data=file_content)
with patch("salt.utils.files.fopen", files_fopen):
atomic_opener = mock_open()
with patch("salt.utils.atomicfile.atomic_open", atomic_opener):
with pytest.raises(CommandExecutionError) as cm:
filemod.line(
"foo",
content=cfg_content,
before=before_line,
mode="insert",
)
assert (
str(cm.value)
== 'Found more than expected occurrences in "before" expression'
)
def test_line_insert_before_after(tempfile_name, get_body):
    """
    Insert mode with both ``before`` and ``after`` must place the new
    content between the two anchor lines.
    See issue #38670
    """
    file_content = os.linesep.join(
        [
            "file_roots:",
            " base:",
            " - /srv/salt",
            " - /srv/pepper",
            " - /srv/sugar",
        ]
    )
    file_modified = os.linesep.join(
        [
            "file_roots:",
            " base:",
            " - /srv/salt",
            " - /srv/pepper",
            " - /srv/coriander",
            " - /srv/sugar",
        ]
    )
    cfg_content = "- /srv/coriander"
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    for b_line, a_line in [("/srv/sugar", "/srv/salt")]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(
                tempfile_name,
                content=cfg_content,
                before=b_line,
                after=a_line,
                mode="insert",
            )
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content
            expected = get_body(file_modified)
            assert writelines_content[0] == expected
def test_line_insert_start(tempfile_name, get_body):
    """
    Insert mode with ``location="start"`` must prepend the content as the
    first line of the file.
    """
    cfg_content = "everything: fantastic"
    file_content = os.linesep.join(
        ["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
    )
    file_modified = os.linesep.join(
        [
            cfg_content,
            "file_roots:",
            " base:",
            " - /srv/salt",
            " - /srv/sugar",
        ]
    )
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(
            tempfile_name, content=cfg_content, location="start", mode="insert"
        )
        handles = atomic_open_mock.filehandles[tempfile_name]
        # We should only have opened the file once
        open_count = len(handles)
        assert open_count == 1, open_count
        # We should only have invoked .writelines() once...
        writelines_content = handles[0].writelines_calls
        writelines_count = len(writelines_content)
        assert writelines_count == 1, writelines_count
        # ... with the updated content
        expected = get_body(file_modified)
        assert writelines_content[0] == expected, (writelines_content[0], expected)
def test_line_insert_end(tempfile_name, get_body):
    """
    Insert mode with ``location="end"`` must append the content (with the
    surrounding indentation preserved) as the last line of the file.
    """
    cfg_content = "everything: fantastic"
    file_content = os.linesep.join(
        ["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
    )
    file_modified = os.linesep.join(
        [
            "file_roots:",
            " base:",
            " - /srv/salt",
            " - /srv/sugar",
            " " + cfg_content,
        ]
    )
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(tempfile_name, content=cfg_content, location="end", mode="insert")
        handles = atomic_open_mock.filehandles[tempfile_name]
        # We should only have opened the file once
        open_count = len(handles)
        assert open_count == 1, open_count
        # We should only have invoked .writelines() once...
        writelines_content = handles[0].writelines_calls
        writelines_count = len(writelines_content)
        assert writelines_count == 1, writelines_count
        # ... with the updated content
        expected = get_body(file_modified)
        assert writelines_content[0] == expected, (writelines_content[0], expected)
def test_line_insert_ensure_before(tempfile_name, get_body):
    """
    Ensure mode with ``before`` must insert the content when it is not yet
    present before the anchor line.
    """
    cfg_content = "/etc/init.d/someservice restart"
    file_content = os.linesep.join(["#!/bin/bash", "", "exit 0"])
    file_modified = os.linesep.join(["#!/bin/bash", "", cfg_content, "exit 0"])
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(tempfile_name, content=cfg_content, before="exit 0", mode="ensure")
        handles = atomic_open_mock.filehandles[tempfile_name]
        # We should only have opened the file once
        open_count = len(handles)
        assert open_count == 1, open_count
        # We should only have invoked .writelines() once...
        writelines_content = handles[0].writelines_calls
        writelines_count = len(writelines_content)
        assert writelines_count == 1, writelines_count
        # ... with the updated content
        expected = get_body(file_modified)
        assert writelines_content[0] == expected, (writelines_content[0], expected)
def test_line_insert_duplicate_ensure_before(tempfile_name):
    """
    Ensure mode must be a no-op (no file write at all) when the content is
    already present before the anchor line.
    """
    cfg_content = "/etc/init.d/someservice restart"
    # Content already sits before "exit 0" in the original file.
    file_content = os.linesep.join(["#!/bin/bash", "", cfg_content, "exit 0"])
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(tempfile_name, content=cfg_content, before="exit 0", mode="ensure")
        # If file not modified no handlers in dict
        assert atomic_open_mock.filehandles.get(tempfile_name) is None
def test_line_insert_ensure_before_first_line(tempfile_name, get_body):
    """
    Ensure mode with ``before`` pointing at the file's first line must
    insert the content at the very top of the file.
    """
    cfg_content = "#!/bin/bash"
    file_content = os.linesep.join(["/etc/init.d/someservice restart", "exit 0"])
    file_modified = os.linesep.join(
        [cfg_content, "/etc/init.d/someservice restart", "exit 0"]
    )
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(
            tempfile_name,
            content=cfg_content,
            before="/etc/init.d/someservice restart",
            mode="ensure",
        )
        handles = atomic_open_mock.filehandles[tempfile_name]
        # We should only have opened the file once
        open_count = len(handles)
        assert open_count == 1, open_count
        # We should only have invoked .writelines() once...
        writelines_content = handles[0].writelines_calls
        writelines_count = len(writelines_content)
        assert writelines_count == 1, writelines_count
        # ... with the updated content
        expected = get_body(file_modified)
        assert writelines_content[0] == expected, (writelines_content[0], expected)
def test_line_insert_ensure_after(tempfile_name, get_body):
    """
    Ensure mode with ``after`` must insert the content when it is not yet
    present after the anchor line.
    """
    cfg_content = "exit 0"
    file_content = os.linesep.join(["#!/bin/bash", "/etc/init.d/someservice restart"])
    file_modified = os.linesep.join(
        ["#!/bin/bash", "/etc/init.d/someservice restart", cfg_content]
    )
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(
            tempfile_name,
            content=cfg_content,
            after="/etc/init.d/someservice restart",
            mode="ensure",
        )
        handles = atomic_open_mock.filehandles[tempfile_name]
        # We should only have opened the file once
        open_count = len(handles)
        assert open_count == 1, open_count
        # We should only have invoked .writelines() once...
        writelines_content = handles[0].writelines_calls
        writelines_count = len(writelines_content)
        assert writelines_count == 1, writelines_count
        # ... with the updated content
        expected = get_body(file_modified)
        assert writelines_content[0] == expected, (writelines_content[0], expected)
def test_line_insert_duplicate_ensure_after(tempfile_name):
    """
    Ensure mode must be a no-op (no file write at all) when the content is
    already present after the anchor line.
    """
    cfg_content = "exit 0"
    # Content already sits after the anchor line in the original file.
    file_content = os.linesep.join(
        ["#!/bin/bash", "/etc/init.d/someservice restart", cfg_content]
    )
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(
            tempfile_name,
            content=cfg_content,
            after="/etc/init.d/someservice restart",
            mode="ensure",
        )
        # If file not modified no handlers in dict
        assert atomic_open_mock.filehandles.get(tempfile_name) is None
def test_line_insert_ensure_beforeafter_twolines(tempfile_name, get_body):
    """
    Ensure mode with both ``before`` and ``after`` (literal or regex) must
    insert the content between the two adjacent anchor lines.
    """
    cfg_content = 'EXTRA_GROUPS="dialout cdrom floppy audio video plugdev users"'
    # pylint: disable=W1401
    file_content = os.linesep.join(
        [
            r'NAME_REGEX="^[a-z][-a-z0-9_]*\$"',
            'SKEL_IGNORE_REGEX="dpkg-(old|new|dist|save)"',
        ]
    )
    # pylint: enable=W1401
    after, before = file_content.split(os.linesep)
    file_modified = os.linesep.join([after, cfg_content, before])
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    for (_after, _before) in [(after, before), ("NAME_.*", "SKEL_.*")]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(
                tempfile_name,
                content=cfg_content,
                after=_after,
                before=_before,
                mode="ensure",
            )
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content
            expected = get_body(file_modified)
            assert writelines_content[0] == expected, (
                writelines_content[0],
                expected,
            )
def test_line_insert_ensure_beforeafter_twolines_exists(tempfile_name):
    """
    Ensure mode with ``before``/``after`` must change nothing (and return
    False) when the content already sits between the two anchor lines.
    """
    cfg_content = 'EXTRA_GROUPS="dialout"'
    # pylint: disable=W1401
    file_content = os.linesep.join(
        [
            r'NAME_REGEX="^[a-z][-a-z0-9_]*\$"',
            'EXTRA_GROUPS="dialout"',
            'SKEL_IGNORE_REGEX="dpkg-(old|new|dist|save)"',
        ]
    )
    # pylint: enable=W1401
    after, before = (
        file_content.split(os.linesep)[0],
        file_content.split(os.linesep)[2],
    )
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    for (_after, _before) in [(after, before), ("NAME_.*", "SKEL_.*")]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            # NOTE(review): the path passed here is "foo", not tempfile_name;
            # isfile_mock then returns DEFAULT (a truthy MagicMock), so the
            # existence check still passes — presumably intentional, verify.
            result = filemod.line(
                "foo",
                content=cfg_content,
                after=_after,
                before=_before,
                mode="ensure",
            )
            # We should not have opened the file
            assert not atomic_open_mock.filehandles
            # No changes should have been made
            assert result is False
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_insert_ensure_beforeafter_rangelines():
"""
Test for file.line for insertion ensuring the line is between two lines
within the range. This expected to bring no changes.
"""
cfg_content = 'EXTRA_GROUPS="dialout cdrom floppy audio video plugdev users"'
# pylint: disable=W1401
file_content = (
r'NAME_REGEX="^[a-z][-a-z0-9_]*\$"{}SETGID_HOME=no{}ADD_EXTRA_GROUPS=1{}'
'SKEL_IGNORE_REGEX="dpkg-(old|new|dist|save)"'.format(
os.linesep, os.linesep, os.linesep
)
)
# pylint: enable=W1401
after, before = (
file_content.split(os.linesep)[0],
file_content.split(os.linesep)[-1],
)
for (_after, _before) in [(after, before), ("NAME_.*", "SKEL_.*")]:
files_fopen = mock_open(read_data=file_content)
with patch("salt.utils.files.fopen", files_fopen):
atomic_opener = mock_open()
with patch("salt.utils.atomicfile.atomic_open", atomic_opener):
with pytest.raises(CommandExecutionError) as exc_info:
filemod.line(
"foo",
content=cfg_content,
after=_after,
before=_before,
mode="ensure",
)
assert (
'Found more than one line between boundaries "before" and "after"'
in str(exc_info.value)
)
def test_line_delete(tempfile_name, get_body):
    """
    Delete mode must remove the matching line, whether ``content`` is a
    literal line, a glob-like fragment, or a regex.
    """
    file_content = os.linesep.join(
        [
            "file_roots:",
            " base:",
            " - /srv/salt",
            " - /srv/pepper",
            " - /srv/sugar",
        ]
    )
    file_modified = os.linesep.join(
        ["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
    )
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    # All four forms should delete the same "/srv/pepper" line.
    for content in ["/srv/pepper", "/srv/pepp*", "/srv/p.*", "/sr.*pe.*"]:
        files_fopen = mock_open(read_data=file_content)
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", files_fopen), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(tempfile_name, content=content, mode="delete")
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content
            expected = get_body(file_modified)
            assert writelines_content[0] == expected, (
                writelines_content[0],
                expected,
            )
def test_line_replace(tempfile_name, get_body):
    """
    Replace mode must swap the line matched by ``match`` (literal,
    glob-like fragment, or regex) for the new content.
    """
    file_content = os.linesep.join(
        [
            "file_roots:",
            " base:",
            " - /srv/salt",
            " - /srv/pepper",
            " - /srv/sugar",
        ]
    )
    file_modified = os.linesep.join(
        [
            "file_roots:",
            " base:",
            " - /srv/salt",
            " - /srv/natrium-chloride",
            " - /srv/sugar",
        ]
    )
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    # All four forms should match the same "/srv/pepper" line.
    for match in ["/srv/pepper", "/srv/pepp*", "/srv/p.*", "/sr.*pe.*"]:
        files_fopen = mock_open(read_data=file_content)
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", files_fopen), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(
                tempfile_name,
                content="- /srv/natrium-chloride",
                match=match,
                mode="replace",
            )
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content
            expected = get_body(file_modified)
            assert writelines_content[0] == expected, (
                writelines_content[0],
                expected,
            )
| 32.548152 | 122 | 0.609141 | """
Unit tests for file.line
"""
import logging
import os
import shutil
import pytest
import salt.config
import salt.loader
import salt.modules.cmdmod as cmdmod
import salt.modules.config as configmod
import salt.modules.file as filemod
import salt.utils.data
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from tests.support.mock import DEFAULT, MagicMock, mock_open, patch
log = logging.getLogger(__name__)
class DummyStat:
    """Minimal stand-in for ``os.stat_result`` with fixed, plausible values
    (regular file, mode 0644), used when patching ``os.stat``."""
    st_mode = 33188
    st_ino = 115331251
    st_dev = 44
    st_nlink = 1
    st_uid = 99200001
    st_gid = 99200001
    st_size = 41743
    st_atime = 1552661253
    st_mtime = 1552661253
    st_ctime = 1552661253
@pytest.fixture
def anyattr():
    """Fixture yielding an object that returns 0 for any attribute access
    and returns itself when called — a permissive stat()-like stand-in."""
    class _Anything:
        def __getattr__(self, name):
            return 0

        def __call__(self, *args, **kwargs):
            return self

    return _Anything()
@pytest.fixture
def configure_loader_modules():
    """Wire the minimal ``__salt__``/``__opts__``/``__grains__``/``__utils__``
    dunders that salt.modules.file needs under the test loader."""
    return {
        filemod: {
            "__salt__": {
                "config.manage_mode": configmod.manage_mode,
                "cmd.run": cmdmod.run,
                "cmd.run_all": cmdmod.run_all,
            },
            "__opts__": {
                "test": False,
                "file_roots": {"base": "tmp"},
                "pillar_roots": {"base": "tmp"},
                "cachedir": "tmp",
                "grains": {},
            },
            "__grains__": {"kernel": "Linux"},
            "__utils__": {"stringutils.get_diff": salt.utils.stringutils.get_diff},
        }
    }
@pytest.fixture
def get_body():
    """Fixture returning a helper that turns file content into the list of
    lines file.line is expected to pass to ``writelines()``."""
    def _get_body(content):
        """
        The body is written as bytestrings or strings depending on platform.
        This func accepts a string of content and returns the appropriate list
        of strings back.
        """
        return salt.utils.data.decode_list(content.splitlines(True), to_str=True)

    return _get_body
@pytest.fixture
def tempfile_name(tmp_path):
    """Create an empty temporary file, yield its path, then remove the
    containing directory tree on teardown."""
    subdir = tmp_path / "file-line-temp-dir"
    subdir.mkdir()
    filename = str(subdir / "file-line-temp-file")
    # File needs to be created
    with salt.utils.files.fopen(filename, "w"):
        pass
    yield filename
    # We need to make sure to remove the tree we just created to avoid clashes with other tests
    shutil.rmtree(str(subdir))
def test_set_line_should_raise_command_execution_error_with_no_mode():
    """_set_line must raise when called without a mode."""
    with pytest.raises(CommandExecutionError) as exc_info:
        filemod._set_line(lines=[], mode=None)
    message = str(exc_info.value)
    assert message == "Mode was not defined. How to process the file?"
def test_set_line_should_raise_command_execution_error_with_unknown_mode():
    """_set_line must raise on an unrecognised mode value."""
    with pytest.raises(CommandExecutionError) as exc_info:
        filemod._set_line(lines=[], mode="fnord")
    message = str(exc_info.value)
    assert message == "Unknown mode: fnord"
def test_if_content_is_none_and_mode_is_valid_but_not_delete_it_should_raise_command_execution_error():
    """Every valid mode except "delete" must reject missing content."""
    for valid_mode in ("insert", "ensure", "replace"):
        with pytest.raises(CommandExecutionError) as exc_info:
            filemod._set_line(lines=[], mode=valid_mode)
        assert str(exc_info.value) == "Content can only be empty if mode is delete"
def test_if_delete_or_replace_is_called_with_empty_lines_it_should_warn_and_return_empty_body():
    """delete/replace on an empty file logs a warning and returns no lines."""
    for mode in ("delete", "replace"):
        with patch("salt.modules.file.log.warning", MagicMock()) as warn_mock:
            result = filemod._set_line(content="roscivs", lines=[], mode=mode)
            assert result == []
            warn_mock.assert_called_with(
                "Cannot find text to %s. File is empty.", mode
            )
def test_if_mode_is_delete_and_not_before_after_or_match_then_content_should_be_used_to_delete_line():
    """With only content given, delete removes the matching line."""
    result = filemod._set_line(
        mode="delete", lines=["foo", "roscivs", "bar"], content="roscivs"
    )
    assert result == ["foo", "bar"]
def test_if_mode_is_replace_and_not_before_after_or_match_and_content_exists_then_lines_should_not_change():
    """Replacing a line with itself leaves the file untouched."""
    lines = ["foo", "roscivs", "bar"]
    result = filemod._set_line(content="roscivs", lines=lines, mode="replace")
    assert result == lines
def test_if_mode_is_replace_and_match_is_set_then_it_should_replace_the_first_match():
    """replace swaps the line matching *match* for *content*."""
    result = filemod._set_line(
        mode="replace",
        lines=["foo", "quuxy", "bar"],
        content="roscivs",
        match="quuxy",
    )
    assert result == ["foo", "roscivs", "bar"]
def test_if_mode_is_replace_and_indent_is_true_then_it_should_match_indention_of_existing_line():
    """With indent=True the replacement keeps the old line's leading whitespace."""
    leading = "\t\t      \t \t"
    result = filemod._set_line(
        mode="replace",
        lines=["foo", leading + "quuxy", "bar"],
        content="roscivs",
        match=leading + "quuxy",
        indent=True,
    )
    assert result == ["foo", leading + "roscivs", "bar"]
def test_if_mode_is_replace_and_indent_is_false_then_it_should_just_use_content():
    """With indent=False the replacement is used verbatim; old indent is dropped."""
    old_line = "\t\t      \t \t" + "quuxy"
    new_line = "\t \t\troscivs"
    result = filemod._set_line(
        mode="replace",
        lines=["foo", old_line, "bar"],
        content=new_line,
        match=old_line,
        indent=False,
    )
    assert result == ["foo", new_line, "bar"]
def test_if_mode_is_insert_and_no_location_before_or_after_then_it_should_raise_command_execution_error():
    """insert needs either a location or a before/after anchor."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="insert",
            lines=[],
            content="fnord",
            location=None,
            before=None,
            after=None,
        )
    expected = 'On insert either "location" or "before/after" conditions are required.'
    assert str(excinfo.value) == expected
def test_if_mode_is_insert_and_location_is_start_it_should_insert_content_at_start():
    """location='start' prepends the content line."""
    existing = ["foo", "bar", "bang"]
    # Snapshot the expectation before the call: _set_line may mutate *lines*.
    expected = ["roscivs"] + existing
    with patch("os.linesep", ""):
        result = filemod._set_line(
            mode="insert",
            location="start",
            lines=existing,
            content="roscivs",
        )
    assert result == expected
def test_if_mode_is_insert_and_lines_have_eol_then_inserted_line_should_have_matching_eol():
    """The inserted line adopts the EOL style already present in the file."""
    eol = "\r\n"
    existing = ["foo" + eol]
    # Snapshot before the call: _set_line may mutate *lines*.
    expected = ["roscivs" + eol] + existing
    result = filemod._set_line(
        mode="insert",
        location="start",
        lines=existing,
        content="roscivs",
    )
    assert result == expected
def test_if_mode_is_insert_and_no_lines_then_the_content_should_have_os_linesep_added():
    """Inserting into an empty file appends os.linesep to the content."""
    snake_sep = "\U0001f40d"
    with patch("os.linesep", snake_sep):
        result = filemod._set_line(
            mode="insert",
            location="start",
            lines=[],
            content="roscivs",
        )
    assert result == ["roscivs" + snake_sep]
def test_if_location_is_end_of_empty_file_then_it_should_just_be_content():
    """Appending to an empty file yields only the content line."""
    result = filemod._set_line(
        mode="insert",
        location="end",
        lines=[],
        content="roscivs",
    )
    assert result == ["roscivs"]
def test_if_location_is_end_of_file_and_indent_is_True_then_line_should_match_previous_indent():
    """Appending with indent=True copies the final line's indentation."""
    leading = "   \t\t\t   "
    existing = [leading + "fnord"]
    # Snapshot before the call: _set_line may mutate *lines*.
    expected = existing + [leading + "roscivs"]
    result = filemod._set_line(
        mode="insert",
        location="end",
        lines=existing,
        content="roscivs",
        indent=True,
    )
    assert result == expected
def test_if_location_is_not_set_but_before_and_after_are_then_line_should_appear_as_the_line_before_before():
    """With both anchors set, the content lands directly above *before*."""
    for leading in ("", "    \t  \t\t\t      "):
        original = ["foo", "bar", leading + "after", "belowme", leading + "before"]
        expected = [
            "foo",
            "bar",
            leading + "after",
            "belowme",
            leading + "roscivs",
            leading + "before",
        ]
        result = filemod._set_line(
            mode="insert",
            lines=original,
            content="roscivs",
            location=None,
            before="before",
            after="after",
        )
        assert result == expected
def test_insert_with_after_and_before_with_no_location_should_indent_to_match_before_indent():
    """The inserted line copies the indentation of the *before* anchor."""
    for leading in ("", "    \t  \t\t\t      "):
        original = [
            "foo",
            "bar",
            leading + "after",
            "belowme",
            (leading * 2) + "before",
        ]
        expected = [
            "foo",
            "bar",
            leading + "after",
            "belowme",
            (leading * 2) + "roscivs",
            (leading * 2) + "before",
        ]
        result = filemod._set_line(
            mode="insert",
            lines=original,
            content="roscivs",
            location=None,
            before="before",
            after="after",
        )
        assert result == expected
def test_if_not_location_but_before_and_after_and_more_than_one_after_it_should_CommandExecutionError():
    """Multiple matches for the *after* anchor are an error."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="insert",
            lines=["one", "one", "one", "one", "two"],
            content="fnord",
            location=None,
            before="two",
            after="one",
        )
    assert (
        str(excinfo.value)
        == 'Found more than expected occurrences in "after" expression'
    )
def test_if_not_location_but_before_and_after_and_more_than_one_before_it_should_CommandExecutionError():
    """Multiple matches for the *before* anchor are an error."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="insert",
            lines=["one", "two", "two", "two"],
            content="fnord",
            location=None,
            before="two",
            after="one",
        )
    assert (
        str(excinfo.value)
        == 'Found more than expected occurrences in "before" expression'
    )
def test_if_not_location_or_before_but_after_and_after_has_more_than_one_it_should_CommandExecutionError():
    """With only *after* given, it must still match exactly once."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="insert",
            lines=["after", "after", "after"],
            content="fnord",
            location=None,
            before=None,
            after="after",
        )
    assert (
        str(excinfo.value)
        == 'Found more than expected occurrences in "after" expression'
    )
def test_if_not_location_or_after_but_before_and_before_has_more_than_one_it_should_CommandExecutionError():
    """With only *before* given, it must still match exactly once."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="insert",
            lines=["before", "before", "before"],
            content="fnord",
            location=None,
            before="before",
            after=None,
        )
    assert (
        str(excinfo.value)
        == 'Found more than expected occurrences in "before" expression'
    )
def test_if_not_location_or_after_and_no_before_in_lines_it_should_CommandExecutionError():
    """A *before* anchor that matches nothing is an error."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="insert",
            lines=["fnord", "fnord"],
            content="fnord",
            location=None,
            before="before",
            after=None,
        )
    assert str(excinfo.value) == "Neither before or after was found in file"
def test_if_not_location_or_before_and_no_after_in_lines_it_should_CommandExecutionError():
    """An *after* anchor that matches nothing is an error."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="insert",
            lines=["fnord", "fnord"],
            content="fnord",
            location=None,
            before=None,
            after="after",
        )
    assert str(excinfo.value) == "Neither before or after was found in file"
def test_if_not_location_or_before_but_after_then_line_should_be_inserted_after_after():
    """With only *after* set, content goes right below it, indent copied."""
    leading = "\t\t\t      "
    original = ["foo", leading + "indessed", "bar"]
    expected = ["foo", leading + "indessed", leading + "roscivs", "bar"]
    result = filemod._set_line(
        mode="insert",
        lines=original,
        content="roscivs",
        location=None,
        before=None,
        after="indessed",
    )
    assert result == expected
def test_insert_with_after_should_ignore_line_endings_on_comparison():
    """An existing match differing only by EOL chars suppresses the insert."""
    original = ["after", "roscivs" + "\r\n\r\n"]
    result = filemod._set_line(
        mode="insert",
        # Copy: _set_line mutates the list it receives.
        lines=original[:],
        content="roscivs",
        after="after",
    )
    assert result == original
def test_insert_with_before_should_ignore_line_endings_on_comparison():
    """An existing match differing only by EOL chars suppresses the insert."""
    original = ["bottia" + "\r\n\r\n", "before"]
    result = filemod._set_line(
        mode="insert",
        # Copy: _set_line mutates the list it receives.
        lines=original[:],
        content="bottia",
        before="before",
    )
    assert result == original
def test_if_not_location_or_before_but_after_and_indent_False_then_line_should_be_inserted_after_after_without_indent():
    """indent=False keeps the inserted line flush left."""
    leading = "\t\t\t      "
    original = ["foo", leading + "indessed", "bar"]
    expected = ["foo", leading + "indessed", "roscivs", "bar"]
    result = filemod._set_line(
        mode="insert",
        lines=original,
        content="roscivs",
        location=None,
        before=None,
        after="indessed",
        indent=False,
    )
    assert result == expected
def test_if_not_location_or_after_but_before_then_line_should_be_inserted_before_before():
    """With only *before* set, content goes right above it, indent copied."""
    leading = "\t\t\t      "
    original = [leading + "foo", leading + "indessed", "bar"]
    expected = [leading + "foo", leading + "roscivs", leading + "indessed", "bar"]
    result = filemod._set_line(
        mode="insert",
        lines=original,
        content="roscivs",
        location=None,
        before="indessed",
        after=None,
    )
    assert result == expected
def test_if_not_location_or_after_but_before_and_indent_False_then_line_should_be_inserted_before_before_without_indent():
    """indent=False keeps the line flush left even among indented neighbours."""
    leading = "\t\t\t      "
    original = [leading + "foo", "indessed", "bar"]
    expected = [leading + "foo", "roscivs", "indessed", "bar"]
    result = filemod._set_line(
        mode="insert",
        lines=original,
        content="roscivs",
        location=None,
        before="indessed",
        after=None,
        indent=False,
    )
    assert result == expected
def test_insert_after_the_last_line_should_work():
    """Inserting after the file's final line appends the content."""
    original = ["indessed"]
    expected = ["indessed", "roscivs"]
    result = filemod._set_line(
        mode="insert",
        lines=original,
        content="roscivs",
        location=None,
        before=None,
        after="indessed",
        indent=True,
    )
    assert result == expected
def test_insert_should_work_just_like_ensure_on_before():
    # NOTE: insert deduplicates here exactly like ensure would. That is
    # arguably a bug, but it is the current behavior — update this test
    # if that ever changes.
    original = ["roscivs", "indessed"]
    result = filemod._set_line(
        # Copy: _set_line mutates the list it receives.
        lines=original[:],
        content="roscivs",
        mode="insert",
        before="indessed",
    )
    assert result == original
def test_insert_should_work_just_like_ensure_on_after():
    # NOTE: insert deduplicates here exactly like ensure would. That is
    # arguably a bug, but it is the current behavior — update this test
    # if that ever changes.
    original = ["indessed", "roscivs"]
    result = filemod._set_line(
        # Copy: _set_line mutates the list it receives.
        lines=original[:],
        content="roscivs",
        mode="insert",
        after="indessed",
    )
    assert result == original
def test_insert_before_the_first_line_should_work():
    """Inserting before the file's first line prepends the content."""
    original = ["indessed"]
    expected = ["roscivs", "indessed"]
    result = filemod._set_line(
        mode="insert",
        lines=original,
        content="roscivs",
        location=None,
        before="indessed",
        after=None,
        indent=True,
    )
    assert result == expected
def test_ensure_with_before_and_too_many_after_should_CommandExecutionError():
    """ensure rejects an *after* anchor that matches more than once."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="ensure",
            lines=["after", "after", "before"],
            content="fnord",
            location=None,
            before="before",
            after="after",
        )
    assert (
        str(excinfo.value)
        == 'Found more than expected occurrences in "after" expression'
    )
def test_ensure_with_too_many_after_should_CommandExecutionError():
    """ensure with only *after* still requires a unique match."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="ensure",
            lines=["fnord", "fnord"],
            content="asdf",
            after="fnord",
        )
    assert (
        str(excinfo.value)
        == 'Found more than expected occurrences in "after" expression'
    )
def test_ensure_with_after_and_too_many_before_should_CommandExecutionError():
    """ensure rejects a *before* anchor that matches more than once."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="ensure",
            lines=["after", "before", "before"],
            content="fnord",
            location=None,
            before="before",
            after="after",
        )
    assert (
        str(excinfo.value)
        == 'Found more than expected occurrences in "before" expression'
    )
def test_ensure_with_too_many_before_should_CommandExecutionError():
    """ensure with only *before* still requires a unique match."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="ensure",
            lines=["fnord", "fnord"],
            content="asdf",
            before="fnord",
        )
    assert (
        str(excinfo.value)
        == 'Found more than expected occurrences in "before" expression'
    )
def test_ensure_with_before_and_after_that_already_contains_the_line_should_return_original_info():
    """Nothing changes when the line already sits between the anchors."""
    original = ["after", "roscivs", "before"]
    result = filemod._set_line(
        mode="ensure",
        lines=original,
        content="roscivs",
        after="after",
        before="before",
    )
    assert result == original
def test_ensure_with_too_many_lines_between_before_and_after_should_CommandExecutionError():
    """ensure refuses to guess when multiple lines sit between the anchors."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="ensure",
            lines=["after", "fnord", "fnord", "before"],
            content="roscivs",
            after="after",
            before="before",
        )
    assert (
        str(excinfo.value)
        == 'Found more than one line between boundaries "before" and "after".'
    )
def test_ensure_with_no_lines_between_before_and_after_should_insert_a_line():
    """ensure inserts between adjacent anchors, matching *after*'s indent."""
    for leading in ("", "    \t  \t\t\t      "):
        original = [leading + "after", "before"]
        expected = [leading + "after", leading + "roscivs", "before"]
        result = filemod._set_line(
            mode="ensure",
            lines=original,
            content="roscivs",
            before="before",
            after="after",
            indent=True,
        )
        assert result == expected
def test_ensure_with_existing_but_different_line_should_set_the_line():
    """ensure replaces whatever single line sits between the anchors."""
    for leading in ("", "    \t  \t\t\t      "):
        original = [leading + "after", "fnord", "before"]
        expected = [leading + "after", leading + "roscivs", "before"]
        result = filemod._set_line(
            mode="ensure",
            lines=original,
            content="roscivs",
            before="before",
            after="after",
            indent=True,
        )
        assert result == expected
def test_ensure_with_after_and_existing_content_should_return_same_lines():
    """Nothing changes when the content already follows the *after* anchor."""
    for leading in ("", "    \t  \t\t\t      "):
        original = [leading + "after", leading + "roscivs", "fnord"]
        result = filemod._set_line(
            mode="ensure",
            lines=original,
            content="roscivs",
            before=None,
            after="after",
            indent=True,
        )
        assert result == original
def test_ensure_with_after_and_missing_content_should_add_it():
    """ensure inserts the content right below *after* when it is missing."""
    for leading in ("", "    \t  \t\t\t      "):
        original = [leading + "after", "more fnord", "fnord"]
        expected = [leading + "after", leading + "roscivs", "more fnord", "fnord"]
        result = filemod._set_line(
            mode="ensure",
            lines=original,
            content="roscivs",
            before=None,
            after="after",
            indent=True,
        )
        assert result == expected
def test_ensure_with_after_and_content_at_the_end_should_not_add_duplicate():
    """A trailing-newline variant of the content still counts as present."""
    original = ["after", "roscivs" + "\n"]
    result = filemod._set_line(
        mode="ensure",
        lines=original,
        content="roscivs",
        after="after",
    )
    assert result == original
def test_ensure_with_before_and_missing_content_should_add_it():
    """ensure inserts the content right above *before* when it is missing."""
    for leading in ("", "    \t  \t\t\t      "):
        original = [leading + "fnord", leading + "fnord", "before"]
        expected = [
            leading + "fnord",
            leading + "fnord",
            leading + "roscivs",
            "before",
        ]
        result = filemod._set_line(
            mode="ensure",
            lines=original,
            content="roscivs",
            before="before",
            after=None,
            indent=True,
        )
        assert result == expected
def test_ensure_with_before_and_existing_content_should_return_same_lines():
    """Nothing changes when the content already precedes the *before* anchor."""
    for leading in ("", "    \t  \t\t\t      "):
        original = [leading + "fnord", leading + "roscivs", "before"]
        result = filemod._set_line(
            mode="ensure",
            lines=original,
            content="roscivs",
            before="before",
            after=None,
            indent=True,
        )
        assert result == original
def test_ensure_without_before_and_after_should_CommandExecutionError():
    """ensure fails when neither anchor matches anything in the file."""
    with pytest.raises(CommandExecutionError) as excinfo:
        filemod._set_line(
            mode="ensure",
            lines=["fnord", "fnord1", "fnord2"],
            content="aardvark",
            before="before",
            after="after",
        )
    assert (
        str(excinfo.value)
        == "Wrong conditions? Unable to ensure line without knowing where"
        " to put it before and/or after."
    )
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
def test_delete_line_in_empty_file(anyattr):
    """
    Tests that when calling file.line with ``mode=delete``,
    the function doesn't stack trace if the file is empty.
    Should return ``False``.
    See Issue #38438.
    """
    # delete and replace share the "nothing to work on" code path.
    for mode in ["delete", "replace"]:
        _log = MagicMock()
        # Empty file: fopen yields no content; ``anyattr`` stands in for
        # os.stat so any attribute access on the result succeeds.
        with patch("salt.utils.files.fopen", mock_open(read_data="")), patch(
            "os.stat", anyattr
        ), patch("salt.modules.file.log", _log):
            assert not filemod.line(
                "/dummy/path", content="foo", match="bar", mode=mode
            )
        # The warning is logged printf-style (lazy %-args), so render it
        # before checking the message text.
        warning_call = _log.warning.call_args_list[0][0]
        warning_log_msg = warning_call[0] % warning_call[1:]
        assert "Cannot find text to {}".format(mode) in warning_log_msg
@patch("os.path.realpath", MagicMock())
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_delete_no_match():
    """
    file.line with ``mode=delete`` (or ``replace``) returns False when the
    pattern matches nothing in the file.
    :return:
    """
    file_content = os.linesep.join(
        ["file_roots:", "  base:", "    - /srv/salt", "    - /srv/custom"]
    )
    for mode in ["delete", "replace"]:
        with patch("salt.utils.files.fopen", mock_open(read_data=file_content)):
            with patch("salt.utils.atomicfile.atomic_open", mock_open()):
                assert not filemod.line(
                    "foo", content="foo", match="not matching", mode=mode
                )
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
def test_line_modecheck_failure():
    """
    file.line must fail for an empty or unknown mode.
    :return:
    """
    cases = [(None, "How to process the file"), ("nonsense", "Unknown mode")]
    for mode, expected_fragment in cases:
        with pytest.raises(CommandExecutionError) as excinfo:
            filemod.line("foo", mode=mode)
        assert expected_fragment in str(excinfo.value)
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
def test_line_no_content():
    """
    file.line requires content for every mode except delete.
    :return:
    """
    for mode in ("insert", "ensure", "replace"):
        with pytest.raises(CommandExecutionError) as excinfo:
            filemod.line("foo", mode=mode)
        assert 'Content can only be empty if mode is "delete"' in str(excinfo.value)
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_insert_no_location_no_before_no_after():
    """
    file.line in insert mode needs a location or a before/after anchor.
    :return:
    """
    with patch("salt.utils.files.fopen", mock_open(read_data="test data")):
        with pytest.raises(CommandExecutionError) as excinfo:
            filemod.line("foo", content="test content", mode="insert")
        assert '"location" or "before/after"' in str(excinfo.value)
def test_line_insert_after_no_pattern(tempfile_name, get_body):
    """
    Test for file.line for insertion after specific line, using no pattern.
    See issue #38670
    :return:
    """
    file_content = os.linesep.join(["file_roots:", "  base:", "    - /srv/salt"])
    file_modified = os.linesep.join(
        ["file_roots:", "  base:", "    - /srv/salt", "    - /srv/custom"]
    )
    cfg_content = "- /srv/custom"
    # Only the tempfile is reported as existing; returning DEFAULT makes the
    # mock fall back to its standard return value for any other path.
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(
            tempfile_name, content=cfg_content, after="- /srv/salt", mode="insert"
        )
        # Inspect what was actually written through the atomic-open mock.
        handles = atomic_open_mock.filehandles[tempfile_name]
        # We should only have opened the file once
        open_count = len(handles)
        assert open_count == 1, open_count
        # We should only have invoked .writelines() once...
        writelines_content = handles[0].writelines_calls
        writelines_count = len(writelines_content)
        assert writelines_count == 1, writelines_count
        # ... with the updated content
        expected = get_body(file_modified)
        assert writelines_content[0] == expected, (writelines_content[0], expected)
def test_line_insert_after_pattern(tempfile_name, get_body):
    """
    Test for file.line for insertion after specific line, using pattern.
    See issue #38670
    :return:
    """
    file_content = os.linesep.join(
        [
            "file_boots:",
            "  - /rusty",
            "file_roots:",
            "  base:",
            "    - /srv/salt",
            "    - /srv/sugar",
        ]
    )
    file_modified = os.linesep.join(
        [
            "file_boots:",
            "  - /rusty",
            "file_roots:",
            "  custom:",
            "    - /srv/custom",
            "  base:",
            "    - /srv/salt",
            "    - /srv/sugar",
        ]
    )
    cfg_content = os.linesep.join(["  custom:", "    - /srv/custom"])
    # Only the tempfile is reported as existing; DEFAULT defers to the mock's
    # standard return value for other paths.
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    # Both regex forms must anchor on the "file_roots:" line.
    for after_line in ["file_r.*", ".*roots"]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(
                tempfile_name,
                content=cfg_content,
                after=after_line,
                mode="insert",
                indent=False,
            )
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content
            expected = get_body(file_modified)
            # We passed cfg_content with a newline in the middle, so it
            # will be written as two lines in the same element of the list
            # passed to .writelines()
            expected[3] = expected[3] + expected.pop(4)
            assert writelines_content[0] == expected, (
                writelines_content[0],
                expected,
            )
def test_line_insert_multi_line_content_after_unicode(tempfile_name, get_body):
    """
    Test for file.line for insertion after specific line with Unicode
    See issue #48113
    :return:
    """
    file_content = "This is a line{}This is another line".format(os.linesep)
    # to_str matches the platform-native str type produced by get_body.
    file_modified = salt.utils.stringutils.to_str(
        "This is a line{}"
        "This is another line{}"
        "This is a line with unicode Ŷ".format(os.linesep, os.linesep)
    )
    cfg_content = "This is a line with unicode Ŷ"
    # Only the tempfile is reported as existing; DEFAULT defers to the mock's
    # standard return value for other paths.
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    for after_line in ["This is another line"]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(
                tempfile_name,
                content=cfg_content,
                after=after_line,
                mode="insert",
                indent=False,
            )
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content
            expected = get_body(file_modified)
            assert writelines_content[0] == expected, (
                writelines_content[0],
                expected,
            )
def test_line_insert_before(tempfile_name, get_body):
    """
    Test for file.line for insertion before specific line, using pattern and no patterns.
    See issue #38670
    :return:
    """
    file_content = os.linesep.join(
        ["file_roots:", "  base:", "    - /srv/salt", "    - /srv/sugar"]
    )
    file_modified = os.linesep.join(
        [
            "file_roots:",
            "  base:",
            "    - /srv/custom",
            "    - /srv/salt",
            "    - /srv/sugar",
        ]
    )
    cfg_content = "- /srv/custom"
    # Only the tempfile is reported as existing; DEFAULT defers to the mock's
    # standard return value for other paths.
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    # A literal anchor and a regex anchor must produce the same insertion.
    for before_line in ["/srv/salt", "/srv/sa.*t"]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(
                tempfile_name, content=cfg_content, before=before_line, mode="insert"
            )
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content. Carry the (actual, expected)
            # diagnostic payload like the sibling tests do; the previous
            # commented-out duplicate assert has been removed.
            expected = get_body(file_modified)
            assert writelines_content[0] == expected, (writelines_content[0], expected)
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_assert_exception_pattern():
    """
    file.line raises when an insert pattern matches too many lines.
    :return:
    """
    file_content = os.linesep.join(
        ["file_roots:", "  base:", "    - /srv/salt", "    - /srv/sugar"]
    )
    for before_line in ["/sr.*"]:
        with patch("salt.utils.files.fopen", mock_open(read_data=file_content)):
            with patch("salt.utils.atomicfile.atomic_open", mock_open()):
                with pytest.raises(CommandExecutionError) as excinfo:
                    filemod.line(
                        "foo",
                        content="- /srv/custom",
                        before=before_line,
                        mode="insert",
                    )
                assert (
                    str(excinfo.value)
                    == 'Found more than expected occurrences in "before" expression'
                )
def test_line_insert_before_after(tempfile_name, get_body):
    """
    Test for file.line for insertion before specific line, using pattern and no patterns.
    See issue #38670
    :return:
    """
    file_content = os.linesep.join(
        [
            "file_roots:",
            "  base:",
            "    - /srv/salt",
            "    - /srv/pepper",
            "    - /srv/sugar",
        ]
    )
    file_modified = os.linesep.join(
        [
            "file_roots:",
            "  base:",
            "    - /srv/salt",
            "    - /srv/pepper",
            "    - /srv/coriander",
            "    - /srv/sugar",
        ]
    )
    cfg_content = "- /srv/coriander"
    # Only the tempfile is reported as existing; DEFAULT defers to the mock's
    # standard return value for other paths.
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    for b_line, a_line in [("/srv/sugar", "/srv/salt")]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            filemod.line(
                tempfile_name,
                content=cfg_content,
                before=b_line,
                after=a_line,
                mode="insert",
            )
            handles = atomic_open_mock.filehandles[tempfile_name]
            # We should only have opened the file once
            open_count = len(handles)
            assert open_count == 1, open_count
            # We should only have invoked .writelines() once...
            writelines_content = handles[0].writelines_calls
            writelines_count = len(writelines_content)
            assert writelines_count == 1, writelines_count
            # ... with the updated content
            expected = get_body(file_modified)
            assert writelines_content[0] == expected
def test_line_insert_start(tempfile_name, get_body):
    """
    Test for file.line for insertion at the beginning of the file
    :return:
    """
    cfg_content = "everything: fantastic"
    file_content = os.linesep.join(
        ["file_roots:", "  base:", "    - /srv/salt", "    - /srv/sugar"]
    )
    file_modified = os.linesep.join(
        [
            cfg_content,
            "file_roots:",
            "  base:",
            "    - /srv/salt",
            "    - /srv/sugar",
        ]
    )
    # Only the tempfile is reported as existing; DEFAULT defers to the mock's
    # standard return value for other paths.
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(
            tempfile_name, content=cfg_content, location="start", mode="insert"
        )
        handles = atomic_open_mock.filehandles[tempfile_name]
        # We should only have opened the file once
        open_count = len(handles)
        assert open_count == 1, open_count
        # We should only have invoked .writelines() once...
        writelines_content = handles[0].writelines_calls
        writelines_count = len(writelines_content)
        assert writelines_count == 1, writelines_count
        # ... with the updated content
        expected = get_body(file_modified)
        assert writelines_content[0] == expected, (writelines_content[0], expected)
def test_line_insert_end(tempfile_name, get_body):
    """
    Test for file.line for insertion at the end of the file (append)
    :return:
    """
    cfg_content = "everything: fantastic"
    file_content = os.linesep.join(
        ["file_roots:", "  base:", "    - /srv/salt", "    - /srv/sugar"]
    )
    # The appended line inherits the indentation of the file's last line.
    file_modified = os.linesep.join(
        [
            "file_roots:",
            "  base:",
            "    - /srv/salt",
            "    - /srv/sugar",
            "    " + cfg_content,
        ]
    )
    # Only the tempfile is reported as existing; DEFAULT defers to the mock's
    # standard return value for other paths.
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(tempfile_name, content=cfg_content, location="end", mode="insert")
        handles = atomic_open_mock.filehandles[tempfile_name]
        # We should only have opened the file once
        open_count = len(handles)
        assert open_count == 1, open_count
        # We should only have invoked .writelines() once...
        writelines_content = handles[0].writelines_calls
        writelines_count = len(writelines_content)
        assert writelines_count == 1, writelines_count
        # ... with the updated content
        expected = get_body(file_modified)
        assert writelines_content[0] == expected, (writelines_content[0], expected)
def test_line_insert_ensure_before(tempfile_name, get_body):
    """
    file.line with ``mode="ensure"`` and ``before=`` must insert the
    content immediately before the anchor line.
    """
    new_line = "/etc/init.d/someservice restart"
    original = os.linesep.join(["#!/bin/bash", "", "exit 0"])
    expected_body = os.linesep.join(["#!/bin/bash", "", new_line, "exit 0"])
    fake_isfile = MagicMock(
        side_effect=lambda path: True if path == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", fake_isfile):
        with patch("os.stat", MagicMock(return_value=DummyStat())):
            with patch("salt.utils.files.fopen", mock_open(read_data=original)):
                with patch(
                    "salt.utils.atomicfile.atomic_open", mock_open()
                ) as atomic_mock:
                    filemod.line(
                        tempfile_name, content=new_line, before="exit 0", mode="ensure"
                    )
                    handles = atomic_mock.filehandles[tempfile_name]
                    # Opened exactly once ...
                    assert len(handles) == 1, len(handles)
                    # ... a single writelines() call ...
                    writes = handles[0].writelines_calls
                    assert len(writes) == 1, len(writes)
                    # ... with the new line placed before the anchor.
                    assert writes[0] == get_body(expected_body), (
                        writes[0],
                        get_body(expected_body),
                    )
def test_line_insert_duplicate_ensure_before(tempfile_name):
    """
    file.line with ``mode="ensure"`` must be a no-op when the content is
    already present before the ``before`` anchor: the file must not be
    reopened or rewritten.
    """
    cfg_content = "/etc/init.d/someservice restart"
    # The line to ensure already sits before "exit 0", so no write is expected.
    # (The previously assigned-but-unused `file_modified` variable was removed.)
    file_content = os.linesep.join(["#!/bin/bash", "", cfg_content, "exit 0"])
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(tempfile_name, content=cfg_content, before="exit 0", mode="ensure")
        # If the file was not modified, no handles were registered for it.
        assert atomic_open_mock.filehandles.get(tempfile_name) is None
def test_line_insert_ensure_before_first_line(tempfile_name, get_body):
    """
    file.line with ``mode="ensure"`` must handle a ``before`` anchor that
    is the very first line by prepending the content to the file.
    """
    new_line = "#!/bin/bash"
    original = os.linesep.join(["/etc/init.d/someservice restart", "exit 0"])
    expected_body = os.linesep.join(
        [new_line, "/etc/init.d/someservice restart", "exit 0"]
    )
    fake_isfile = MagicMock(
        side_effect=lambda path: True if path == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", fake_isfile):
        with patch("os.stat", MagicMock(return_value=DummyStat())):
            with patch("salt.utils.files.fopen", mock_open(read_data=original)):
                with patch(
                    "salt.utils.atomicfile.atomic_open", mock_open()
                ) as atomic_mock:
                    filemod.line(
                        tempfile_name,
                        content=new_line,
                        before="/etc/init.d/someservice restart",
                        mode="ensure",
                    )
                    handles = atomic_mock.filehandles[tempfile_name]
                    # Opened exactly once ...
                    assert len(handles) == 1, len(handles)
                    # ... one writelines() invocation ...
                    writes = handles[0].writelines_calls
                    assert len(writes) == 1, len(writes)
                    # ... with the new first line in place.
                    assert writes[0] == get_body(expected_body), (
                        writes[0],
                        get_body(expected_body),
                    )
def test_line_insert_ensure_after(tempfile_name, get_body):
    """
    file.line with ``mode="ensure"`` and ``after=`` must insert the
    content immediately after the anchor line.
    """
    new_line = "exit 0"
    original = os.linesep.join(["#!/bin/bash", "/etc/init.d/someservice restart"])
    expected_body = os.linesep.join(
        ["#!/bin/bash", "/etc/init.d/someservice restart", new_line]
    )
    fake_isfile = MagicMock(
        side_effect=lambda path: True if path == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", fake_isfile):
        with patch("os.stat", MagicMock(return_value=DummyStat())):
            with patch("salt.utils.files.fopen", mock_open(read_data=original)):
                with patch(
                    "salt.utils.atomicfile.atomic_open", mock_open()
                ) as atomic_mock:
                    filemod.line(
                        tempfile_name,
                        content=new_line,
                        after="/etc/init.d/someservice restart",
                        mode="ensure",
                    )
                    handles = atomic_mock.filehandles[tempfile_name]
                    # Opened exactly once ...
                    assert len(handles) == 1, len(handles)
                    # ... one writelines() invocation ...
                    writes = handles[0].writelines_calls
                    assert len(writes) == 1, len(writes)
                    # ... with the new line appended after the anchor.
                    assert writes[0] == get_body(expected_body), (
                        writes[0],
                        get_body(expected_body),
                    )
def test_line_insert_duplicate_ensure_after(tempfile_name):
    """
    file.line with ``mode="ensure"`` must be a no-op when the content is
    already present after the ``after`` anchor: the file must not be
    reopened or rewritten.
    """
    cfg_content = "exit 0"
    # The line to ensure already follows the anchor, so no write is expected.
    # (The previously assigned-but-unused `file_modified` variable was removed.)
    file_content = os.linesep.join(
        ["#!/bin/bash", "/etc/init.d/someservice restart", cfg_content]
    )
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    with patch("os.path.isfile", isfile_mock), patch(
        "os.stat", MagicMock(return_value=DummyStat())
    ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
        "salt.utils.atomicfile.atomic_open", mock_open()
    ) as atomic_open_mock:
        filemod.line(
            tempfile_name,
            content=cfg_content,
            after="/etc/init.d/someservice restart",
            mode="ensure",
        )
        # If the file was not modified, no handles were registered for it.
        assert atomic_open_mock.filehandles.get(tempfile_name) is None
def test_line_insert_ensure_beforeafter_twolines(tempfile_name, get_body):
    """
    file.line with ``mode="ensure"`` must insert the content between two
    adjacent anchor lines, whether the anchors are given literally or as
    regular expressions.
    """
    new_line = 'EXTRA_GROUPS="dialout cdrom floppy audio video plugdev users"'
    # pylint: disable=W1401
    original = os.linesep.join(
        [
            r'NAME_REGEX="^[a-z][-a-z0-9_]*\$"',
            'SKEL_IGNORE_REGEX="dpkg-(old|new|dist|save)"',
        ]
    )
    # pylint: enable=W1401
    after, before = original.split(os.linesep)
    expected_body = os.linesep.join([after, new_line, before])
    fake_isfile = MagicMock(
        side_effect=lambda path: True if path == tempfile_name else DEFAULT
    )
    # Anchors passed literally first, then as regex patterns.
    for (_after, _before) in [(after, before), ("NAME_.*", "SKEL_.*")]:
        with patch("os.path.isfile", fake_isfile):
            with patch("os.stat", MagicMock(return_value=DummyStat())):
                with patch("salt.utils.files.fopen", mock_open(read_data=original)):
                    with patch(
                        "salt.utils.atomicfile.atomic_open", mock_open()
                    ) as atomic_mock:
                        filemod.line(
                            tempfile_name,
                            content=new_line,
                            after=_after,
                            before=_before,
                            mode="ensure",
                        )
                        handles = atomic_mock.filehandles[tempfile_name]
                        # Opened exactly once ...
                        assert len(handles) == 1, len(handles)
                        # ... one writelines() invocation ...
                        writes = handles[0].writelines_calls
                        assert len(writes) == 1, len(writes)
                        # ... with the line placed between the anchors.
                        assert writes[0] == get_body(expected_body), (
                            writes[0],
                            get_body(expected_body),
                        )
def test_line_insert_ensure_beforeafter_twolines_exists(tempfile_name):
    """
    file.line with ``mode="ensure"`` must report no changes (return False)
    and never open the file for writing when the content already sits
    between the ``after`` and ``before`` anchors.
    """
    cfg_content = 'EXTRA_GROUPS="dialout"'
    # pylint: disable=W1401
    file_content = os.linesep.join(
        [
            r'NAME_REGEX="^[a-z][-a-z0-9_]*\$"',
            'EXTRA_GROUPS="dialout"',
            'SKEL_IGNORE_REGEX="dpkg-(old|new|dist|save)"',
        ]
    )
    # pylint: enable=W1401
    # Split once and pick the two anchor lines (first and third).
    lines = file_content.split(os.linesep)
    after, before = lines[0], lines[2]
    isfile_mock = MagicMock(
        side_effect=lambda x: True if x == tempfile_name else DEFAULT
    )
    # Anchors passed literally first, then as regex patterns.
    for (_after, _before) in [(after, before), ("NAME_.*", "SKEL_.*")]:
        with patch("os.path.isfile", isfile_mock), patch(
            "os.stat", MagicMock(return_value=DummyStat())
        ), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
            "salt.utils.atomicfile.atomic_open", mock_open()
        ) as atomic_open_mock:
            # Fixed: operate on the mocked temp file instead of the literal
            # "foo", which was inconsistent with isfile_mock and with every
            # sibling test in this module.
            result = filemod.line(
                tempfile_name,
                content=cfg_content,
                after=_after,
                before=_before,
                mode="ensure",
            )
            # We should not have opened the file
            assert not atomic_open_mock.filehandles
            # No changes should have been made
            assert result is False
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_insert_ensure_beforeafter_rangelines():
    """
    file.line with ``mode="ensure"`` must refuse (CommandExecutionError)
    when more than one line lies between the ``after`` and ``before``
    anchors, since the insertion point would be ambiguous.
    """
    new_line = 'EXTRA_GROUPS="dialout cdrom floppy audio video plugdev users"'
    # pylint: disable=W1401
    original = (
        r'NAME_REGEX="^[a-z][-a-z0-9_]*\$"{}SETGID_HOME=no{}ADD_EXTRA_GROUPS=1{}'
        'SKEL_IGNORE_REGEX="dpkg-(old|new|dist|save)"'.format(
            os.linesep, os.linesep, os.linesep
        )
    )
    # pylint: enable=W1401
    split_lines = original.split(os.linesep)
    after, before = split_lines[0], split_lines[-1]
    # Anchors passed literally first, then as regex patterns.
    for (_after, _before) in [(after, before), ("NAME_.*", "SKEL_.*")]:
        with patch("salt.utils.files.fopen", mock_open(read_data=original)):
            with patch("salt.utils.atomicfile.atomic_open", mock_open()):
                with pytest.raises(CommandExecutionError) as exc_info:
                    filemod.line(
                        "foo",
                        content=new_line,
                        after=_after,
                        before=_before,
                        mode="ensure",
                    )
                # The error must spell out the ambiguity explicitly.
                assert (
                    'Found more than one line between boundaries "before" and "after"'
                    in str(exc_info.value)
                )
def test_line_delete(tempfile_name, get_body):
    """
    file.line with ``mode="delete"`` must remove the matching line,
    whether the target is given literally, as a glob-like pattern, or as
    a regular expression.
    """
    original = os.linesep.join(
        [
            "file_roots:",
            "  base:",
            "    - /srv/salt",
            "    - /srv/pepper",
            "    - /srv/sugar",
        ]
    )
    expected_body = os.linesep.join(
        ["file_roots:", "  base:", "    - /srv/salt", "    - /srv/sugar"]
    )
    fake_isfile = MagicMock(
        side_effect=lambda path: True if path == tempfile_name else DEFAULT
    )
    # Literal, glob-like and regex forms of the doomed line.
    for pattern in ["/srv/pepper", "/srv/pepp*", "/srv/p.*", "/sr.*pe.*"]:
        with patch("os.path.isfile", fake_isfile):
            with patch("os.stat", MagicMock(return_value=DummyStat())):
                with patch("salt.utils.files.fopen", mock_open(read_data=original)):
                    with patch(
                        "salt.utils.atomicfile.atomic_open", mock_open()
                    ) as atomic_mock:
                        filemod.line(tempfile_name, content=pattern, mode="delete")
                        handles = atomic_mock.filehandles[tempfile_name]
                        # Opened exactly once ...
                        assert len(handles) == 1, len(handles)
                        # ... one writelines() invocation ...
                        writes = handles[0].writelines_calls
                        assert len(writes) == 1, len(writes)
                        # ... with the matched line gone.
                        assert writes[0] == get_body(expected_body), (
                            writes[0],
                            get_body(expected_body),
                        )
def test_line_replace(tempfile_name, get_body):
    """
    file.line with ``mode="replace"`` must swap the matched line for the
    given content, whether the match is given literally, as a glob-like
    pattern, or as a regular expression.
    """
    original = os.linesep.join(
        [
            "file_roots:",
            "  base:",
            "    - /srv/salt",
            "    - /srv/pepper",
            "    - /srv/sugar",
        ]
    )
    expected_body = os.linesep.join(
        [
            "file_roots:",
            "  base:",
            "    - /srv/salt",
            "    - /srv/natrium-chloride",
            "    - /srv/sugar",
        ]
    )
    fake_isfile = MagicMock(
        side_effect=lambda path: True if path == tempfile_name else DEFAULT
    )
    # Literal, glob-like and regex forms of the line to be replaced.
    for pattern in ["/srv/pepper", "/srv/pepp*", "/srv/p.*", "/sr.*pe.*"]:
        with patch("os.path.isfile", fake_isfile):
            with patch("os.stat", MagicMock(return_value=DummyStat())):
                with patch("salt.utils.files.fopen", mock_open(read_data=original)):
                    with patch(
                        "salt.utils.atomicfile.atomic_open", mock_open()
                    ) as atomic_mock:
                        filemod.line(
                            tempfile_name,
                            content="- /srv/natrium-chloride",
                            match=pattern,
                            mode="replace",
                        )
                        handles = atomic_mock.filehandles[tempfile_name]
                        # Opened exactly once ...
                        assert len(handles) == 1, len(handles)
                        # ... one writelines() invocation ...
                        writes = handles[0].writelines_calls
                        assert len(writes) == 1, len(writes)
                        # ... with the line replaced in place.
                        assert writes[0] == get_body(expected_body), (
                            writes[0],
                            get_body(expected_body),
                        )
| 23,875 | 213 | 1,192 |
ed835385395a5601164493a32dc19585e695d9d6 | 4,161 | py | Python | delta/utils/loss/loss_utils.py | awesome-archive/delta | 841d853cf0bdb479260be112432813dcb705f859 | [
"Apache-2.0"
] | 1 | 2019-09-27T10:21:00.000Z | 2019-09-27T10:21:00.000Z | delta/utils/loss/loss_utils.py | awesome-archive/delta | 841d853cf0bdb479260be112432813dcb705f859 | [
"Apache-2.0"
] | null | null | null | delta/utils/loss/loss_utils.py | awesome-archive/delta | 841d853cf0bdb479260be112432813dcb705f859 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' loss implementation function '''
import tensorflow as tf
from delta import utils
#pylint: disable=too-many-arguments
def cross_entropy(logits,
labels,
input_length=None,
label_length=None,
smoothing=0.0,
reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
'''
cross entropy function for classfication and seq classfication
:param, label_length, for seq task, this for target seq length, e.g. a b c </s>, 4
'''
del input_length
onehot_labels = tf.cond(
pred=tf.equal(tf.rank(logits) - tf.rank(labels), 1),
true_fn=lambda: tf.one_hot(labels, tf.shape(logits)[-1], dtype=tf.int32),
false_fn=lambda: labels)
if label_length is not None:
weights = utils.len_to_mask(label_length)
else:
weights = 1.0
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels,
logits=logits,
weights=weights,
label_smoothing=smoothing,
reduction=reduction)
return loss
def ctc_lambda_loss(logits, labels, input_length, label_length, blank_index=0):
'''
ctc loss function
psram: logits, (B, T, D)
psram: input_length, (B, 1), input length of encoder
psram: labels, (B, T)
psram: label_length, (B, 1), label length for convert dense label to sparse
returns: loss, scalar
'''
ilen = tf.cond(
pred=tf.equal(tf.rank(input_length), 1),
true_fn=lambda: input_length,
false_fn=lambda: tf.squeeze(input_length),
)
olen = tf.cond(
pred=tf.equal(tf.rank(label_length), 1),
true_fn=lambda: label_length,
false_fn=lambda: tf.squeeze(label_length))
deps = [
tf.assert_rank(labels, 2),
tf.assert_rank(logits, 3),
tf.assert_rank(ilen, 1), # input_length
tf.assert_rank(olen, 1), # output_length
]
with tf.control_dependencies(deps):
# (B, 1)
# blank index is consistent with Espnet, zero
batch_loss = tf.nn.ctc_loss_v2(
labels,
logits,
ilen,
olen,
logits_time_major=False,
blank_index=blank_index)
batch_loss.set_shape([None])
return batch_loss
def crf_log_likelihood(tags_scores, labels, input_length, transitions):
'''
:param tags_scores: [batch_size, max_seq_len, num_tags]
:param labels: [batch_size, max_seq_len]
:param input_length: [batch_size,]
:param transitions: [num_tags, num_tags]
:return: loss, transition_params
'''
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
inputs=tags_scores,
tag_indices=labels,
sequence_lengths=input_length,
transition_params=transitions)
loss = tf.reduce_mean(-log_likelihood)
return loss, transition_params
def mask_sequence_loss(logits,
labels,
input_length,
label_length,
smoothing=0.0):
'''
softmax cross entropy loss for sequence to sequence
:param logits: [batch_size, max_seq_len, vocab_size]
:param labels: [batch_size, max_seq_len]
:param input_length: [batch_size]
:param label_length: [batch_size]
:return: loss, scalar
'''
del smoothing
del input_length
if label_length is not None:
weights = tf.cast(utils.len_to_mask(label_length), tf.float32)
else:
weights = tf.ones_like(labels)
loss = tf.contrib.seq2seq.sequence_loss(logits, labels, weights)
return loss
| 30.822222 | 84 | 0.666667 | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' loss implementation function '''
import tensorflow as tf
from delta import utils
#pylint: disable=too-many-arguments
def cross_entropy(logits,
labels,
input_length=None,
label_length=None,
smoothing=0.0,
reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
'''
cross entropy function for classfication and seq classfication
:param, label_length, for seq task, this for target seq length, e.g. a b c </s>, 4
'''
del input_length
onehot_labels = tf.cond(
pred=tf.equal(tf.rank(logits) - tf.rank(labels), 1),
true_fn=lambda: tf.one_hot(labels, tf.shape(logits)[-1], dtype=tf.int32),
false_fn=lambda: labels)
if label_length is not None:
weights = utils.len_to_mask(label_length)
else:
weights = 1.0
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels,
logits=logits,
weights=weights,
label_smoothing=smoothing,
reduction=reduction)
return loss
def ctc_lambda_loss(logits, labels, input_length, label_length, blank_index=0):
'''
ctc loss function
psram: logits, (B, T, D)
psram: input_length, (B, 1), input length of encoder
psram: labels, (B, T)
psram: label_length, (B, 1), label length for convert dense label to sparse
returns: loss, scalar
'''
ilen = tf.cond(
pred=tf.equal(tf.rank(input_length), 1),
true_fn=lambda: input_length,
false_fn=lambda: tf.squeeze(input_length),
)
olen = tf.cond(
pred=tf.equal(tf.rank(label_length), 1),
true_fn=lambda: label_length,
false_fn=lambda: tf.squeeze(label_length))
deps = [
tf.assert_rank(labels, 2),
tf.assert_rank(logits, 3),
tf.assert_rank(ilen, 1), # input_length
tf.assert_rank(olen, 1), # output_length
]
with tf.control_dependencies(deps):
# (B, 1)
# blank index is consistent with Espnet, zero
batch_loss = tf.nn.ctc_loss_v2(
labels,
logits,
ilen,
olen,
logits_time_major=False,
blank_index=blank_index)
batch_loss.set_shape([None])
return batch_loss
def crf_log_likelihood(tags_scores, labels, input_length, transitions):
'''
:param tags_scores: [batch_size, max_seq_len, num_tags]
:param labels: [batch_size, max_seq_len]
:param input_length: [batch_size,]
:param transitions: [num_tags, num_tags]
:return: loss, transition_params
'''
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
inputs=tags_scores,
tag_indices=labels,
sequence_lengths=input_length,
transition_params=transitions)
loss = tf.reduce_mean(-log_likelihood)
return loss, transition_params
def mask_sequence_loss(logits,
labels,
input_length,
label_length,
smoothing=0.0):
'''
softmax cross entropy loss for sequence to sequence
:param logits: [batch_size, max_seq_len, vocab_size]
:param labels: [batch_size, max_seq_len]
:param input_length: [batch_size]
:param label_length: [batch_size]
:return: loss, scalar
'''
del smoothing
del input_length
if label_length is not None:
weights = tf.cast(utils.len_to_mask(label_length), tf.float32)
else:
weights = tf.ones_like(labels)
loss = tf.contrib.seq2seq.sequence_loss(logits, labels, weights)
return loss
| 0 | 0 | 0 |
83ba14a36750a86a9eb2314b57f509887f4e4346 | 2,720 | py | Python | IO/Legacy/Testing/Python/TestExtentWriting.py | forestGzh/VTK | bc98327275bd5cfa95c5825f80a2755a458b6da8 | [
"BSD-3-Clause"
] | 1,755 | 2015-01-03T06:55:00.000Z | 2022-03-29T05:23:26.000Z | IO/Legacy/Testing/Python/TestExtentWriting.py | forestGzh/VTK | bc98327275bd5cfa95c5825f80a2755a458b6da8 | [
"BSD-3-Clause"
] | 29 | 2015-04-23T20:58:30.000Z | 2022-03-02T16:16:42.000Z | IO/Legacy/Testing/Python/TestExtentWriting.py | forestGzh/VTK | bc98327275bd5cfa95c5825f80a2755a458b6da8 | [
"BSD-3-Clause"
] | 1,044 | 2015-01-05T22:48:27.000Z | 2022-03-31T02:38:26.000Z | #!/usr/bin/env python
import os
import vtk
#
# If the current directory is writable, then test the witers
#
try:
channel = open("test.tmp", "w")
channel.close()
os.remove("test.tmp")
s = vtk.vtkRTAnalyticSource()
s.SetWholeExtent(5, 10, 5, 10, 5, 10)
s.Update()
d = s.GetOutput()
w = vtk.vtkStructuredPointsWriter()
w.SetInputData(d)
w.SetFileName("test-dim.vtk")
w.Write()
r = vtk.vtkStructuredPointsReader()
r.SetFileName("test-dim.vtk")
r.Update()
os.remove("test-dim.vtk")
assert(r.GetOutput().GetExtent() == (0,5,0,5,0,5))
assert(r.GetOutput().GetOrigin() == (5, 5, 5))
w.SetInputData(d)
w.SetFileName("test-dim.vtk")
w.SetWriteExtent(True)
w.Write()
r.Modified()
r.Update()
os.remove("test-dim.vtk")
assert(r.GetOutput().GetExtent() == (5,10,5,10,5,10))
assert(r.GetOutput().GetOrigin() == (0, 0, 0))
rg = vtk.vtkRectilinearGrid()
extents = (1, 3, 1, 3, 1, 3)
rg.SetExtent(extents)
pts = vtk.vtkFloatArray()
pts.InsertNextTuple1(0)
pts.InsertNextTuple1(1)
pts.InsertNextTuple1(2)
rg.SetXCoordinates(pts)
rg.SetYCoordinates(pts)
rg.SetZCoordinates(pts)
w = vtk.vtkRectilinearGridWriter()
w.SetInputData(rg)
w.SetFileName("test-dim.vtk")
w.Write()
r = vtk.vtkRectilinearGridReader()
r.SetFileName("test-dim.vtk")
r.Update()
os.remove("test-dim.vtk")
assert(r.GetOutput().GetExtent() == (0,2,0,2,0,2))
w.SetInputData(rg)
w.SetFileName("test-dim.vtk")
w.SetWriteExtent(True)
w.Write()
r.Modified()
r.Update()
assert(r.GetOutput().GetExtent() == (1,3,1,3,1,3))
sg = vtk.vtkStructuredGrid()
extents = (1, 3, 1, 3, 1, 3)
sg.SetExtent(extents)
ptsa = vtk.vtkFloatArray()
ptsa.SetNumberOfComponents(3)
ptsa.SetNumberOfTuples(27)
# We don't really care about point coordinates being correct
for i in range(27):
ptsa.InsertNextTuple3(0, 0, 0)
pts = vtk.vtkPoints()
pts.SetData(ptsa)
sg.SetPoints(pts)
w = vtk.vtkStructuredGridWriter()
w.SetInputData(sg)
w.SetFileName("test-dim.vtk")
w.Write()
# comment out reader part of this test as it has been failing
# for over 6 months and no one is willing to fix it
#
# r = vtk.vtkStructuredGridReader()
# r.SetFileName("test-dim.vtk")
# r.Update()
os.remove("test-dim.vtk")
# assert(r.GetOutput().GetExtent() == (0,2,0,2,0,2))
w.SetInputData(sg)
w.SetFileName("test-dim.vtk")
w.SetWriteExtent(True)
w.Write()
# r.Modified()
# r.Update()
# assert(r.GetOutput().GetExtent() == (1,3,1,3,1,3))
except IOError:
pass
| 22.113821 | 65 | 0.615074 | #!/usr/bin/env python
import os
import vtk
#
# If the current directory is writable, then test the witers
#
try:
channel = open("test.tmp", "w")
channel.close()
os.remove("test.tmp")
s = vtk.vtkRTAnalyticSource()
s.SetWholeExtent(5, 10, 5, 10, 5, 10)
s.Update()
d = s.GetOutput()
w = vtk.vtkStructuredPointsWriter()
w.SetInputData(d)
w.SetFileName("test-dim.vtk")
w.Write()
r = vtk.vtkStructuredPointsReader()
r.SetFileName("test-dim.vtk")
r.Update()
os.remove("test-dim.vtk")
assert(r.GetOutput().GetExtent() == (0,5,0,5,0,5))
assert(r.GetOutput().GetOrigin() == (5, 5, 5))
w.SetInputData(d)
w.SetFileName("test-dim.vtk")
w.SetWriteExtent(True)
w.Write()
r.Modified()
r.Update()
os.remove("test-dim.vtk")
assert(r.GetOutput().GetExtent() == (5,10,5,10,5,10))
assert(r.GetOutput().GetOrigin() == (0, 0, 0))
rg = vtk.vtkRectilinearGrid()
extents = (1, 3, 1, 3, 1, 3)
rg.SetExtent(extents)
pts = vtk.vtkFloatArray()
pts.InsertNextTuple1(0)
pts.InsertNextTuple1(1)
pts.InsertNextTuple1(2)
rg.SetXCoordinates(pts)
rg.SetYCoordinates(pts)
rg.SetZCoordinates(pts)
w = vtk.vtkRectilinearGridWriter()
w.SetInputData(rg)
w.SetFileName("test-dim.vtk")
w.Write()
r = vtk.vtkRectilinearGridReader()
r.SetFileName("test-dim.vtk")
r.Update()
os.remove("test-dim.vtk")
assert(r.GetOutput().GetExtent() == (0,2,0,2,0,2))
w.SetInputData(rg)
w.SetFileName("test-dim.vtk")
w.SetWriteExtent(True)
w.Write()
r.Modified()
r.Update()
assert(r.GetOutput().GetExtent() == (1,3,1,3,1,3))
sg = vtk.vtkStructuredGrid()
extents = (1, 3, 1, 3, 1, 3)
sg.SetExtent(extents)
ptsa = vtk.vtkFloatArray()
ptsa.SetNumberOfComponents(3)
ptsa.SetNumberOfTuples(27)
# We don't really care about point coordinates being correct
for i in range(27):
ptsa.InsertNextTuple3(0, 0, 0)
pts = vtk.vtkPoints()
pts.SetData(ptsa)
sg.SetPoints(pts)
w = vtk.vtkStructuredGridWriter()
w.SetInputData(sg)
w.SetFileName("test-dim.vtk")
w.Write()
# comment out reader part of this test as it has been failing
# for over 6 months and no one is willing to fix it
#
# r = vtk.vtkStructuredGridReader()
# r.SetFileName("test-dim.vtk")
# r.Update()
os.remove("test-dim.vtk")
# assert(r.GetOutput().GetExtent() == (0,2,0,2,0,2))
w.SetInputData(sg)
w.SetFileName("test-dim.vtk")
w.SetWriteExtent(True)
w.Write()
# r.Modified()
# r.Update()
# assert(r.GetOutput().GetExtent() == (1,3,1,3,1,3))
except IOError:
pass
| 0 | 0 | 0 |
e3b48257343ad20d6a5aa171258d96e849b9119d | 293 | py | Python | examples/sns_basic.py | sbalnojan/tropo-mods | 63ef70b84ff6d3fb2bbaeea94193e06e1fc64f63 | [
"Apache-2.0"
] | null | null | null | examples/sns_basic.py | sbalnojan/tropo-mods | 63ef70b84ff6d3fb2bbaeea94193e06e1fc64f63 | [
"Apache-2.0"
] | null | null | null | examples/sns_basic.py | sbalnojan/tropo-mods | 63ef70b84ff6d3fb2bbaeea94193e06e1fc64f63 | [
"Apache-2.0"
] | null | null | null | import tropo_mods.auto_sns as auto_sns
from troposphere import Template
t = Template()
my_instance = auto_sns.AutoSNS(
t, topic_name="my_new_topic", email="test@me.com"
)
my_instance.print_to_yaml()
# should also produce: ARN as output (?)
# should also produce: Subscription to E-Mail
| 22.538462 | 53 | 0.761092 | import tropo_mods.auto_sns as auto_sns
from troposphere import Template
t = Template()
my_instance = auto_sns.AutoSNS(
t, topic_name="my_new_topic", email="test@me.com"
)
my_instance.print_to_yaml()
# should also produce: ARN as output (?)
# should also produce: Subscription to E-Mail
| 0 | 0 | 0 |
10db872e231b76f45d443050215a531d2b410c03 | 1,638 | py | Python | rul_prediction/cnn.py | inovex/RCIS2021-degradation-bearing-vessels | 27bd1a2e3f08c5b42011596aa98e5ac627a416d6 | [
"MIT"
] | 2 | 2021-06-21T11:40:38.000Z | 2021-12-29T02:40:30.000Z | rul_prediction/cnn.py | chenzhengkun7/RCIS2021-degradation-estimation-bearing-vessels | 27bd1a2e3f08c5b42011596aa98e5ac627a416d6 | [
"MIT"
] | 2 | 2021-04-08T11:30:28.000Z | 2021-04-12T06:41:31.000Z | rul_prediction/cnn.py | chenzhengkun7/RCIS2021-degradation-estimation-bearing-vessels | 27bd1a2e3f08c5b42011596aa98e5ac627a416d6 | [
"MIT"
] | 2 | 2021-06-21T11:40:43.000Z | 2021-12-29T02:36:51.000Z | import pandas as pd
import numpy as np
from tensorflow.keras import layers, Input, Model
if __name__ == '__main__':
mod = build_multiscale_cnn((129, 21, 2))
"""
REN ET AL. 2018 FOR FEATURE EXTRACTION e.g. FREQUENCY IMAGE Generation based non FFT
ZHU ET AL. 2019 for multisclae CNN applied to PRONOSTIA
DING ET AL. 2017 for multiscale CNN architecture and WT based Image
CHEN ET AL. 2020 Another possible Frequency Image?
"""
| 37.227273 | 109 | 0.710012 | import pandas as pd
import numpy as np
from tensorflow.keras import layers, Input, Model
def build_multiscale_cnn(input_shape: tuple) -> Model:
input_img = Input(shape=input_shape)
conv_1 = layers.Conv2D(10, (6, 6))(input_img)
max_pool_1 = layers.MaxPool2D((2, 2))(conv_1)
max_pool_1 = layers.BatchNormalization()(max_pool_1)
conv_2 = layers.Conv2D(10, (6, 6))(max_pool_1)
max_pool_2 = layers.MaxPool2D((2, 2))(conv_2)
max_pool_2 = layers.BatchNormalization()(max_pool_2)
cnn = layers.Flatten()(max_pool_2)
ffnn = layers.Dense(512, activation='relu')(cnn)
ffnn = layers.Dropout(0.5)(ffnn)
ffnn = layers.Dense(512, activation='relu')(ffnn)
ffnn = layers.Dropout(0.5)(ffnn)
ffnn = layers.Dense(1)(ffnn)
return Model(input_img, ffnn)
def fit_cnn(X: np.array, y: pd.Series, epochs: int = 20, input_shape=(129, 21, 2),
validation_data=None):
cnn_model = build_multiscale_cnn(input_shape)
cnn_model.compile(optimizer='adam', loss='mean_squared_error')
if validation_data is None:
training_history = cnn_model.fit(x=X, y=y, epochs=epochs, verbose=0)
else:
training_history = cnn_model.fit(x=X, y=y, epochs=epochs, verbose=0, validation_data=validation_data)
return cnn_model, training_history
if __name__ == '__main__':
mod = build_multiscale_cnn((129, 21, 2))
"""
REN ET AL. 2018 FOR FEATURE EXTRACTION e.g. FREQUENCY IMAGE Generation based non FFT
ZHU ET AL. 2019 for multisclae CNN applied to PRONOSTIA
DING ET AL. 2017 for multiscale CNN architecture and WT based Image
CHEN ET AL. 2020 Another possible Frequency Image?
"""
| 1,159 | 0 | 46 |
76cc5c691e45f74a9fa2f2a6908c30288c9db878 | 1,726 | py | Python | FakeUAdb.py | aoii103/FakeUA | ae52d9e681b17daaa3d8f4d94a23dfe503991771 | [
"MIT"
] | 32 | 2018-11-06T09:42:58.000Z | 2020-10-29T09:05:45.000Z | FakeUAdb.py | s045pd/FakeUA | ae52d9e681b17daaa3d8f4d94a23dfe503991771 | [
"MIT"
] | null | null | null | FakeUAdb.py | s045pd/FakeUA | ae52d9e681b17daaa3d8f4d94a23dfe503991771 | [
"MIT"
] | 18 | 2019-04-09T02:59:16.000Z | 2020-08-11T15:53:26.000Z | import hashlib
from peewee import *
db = SqliteDatabase('useragents.db') # 初始化数据库
db.connect() # 连接数据库
db.create_tables([UAS]) # 初始化创建不存在的库
def UserAgent(searchwords, methods='and'):
"""
{
"key":[
"words1",
"words2"
]
}
"""
count = 0
resagent = ''
if methods not in ['and', 'or']:
return ''
methods = '&' if not methods == 'or' else '|'
whereQuery = f' {methods} '.join([
f'(UAS.{key} << {str(item)})' for key, item in searchwords.items()
])
try:
count = UAS.select().where(eval(whereQuery)).order_by(fn.Random()).count()
resagent = UAS.select().where(eval(whereQuery)).order_by(
fn.Random()).limit(1)[0].useragent
except Exception as e:
pass
return count, resagent
if __name__ == '__main__':
from pprint import pprint
print(UserAgent({
"software": [
'Android Browser 4.0'
]
}))
# pprint(UserAgentGroups('engine', 5))
| 27.396825 | 178 | 0.585747 | import hashlib
from peewee import *
db = SqliteDatabase('useragents.db') # 初始化数据库
class UAS(Model):
uid = AutoField(primary_key=True, null=True) # 自增ID
useragent = TextField(unique=True) # useragent
software = CharField(null=True) # 软件类型
engine = CharField(null=True) # 引擎
types = CharField(null=True) # 硬件类型
popularity = CharField(null=True) # 通用性
class Meta:
database = db # 指定数据库
db.connect() # 连接数据库
db.create_tables([UAS]) # 初始化创建不存在的库
def UserAgent(searchwords, methods='and'):
"""
{
"key":[
"words1",
"words2"
]
}
"""
count = 0
resagent = ''
if methods not in ['and', 'or']:
return ''
methods = '&' if not methods == 'or' else '|'
whereQuery = f' {methods} '.join([
f'(UAS.{key} << {str(item)})' for key, item in searchwords.items()
])
try:
count = UAS.select().where(eval(whereQuery)).order_by(fn.Random()).count()
resagent = UAS.select().where(eval(whereQuery)).order_by(
fn.Random()).limit(1)[0].useragent
except Exception as e:
pass
return count, resagent
def UserAgentGroups(colname, limit=10):
if colname in ['software', 'engine', 'types', 'popularity']: # 判定查询字段是否合法
target = eval(f'UAS.{colname}') # 取得目标字段类
return {eval(f'item.{colname}'): item.nums for item in UAS.select(target, fn.COUNT(target).alias('nums')).group_by(target).order_by(fn.COUNT(target).desc()).limit(limit)}
if __name__ == '__main__':
from pprint import pprint
print(UserAgent({
"software": [
'Android Browser 4.0'
]
}))
# pprint(UserAgentGroups('engine', 5))
| 359 | 356 | 46 |
9a95379a056d4588b1928abd578dfa7bdbdfc7bd | 952 | py | Python | src/sensors/mqtt.py | jussike/kuappi | 985040dc813c023dc1577f31ca7f6744d42c91de | [
"MIT"
] | null | null | null | src/sensors/mqtt.py | jussike/kuappi | 985040dc813c023dc1577f31ca7f6744d42c91de | [
"MIT"
] | null | null | null | src/sensors/mqtt.py | jussike/kuappi | 985040dc813c023dc1577f31ca7f6744d42c91de | [
"MIT"
] | null | null | null | import json
import logging
import paho.mqtt.client as mqtt
from abstract import AbstractSensor
from config import CONFIG
| 27.2 | 57 | 0.652311 | import json
import logging
import paho.mqtt.client as mqtt
from abstract import AbstractSensor
from config import CONFIG
class MqttSensor(AbstractSensor):
def __init__(self):
self.client_id = CONFIG.get('mqtt_client')
self.data = None
self.client = mqtt.Client(
CONFIG.get('mqtt_client_name')
)
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.connect("localhost", 1883, 60)
self.client.loop_start()
def get_data(self):
return self.data
def on_connect(self, client, userdata, flags, rc):
logging.info("Connected with result code %d", rc)
client.subscribe(self.client_id)
def on_message(self, client, userdata, msg):
data = json.loads(msg.payload.decode('utf-8'))
self.data = data
def cleanup(self):
self.client.loop_stop()
self.client.disconnect()
| 661 | 12 | 157 |
9e70d61c82870a3006a17805a98c695863eb7fb2 | 190 | py | Python | test_settings.py | canarduck/django-picklefield | 2c188c4815c868fce2ed38e25bc9d8d994b3d5ae | [
"MIT"
] | null | null | null | test_settings.py | canarduck/django-picklefield | 2c188c4815c868fce2ed38e25bc9d8d994b3d5ae | [
"MIT"
] | null | null | null | test_settings.py | canarduck/django-picklefield | 2c188c4815c868fce2ed38e25bc9d8d994b3d5ae | [
"MIT"
] | null | null | null | DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
INSTALLED_APPS = [
'picklefield',
]
SECRET_KEY = 'local'
SILENCED_SYSTEM_CHECKS = ['1_7.W001']
| 13.571429 | 47 | 0.6 | DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
INSTALLED_APPS = [
'picklefield',
]
SECRET_KEY = 'local'
SILENCED_SYSTEM_CHECKS = ['1_7.W001']
| 0 | 0 | 0 |
0a18960273edd755f150a40fc800e3fa2e4d2ddd | 1,013 | py | Python | jiraniapp/admin.py | Steve-design/Jirani | e386b1ede05f6c2067af2621c21ce802ec72ae73 | [
"MIT"
] | null | null | null | jiraniapp/admin.py | Steve-design/Jirani | e386b1ede05f6c2067af2621c21ce802ec72ae73 | [
"MIT"
] | 8 | 2020-02-12T03:21:51.000Z | 2022-03-12T00:07:01.000Z | jiraniapp/admin.py | Steve-design/Jirani | e386b1ede05f6c2067af2621c21ce802ec72ae73 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.contrib import admin
from django.contrib import admin
from jiraniapp.models import *
admin.site.register(Location)
admin.site.register(tags)
admin.site.register(Image, ImageAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Review, ReviewAdmin)
admin.site.register(Neighbourhood, NeighbourhoodAdmin)
| 29.794118 | 114 | 0.760118 | from __future__ import unicode_literals
from django.contrib import admin
from django.contrib import admin
from jiraniapp.models import *
class ImageAdmin(admin.ModelAdmin):
filter_horizontal = ('tags',)
class ProjectAdmin(admin.ModelAdmin):
list_display = ('title',)
class ProfileAdmin(admin.ModelAdmin):
list_display = ('user',)
class ReviewAdmin(admin.ModelAdmin):
model = Review
list_display = ('project', 'usability_rating', 'content_rating', 'design_rating', 'user', 'comment', 'image',)
list_filter = ['user',]
search_fields = ['comment',]
class NeighbourhoodAdmin(admin.ModelAdmin):
model = Neighbourhood
list_display = ( 'neighbourhood_name', 'neighbourhood_location', 'population' )
admin.site.register(Location)
admin.site.register(tags)
admin.site.register(Image, ImageAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Review, ReviewAdmin)
admin.site.register(Neighbourhood, NeighbourhoodAdmin)
| 0 | 481 | 115 |
6ca84774140ca7829672a7d1d772a57adf023497 | 1,605 | py | Python | exercises/05_Listas/exe_10.py | MariaTrindade/CursoPython | 2c60dd670747db08011d9dd33e3bbfd5795b06e8 | [
"Apache-2.0"
] | 1 | 2021-05-11T18:30:17.000Z | 2021-05-11T18:30:17.000Z | exercises/05_Listas/exe_10.py | MariaTrindade/CursoPython | 2c60dd670747db08011d9dd33e3bbfd5795b06e8 | [
"Apache-2.0"
] | null | null | null | exercises/05_Listas/exe_10.py | MariaTrindade/CursoPython | 2c60dd670747db08011d9dd33e3bbfd5795b06e8 | [
"Apache-2.0"
] | null | null | null | """
Em uma competição de salto em distância cada atleta tem direito a cinco saltos. O resultado do atleta será
determinado pela média dos cinco saltos. Você deve fazer um programa que receba o nome e as cinco distâncias
alcançadas pelo atleta em seus saltos e depois informe o nome, os saltos e a média dos saltos. O programa
deve ser encerrado quando não for informado o nome do atleta. A saída do programa deve ser conforme o exemplo abaixo:
Atleta: Rodrigo Curvêllo
Primeiro Salto: 6.5 m
Segundo Salto: 6.1 m
Terceiro Salto: 6.2 m
Quarto Salto: 5.4 m
Quinto Salto: 5.3 m
Resultado final:
Atleta: Rodrigo Curvêllo
Saltos: 6.5 - 6.1 - 6.2 - 5.4 - 5.3
Média dos saltos: 5.9
"""
saltos = []
registro = list()
while True:
nome = str(input('Nome: ')).title()
for s in range(1, 3):
saltos.append(float(input(f'{s}º salto: ')))
registro.append([nome, saltos[:], sum(saltos) / len(saltos)])
saltos.clear()
while True:
resposta = str(input('\nInserir um novo atleta? [S | N]: ')).upper().strip()
if resposta not in 'NS' or resposta == '':
print('Resposta inválida!')
elif resposta == 'N':
break
else:
print()
break
if resposta == 'N':
break
# --------------------- imprimindo -------------------------
# registro [ [nome, [5saltos], média] , [nome, [5saltos], média] ]
#for c, a in enumerate(registro):
# print(f'{c + 1} >>>> {a}')
linha = '-' * 30
print()
for atleta in registro:
print(f'Nome: {atleta[0]:>5}\nSaltos: {atleta[1]}\nMédia: {atleta[2]:.2f}\n{linha}\n')
| 29.181818 | 117 | 0.612461 | """
Em uma competição de salto em distância cada atleta tem direito a cinco saltos. O resultado do atleta será
determinado pela média dos cinco saltos. Você deve fazer um programa que receba o nome e as cinco distâncias
alcançadas pelo atleta em seus saltos e depois informe o nome, os saltos e a média dos saltos. O programa
deve ser encerrado quando não for informado o nome do atleta. A saída do programa deve ser conforme o exemplo abaixo:
Atleta: Rodrigo Curvêllo
Primeiro Salto: 6.5 m
Segundo Salto: 6.1 m
Terceiro Salto: 6.2 m
Quarto Salto: 5.4 m
Quinto Salto: 5.3 m
Resultado final:
Atleta: Rodrigo Curvêllo
Saltos: 6.5 - 6.1 - 6.2 - 5.4 - 5.3
Média dos saltos: 5.9
"""
saltos = []
registro = list()
while True:
nome = str(input('Nome: ')).title()
for s in range(1, 3):
saltos.append(float(input(f'{s}º salto: ')))
registro.append([nome, saltos[:], sum(saltos) / len(saltos)])
saltos.clear()
while True:
resposta = str(input('\nInserir um novo atleta? [S | N]: ')).upper().strip()
if resposta not in 'NS' or resposta == '':
print('Resposta inválida!')
elif resposta == 'N':
break
else:
print()
break
if resposta == 'N':
break
# --------------------- imprimindo -------------------------
# registro [ [nome, [5saltos], média] , [nome, [5saltos], média] ]
#for c, a in enumerate(registro):
# print(f'{c + 1} >>>> {a}')
linha = '-' * 30
print()
for atleta in registro:
print(f'Nome: {atleta[0]:>5}\nSaltos: {atleta[1]}\nMédia: {atleta[2]:.2f}\n{linha}\n')
| 0 | 0 | 0 |
898b93bc72cd79221ca90d1e6f7205455077f2c2 | 1,888 | py | Python | site/iot_project/house_control/views.py | ProjetoSD-2022/Projeto-02 | 5bfd70310f748ecba0eb5a2c754020306bec6355 | [
"MIT"
] | null | null | null | site/iot_project/house_control/views.py | ProjetoSD-2022/Projeto-02 | 5bfd70310f748ecba0eb5a2c754020306bec6355 | [
"MIT"
] | null | null | null | site/iot_project/house_control/views.py | ProjetoSD-2022/Projeto-02 | 5bfd70310f748ecba0eb5a2c754020306bec6355 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import JsonResponse
import serial
# Create your views here.
# def acende_led(request):
# global init
# if request.method == 'POST':
# try:
# ledCommand = request.POST.get('ledCommand')
# if ledCommand != 'H' or 'l':
# if not init:
# ledCommand = ledCommand.upper()
# init = True
# print(ledCommand)
# else:
# ledCommand = ledCommand.lower()
# init = False
# arduino = serial.Serial('/dev/ttyACM0', 9600, timeout=1)
# command = '{}'.format(ledCommand).encode()
# arduino.write(command)
# resposta = 'Led Aceso!'
# except Exception as e:
# resposta = f'Ocorreu o erro, {e}'
# return JsonResponse({'resposta':resposta}) | 33.122807 | 95 | 0.520127 | from django.shortcuts import render
from django.http import JsonResponse
import serial
# Create your views here.
def index(request):
#Essa funçao juntará as informaçoes iniciais e renderizará de acordo com o que for passado
titulo = "Bem vindo ao sistema de controle inteligente GABIN"
return render(request, 'html/index.html', context={
'titulo':titulo,
})
def acende_led(request):
if request.method == 'POST':
try:
port = request.POST.get('port')
# print('porta:', port)
ledCommand = request.POST.get('ledCommand')
# print(ledCommand)
arduino = serial.Serial(port, 9600, timeout=1)
command = '{}'.format(ledCommand).encode()
arduino.write(command)
arduino.close()
resposta = 'Led Aceso!'
except Exception as e:
# print('erro', e)
resposta = f'Ocorreu o erro, {e}'
return JsonResponse({'resposta':resposta})
# def acende_led(request):
# global init
# if request.method == 'POST':
# try:
# ledCommand = request.POST.get('ledCommand')
# if ledCommand != 'H' or 'l':
# if not init:
# ledCommand = ledCommand.upper()
# init = True
# print(ledCommand)
# else:
# ledCommand = ledCommand.lower()
# init = False
# arduino = serial.Serial('/dev/ttyACM0', 9600, timeout=1)
# command = '{}'.format(ledCommand).encode()
# arduino.write(command)
# resposta = 'Led Aceso!'
# except Exception as e:
# resposta = f'Ocorreu o erro, {e}'
# return JsonResponse({'resposta':resposta}) | 878 | 0 | 48 |
a674189feb88dd6d08e0c93bee738cca08762b3e | 4,609 | py | Python | ap/verification/Control_Server/core.py | MakerPro/TwOpenRobot | ff9c2bcb474613cc1716673a04172fc25b7f0369 | [
"MIT"
] | 1 | 2015-11-09T05:56:48.000Z | 2015-11-09T05:56:48.000Z | ap/verification/Control_Server/core.py | MakerPro/TwOpenRobot | ff9c2bcb474613cc1716673a04172fc25b7f0369 | [
"MIT"
] | null | null | null | ap/verification/Control_Server/core.py | MakerPro/TwOpenRobot | ff9c2bcb474613cc1716673a04172fc25b7f0369 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import platform, serial, serial.tools.list_ports, time
from ctypes import c_ushort
#debug hack
import json
import glob
import time
__author__ = 'Kazuyuki TAKASE'
__author__ = 'Yugo KAJIWARA'
__copyright__ = 'PLEN Project Company Ltd., and all authors.'
__license__ = 'The MIT License'
| 23.757732 | 93 | 0.620742 | # -*- coding: utf-8 -*-
import platform, serial, serial.tools.list_ports, time
from ctypes import c_ushort
#debug hack
import json
import glob
import time
__author__ = 'Kazuyuki TAKASE'
__author__ = 'Yugo KAJIWARA'
__copyright__ = 'PLEN Project Company Ltd., and all authors.'
__license__ = 'The MIT License'
class Core:
def __init__(self, device_map):
self._serial = None
self._DEVICE_MAP = device_map
self._values = [ 0 for x in range(24) ]
self.debugmode=1 #debug hack
def serial_write(self,cmd,fun_name=""):
if self.debugmode==1:
print("%s cmd=%s" %(fun_name, cmd))
if not isinstance(cmd, basestring):
str1=""
for i in cmd:
str1=str1+chr(i)
print("String format=%s" %(str1))
self._serial.write(cmd)
def debug(self,print_str):
if self.debugmode==1:
print(print_str)
def output(self, device, value):
if self._serial == None:
return False
cmd = "$AD%02x%03x" % (self._DEVICE_MAP[device], c_ushort(value).value)
self.serial_write(cmd,"output")
return True
def play(self, slot):
if self._serial == None:
return False
cmd = "$PM%02x" % slot
#print("cmd=" + cmd)
self.serial_write(cmd,"play")
return True
def stop(self):
if self._serial == None:
return False
cmd = "$SM"
self.serial_write(cmd,"stop")
return True
def mytest(self,slot_id):
print("mytest slot id=%d" %(slot_id))
json_list = glob.glob("./motion/*.json")
for json1 in json_list:
print("json name=" + json1)
if str.find(json1,"Empty")>=0:
print("skip " + json1)
else:
with open(json1) as json_file:
json_data = json.load(json_file)
#print(json_data)
print("Installing "+ json1)
self.install(json_data)
time.sleep(1)
def install(self, json):
if self._serial == None:
return False
#debug hack, when slot id = 0x45. We will install all json file in the motion sub directory
if(json["slot"]==0x45):
self.mytest(json["slot"])
return
else:
pass
#print(str(json))
# コマンドの指定
cmd = ">IN"
# スロット番号の指定
self.debug("slot=%02x"%(json["slot"]))
cmd += "%02x" % (json["slot"])
# モーション名の指定
if len(json["name"]) < 20:
cmd += json["name"].ljust(20)
else:
cmd += json["name"][:19]
self.debug("name=%s"%(json["name"]))
# 制御機能の指定
if (len(json["codes"]) != 0):
for code in json["codes"]:
if (code["func"] == "loop"):
cmd += "01%02x%02x" % (code["args"][0], code["args"][1])
self.debug("func_loop=01%02x%02x"%(code["args"][0], code["args"][1]))
break
if (code["func"] == "jump"):
cmd += "02%02x00" % (code["args"][0])
self.debug("func_jump=02%02x00" % (code["args"][0]))
break
else:
cmd += "000000"
# フレーム数の指定
cmd += "%02x" % (len(json["frames"]))
self.debug("frames=%02x" % (len(json["frames"])))
# フレーム構成要素の指定
for frame in json["frames"]:
# 遷移時間の指定
cmd += "%04x" % (frame["transition_time_ms"])
self.debug("transition_time_ms=%04x" % (frame["transition_time_ms"]))
for output in frame["outputs"]:
self._values[self._DEVICE_MAP[output["device"]]] = c_ushort(output["value"]).value
for value in self._values:
cmd += "%04x" % value
#self.debug("per frame values=%04x" % value)
# Divide command length by payload size.
block = len(cmd) // 20
surplus = len(cmd) % 20
self.debug("cmd buffer(not sending): %s" %(cmd))
for index in range(block):
self.serial_write(map(ord, cmd[20 * index: 20 * (index + 1)]),"install(index)")
time.sleep(0.01)
self.serial_write(map(ord, cmd[-surplus:]),"install")
time.sleep(0.01)
return True
def connect(self):
com = None
for device in list(serial.tools.list_ports.comports()):
if 'Arduino Micro' in device[1]:
com = device[0]
# Fix for old version mac.
if ( ( (com == None)
and (platform.system() == 'Darwin') )
):
for device in list(serial.tools.list_ports.comports()):
if ( ( ('/dev/tty.usbmodem' in device[0])
or ('/dev/tty.usbserial' in device[0])
or ('/dev/cu.usbmodem' in device[0])
or ('/dev/cu.usbserial' in device[0]) )
):
try:
openable = serial.Serial(port = device[0])
openable.close()
com = device[0]
except serial.SerialException:
pass
if com == None:
return False
self.disconnect()
if self._serial == None:
self._serial = serial.Serial(port = com, baudrate = 2000000, timeout = 1)
self._serial.flushInput()
self._serial.flushOutput()
return True
def disconnect(self):
if self._serial == None:
return False
self._serial.close()
self._serial = None
return True
| 4,147 | -10 | 266 |
8dbecd9aff2be8395087262041a1354fff2ecb75 | 6,066 | py | Python | test/test_authorization_code_validator.py | danirod/dummyauth | f21ddda21dd3eed202d36399419f6e77d3a438ae | [
"0BSD"
] | 1 | 2020-04-05T23:54:56.000Z | 2020-04-05T23:54:56.000Z | test/test_authorization_code_validator.py | danirod/dummyauth | f21ddda21dd3eed202d36399419f6e77d3a438ae | [
"0BSD"
] | 1 | 2019-10-01T20:48:21.000Z | 2019-10-01T20:48:50.000Z | test/test_authorization_code_validator.py | danirod/dummyauth | f21ddda21dd3eed202d36399419f6e77d3a438ae | [
"0BSD"
] | null | null | null | import httpretty
import sure
from dummyauth.spider import AuthorizationCodeValidator
from unittest import TestCase
| 48.919355 | 85 | 0.600725 | import httpretty
import sure
from dummyauth.spider import AuthorizationCodeValidator
from unittest import TestCase
class AuthorizationCodeValidatorTestCase(TestCase):
@httpretty.httprettified
def test_spider_handles_valid_requests(self):
httpretty.register_uri(httpretty.POST, 'http://auth.example.com/login',
adding_headers={'content-type': 'application/json'},
body='{"me": "http://johndoe.example.com/"}')
validator_params={
'authorization_endpoint': 'http://auth.example.com/login',
'code': 'deadbeef',
'client_id': 'http://client.example.com/',
'redirect_uri': 'http://client.example.com/callback',
}
validator = AuthorizationCodeValidator(**validator_params)
self.assertTrue(validator.valid)
@httpretty.httprettified
def test_spider_handles_valid_profile_url(self):
httpretty.register_uri(httpretty.POST, 'http://auth.example.com/login',
adding_headers={'content-type': 'application/json'},
body='{"me": "http://johndoe.example.com/"}')
validator_params={
'authorization_endpoint': 'http://auth.example.com/login',
'code': 'deadbeef',
'client_id': 'http://client.example.com/',
'redirect_uri': 'http://client.example.com/callback',
}
validator = AuthorizationCodeValidator(**validator_params)
validator.profile_url.should.equal('http://johndoe.example.com/')
@httpretty.httprettified
def test_spider_handles_invalid_requests(self):
httpretty.register_uri(httpretty.POST, 'http://auth.example.com/login',
adding_headers={'content-type': 'application/json'},
body='{"error": "invalid_request"}',
status=400)
validator_params={
'authorization_endpoint': 'http://auth.example.com/login',
'code': 'deadbeef',
'client_id': 'http://client.example.com/',
'redirect_uri': 'http://client.example.com/callback',
}
validator = AuthorizationCodeValidator(**validator_params)
self.assertFalse(validator.valid)
@httpretty.httprettified
def test_spider_handles_invalid_request_code(self):
httpretty.register_uri(httpretty.POST, 'http://auth.example.com/login',
adding_headers={'content-type': 'application/json'},
body='{"error": "invalid_request"}',
status=400)
validator_params={
'authorization_endpoint': 'http://auth.example.com/login',
'code': 'deadbeef',
'client_id': 'http://client.example.com/',
'redirect_uri': 'http://client.example.com/callback',
}
validator = AuthorizationCodeValidator(**validator_params)
validator.error.should.equal('invalid_request')
@httpretty.httprettified
def test_spider_sends_appropiate_request(self):
httpretty.register_uri(httpretty.POST, 'http://auth.example.com/login',
adding_headers={'content-type': 'application/json'},
body='{"me": "http://johndoe.example.com/"}')
validator_params={
'authorization_endpoint': 'http://auth.example.com/login',
'code': 'deadbeef',
'client_id': 'http://client.example.com/',
'redirect_uri': 'http://client.example.com/callback',
}
AuthorizationCodeValidator(**validator_params).valid
httpretty.has_request().should.be(True)
@httpretty.httprettified
def test_spider_sends_appropiate_code(self):
httpretty.register_uri(httpretty.POST, 'http://auth.example.com/login',
adding_headers={'content-type': 'application/json'},
body='{"me": "http://johndoe.example.com/"}')
validator_params={
'authorization_endpoint': 'http://auth.example.com/login',
'code': 'deadbeef',
'client_id': 'http://client.example.com/',
'redirect_uri': 'http://client.example.com/callback',
}
AuthorizationCodeValidator(**validator_params).valid
payload = httpretty.last_request().parsed_body
payload['code'][0].should.equal('deadbeef')
@httpretty.httprettified
def test_spider_sends_appropiate_client_id(self):
httpretty.register_uri(httpretty.POST, 'http://auth.example.com/login',
adding_headers={'content-type': 'application/json'},
body='{"me": "http://johndoe.example.com/"}')
validator_params={
'authorization_endpoint': 'http://auth.example.com/login',
'code': 'deadbeef',
'client_id': 'http://client.example.com/',
'redirect_uri': 'http://client.example.com/callback',
}
AuthorizationCodeValidator(**validator_params).valid
payload = httpretty.last_request().parsed_body
payload['client_id'][0].should.equal('http://client.example.com/')
@httpretty.httprettified
def test_spider_sends_appropiate_redirect_uri(self):
httpretty.register_uri(httpretty.POST, 'http://auth.example.com/login',
adding_headers={'content-type': 'application/json'},
body='{"me": "http://johndoe.example.com/"}')
validator_params={
'authorization_endpoint': 'http://auth.example.com/login',
'code': 'deadbeef',
'client_id': 'http://client.example.com/',
'redirect_uri': 'http://client.example.com/callback',
}
AuthorizationCodeValidator(**validator_params).valid
payload = httpretty.last_request().parsed_body
payload['redirect_uri'][0].should.equal('http://client.example.com/callback')
| 5,450 | 478 | 23 |
d4592081e6abe7ce60aab9eac049481530951526 | 2,296 | py | Python | pink/context.py | Fogapod/pink | 827eb32d476a04cef1f01f74a920e1f124c37364 | [
"MIT"
] | null | null | null | pink/context.py | Fogapod/pink | 827eb32d476a04cef1f01f74a920e1f124c37364 | [
"MIT"
] | 3 | 2021-02-19T16:48:05.000Z | 2021-02-19T19:17:53.000Z | pink/context.py | Fogapod/pink | 827eb32d476a04cef1f01f74a920e1f124c37364 | [
"MIT"
] | 3 | 2021-02-18T12:39:32.000Z | 2021-02-19T18:55:02.000Z | from typing import Any, Union, Optional
import edgedb
import aiohttp
import discord
from discord.ext import commands
from .hookable import AsyncHookable
| 26.390805 | 81 | 0.61716 | from typing import Any, Union, Optional
import edgedb
import aiohttp
import discord
from discord.ext import commands
from .hookable import AsyncHookable
class Context(commands.Context, AsyncHookable):
@property
def prefix(self) -> str:
return self._prefix # type: ignore
@prefix.setter
def prefix(self, value: Optional[str]) -> None:
# because custom get_prefix can leave spaces
self._prefix = None if value is None else value.rstrip()
@property
def edb(self) -> edgedb.AsyncIOPool:
return self.bot.edb
@property
def session(self) -> aiohttp.ClientSession:
return self.bot.session
@AsyncHookable.hookable()
async def send(
self,
content: Any = None,
*,
target: discord.abc.Messageable = None,
**kwargs: Any,
) -> discord.Message:
if target is None:
target = super()
if content is not None:
# hardcoded 2000 limit because error handling is tricky with 50035
# and this project is EOL
content = str(content)[:2000]
return await target.send(content, **kwargs)
async def reply(self, content: Any = None, **kwargs: Any) -> discord.Message:
return await self.send(content, reference=self.message, **kwargs)
@AsyncHookable.hookable()
async def edit(
self,
message: discord.Message,
*,
content: Any = None,
**kwargs: Any,
) -> None:
await message.edit(content=content, **kwargs)
@AsyncHookable.hookable()
async def react(
self,
emoji: Union[discord.Emoji, str],
message: discord.Message = None,
) -> discord.Message:
if message is None:
message = self.message
await message.add_reaction(emoji)
return message
async def ok(self, message: discord.Message = None) -> discord.Message:
if message is None:
message = self.message
return await self.react("\N{HEAVY CHECK MARK}", message=message)
async def nope(self, message: discord.Message = None) -> discord.Message:
if message is None:
message = self.message
return await self.react("\N{HEAVY MULTIPLICATION X}", message=message)
| 1,670 | 446 | 23 |
a2e36c7f286e278a53f3ef4c84bbf305131d40ae | 4,578 | py | Python | dataset/data.py | patrikmaric/COVID19-search-engine | 42921d4a1fcb1b3dcc7e5936b75377a766102ca7 | [
"MIT"
] | 4 | 2020-06-07T09:33:27.000Z | 2021-11-25T15:28:32.000Z | dataset/data.py | patrikmaric/COVID19-search-engine | 42921d4a1fcb1b3dcc7e5936b75377a766102ca7 | [
"MIT"
] | null | null | null | dataset/data.py | patrikmaric/COVID19-search-engine | 42921d4a1fcb1b3dcc7e5936b75377a766102ca7 | [
"MIT"
] | 4 | 2020-06-05T12:47:40.000Z | 2021-11-25T15:28:05.000Z | import json
from pathlib import Path
import pandas as pd
import tqdm
from nltk import sent_tokenize
from dataset.preprocessing.preprocessing import preprocess_data
from dataset.util import extract_data_from_dict
from dataset.util import join_abstract_text
#from preprocessing.preprocessing import preprocess_data
#from util import extract_data_from_dict
#from util import join_abstract_text
from settings import data_root_path
abstract_keys = ('section', 'text')
body_text_keys = ('section', 'text')
| 39.465517 | 149 | 0.595238 | import json
from pathlib import Path
import pandas as pd
import tqdm
from nltk import sent_tokenize
from dataset.preprocessing.preprocessing import preprocess_data
from dataset.util import extract_data_from_dict
from dataset.util import join_abstract_text
#from preprocessing.preprocessing import preprocess_data
#from util import extract_data_from_dict
#from util import join_abstract_text
from settings import data_root_path
abstract_keys = ('section', 'text')
body_text_keys = ('section', 'text')
class CovidDataLoader():
@staticmethod
def load_articles_paths(root_path=data_root_path, file_extension='json'):
"""
Gets the paths to all files with the given file extension,
in the given directory(root_path) and all its subdirectories.
Args:
root_path: path to directory to get the files from
file_extension: extension to look for
Returns:
list of paths to all articles from the root directory
"""
article_paths = []
for path in Path(root_path).rglob('*.%s' % file_extension):
article_paths.append(str(path))
return article_paths
@staticmethod
def load_data(articles_paths, key='abstract', offset=0, limit=None, keys=abstract_keys, load_sentences=False,
preprocess=False, q=False):
"""
Given the list of paths to articles json files, returns pandas DataFrame containing the info defined by the keys param.
e.g. considering the following scheme
{
...
abstract:
section: "ABSTRACT", \n
text: "lorem ipsum..."
...
}
if key="abstracts" and keys = ["section", "text"], then the method will extract for each abtsract all sections and belonging texts
Args:
articles_paths: list of paths to articles to load
key: defines which part of data to extract from the json-s, e.g. if 'articles' -> extracts articles, if 'body_text' -> extracts body text
offset: loading start index in the articles_paths list
limit: number of articles to load
keys: specifier for the data defined by the key
load_sentences: if true, it divides the sections further into sentences
preprocess: if true, returns preprocessed data
Returns:
DataFrame defined by selected parameter values
"""
N = len(articles_paths)
assert offset < N
last_index = N
if limit and offset + limit < N:
last_index = offset + limit
data_ = []
for path in tqdm.tqdm(articles_paths[offset:last_index]):
with open(path, 'r') as f:
curr_article = json.load(f)
abstract_data = []
if key in curr_article:
for section in curr_article[key]:
curr_part = {'paper_id': curr_article['paper_id']}
try:
curr_part.update(extract_data_from_dict(section, keys, mandatory_keys=['text']))
if key == 'abstract':
abstract_data.append(curr_part)
else:
data_.append(curr_part)
except:
pass
if key == 'abstract' and abstract_data != []:
data_.append(join_abstract_text(abstract_data))
if load_sentences:
return CovidDataLoader.__load_sentences(data_, preprocess, q)
if not load_sentences and preprocess:
return pd.DataFrame(preprocess_data(data_, q)).drop_duplicates(subset='preprocessed_text', keep='first')
return pd.DataFrame(data_).drop_duplicates(subset='text', keep='first')
@staticmethod
def __load_sentences(texts, preprocess, q):
sentences = []
for text in texts:
sents = sent_tokenize(text['text'])
for i in range(len(sents)):
"""ls = len(sents[i].split())
print(sents[i][-1])
"""
sent = {k: v for k, v in text.items()}
sent['text'] = sents[i]
sent['position'] = i
sentences.append(sent)
if (preprocess):
return preprocess_data(sentences, q).drop_duplicates(subset='preprocessed_text', keep='first')
return pd.DataFrame(sentences).drop_duplicates(subset='text', keep='first')
| 650 | 3,400 | 23 |
0c5ec564b893c0e239a70c823a87ff929d623b9b | 1,635 | py | Python | storm_control/dave/update_generator.py | shiwei23/STORM6 | 669067503ebd164b575ce529fcc4a9a3f576b3d7 | [
"MIT"
] | 47 | 2015-02-11T16:05:54.000Z | 2022-03-26T14:13:12.000Z | storm_control/dave/update_generator.py | shiwei23/STORM6 | 669067503ebd164b575ce529fcc4a9a3f576b3d7 | [
"MIT"
] | 110 | 2015-01-30T03:53:41.000Z | 2021-11-03T15:58:44.000Z | storm_control/dave/update_generator.py | shiwei23/STORM6 | 669067503ebd164b575ce529fcc4a9a3f576b3d7 | [
"MIT"
] | 61 | 2015-01-09T18:31:27.000Z | 2021-12-21T13:07:51.000Z | #!/usr/bin/python
#
## @file
#
# Generate HTML for Dave web update & upload it to
# a webserver using the the DAV protocol and the
# tinydav library, available here:
#
# http://code.google.com/p/tinydav/
#
# This is not used.
#
# Hazen 08/11
#
import time
import tinydav
| 27.25 | 80 | 0.625688 | #!/usr/bin/python
#
## @file
#
# Generate HTML for Dave web update & upload it to
# a webserver using the the DAV protocol and the
# tinydav library, available here:
#
# http://code.google.com/p/tinydav/
#
# This is not used.
#
# Hazen 08/11
#
import time
import tinydav
class HTMLUpdate(object):
def __init__(self, parameters):
self.directory = parameters.directory
self.password = parameters.password
self.port = parameters.server_port
self.setup_name = parameters.setup_name
self.server_name = parameters.server_name
self.username = parameters.username
self.update_file = parameters.setup_name + ".txt"
def getTime(self):
return time.asctime(time.localtime(time.time()))
def newMovie(self, movie):
fp = open(self.update_file, "a")
fp.write("Started movie " + movie.name + " on " + self.getTime() + "\n")
fp.close()
#self.updateFileOnServer()
def start(self):
fp = open(self.update_file, "w")
fp.write("Run started on: " + self.getTime() + "\n")
fp.close()
#self.updateFileOnServer()
def stop(self):
fp = open(self.update_file, "a")
fp.write("Run stopped on: " + self.getTime() + "\n")
fp.close()
#self.updateFileOnServer()
def updateFileOnServer(self):
client = tinydav.WebDAVClient(self.server_name, self.port)
client.setbasicauth(self.username, self.password)
local = self.update_file
remote = self.directory + local
with open(local) as fd:
print client.put(remote, fd, "text/plain")
| 1,174 | 4 | 184 |
1c0a8e9a6d73b236560d63e413673b60a9ca7854 | 137 | py | Python | desafio021.py | WebertiBarbosa/python | 640a70c327c262d4e867a4b4620ca50d42398c00 | [
"MIT"
] | null | null | null | desafio021.py | WebertiBarbosa/python | 640a70c327c262d4e867a4b4620ca50d42398c00 | [
"MIT"
] | 1 | 2020-06-06T21:34:44.000Z | 2020-06-06T21:44:58.000Z | desafio021.py | WebertiBarbosa/python | 640a70c327c262d4e867a4b4620ca50d42398c00 | [
"MIT"
] | null | null | null | import pygame
pygame.mixer.init()
pygame.mixer.music.load('ex021.mp3')
pygame.mixer.music.play()
#pygame.event.wait()
input('Agora sim')
| 19.571429 | 36 | 0.751825 | import pygame
pygame.mixer.init()
pygame.mixer.music.load('ex021.mp3')
pygame.mixer.music.play()
#pygame.event.wait()
input('Agora sim')
| 0 | 0 | 0 |
3e36e30485bb67dc452ba3ccca0327ccdca04d87 | 8,864 | py | Python | internal/detectors/tf/tf_detection.py | williamcorsel/teNNo | 67c088f0303e6c53628e2c52c7fcecb9cfdcda62 | [
"Apache-2.0"
] | null | null | null | internal/detectors/tf/tf_detection.py | williamcorsel/teNNo | 67c088f0303e6c53628e2c52c7fcecb9cfdcda62 | [
"Apache-2.0"
] | null | null | null | internal/detectors/tf/tf_detection.py | williamcorsel/teNNo | 67c088f0303e6c53628e2c52c7fcecb9cfdcda62 | [
"Apache-2.0"
] | 2 | 2021-11-22T08:26:08.000Z | 2022-01-19T13:11:04.000Z | import logging
import pathlib
import cv2
import numpy as np
import six.moves.urllib as urllib
import tensorflow as tf
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from internal.detectors import base_detection as od
from internal.detectors import detection_output as do
log = logging.getLogger(__name__)
# patch tf1 into `utils.ops`
utils_ops.tf = tf.compat.v1
# Patch the location of gfile
tf.gfile = tf.io.gfile
'''
Name of model used. More pretrained models can be found here:
https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
'''
#MODEL_NAME = "ssd_mobilenet_v2_oid_v4_2018_12_12"
#MODEL_NAME = "rfcn_resnet101_coco_2018_01_28"
#MODEL_NAME = "faster_rcnn_inception_v2_coco_2018_01_28"
MODEL_NAME = 'ssd_mobilenet_v2_coco_2018_03_29'
'''
List of the strings that is used to add correct label for each box.
Make sure to match this file to the dataset the model is trained on
'''
#PATH_TO_LABELS = 'tenno/internal/detectors/models/labels/oid_v4_label_map.pbtxt'
PATH_TO_LABELS = 'internal/detectors/tf/models/labels/mscoco_label_map.pbtxt'
if __name__ == '__main__':
    # Manual smoke test: run the detector on a static image and show the
    # annotated result (requires the 'img/desk.png' fixture on disk).
    logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s" ,level=logging.INFO)
    IMG_PATH = "img/";
    img1 = cv2.imread(IMG_PATH + "desk.png")
    img1 = cv2.resize(img1, (640, 480), interpolation=cv2.INTER_AREA)
    detector = Tf_Detection(None)
    crop, border = detector.separate_border(img1)
    output = detector.show_inference(crop)
    # Paste the annotated crop back into the bordered frame.
    border[detector.ymargin:-detector.ymargin+1, detector.xmargin:-detector.xmargin+1] = output
    # Draw rectangle to represent margin
    border = cv2.rectangle(border, (detector.xmargin, detector.ymargin), (border.shape[1] - detector.xmargin, border.shape[0] - detector.ymargin), (0,0,255), 2)
    border = cv2.resize(border, (960, 720), interpolation=cv2.INTER_AREA)
    cv2.imshow('Output', border)
cv2.waitKey(0) | 35.314741 | 160 | 0.631656 | import logging
import pathlib
import cv2
import numpy as np
import six.moves.urllib as urllib
import tensorflow as tf
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from internal.detectors import base_detection as od
from internal.detectors import detection_output as do
log = logging.getLogger(__name__)
# patch tf1 into `utils.ops`
utils_ops.tf = tf.compat.v1
# Patch the location of gfile
tf.gfile = tf.io.gfile
'''
Name of model used. More pretrained models can be found here:
https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
'''
#MODEL_NAME = "ssd_mobilenet_v2_oid_v4_2018_12_12"
#MODEL_NAME = "rfcn_resnet101_coco_2018_01_28"
#MODEL_NAME = "faster_rcnn_inception_v2_coco_2018_01_28"
MODEL_NAME = 'ssd_mobilenet_v2_coco_2018_03_29'
'''
List of the strings that is used to add correct label for each box.
Make sure to match this file to the dataset the model is trained on
'''
#PATH_TO_LABELS = 'tenno/internal/detectors/models/labels/oid_v4_label_map.pbtxt'
PATH_TO_LABELS = 'internal/detectors/tf/models/labels/mscoco_label_map.pbtxt'
class Tf_Detection(od.Base_Detection):
    """Obstacle detector backed by a pretrained TensorFlow model.

    The model is downloaded from the TensorFlow detection model zoo on
    first use.  Each frame is cropped to the working area, run through
    the network, and the most likely obstacle box is tracked; when the
    tracked box grows faster than ``size_threshold`` between frames the
    obstacle is reported to ``self.avoider`` (if attached and enabled).
    """
    def __init__(self, avoider=None):
        """Load the label map and model and set up output bookkeeping.

        Parameters
        ----------
        avoider : object, optional
            Avoidance controller; when set and ``enabled`` it receives
            avoidance commands for detected obstacles.
        """
        super(Tf_Detection, self).__init__(avoider)
        self.category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
        self.model = self.load_model()
        # Growth ratio of the tracked box between frames above which it
        # is treated as an approaching obstacle.
        self.size_threshold = 1.12
        self.output = do.Detection_Output((self.width - self.xmargin*2), (self.height - self.ymargin*2))
    def load_model(self):
        """Download (if necessary) and load the TF saved model.

        Returns
        -------
        Callable
            The model's ``serving_default`` signature.
        """
        base_url = 'http://download.tensorflow.org/models/object_detection/'
        model_file = MODEL_NAME + '.tar.gz'
        model_dir = tf.keras.utils.get_file(
            fname=MODEL_NAME,
            origin=base_url + model_file,
            untar=True)
        model_dir = pathlib.Path(model_dir)/"saved_model"
        log.info("Model saved to: " + str(model_dir))
        model = tf.saved_model.load(str(model_dir))
        model = model.signatures['serving_default']
        return model
    def process_frame(self, frame, old):
        """
        Base function for processing a frame to be displayed.

        Decodes the incoming video frame, runs detection on the cropped
        working area and pastes the annotated crop back into the
        bordered image.  Returns the same annotated image twice to
        satisfy the (frame, preview) interface of the base class.
        """
        image = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR)
        image = self.prepare_workspace(image)
        crop, border = self.separate_border(image)
        output_frame = self.show_inference(crop)
        border[self.ymargin:-self.ymargin+1, self.xmargin:-self.xmargin+1] = output_frame
        # Draw rectangle to represent margin
        cv2.rectangle(border, (self.xmargin, self.ymargin), (border.shape[1] - self.xmargin, border.shape[0] - self.ymargin), (0,0,255), 2)
        self.output.clear()
        return border, border
    def show_inference(self, image):
        """Run detection on ``image``, draw boxes and trigger avoidance.

        All boxes above ``self.min_score_thresh`` are collected into
        ``self.output``; the tracked obstacle's size ratio decides
        whether the avoider is invoked.  Returns the (possibly
        annotated) image.
        """
        # Actual detection.
        output_dict = self.run_inference_for_single_image(image)
        # Visualization of the results of a detection.
        for i in range(len(output_dict['detection_boxes'])):
            score = output_dict['detection_scores'][i]
            if score >= self.min_score_thresh:
                coordinates = self.box_coordinates(output_dict['detection_boxes'][i])
                size = self.box_size(coordinates)
                middle = self.box_middle(coordinates)
                label = output_dict['detection_classes'][i]
                self.output.add_box(coordinates, size, middle, label, score)
        if self.output.size() <= 0:
            # Nothing detected this frame: drop any stale tracking state.
            self.output.reset()
            return image
        index = self.output.find_obstacle()
        self.visualize_boxes(image, index)
        size_ratio = self.output.get_size_ratio(index)
        if size_ratio >= self.size_threshold:
            log.info("!!!! OBSTACLE DETECTED")
            log.info("Size ratio: " + str(size_ratio))
            if self.avoider is not None and self.avoider.enabled:
                self.obstacle_detected = True
                # A very fast-growing box means the obstacle is close:
                # request a full stop instead of a dodge.
                stop = size_ratio > 2
                self.avoider.avoid(self.calculate_zones(self.output.get_coordinates(index)), stop)
        return image
    def run_inference_for_single_image(self, image):
        """Run the saved model on one image and return a numpy dict.

        Returns
        -------
        dict
            'detection_boxes', 'detection_scores', 'detection_classes'
            (ints) and 'num_detections'; plus reframed masks when the
            model outputs 'detection_masks'.
        """
        # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
        input_tensor = tf.convert_to_tensor(image)
        # The model expects a batch of images, so add an axis with `tf.newaxis`.
        input_tensor = input_tensor[tf.newaxis,...]
        # Run inference
        output_dict = self.model(input_tensor)
        # All outputs are batches tensors.
        # Convert to numpy arrays, and take index [0] to remove the batch dimension.
        # We're only interested in the first num_detections.
        num_detections = int(output_dict.pop('num_detections'))
        output_dict = {key:value[0, :num_detections].numpy()
                    for key,value in output_dict.items()}
        output_dict['num_detections'] = num_detections
        # detection_classes should be ints.
        output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
        # Handle models with masks:
        if 'detection_masks' in output_dict:
            # Reframe the the bbox mask to the image size.
            detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    output_dict['detection_masks'], output_dict['detection_boxes'],
                    image.shape[0], image.shape[1])
            detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
                                            tf.uint8)
            output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
        return output_dict
    def visualize_boxes(self, image, index):
        """Draw every stored box on ``image``; the tracked box (``index``)
        is drawn in red with a 'TRACKING' label, others in green."""
        for i in range(self.output.size()):
            xmin, xmax, ymin, ymax = self.output.get_coordinates(i)
            if self.output.get_label(i) in self.category_index.keys():
                class_name = self.category_index[self.output.get_label(i)]['name']
            else:
                class_name = 'N/A'
            display_str = '{}: {}%'.format(class_name, int(100*self.output.get_score(i)))
            color = (0,255,0)
            if i == index:
                color = (0,0,255)
                display_str += ' TRACKING'
            vis_util.draw_bounding_box_on_image_array(image, ymin, xmin, ymax, xmax
                , color, 2, [display_str], False)
    def calculate_zones(self, box):
        """Return the free space (left/right/up/down) around a box,
        measured inside the margin-cropped working area.

        Input: (left, right, top, bottom) absolute coordinates.
        """
        zonel = box[0]
        zoner = (self.width - self.xmargin*2) - box[1]
        zoneu = box[2]
        zoned = (self.height - self.ymargin*2) - box[3]
        return od.Zones(zonel, zoner, zoneu, zoned)
    def box_coordinates(self, box):
        """
        Returns absolute coordinates of a normalized bounding box.
        Input: [ymin, xmin, ymax, xmax] (from output_dict['detection_boxes'])
        Output: (left, right, top, bottom) in working-area pixels.
        """
        return (box[1] * (self.width - self.xmargin*2), box[3] * (self.width - self.xmargin*2),
                box[0] * (self.height - self.ymargin*2), box[2] * (self.height - self.ymargin*2))
    def box_size(self, box_coordinates):
        """
        Returns size (area) of bounding box
        Input: (left, right, top, bottom)
        """
        return ((box_coordinates[1] - box_coordinates[0]) *
                (box_coordinates[3] - box_coordinates[2]))
    def box_middle(self, box):
        """
        Returns coordinate of middle point of bounding box
        Input: (left, right, top, bottom)
        """
        box_middle = (box[1] + (box[0] - box[1])/2, box[2] + (box[3] - box[2])/2)
        return box_middle
    def distance(self, x, y):
        """Return the Euclidean distance between two 2D points."""
        # Bug fix: `sqrt` was referenced without ever being imported in
        # this module, so calling this method raised NameError.
        from math import sqrt
        return sqrt(((x[0] - y[0]) ** 2) + ((x[1] - y[1]) ** 2))
if __name__ == '__main__':
    # Manual smoke test: run the detector on a static image and show the
    # annotated result (requires the 'img/desk.png' fixture on disk).
    logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s" ,level=logging.INFO)
    IMG_PATH = "img/";
    img1 = cv2.imread(IMG_PATH + "desk.png")
    img1 = cv2.resize(img1, (640, 480), interpolation=cv2.INTER_AREA)
    detector = Tf_Detection(None)
    crop, border = detector.separate_border(img1)
    output = detector.show_inference(crop)
    # Paste the annotated crop back into the bordered frame.
    border[detector.ymargin:-detector.ymargin+1, detector.xmargin:-detector.xmargin+1] = output
    # Draw rectangle to represent margin
    border = cv2.rectangle(border, (detector.xmargin, detector.ymargin), (border.shape[1] - detector.xmargin, border.shape[0] - detector.ymargin), (0,0,255), 2)
    border = cv2.resize(border, (960, 720), interpolation=cv2.INTER_AREA)
    cv2.imshow('Output', border)
    cv2.waitKey(0)
9b5f293b2e9d18e46d9ea7176bb36a877d7985a1 | 14,738 | py | Python | libs/pony/orm/tests/test_diagram_attribute.py | buddyli/android_intership | d4ab90e48472f10623cda9b64b798df924968ab5 | [
"Apache-2.0"
] | null | null | null | libs/pony/orm/tests/test_diagram_attribute.py | buddyli/android_intership | d4ab90e48472f10623cda9b64b798df924968ab5 | [
"Apache-2.0"
] | null | null | null | libs/pony/orm/tests/test_diagram_attribute.py | buddyli/android_intership | d4ab90e48472f10623cda9b64b798df924968ab5 | [
"Apache-2.0"
] | null | null | null | import unittest
from pony.orm.core import *
from pony.orm.core import Attribute
from testutils import *
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| 38.989418 | 127 | 0.604017 | import unittest
from pony.orm.core import *
from pony.orm.core import Attribute
from testutils import *
class TestAttribute(unittest.TestCase):
    """Tests for Pony ORM entity-attribute declaration and mapping.

    The ``test_attribute*`` cases exercise attribute and
    reverse-attribute declaration errors and resolution; the
    ``test_columns*`` cases cover the ``column``/``columns`` (and
    ``reverse_column``/``reverse_columns``/``table``) mapping options;
    the ``test_nullable*`` cases check nullability of unique optional
    attributes.  Expected failures are declared via the
    ``raises_exception`` decorator from ``testutils``, which asserts
    both the exception type and its exact message.
    """
    @raises_exception(TypeError, "Attribute Entity1.id has unknown option 'another_option'")
    def test_attribute1(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int, another_option=3)
        db.generate_mapping(create_tables=True)
    @raises_exception(TypeError, 'Cannot link attribute to Entity class. Must use Entity subclass instead')
    def test_attribute2(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            b = Required(db.Entity)
    @raises_exception(TypeError, 'Default value for required attribute Entity1.b cannot be None')
    def test_attribute3(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            b = Required(int, default=None)
    def test_attribute4(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            attr1 = Required('Entity2', reverse='attr2')
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            attr2 = Optional(Entity1)
        self.assertEqual(Entity1.attr1.reverse, Entity2.attr2)
    def test_attribute5(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            attr1 = Required('Entity2')
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            attr2 = Optional(Entity1, reverse=Entity1.attr1)
        self.assertEqual(Entity2.attr2.reverse, Entity1.attr1)
    @raises_exception(TypeError, "Value of 'reverse' option must be name of reverse attribute). Got: 123")
    def test_attribute6(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            attr1 = Required('Entity2', reverse=123)
    @raises_exception(TypeError, "Reverse option cannot be set for this type: <type 'str'>")
    def test_attribute7(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            attr1 = Required(str, reverse='attr1')
    @raises_exception(TypeError, "'Attribute' is abstract type")
    def test_attribute8(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            attr1 = Attribute(str)
    @raises_exception(ERDiagramError, "Attribute name cannot both start and end with underscore. Got: _attr1_")
    def test_attribute9(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            _attr1_ = Required(str)
    @raises_exception(ERDiagramError, "Duplicate use of attribute Entity1.attr1 in entity Entity2")
    def test_attribute10(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            attr1 = Required(str)
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            attr2 = Entity1.attr1
    @raises_exception(ERDiagramError, "Invalid use of attribute Entity1.a in entity Entity2")
    def test_attribute11(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            a = Required(str)
        class Entity2(db.Entity):
            b = Required(str)
            composite_key(Entity1.a, b)
    @raises_exception(ERDiagramError, "Cannot create primary key for Entity1 automatically because name 'id' is alredy in use")
    def test_attribute12(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = Optional(str)
    @raises_exception(ERDiagramError, "Reverse attribute for Entity1.attr1 was not found")
    def test_attribute13(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            attr1 = Required('Entity2')
        class Entity2(db.Entity):
            id = PrimaryKey(int)
    @raises_exception(ERDiagramError, "Reverse attribute Entity1.attr1 not found")
    def test_attribute14(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            attr2 = Required(Entity1, reverse='attr1')
    @raises_exception(ERDiagramError, "Inconsistent reverse attributes Entity3.attr3 and Entity2.attr2")
    def test_attribute15(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            attr1 = Optional('Entity2')
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            attr2 = Required(Entity1)
        class Entity3(db.Entity):
            id = PrimaryKey(int)
            attr3 = Required(Entity2, reverse='attr2')
    @raises_exception(ERDiagramError, "Inconsistent reverse attributes Entity3.attr3 and Entity2.attr2")
    def test_attribute16(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            attr1 = Optional('Entity2')
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            attr2 = Required(Entity1)
        class Entity3(db.Entity):
            id = PrimaryKey(int)
            attr3 = Required(Entity2, reverse=Entity2.attr2)
    @raises_exception(ERDiagramError, 'Reverse attribute for Entity2.attr2 not found')
    def test_attribute18(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            attr2 = Required('Entity1')
    @raises_exception(ERDiagramError, 'Ambiguous reverse attribute for Entity2.c')
    def test_attribute19(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            a = Required('Entity2')
            b = Optional('Entity2')
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            c = Set(Entity1)
            d = Set(Entity1)
    @raises_exception(ERDiagramError, 'Ambiguous reverse attribute for Entity2.c')
    def test_attribute20(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            a = Required('Entity2', reverse='c')
            b = Optional('Entity2', reverse='c')
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            c = Set(Entity1)
    def test_attribute21(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            a = Required('Entity2', reverse='c')
            b = Optional('Entity2')
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            c = Set(Entity1)
            d = Set(Entity1)
    def test_attribute22(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            a = Required('Entity2', reverse='c')
            b = Optional('Entity2')
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            c = Set(Entity1, reverse='a')
            d = Set(Entity1)
    @raises_exception(TypeError, "Parameters 'column' and 'columns' cannot be specified simultaneously")
    def test_columns1(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int)
            attr1 = Optional("Entity2", column='a', columns=['b', 'c'])
        class Entity2(db.Entity):
            id = PrimaryKey(int)
            attr2 = Optional(Entity1)
        db.generate_mapping(create_tables=True)
    def test_columns2(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int, column='a')
        self.assertEqual(Entity1.id.columns, ['a'])
    def test_columns3(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int, columns=['a'])
        self.assertEqual(Entity1.id.column, 'a')
    @raises_exception(MappingError, "Too many columns were specified for Entity1.id")
    def test_columns5(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int, columns=['a', 'b'])
        db.generate_mapping(create_tables=True)
    @raises_exception(TypeError, "Parameter 'columns' must be a list. Got: set(['a'])'")
    def test_columns6(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int, columns=set(['a']))
        db.generate_mapping(create_tables=True)
    @raises_exception(TypeError, "Parameter 'column' must be a string. Got: 4")
    def test_columns7(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            id = PrimaryKey(int, column=4)
        db.generate_mapping(create_tables=True)
    def test_columns8(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            a = Required(int)
            b = Required(int)
            attr1 = Optional('Entity2')
            PrimaryKey(a, b)
        class Entity2(db.Entity):
            attr2 = Required(Entity1, columns=['x', 'y'])
        self.assertEqual(Entity2.attr2.column, None)
        self.assertEqual(Entity2.attr2.columns, ['x', 'y'])
    @raises_exception(MappingError, 'Invalid number of columns specified for Entity2.attr2')
    def test_columns9(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            a = Required(int)
            b = Required(int)
            attr1 = Optional('Entity2')
            PrimaryKey(a, b)
        class Entity2(db.Entity):
            attr2 = Required(Entity1, columns=['x', 'y', 'z'])
        db.generate_mapping(create_tables=True)
    @raises_exception(MappingError, 'Invalid number of columns specified for Entity2.attr2')
    def test_columns10(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            a = Required(int)
            b = Required(int)
            attr1 = Optional('Entity2')
            PrimaryKey(a, b)
        class Entity2(db.Entity):
            attr2 = Required(Entity1, column='x')
        db.generate_mapping(create_tables=True)
    @raises_exception(TypeError, "Items of parameter 'columns' must be strings. Got: [1, 2]")
    def test_columns11(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            a = Required(int)
            b = Required(int)
            attr1 = Optional('Entity2')
            PrimaryKey(a, b)
        class Entity2(db.Entity):
            attr2 = Required(Entity1, columns=[1, 2])
    def test_columns12(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            attr1 = Set('Entity1', reverse='attr1', column='column1', reverse_column='column2', reverse_columns=['column2'])
        db.generate_mapping(create_tables=True)
    @raises_exception(TypeError, "Parameters 'reverse_column' and 'reverse_columns' cannot be specified simultaneously")
    def test_columns13(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            attr1 = Set('Entity1', reverse='attr1', column='column1', reverse_column='column2', reverse_columns=['column3'])
        db.generate_mapping(create_tables=True)
    @raises_exception(TypeError, "Parameter 'reverse_column' must be a string. Got: 5")
    def test_columns14(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            attr1 = Set('Entity1', reverse='attr1', column='column1', reverse_column=5)
        db.generate_mapping(create_tables=True)
    @raises_exception(TypeError, "Parameter 'reverse_columns' must be a list. Got: 'column3'")
    def test_columns15(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            attr1 = Set('Entity1', reverse='attr1', column='column1', reverse_columns='column3')
        db.generate_mapping(create_tables=True)
    @raises_exception(TypeError, "Parameter 'reverse_columns' must be a list of strings. Got: [5]")
    def test_columns16(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            attr1 = Set('Entity1', reverse='attr1', column='column1', reverse_columns=[5])
        db.generate_mapping(create_tables=True)
    def test_columns17(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            attr1 = Set('Entity1', reverse='attr1', column='column1', reverse_columns=['column2'])
        db.generate_mapping(create_tables=True)
    def test_columns18(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            attr1 = Set('Entity1', reverse='attr1', table='T1')
        db.generate_mapping(create_tables=True)
    @raises_exception(TypeError, "Parameter 'table' must be a string. Got: 5")
    def test_columns19(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            attr1 = Set('Entity1', reverse='attr1', table=5)
        db.generate_mapping(create_tables=True)
    @raises_exception(TypeError, "Each part of table name must be a string. Got: 1")
    def test_columns20(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            attr1 = Set('Entity1', reverse='attr1', table=[1, 'T1'])
        db.generate_mapping(create_tables=True)
    def test_columns21(self):
        db = TestDatabase('sqlite', ':memory:')
        class Entity1(db.Entity):
            attr1 = Set('Entity1', reverse='attr1', table=['db1', 'T1'])
        db.generate_mapping(create_tables=True)
    def test_nullable1(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            a = Optional(unicode, unique=True)
        db.generate_mapping(create_tables=True)
        self.assertEqual(Entity1.a.nullable, True)
    def test_nullable2(self):
        db = Database('sqlite', ':memory:')
        class Entity1(db.Entity):
            a = Optional(unicode, unique=True)
        db.generate_mapping(create_tables=True)
        with db_session:
            Entity1()
            commit()
            Entity1()
            commit()
        self.assert_(True)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| 10,541 | 4,022 | 23 |
226b7fd363e77aaf3601b6f6b3b8225f927a009c | 15,266 | py | Python | apysc/_expression/expression_data_util.py | ynsnf/apysc | b10ffaf76ec6beb187477d0a744fca00e3efc3fb | [
"MIT"
] | null | null | null | apysc/_expression/expression_data_util.py | ynsnf/apysc | b10ffaf76ec6beb187477d0a744fca00e3efc3fb | [
"MIT"
] | null | null | null | apysc/_expression/expression_data_util.py | ynsnf/apysc | b10ffaf76ec6beb187477d0a744fca00e3efc3fb | [
"MIT"
] | null | null | null | """The implementation of manipulating HTL and js expression files.
Mainly following interfaces are defined:
- empty_expression : Empty the current js expression data.
- append_js_expression : Append js expression.
- get_current_expression : Get current expression string.
- get_current_event_handler_scope_expression : Get a current
event handler scope's expression string.
- exec_query : Execute a SQLite sql query.
"""
import sqlite3
from enum import Enum
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import Tuple
from typing import TypeVar
# Shared in-memory SQLite URI; cache=shared lets every connection in
# this process see the same database.
_SQLITE_IN_MEMORY_SETTING: str = 'file::memory:?cache=shared'
connection = sqlite3.connect(_SQLITE_IN_MEMORY_SETTING, uri=True)
cursor = connection.cursor()

_C = TypeVar('_C', bound=Callable)


def _check_connection(func: _C) -> _C:
    """
    The decorator function to check a SQLite connection when a
    specified function calling, and if failed, create a new
    connection and recall a function.

    Parameters
    ----------
    func : Callable
        Target function to decorate.

    Returns
    -------
    new_func : Callable
        Decorated function (metadata of ``func`` is preserved via
        ``functools.wraps``).
    """
    from functools import wraps

    @wraps(func)
    def new_func(*args: Any, **kwargs: Any) -> Any:
        """
        Call the wrapped function; on any failure, reconnect to the
        shared in-memory database and retry once.

        Parameters
        ----------
        *args : list
            Any positional arguments.
        **kwargs : dict
            Any keyword arguments.

        Returns
        -------
        result : Any
            Any returned value.
        """
        global connection, cursor
        try:
            result: Any = func(*args, **kwargs)
        except Exception:
            # Deliberately broad: any failure (closed handle,
            # cross-thread usage, etc.) is treated as a dead
            # connection, so rebuild it and retry exactly once.
            connection = sqlite3.connect(_SQLITE_IN_MEMORY_SETTING, uri=True)
            cursor = connection.cursor()
            result = func(*args, **kwargs)
        return result
    return new_func  # type: ignore
@_check_connection
def _table_exists(*, table_name: TableName) -> bool:
    """
    Determine whether a table with the specified name already exists
    in the shared in-memory database.

    Parameters
    ----------
    table_name : TableName
        Target table name.

    Returns
    -------
    result : bool
        True when the table exists.
    """
    cursor.execute(
        'SELECT name FROM sqlite_master WHERE type = "table" '
        f'AND name = "{table_name.value}" LIMIT 1;')
    row: Optional[Tuple] = cursor.fetchone()
    connection.commit()
    return bool(row)
def _make_create_table_query(
        *, table_name: TableName,
        column_ddl: str) -> str:
    """
    Build a CREATE TABLE IF NOT EXISTS sql query string.

    Parameters
    ----------
    table_name : TableName
        Target table name.
    column_ddl : str
        Column definitions DDL string, e.g. ' id INTEGER, ...'.

    Returns
    -------
    query : str
        A create table sql query.
    """
    header: str = f'CREATE TABLE IF NOT EXISTS {table_name.value} ('
    return f'{header}\n{column_ddl}\n);'
_EXPRESSION_TABLE_COLUMN_DDL: str = (
' id INTEGER PRIMARY KEY AUTOINCREMENT,'
'\n txt TEXT NOT NULL'
)
@_check_connection
def _create_expression_normal_table() -> None:
    """
    Create the SQLite table that stores normal-scope expressions.
    """
    cursor.execute(_make_create_table_query(
        table_name=TableName.EXPRESSION_NORMAL,
        column_ddl=_EXPRESSION_TABLE_COLUMN_DDL))
@_check_connection
def _create_expression_handler_table() -> None:
    """
    Create the SQLite table that stores event-handler-scope expressions.
    """
    cursor.execute(_make_create_table_query(
        table_name=TableName.EXPRESSION_HANDLER,
        column_ddl=_EXPRESSION_TABLE_COLUMN_DDL))
_INDENT_NUM_TABLE_COLUMN_DDL: str = ' num INTEGER NOT NULL'
@_check_connection
def _create_indent_num_normal_table() -> None:
    """
    Create the SQLite table that stores the normal-scope indentation
    number.
    """
    cursor.execute(_make_create_table_query(
        table_name=TableName.INDENT_NUM_NORMAL,
        column_ddl=_INDENT_NUM_TABLE_COLUMN_DDL))
@_check_connection
def _create_indent_num_handler_table() -> None:
    """
    Create the SQLite table that stores the handler-scope indentation
    number.
    """
    cursor.execute(_make_create_table_query(
        table_name=TableName.INDENT_NUM_HANDLER,
        column_ddl=_INDENT_NUM_TABLE_COLUMN_DDL))
@_check_connection
def _create_last_scope_table() -> None:
    """
    Create the SQLite table that stores the last scope value.
    """
    cursor.execute(_make_create_table_query(
        table_name=TableName.LAST_SCOPE,
        column_ddl=' last_scope INTEGER NOT NULL'))
@_check_connection
def _create_event_handler_scope_count_table() -> None:
    """
    Create the SQLite table that stores the event handler scope count.
    """
    cursor.execute(_make_create_table_query(
        table_name=TableName.EVENT_HANDLER_SCOPE_COUNT,
        column_ddl=' count INTEGER NOT NULL'))
@_check_connection
def _create_loop_count_table() -> None:
    """
    Create the SQLite table that stores the loop count value.
    """
    cursor.execute(_make_create_table_query(
        table_name=TableName.LOOP_COUNT,
        column_ddl=' count INTEGER NOT NULL'))
@_check_connection
def _create_debug_mode_setting_table() -> None:
    """
    Create the SQLite table that stores the debug mode setting.
    """
    cursor.execute(_make_create_table_query(
        table_name=TableName.DEBUG_MODE_SETTING,
        column_ddl=' is_debug_mode INTEGER NOT NULL'))
@_check_connection
def _create_debug_mode_callable_count_table() -> None:
    """
    Create the SQLite table that stores per-callable debug mode counts.
    """
    ddl: str = (
        ' id INTEGER PRIMARY KEY AUTOINCREMENT,'
        '\n name TEXT NOT NULL,'
        '\n count INTEGER NOT NULL')
    cursor.execute(_make_create_table_query(
        table_name=TableName.DEBUG_MODE_CALLABLE_COUNT,
        column_ddl=ddl))
@_check_connection
def _create_stage_elem_id_table() -> None:
    """
    Create the SQLite table that stores the stage element id.
    """
    cursor.execute(_make_create_table_query(
        table_name=TableName.STAGE_ELEM_ID,
        column_ddl=' elem_id TEXT NOT NULL'))
@_check_connection
def _create_variable_name_count_table() -> None:
    """
    Create the SQLite table that stores per-type variable name counts.
    """
    ddl: str = (
        ' id INTEGER PRIMARY KEY AUTOINCREMENT,'
        '\n type_name TEXT NOT NULL,'
        '\n count INTEGER NOT NULL')
    cursor.execute(_make_create_table_query(
        table_name=TableName.VARIABLE_NAME_COUNT,
        column_ddl=ddl))
@_check_connection
def _create_handler_calling_stack_table() -> None:
    """
    Create the SQLite table that stores the handler calling stack.
    """
    ddl: str = (
        ' id INTEGER PRIMARY KEY AUTOINCREMENT,'
        '\n handler_name TEXT NOT NULL,'
        '\n scope_count INTEGER NOT NULL,'
        '\n variable_name TEXT NOT NULL')
    cursor.execute(_make_create_table_query(
        table_name=TableName.HANDLER_CALLING_STACK,
        column_ddl=ddl))
@_check_connection
def _create_circular_calling_handler_name_table() -> None:
    """
    Create the SQLite table that stores circular-calling handler names.
    """
    ddl: str = (
        ' id INTEGER PRIMARY KEY AUTOINCREMENT,'
        '\n handler_name TEXT NOT NULL,'
        '\n prev_handler_name TEXT NOT NULL,'
        '\n prev_variable_name TEXT NOT NULL')
    cursor.execute(_make_create_table_query(
        table_name=TableName.CIRCULAR_CALLING_HANDLER_NAME,
        column_ddl=ddl))
@_check_connection
def _create_stage_id_table() -> None:
    """
    Create the SQLite table that stores the stage id.
    """
    cursor.execute(_make_create_table_query(
        table_name=TableName.STAGE_ID,
        column_ddl=' stage_id INTEGER NOT NULL'))
def initialize_sqlite_tables_if_not_initialized() -> bool:
    """
    Initialize the sqlite tables if they have not been
    initialized yet.

    Returns
    -------
    initialized : bool
        If initialized, returns True.
    """
    # The normal expression table acts as the sentinel: when it already
    # exists, every table has been created.
    if _table_exists(table_name=TableName.EXPRESSION_NORMAL):
        return False
    table_creators: Tuple[Callable[[], None], ...] = (
        _create_expression_normal_table,
        _create_expression_handler_table,
        _create_indent_num_normal_table,
        _create_indent_num_handler_table,
        _create_last_scope_table,
        _create_event_handler_scope_count_table,
        _create_loop_count_table,
        _create_debug_mode_setting_table,
        _create_debug_mode_callable_count_table,
        _create_stage_elem_id_table,
        _create_variable_name_count_table,
        _create_handler_calling_stack_table,
        _create_circular_calling_handler_name_table,
        _create_stage_id_table,
    )
    for create_table in table_creators:
        create_table()
    return True
def empty_expression() -> None:
    """
    Empty the current js expression data.
    """
    initialize_sqlite_tables_if_not_initialized()
    target_tables = (
        t for t in TableName if t is not TableName.NOT_EXISTING)
    for table_name in target_tables:
        cursor.execute(f'DELETE FROM {table_name.value};')
    connection.commit()
def append_js_expression(expression: str) -> None:
    """
    Append a JavaScript expression to the current scope's table.

    Parameters
    ----------
    expression : str
        JavaScript Expression string.

    References
    ----------
    - append_js_expression interface document
        - https://simon-ritchie.github.io/apysc/append_js_expression.html

    Examples
    --------
    >>> import apysc as ap
    >>> ap.append_js_expression(expression='console.log("Hello!")')
    """
    from apysc._expression import indent_num
    from apysc._expression import last_scope
    from apysc._string import indent_util
    initialize_sqlite_tables_if_not_initialized()
    indented: str = indent_util.append_spaces_to_expression(
        expression=expression,
        indent_num=indent_num.get_current_indent_num())
    # Double each quote character so the text survives SQLite's
    # double-quoted string literal syntax.
    escaped: str = indented.replace('"', '""')
    table_name: TableName = _get_expression_table_name()
    cursor.execute(
        f'INSERT INTO {table_name.value}(txt) '
        f'VALUES ("{escaped}");')
    connection.commit()
    last_scope.set_last_scope(value=last_scope.LastScope.NORMAL)
def _get_expression_table_name() -> TableName:
    """
    Get the expression table name for the current scope: the handler
    table while inside an event handler scope, otherwise the normal one.

    Returns
    -------
    table_name : str
        Target expression table name.
    """
    from apysc._expression import event_handler_scope
    scope_count: int = \
        event_handler_scope.get_current_event_handler_scope_count()
    return (
        TableName.EXPRESSION_NORMAL if scope_count == 0
        else TableName.EXPRESSION_HANDLER)
def get_current_expression() -> str:
    """
    Get a current expression's string.

    Notes
    -----
    If it is necessary to get event handler scope's expression,
    then use get_current_event_handler_scope_expression function
    instead.

    Returns
    -------
    current_expression : str
        Current expression's string.
    """
    return _get_current_expression(table_name=TableName.EXPRESSION_NORMAL)
def get_current_event_handler_scope_expression() -> str:
    """
    Get a current event handler scope's expression string.

    Notes
    -----
    If it is necessary to get normal scope's expression, then use
    get_current_expression function instead.

    Returns
    -------
    current_expression : str
        Current expression's string.
    """
    return _get_current_expression(table_name=TableName.EXPRESSION_HANDLER)
def _get_current_expression(*, table_name: TableName) -> str:
    """
    Read every stored expression row from a target table and join
    them with newlines.

    Parameters
    ----------
    table_name : TableName
        Target table name.

    Returns
    -------
    current_expression : str
        Current expression string ('' when the table is empty).
    """
    initialize_sqlite_tables_if_not_initialized()
    cursor.execute(f'SELECT txt FROM {table_name.value}')
    rows: List[Tuple[str]] = cursor.fetchall()
    if not rows:
        return ''
    return '\n'.join([row[0] for row in rows])
def _validate_limit_clause(*, sql: str) -> None:
"""
Validate whether a LIMIT clause is used in a UPDATE or DELETE sql.
Parameters
----------
sql : str
Target sql.
Raises
------
_LimitClauseCantUseError
If the LIMIT clause used in a DELETE or UPDATE sql.
"""
sql_: str = sql.lower()
if 'delete ' not in sql_ and 'update ' not in sql_:
return
if 'limit ' not in sql_:
return
raise _LimitClauseCantUseError(
f'LIMIT clause cannot use in the UPDATE or DELETE sql: {sql_}')
def exec_query(*, sql: str, commit: bool = True) -> None:
"""
Execute a SQLite sql query.
Parameters
----------
sql : str
Target sql.
commit : bool, default True
A boolean value whether commit the transaction after the
sql query or not.
Raises
------
_LimitClauseCantUseError
If the LIMIT clause used in a DELETE or UPDATE sql.
"""
_validate_limit_clause(sql=sql)
initialize_sqlite_tables_if_not_initialized()
cursor.execute(sql)
if commit:
connection.commit()
| 27.756364 | 78 | 0.641229 | """The implementation of manipulating HTL and js expression files.
Mainly following interfaces are defined:
- empty_expression : Empty the current js expression data.
- append_js_expression : Append js expression.
- get_current_expression : Get current expression string.
- get_current_event_handler_scope_expression : Get a current
event handler scope's expression string.
- exec_query : Execute a SQLite sql query.
"""
import sqlite3
from enum import Enum
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import Tuple
from typing import TypeVar
class TableName(Enum):
NOT_EXISTING = 'not_existing'
EXPRESSION_NORMAL = 'expression_normal'
EXPRESSION_HANDLER = 'expression_handler'
INDENT_NUM_NORMAL = 'indent_num_normal'
INDENT_NUM_HANDLER = 'indent_num_handler'
LAST_SCOPE = 'last_scope'
EVENT_HANDLER_SCOPE_COUNT = 'event_handler_scope_count'
LOOP_COUNT = 'loop_count'
DEBUG_MODE_SETTING = 'debug_mode_setting'
DEBUG_MODE_CALLABLE_COUNT = 'debug_mode_callable_count'
STAGE_ELEM_ID = 'stage_elem_id'
VARIABLE_NAME_COUNT = 'variable_name_count'
HANDLER_CALLING_STACK = 'handler_calling_stack'
CIRCULAR_CALLING_HANDLER_NAME = 'circular_calling_handler_name'
STAGE_ID = 'stage_id'
_SQLITE_IN_MEMORY_SETTING: str = 'file::memory:?cache=shared'
connection = sqlite3.connect(_SQLITE_IN_MEMORY_SETTING, uri=True)
cursor = connection.cursor()
_C = TypeVar('_C', bound=Callable)
def _check_connection(func: _C) -> _C:
"""
The decorator function to check a SQLite connection when a
specified function calling, and if failed, create a new
connection and recall a function.
Parameters
----------
func : Callable
Target function to decorate.
Returns
-------
new_func : Callable
Decorated function.
"""
def new_func(*args: Any, **kwargs: Any) -> Any:
"""
Function for the decoration.
Parameters
----------
*args : list
Any positional arguments.
**kwargs : dict
Any keyword arguments.
Returns
-------
result : Any
Any returned value.
"""
global connection, cursor
try:
result: Any = func(*args, **kwargs)
except Exception:
connection = sqlite3.connect(_SQLITE_IN_MEMORY_SETTING, uri=True)
cursor = connection.cursor()
result = func(*args, **kwargs)
return result
return new_func # type: ignore
@_check_connection
def _table_exists(*, table_name: TableName) -> bool:
"""
Get a boolean value whether a specified table exists or not.
Parameters
----------
table_name : TableName
Target table name.
Returns
-------
result : bool
If exists, returns True.
"""
query: str = (
'SELECT name FROM sqlite_master WHERE type = "table" '
f'AND name = "{table_name.value}" LIMIT 1;'
)
cursor.execute(query)
result: Optional[Tuple] = cursor.fetchone()
connection.commit()
if result:
return True
return False
def _make_create_table_query(
*, table_name: TableName,
column_ddl: str) -> str:
"""
Make a create table sql query.
Parameters
----------
table_name : str
Target table name.
column_ddl : str
Target table columns DDL string.
e.g., ' id INTEGER, ...'
Returns
-------
query : str
A create table sql query.
"""
query: str = (
'CREATE TABLE IF NOT EXISTS '
f'{table_name.value} ('
f'\n{column_ddl}'
'\n);'
)
return query
_EXPRESSION_TABLE_COLUMN_DDL: str = (
' id INTEGER PRIMARY KEY AUTOINCREMENT,'
'\n txt TEXT NOT NULL'
)
@_check_connection
def _create_expression_normal_table() -> None:
"""
Create the normal expression data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.EXPRESSION_NORMAL,
column_ddl=_EXPRESSION_TABLE_COLUMN_DDL)
cursor.execute(query)
@_check_connection
def _create_expression_handler_table() -> None:
"""
Create the handler expression data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.EXPRESSION_HANDLER,
column_ddl=_EXPRESSION_TABLE_COLUMN_DDL)
cursor.execute(query)
_INDENT_NUM_TABLE_COLUMN_DDL: str = ' num INTEGER NOT NULL'
@_check_connection
def _create_indent_num_normal_table() -> None:
"""
Create the normal indentation number data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.INDENT_NUM_NORMAL,
column_ddl=_INDENT_NUM_TABLE_COLUMN_DDL)
cursor.execute(query)
@_check_connection
def _create_indent_num_handler_table() -> None:
"""
Create the handler indentation number data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.INDENT_NUM_HANDLER,
column_ddl=_INDENT_NUM_TABLE_COLUMN_DDL)
cursor.execute(query)
@_check_connection
def _create_last_scope_table() -> None:
"""
Create the last scope data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.LAST_SCOPE,
column_ddl=(
' last_scope INTEGER NOT NULL'
))
cursor.execute(query)
@_check_connection
def _create_event_handler_scope_count_table() -> None:
"""
Create the event handler scope count value SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.EVENT_HANDLER_SCOPE_COUNT,
column_ddl=(
' count INTEGER NOT NULL'
))
cursor.execute(query)
@_check_connection
def _create_loop_count_table() -> None:
"""
Create the loop count value SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.LOOP_COUNT,
column_ddl=(
' count INTEGER NOT NULL'
))
cursor.execute(query)
@_check_connection
def _create_debug_mode_setting_table() -> None:
"""
Create the debug mode setting SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.DEBUG_MODE_SETTING,
column_ddl=(
' is_debug_mode INTEGER NOT NULL'
))
cursor.execute(query)
@_check_connection
def _create_debug_mode_callable_count_table() -> None:
"""
Create the debug mode callable count data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.DEBUG_MODE_CALLABLE_COUNT,
column_ddl=(
' id INTEGER PRIMARY KEY AUTOINCREMENT,'
'\n name TEXT NOT NULL,'
'\n count INTEGER NOT NULL'
))
cursor.execute(query)
@_check_connection
def _create_stage_elem_id_table() -> None:
"""
Create the stage element id data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.STAGE_ELEM_ID,
column_ddl=' elem_id TEXT NOT NULL')
cursor.execute(query)
@_check_connection
def _create_variable_name_count_table() -> None:
"""
Create the variable name count data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.VARIABLE_NAME_COUNT,
column_ddl=(
' id INTEGER PRIMARY KEY AUTOINCREMENT,'
'\n type_name TEXT NOT NULL,'
'\n count INTEGER NOT NULL'
))
cursor.execute(query)
@_check_connection
def _create_handler_calling_stack_table() -> None:
"""
Create the handler calling stack data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.HANDLER_CALLING_STACK,
column_ddl=(
' id INTEGER PRIMARY KEY AUTOINCREMENT,'
'\n handler_name TEXT NOT NULL,'
'\n scope_count INTEGER NOT NULL,'
'\n variable_name TEXT NOT NULL'
))
cursor.execute(query)
@_check_connection
def _create_circular_calling_handler_name_table() -> None:
"""
Create the circular calling handler names data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.CIRCULAR_CALLING_HANDLER_NAME,
column_ddl=(
' id INTEGER PRIMARY KEY AUTOINCREMENT,'
'\n handler_name TEXT NOT NULL,'
'\n prev_handler_name TEXT NOT NULL,'
'\n prev_variable_name TEXT NOT NULL'
))
cursor.execute(query)
@_check_connection
def _create_stage_id_table() -> None:
"""
Create the stage id data SQLite table.
"""
query: str = _make_create_table_query(
table_name=TableName.STAGE_ID,
column_ddl=' stage_id INTEGER NOT NULL')
cursor.execute(query)
def initialize_sqlite_tables_if_not_initialized() -> bool:
"""
Initialize the sqlite tables if they have not been
initialized yet.
Returns
-------
initialized : bool
If initialized, returns True.
"""
table_exists: bool = _table_exists(
table_name=TableName.EXPRESSION_NORMAL)
if table_exists:
return False
_create_expression_normal_table()
_create_expression_handler_table()
_create_indent_num_normal_table()
_create_indent_num_handler_table()
_create_last_scope_table()
_create_event_handler_scope_count_table()
_create_loop_count_table()
_create_debug_mode_setting_table()
_create_debug_mode_callable_count_table()
_create_stage_elem_id_table()
_create_variable_name_count_table()
_create_handler_calling_stack_table()
_create_circular_calling_handler_name_table()
_create_stage_id_table()
return True
def empty_expression() -> None:
"""
Empty the current js expression data.
"""
initialize_sqlite_tables_if_not_initialized()
for table_name in TableName:
if table_name == TableName.NOT_EXISTING:
continue
query: str = f'DELETE FROM {table_name.value};'
cursor.execute(query)
connection.commit()
def append_js_expression(expression: str) -> None:
"""
Append js expression.
Parameters
----------
expression : str
JavaScript Expression string.
References
----------
- append_js_expression interface document
- https://simon-ritchie.github.io/apysc/append_js_expression.html
Examples
--------
>>> import apysc as ap
>>> ap.append_js_expression(expression='console.log("Hello!")')
"""
from apysc._expression import indent_num
from apysc._expression import last_scope
from apysc._string import indent_util
initialize_sqlite_tables_if_not_initialized()
current_indent_num: int = indent_num.get_current_indent_num()
expression = indent_util.append_spaces_to_expression(
expression=expression, indent_num=current_indent_num)
expression = expression.replace('"', '""')
table_name: TableName = _get_expression_table_name()
query: str = (
f'INSERT INTO {table_name.value}(txt) '
f'VALUES ("{expression}");'
)
cursor.execute(query)
connection.commit()
last_scope.set_last_scope(value=last_scope.LastScope.NORMAL)
def _get_expression_table_name() -> TableName:
"""
Get a expression table name. This value will be switched whether
current scope is event handler's one or not.
Returns
-------
table_name : str
Target expression table name.
"""
from apysc._expression import event_handler_scope
event_handler_scope_count: int = \
event_handler_scope.get_current_event_handler_scope_count()
if event_handler_scope_count == 0:
return TableName.EXPRESSION_NORMAL
return TableName.EXPRESSION_HANDLER
def get_current_expression() -> str:
"""
Get a current expression's string.
Notes
-----
If it is necessary to get event handler scope's expression,
then use get_current_event_handler_scope_expression function
instead.
Returns
-------
current_expression : str
Current expression's string.
"""
current_expression: str = _get_current_expression(
table_name=TableName.EXPRESSION_NORMAL)
return current_expression
def get_current_event_handler_scope_expression() -> str:
"""
Get a current event handler scope's expression string.
Notes
-----
If it is necessary to get normal scope's expression, then use
get_current_expression function instead.
Returns
-------
current_expression : str
Current expression's string.
"""
current_expression: str = _get_current_expression(
table_name=TableName.EXPRESSION_HANDLER)
return current_expression
def _get_current_expression(*, table_name: TableName) -> str:
"""
Get a current expression string from a specified table.
Parameters
----------
table_name : TableName
Target table name.
Returns
-------
current_expression : str
Current expression string.
"""
initialize_sqlite_tables_if_not_initialized()
query: str = (
f'SELECT txt FROM {table_name.value}')
cursor.execute(query)
result: List[Tuple[str]] = cursor.fetchall()
if not result:
return ''
expressions: List[str] = [tpl[0] for tpl in result]
current_expression = '\n'.join(expressions)
return current_expression
class _LimitClauseCantUseError(Exception):
...
def _validate_limit_clause(*, sql: str) -> None:
"""
Validate whether a LIMIT clause is used in a UPDATE or DELETE sql.
Parameters
----------
sql : str
Target sql.
Raises
------
_LimitClauseCantUseError
If the LIMIT clause used in a DELETE or UPDATE sql.
"""
sql_: str = sql.lower()
if 'delete ' not in sql_ and 'update ' not in sql_:
return
if 'limit ' not in sql_:
return
raise _LimitClauseCantUseError(
f'LIMIT clause cannot use in the UPDATE or DELETE sql: {sql_}')
def exec_query(*, sql: str, commit: bool = True) -> None:
"""
Execute a SQLite sql query.
Parameters
----------
sql : str
Target sql.
commit : bool, default True
A boolean value whether commit the transaction after the
sql query or not.
Raises
------
_LimitClauseCantUseError
If the LIMIT clause used in a DELETE or UPDATE sql.
"""
_validate_limit_clause(sql=sql)
initialize_sqlite_tables_if_not_initialized()
cursor.execute(sql)
if commit:
connection.commit()
| 0 | 716 | 50 |
a69486b0f500679b3b3fd3b0ed3d7f34e8f9eb69 | 257 | py | Python | flask-app/utils/settings.py | Mongosaurusrex/kibabot | 065bc83e5c4ce039af86189a4bca191050266c46 | [
"MIT"
] | null | null | null | flask-app/utils/settings.py | Mongosaurusrex/kibabot | 065bc83e5c4ce039af86189a4bca191050266c46 | [
"MIT"
] | null | null | null | flask-app/utils/settings.py | Mongosaurusrex/kibabot | 065bc83e5c4ce039af86189a4bca191050266c46 | [
"MIT"
] | null | null | null | from os import environ
from dotenv import load_dotenv
from pathlib import Path
env_path = Path('../.env')
load_dotenv(dotenv_path=env_path)
MONGODB_CONNECTION_STRING = environ.get('MONGODB_CONNECTION_STRING')
JWT_SECRET_KEY = environ.get('JWT_SECRET_KEY') | 28.555556 | 68 | 0.817121 | from os import environ
from dotenv import load_dotenv
from pathlib import Path
env_path = Path('../.env')
load_dotenv(dotenv_path=env_path)
MONGODB_CONNECTION_STRING = environ.get('MONGODB_CONNECTION_STRING')
JWT_SECRET_KEY = environ.get('JWT_SECRET_KEY') | 0 | 0 | 0 |
db993994d2354879355f0bcbdfd78bc9b8bfae8d | 922 | py | Python | opensanctions/crawlers/wd_curated.py | opensanctions/opensanctions | 7dff9597f982d8918699b2cde3c7c337a941622d | [
"MIT"
] | 23 | 2022-02-09T12:50:36.000Z | 2022-03-30T16:04:19.000Z | opensanctions/crawlers/wd_curated.py | pudo/pepmint | 5ef82f63797f40c9c9848dc2872bd9642c9e666a | [
"MIT"
] | 10 | 2022-02-03T08:44:03.000Z | 2022-03-21T15:27:40.000Z | opensanctions/crawlers/wd_curated.py | pudo/pepmint | 5ef82f63797f40c9c9848dc2872bd9642c9e666a | [
"MIT"
] | 2 | 2022-02-16T11:51:05.000Z | 2022-03-02T16:55:08.000Z | import csv
from typing import Dict
from pantomime.types import CSV
from nomenklatura.util import is_qid
from opensanctions.core import Context
| 29.741935 | 73 | 0.656182 | import csv
from typing import Dict
from pantomime.types import CSV
from nomenklatura.util import is_qid
from opensanctions.core import Context
def crawl_row(context: Context, row: Dict[str, str]):
qid = row.get("qid", "").strip()
if not len(qid):
return
if not is_qid(qid):
context.log.warning("No valid QID", qid=qid)
return
schema = row.get("schema") or "Person"
entity = context.make(schema)
entity.id = qid
topics = [t.strip() for t in row.get("topics", "").split(";")]
topics = [t for t in topics if len(t)]
entity.add("topics", topics)
context.emit(entity, target=True)
def crawl(context: Context):
path = context.fetch_resource("source.csv", context.dataset.data.url)
context.export_resource(path, CSV, title=context.SOURCE_TITLE)
with open(path, "r") as fh:
for row in csv.DictReader(fh):
crawl_row(context, row)
| 730 | 0 | 46 |
11e4f28698d042ea048103df1d79413719a44f57 | 4,187 | py | Python | modules/translate.py | asl97/phenny | 0b9901aa2ec221b82be367551040626607196f56 | [
"EFL-2.0"
] | null | null | null | modules/translate.py | asl97/phenny | 0b9901aa2ec221b82be367551040626607196f56 | [
"EFL-2.0"
] | null | null | null | modules/translate.py | asl97/phenny | 0b9901aa2ec221b82be367551040626607196f56 | [
"EFL-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
"""
translate.py - Phenny Translation Module
Copyright 2008, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://inamidst.com/phenny/
"""
import re, urllib
import web
def tr(phenny, context):
"""Translates a phrase, with an optional language hint."""
input, output, phrase = context.groups()
phrase = phrase.encode('utf-8')
if (len(phrase) > 350) and (not context.admin):
return phenny.reply('Phrase must be under 350 characters.')
input = input or 'auto'
input = input.encode('utf-8')
output = (output or 'en').encode('utf-8')
if input != output:
msg, input = translate(phrase, input, output)
if isinstance(msg, str):
msg = msg.decode('utf-8')
if msg:
msg = web.decode(msg) # msg.replace(''', "'")
msg = '"%s" (%s to %s, translate.google.com)' % (msg, input, output)
else: msg = 'The %s to %s translation failed, sorry!' % (input, output)
phenny.reply(msg)
else: phenny.reply('Language guessing failed, so try suggesting one!')
tr.rule = ('$nick', ur'(?:([a-z]{2}) +)?(?:([a-z]{2}|en-raw) +)?["“](.+?)["”]\? *$')
tr.example = '$nickname: "mon chien"? or $nickname: fr "mon chien"?'
tr.priority = 'low'
def tr2(phenny, input):
"""Translates a phrase, with an optional language hint."""
command = input.group(2)
if not command:
return phenny.reply("Need something to translate!")
command = command.encode('utf-8')
args = ['auto', 'en']
for i in xrange(2):
if not ' ' in command: break
prefix, cmd = command.split(' ', 1)
if langcode(prefix):
args[i] = prefix[1:]
command = cmd
phrase = command
# if (len(phrase) > 350) and (not input.admin):
# return phenny.reply('Phrase must be under 350 characters.')
src, dest = args
if src != dest:
msg, src = translate(phrase, src, dest)
if isinstance(msg, str):
msg = msg.decode('utf-8')
if msg:
msg = web.decode(msg) # msg.replace(''', "'")
if len(msg) > 450: msg = msg[:450] + '[...]'
msg = '"%s" (%s to %s, translate.google.com)' % (msg, src, dest)
else: msg = 'The %s to %s translation failed, sorry!' % (src, dest)
phenny.reply(msg)
else: phenny.reply('Language guessing failed, so try suggesting one!')
tr2.commands = ['tr']
tr2.priority = 'low'
mangle.commands = ['mangle']
if __name__ == '__main__':
print __doc__.strip()
| 28.678082 | 84 | 0.577979 | #!/usr/bin/env python
# coding=utf-8
"""
translate.py - Phenny Translation Module
Copyright 2008, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://inamidst.com/phenny/
"""
import re, urllib
import web
def translate(text, input='auto', output='en'):
raw = False
if output.endswith('-raw'):
output = output[:-4]
raw = True
import urllib2, json
opener = urllib2.build_opener()
opener.addheaders = [(
'User-Agent', 'Mozilla/5.0' +
'(X11; U; Linux i686)' +
'Gecko/20071127 Firefox/2.0.0.11'
)]
input, output = urllib.quote(input), urllib.quote(output)
text = urllib.quote(text)
result = opener.open('http://translate.google.com/translate_a/t?' +
('client=t&hl=en&sl=%s&tl=%s&multires=1' % (input, output)) +
('&otf=1&ssel=0&tsel=0&uptl=en&sc=1&text=%s' % text)).read()
while ',,' in result:
result = result.replace(',,', ',null,')
data = json.loads(result)
if raw:
return str(data), 'en-raw'
try: language = data[2] # -2][0][0]
except: language = '?'
return ''.join(x[0] for x in data[0]), language
def tr(phenny, context):
"""Translates a phrase, with an optional language hint."""
input, output, phrase = context.groups()
phrase = phrase.encode('utf-8')
if (len(phrase) > 350) and (not context.admin):
return phenny.reply('Phrase must be under 350 characters.')
input = input or 'auto'
input = input.encode('utf-8')
output = (output or 'en').encode('utf-8')
if input != output:
msg, input = translate(phrase, input, output)
if isinstance(msg, str):
msg = msg.decode('utf-8')
if msg:
msg = web.decode(msg) # msg.replace(''', "'")
msg = '"%s" (%s to %s, translate.google.com)' % (msg, input, output)
else: msg = 'The %s to %s translation failed, sorry!' % (input, output)
phenny.reply(msg)
else: phenny.reply('Language guessing failed, so try suggesting one!')
tr.rule = ('$nick', ur'(?:([a-z]{2}) +)?(?:([a-z]{2}|en-raw) +)?["“](.+?)["”]\? *$')
tr.example = '$nickname: "mon chien"? or $nickname: fr "mon chien"?'
tr.priority = 'low'
def tr2(phenny, input):
"""Translates a phrase, with an optional language hint."""
command = input.group(2)
if not command:
return phenny.reply("Need something to translate!")
command = command.encode('utf-8')
def langcode(p):
return p.startswith(':') and (2 < len(p) < 10) and p[1:].isalpha()
args = ['auto', 'en']
for i in xrange(2):
if not ' ' in command: break
prefix, cmd = command.split(' ', 1)
if langcode(prefix):
args[i] = prefix[1:]
command = cmd
phrase = command
# if (len(phrase) > 350) and (not input.admin):
# return phenny.reply('Phrase must be under 350 characters.')
src, dest = args
if src != dest:
msg, src = translate(phrase, src, dest)
if isinstance(msg, str):
msg = msg.decode('utf-8')
if msg:
msg = web.decode(msg) # msg.replace(''', "'")
if len(msg) > 450: msg = msg[:450] + '[...]'
msg = '"%s" (%s to %s, translate.google.com)' % (msg, src, dest)
else: msg = 'The %s to %s translation failed, sorry!' % (src, dest)
phenny.reply(msg)
else: phenny.reply('Language guessing failed, so try suggesting one!')
tr2.commands = ['tr']
tr2.priority = 'low'
def mangle(phenny, input):
import time
phrase = input.group(2).encode('utf-8')
for lang in ['fr', 'de', 'es', 'it', 'ja']:
backup = phrase[:]
phrase, _lang = translate(phrase, 'en', lang)
phrase = phrase.encode("utf-8")
if not phrase:
phrase = backup[:]
break
time.sleep(0.25)
backup = phrase[:]
phrase, _lang = translate(phrase, lang, 'en')
phrase = phrase.encode("utf-8")
if not phrase:
phrase = backup[:]
break
time.sleep(0.25)
phrase = phrase.replace(' ,', ',').replace(' .', '.')
phrase = phrase.strip(' ,')
phenny.reply(phrase or 'ERRORS SRY')
mangle.commands = ['mangle']
if __name__ == '__main__':
print __doc__.strip()
| 1,604 | 0 | 72 |
9eb7897674e07d6361ca8141ff7f37895824feda | 5,503 | py | Python | tests/test_cli.py | tcolligan4/Landsat578 | c45495db5bd48015a87e44c892b4c473df3f95eb | [
"Apache-2.0"
] | 58 | 2017-04-28T09:40:12.000Z | 2022-01-05T08:48:26.000Z | tests/test_cli.py | ramsha-muzaffer/Landsat578 | a829e9ca60a232bc45aabadc533c8e4e2ef373ce | [
"Apache-2.0"
] | 30 | 2017-04-28T21:19:30.000Z | 2021-12-13T19:44:17.000Z | tests/test_cli.py | ramsha-muzaffer/Landsat578 | a829e9ca60a232bc45aabadc533c8e4e2ef373ce | [
"Apache-2.0"
] | 28 | 2017-05-25T18:24:28.000Z | 2021-09-24T19:46:24.000Z | # ===============================================================================
# Copyright 2018 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import print_function, absolute_import
import os
import unittest
import shutil
import sys
import pkg_resources
sys.path.append(os.path.dirname(__file__).replace('tests', 'landsat'))
from landsat.landsat_cli import create_parser, main
# this causes systemexit, use only to make a new config
# def test_config_no_config_provided(self):
# args_list = ['--configuration', os.getcwd()]
# args = self.parser.parse_args(args_list)
# main(args)
# pass
# def test_metadata_creation(self):
# wrs = os.path.join(os.path.dirname(__file__).replace('tests', 'landsat'), 'wrs')
# scenes = os.path.join(os.path.dirname(__file__).replace('tests', 'landsat'), 'scenes')
#
# try:
# shutil.rmtree(wrs)
# except:
# pass
# try:
# shutil.rmtree(scenes)
# except:
# pass
#
# args_list = ['--satellite', self.sat, '--start', self.start, '--end',
# self.end, '--return-list',
# '--path', str(self.path), '--row', str(self.row)]
#
# args = self.parser.parse_args(args_list)
# main(args)
# self.assertTrue(os.path.isdir(wrs))
# self.assertTrue(os.path.isfile(os.path.join(wrs, 'wrs2_descending.shp')))
# self.assertTrue(os.path.isdir(scenes))
if __name__ == '__main__':
unittest.main()
# ===============================================================================
| 37.182432 | 104 | 0.565691 | # ===============================================================================
# Copyright 2018 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import print_function, absolute_import
import os
import unittest
import shutil
import sys
import pkg_resources
sys.path.append(os.path.dirname(__file__).replace('tests', 'landsat'))
from landsat.landsat_cli import create_parser, main
class CommandLineTestCase(unittest.TestCase):
def setUp(self):
self.parser = create_parser()
self.lat = '45.6'
self.lon = '-107.5'
self.sat = '7'
self.path = '36'
self.row = '28'
self.start = '2007-05-01'
self.end = '2007-05-31'
self.scene_list = ['LE70360282007122EDC00', 'LE70360282007138EDC00']
self.scene_list_2 = ['LE70360292007122EDC00', 'LE70360292007138EDC00']
self.wgs_tile = pkg_resources.resource_filename('tests',
'data/wrs2_036029_WGS.shp')
self.config_scenes = ['LT50430302007131PAC01', 'LT50430302007147PAC01']
def tearDown(self):
pass
def test_latlon(self):
print('Testing valid lat lon...')
args_list = ['--satellite', self.sat, '--start', self.start, '--end',
self.end, '--return-list',
'--lat', self.lat, '--lon', self.lon]
args = self.parser.parse_args(args_list)
scenes = main(args)
self.assertEqual(scenes, self.scene_list)
def test_path_row(self):
print('Testing valid path row...')
args_list = ['--satellite', self.sat, '--start', self.start, '--end',
self.end, '--return-list',
'--path', str(self.path), '--row', str(self.row)]
args = self.parser.parse_args(args_list)
scenes = main(args)
self.assertEqual(scenes, self.scene_list)
def test_zipped(self):
print('Testing zipped...')
root = 'tests'
base = pkg_resources.resource_filename('tests', 'data/downloader_config.yml')
filepath = os.path.join(root, base)
if not os.path.isfile(filepath):
filepath = base
temp = os.path.join(os.path.dirname(filepath), 'temp')
try:
shutil.rmtree(temp)
except Exception:
pass
os.mkdir(temp)
args_list = ['--satellite', self.sat, '--start', self.start, '--end',
self.end, '--zipped',
'--path', str(self.path), '--row', str(self.row), '-o', temp]
args = self.parser.parse_args(args_list)
main(args)
self.assertTrue(os.path.isfile(os.path.join(temp, 'LE70360282007122EDC00.tar.gz')))
shutil.rmtree(temp)
# this causes systemexit, use only to make a new config
# def test_config_no_config_provided(self):
# args_list = ['--configuration', os.getcwd()]
# args = self.parser.parse_args(args_list)
# main(args)
# pass
def test_config(self):
root = 'tests'
base = pkg_resources.resource_filename('tests', 'data/downloader_config.yml')
filepath = os.path.join(root, base)
if not os.path.isfile(filepath):
filepath = base
temp = os.path.join(os.path.dirname(filepath), 'temp')
try:
shutil.rmtree(temp)
except Exception:
pass
os.mkdir(temp)
args_list = ['--configuration', filepath]
args = self.parser.parse_args(args_list)
main(args)
location = os.path.dirname(base)
self.assertTrue(os.path.isdir(os.path.join(location, 'temp', self.config_scenes[0])))
self.assertTrue(os.path.isfile(os.path.join(location, 'temp', self.config_scenes[0],
'LT05_L1TP_043030_20070511_20160908_01_T1_B3.TIF')))
shutil.rmtree(temp)
# def test_metadata_creation(self):
# wrs = os.path.join(os.path.dirname(__file__).replace('tests', 'landsat'), 'wrs')
# scenes = os.path.join(os.path.dirname(__file__).replace('tests', 'landsat'), 'scenes')
#
# try:
# shutil.rmtree(wrs)
# except:
# pass
# try:
# shutil.rmtree(scenes)
# except:
# pass
#
# args_list = ['--satellite', self.sat, '--start', self.start, '--end',
# self.end, '--return-list',
# '--path', str(self.path), '--row', str(self.row)]
#
# args = self.parser.parse_args(args_list)
# main(args)
# self.assertTrue(os.path.isdir(wrs))
# self.assertTrue(os.path.isfile(os.path.join(wrs, 'wrs2_descending.shp')))
# self.assertTrue(os.path.isdir(scenes))
if __name__ == '__main__':
unittest.main()
# ===============================================================================
| 3,051 | 24 | 184 |
c7158a4c57ece4689a55162bbbf6c223afe88ed1 | 2,170 | py | Python | ida_lib/tests/test_dataloader.py | raquelvilas18/ida_lib | 4002e39417edfd62448b503196da9692a245e79d | [
"MIT"
] | 2 | 2020-04-30T03:17:22.000Z | 2020-05-27T09:26:23.000Z | ida_lib/tests/test_dataloader.py | raquelvilas18/ida_lib | 4002e39417edfd62448b503196da9692a245e79d | [
"MIT"
] | null | null | null | ida_lib/tests/test_dataloader.py | raquelvilas18/ida_lib | 4002e39417edfd62448b503196da9692a245e79d | [
"MIT"
] | null | null | null | import pytest
from ida_lib.core.pipeline_geometric_ops import TranslatePipeline, RandomShearPipeline
from ida_lib.image_augmentation.data_loader import AugmentDataLoader
# cp-020
@pytest.mark.parametrize(
["batchsize"], [[1], [2], [3], [5], [10]]
)
# cp-021
@pytest.mark.parametrize(
["resize"], [[(10, 10)], [(10, 50)], [(50, 10)], [(500, 500)]]
)
| 44.285714 | 106 | 0.517051 | import pytest
from ida_lib.core.pipeline_geometric_ops import TranslatePipeline, RandomShearPipeline
from ida_lib.image_augmentation.data_loader import AugmentDataLoader
# cp-020
@pytest.mark.parametrize(
["batchsize"], [[1], [2], [3], [5], [10]]
)
def test_dataloader_work(dataset, batchsize):
dataloader = AugmentDataLoader(dataset=dataset,
batch_size=batchsize,
shuffle=True,
pipeline_operations=(
TranslatePipeline(probability=1, translation=(30, 10)),
RandomShearPipeline(probability=0.5, shear_range=(0, 0.5))),
interpolation='bilinear',
padding_mode='zeros'
)
for i_batch, sample_batched in enumerate(dataloader): # our dataloader works like a normal dataloader
assert 'image' in sample_batched
assert 'mask' in sample_batched
assert sample_batched['image'].shape[0] == batchsize
if i_batch == 2:
break
# cp-021
@pytest.mark.parametrize(
["resize"], [[(10, 10)], [(10, 50)], [(50, 10)], [(500, 500)]]
)
def test_dataloader_resize_work(dataset, resize):
dataloader = AugmentDataLoader(dataset=dataset,
batch_size=1,
shuffle=True,
pipeline_operations=(
TranslatePipeline(probability=1, translation=(30, 10)),
RandomShearPipeline(probability=0.5, shear_range=(0, 0.5))),
resize=resize,
interpolation='bilinear',
padding_mode='zeros'
)
for i_batch, sample_batched in enumerate(dataloader): # our dataloader works like a normal dataloader
assert 'image' in sample_batched
assert sample_batched['image'].shape[2:] == resize
if i_batch == 2:
break
| 1,764 | 0 | 44 |
0cb3187b91837fec91e53d6f882960fceb3644bc | 4,033 | py | Python | util.py | demelin/Noise-Contrastive-Estimation-NCE-for-pyTorch | 8ee790486ba5b7f47d6b016b5e58f90c80d04914 | [
"MIT"
] | 30 | 2017-07-28T13:22:31.000Z | 2021-11-14T23:28:27.000Z | util.py | demelin/Noise-Contrastive-Estimation-NCE-for-pyTorch | 8ee790486ba5b7f47d6b016b5e58f90c80d04914 | [
"MIT"
] | null | null | null | util.py | demelin/Noise-Contrastive-Estimation-NCE-for-pyTorch | 8ee790486ba5b7f47d6b016b5e58f90c80d04914 | [
"MIT"
] | 7 | 2018-05-02T22:25:28.000Z | 2021-11-30T02:18:38.000Z | import pickle
import numpy as np
import torch
from torch.autograd import Variable
def get_probability(class_id, set_size):
""" Calculates the probability of a word occuring in some corpus the classes of which follow a log-uniform
(Zipfian) base distribution"""
class_prob = (np.log(class_id + 2) - np.log(class_id + 1)) / np.log(set_size + 1)
return class_prob
def renormalize(class_probs, rejected_id):
""" Re-normalizes the probabilities of remaining classes within the class set after the rejection of
some class previously present within the set. """
rejected_mass = class_probs[rejected_id]
class_probs[rejected_id] = 0
remaining_mass = 1 - rejected_mass
updated_class_probs = {class_id: class_probs[class_id] / remaining_mass for class_id in class_probs.keys()}
return updated_class_probs
def make_sampling_array(range_max, array_path):
""" Creates and populates the array from which the fake labels are sampled during the NCE loss calculation."""
# Get class probabilities
print('Computing the Zipfian distribution probabilities for the corpus items.')
class_probs = {class_id: get_probability(class_id, range_max) for class_id in range(range_max)}
print('Generating and populating the sampling array. This may take a while.')
# Generate empty array
sampling_array = np.zeros(int(1e8))
# Determine how frequently each index has to appear in array to match its probability
class_counts = {class_id: int(np.round((class_probs[class_id] * 1e8))) for class_id in range(range_max)}
assert(sum(list(class_counts.values())) == 1e8), 'Counts don\'t add up to the array size!'
# Populate sampling array
pos = 0
for key, value in class_counts.items():
while value != 0:
sampling_array[pos] = key
pos += 1
value -= 1
# Save filled array into a pickle, for subsequent reuse
with open(array_path, 'wb') as f:
pickle.dump((sampling_array, class_probs), f)
return sampling_array, class_probs
def sample_values(true_classes, num_sampled, unique, no_accidental_hits, sampling_array, class_probs):
""" Samples negative items for the calculation of the NCE loss. Operates on batches of targets. """
# Initialize output sequences
sampled_candidates = np.zeros(num_sampled)
true_expected_count = np.zeros(true_classes.size())
sampled_expected_count = np.zeros(num_sampled)
# If the true labels should not be sampled as a noise items, add them all to the rejected list
if no_accidental_hits:
rejected = list()
else:
rejected = true_classes.tolist()
# Assign true label probabilities
rows, cols = true_classes.size()
for i in range(rows):
for j in range(cols):
true_expected_count[i][j] = class_probs[true_classes.data[i][j]]
# Obtain sampled items and their probabilities
print('Sampling items and their probabilities.')
for k in range(num_sampled):
sampled_pos = np.random.randint(int(1e8))
sampled_idx = sampling_array[sampled_pos]
if unique:
while sampled_idx in rejected:
sampled_idx = sampling_array[np.random.randint(0, int(1e8))]
# Append sampled candidate and its probability to the output sequences for current target
sampled_candidates[k] = sampled_idx
sampled_expected_count[k] = class_probs[sampled_idx]
# Re-normalize probabilities
if unique:
class_probs = renormalize(class_probs, sampled_idx)
# Process outputs before they are returned
sampled_candidates = sampled_candidates.astype(np.int64, copy=False)
true_expected_count = true_expected_count.astype(np.float32, copy=False)
sampled_expected_count = sampled_expected_count.astype(np.float32, copy=False)
return Variable(torch.LongTensor(sampled_candidates)), \
Variable(torch.FloatTensor(true_expected_count)), \
Variable(torch.FloatTensor(sampled_expected_count))
| 43.836957 | 114 | 0.714357 | import pickle
import numpy as np
import torch
from torch.autograd import Variable
def get_probability(class_id, set_size):
""" Calculates the probability of a word occuring in some corpus the classes of which follow a log-uniform
(Zipfian) base distribution"""
class_prob = (np.log(class_id + 2) - np.log(class_id + 1)) / np.log(set_size + 1)
return class_prob
def renormalize(class_probs, rejected_id):
""" Re-normalizes the probabilities of remaining classes within the class set after the rejection of
some class previously present within the set. """
rejected_mass = class_probs[rejected_id]
class_probs[rejected_id] = 0
remaining_mass = 1 - rejected_mass
updated_class_probs = {class_id: class_probs[class_id] / remaining_mass for class_id in class_probs.keys()}
return updated_class_probs
def make_sampling_array(range_max, array_path):
""" Creates and populates the array from which the fake labels are sampled during the NCE loss calculation."""
# Get class probabilities
print('Computing the Zipfian distribution probabilities for the corpus items.')
class_probs = {class_id: get_probability(class_id, range_max) for class_id in range(range_max)}
print('Generating and populating the sampling array. This may take a while.')
# Generate empty array
sampling_array = np.zeros(int(1e8))
# Determine how frequently each index has to appear in array to match its probability
class_counts = {class_id: int(np.round((class_probs[class_id] * 1e8))) for class_id in range(range_max)}
assert(sum(list(class_counts.values())) == 1e8), 'Counts don\'t add up to the array size!'
# Populate sampling array
pos = 0
for key, value in class_counts.items():
while value != 0:
sampling_array[pos] = key
pos += 1
value -= 1
# Save filled array into a pickle, for subsequent reuse
with open(array_path, 'wb') as f:
pickle.dump((sampling_array, class_probs), f)
return sampling_array, class_probs
def sample_values(true_classes, num_sampled, unique, no_accidental_hits, sampling_array, class_probs):
""" Samples negative items for the calculation of the NCE loss. Operates on batches of targets. """
# Initialize output sequences
sampled_candidates = np.zeros(num_sampled)
true_expected_count = np.zeros(true_classes.size())
sampled_expected_count = np.zeros(num_sampled)
# If the true labels should not be sampled as a noise items, add them all to the rejected list
if no_accidental_hits:
rejected = list()
else:
rejected = true_classes.tolist()
# Assign true label probabilities
rows, cols = true_classes.size()
for i in range(rows):
for j in range(cols):
true_expected_count[i][j] = class_probs[true_classes.data[i][j]]
# Obtain sampled items and their probabilities
print('Sampling items and their probabilities.')
for k in range(num_sampled):
sampled_pos = np.random.randint(int(1e8))
sampled_idx = sampling_array[sampled_pos]
if unique:
while sampled_idx in rejected:
sampled_idx = sampling_array[np.random.randint(0, int(1e8))]
# Append sampled candidate and its probability to the output sequences for current target
sampled_candidates[k] = sampled_idx
sampled_expected_count[k] = class_probs[sampled_idx]
# Re-normalize probabilities
if unique:
class_probs = renormalize(class_probs, sampled_idx)
# Process outputs before they are returned
sampled_candidates = sampled_candidates.astype(np.int64, copy=False)
true_expected_count = true_expected_count.astype(np.float32, copy=False)
sampled_expected_count = sampled_expected_count.astype(np.float32, copy=False)
return Variable(torch.LongTensor(sampled_candidates)), \
Variable(torch.FloatTensor(true_expected_count)), \
Variable(torch.FloatTensor(sampled_expected_count))
| 0 | 0 | 0 |
685e914cd807d11a76bde3fad53c282173c2a256 | 688 | py | Python | project_euler/n001.py | nabin-info/hackerrank.com | da66a470d2e97a093821bfe41eb233d51784b9cc | [
"MIT"
] | null | null | null | project_euler/n001.py | nabin-info/hackerrank.com | da66a470d2e97a093821bfe41eb233d51784b9cc | [
"MIT"
] | null | null | null | project_euler/n001.py | nabin-info/hackerrank.com | da66a470d2e97a093821bfe41eb233d51784b9cc | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
T = int(raw_input().strip())
for t in range(T):
N = int(raw_input().strip())
print_fast35(N)
#if len(sys.argv) > 1:
# for n in map(int, sys.argv[1:]):
# print_3or5_sum(n)
| 19.657143 | 60 | 0.456395 | #!/usr/bin/python
import sys
def print_sum35(n):
a, x = 0.0, 1
while x < n:
if x % 3 == 0 or x % 5 == 0:
a += x
x += 1
print a
def print_fast35(n):
n -= 1
a,b,d = n % 3, n % 5, n % 15
a,b,d = n - a, n - b, n - d
a,b,d = a / 3, b / 5, d / 15
c = 3*a*(a+1)/2 + 5*b*(b+1)/2 - 15*d*(d+1)/2
print c
T = int(raw_input().strip())
for t in range(T):
N = int(raw_input().strip())
print_fast35(N)
def print_3or5_sum(N):
L = [x for x in range(1, N) if x % 3 == 0 or x % 5 == 0]
S = reduce(lambda a,x: a + x, L)
print S
#if len(sys.argv) > 1:
# for n in map(int, sys.argv[1:]):
# print_3or5_sum(n)
| 396 | 0 | 70 |
61f54762177c48f818d5ba5333446986d426d6db | 266 | py | Python | contact_us/api/views.py | OSAMAMOHAMED1234/E-Commerce_django_restful_api | a8f139996a909b02b4463a2620fc3ab3cddee2e0 | [
"MIT"
] | 3 | 2018-05-02T20:37:11.000Z | 2020-10-15T17:19:26.000Z | contact_us/api/views.py | OSAMAMOHAMED1234/E-Commerce_django_restful_api | a8f139996a909b02b4463a2620fc3ab3cddee2e0 | [
"MIT"
] | 1 | 2019-06-10T21:35:13.000Z | 2019-06-10T21:35:13.000Z | contact_us/api/views.py | OSAMAMOHAMED1234/E-Commerce_django_restful_api | a8f139996a909b02b4463a2620fc3ab3cddee2e0 | [
"MIT"
] | 3 | 2018-04-13T17:01:18.000Z | 2020-04-09T07:21:34.000Z | from rest_framework.generics import CreateAPIView
from rest_framework.permissions import AllowAny
from .serializers import ContactUsSerializer
| 26.6 | 49 | 0.838346 | from rest_framework.generics import CreateAPIView
from rest_framework.permissions import AllowAny
from .serializers import ContactUsSerializer
class ContactUsAPIView(CreateAPIView):
serializer_class = ContactUsSerializer
permission_classes = [AllowAny, ]
| 0 | 98 | 23 |
2ea08ef9c0c6a8d6f972a2b84b5ac59a03d6ed2d | 5,154 | py | Python | spats_shape_seq/tests/test_target.py | entzian/spats | cb0d9f1f3c0fbcb3c333096d657ef96f7179de46 | [
"BSL-1.0"
] | 4 | 2016-11-27T04:44:41.000Z | 2019-10-08T18:32:51.000Z | spats_shape_seq/tests/test_target.py | entzian/spats | cb0d9f1f3c0fbcb3c333096d657ef96f7179de46 | [
"BSL-1.0"
] | 1 | 2017-12-18T22:02:28.000Z | 2017-12-18T22:02:28.000Z | spats_shape_seq/tests/test_target.py | entzian/spats | cb0d9f1f3c0fbcb3c333096d657ef96f7179de46 | [
"BSL-1.0"
] | 3 | 2016-06-10T11:36:00.000Z | 2021-08-30T18:25:18.000Z |
import unittest
from spats_shape_seq.parse import fasta_parse
from spats_shape_seq.target import Targets
from spats_shape_seq.util import reverse_complement
TARGET_SRP = "ATCGGGGGCTCTGTTGGTTCTCCCGCAACGCTACTCTGTTTACCAGGTCAGGTCCGGAAGGAAGCAGCCAAGGCAGATGACGCGTGTGCCGGGATGTAGCTGGCAGGGCCCCCACCCGTCCTTGGTGCCCGAGTCAG"
TARGET_5S = open("test/5s/5s.fa", 'rb').read().split('\n')[1]
| 54.829787 | 152 | 0.696352 |
import unittest
from spats_shape_seq.parse import fasta_parse
from spats_shape_seq.target import Targets
from spats_shape_seq.util import reverse_complement
TARGET_SRP = "ATCGGGGGCTCTGTTGGTTCTCCCGCAACGCTACTCTGTTTACCAGGTCAGGTCCGGAAGGAAGCAGCCAAGGCAGATGACGCGTGTGCCGGGATGTAGCTGGCAGGGCCCCCACCCGTCCTTGGTGCCCGAGTCAG"
TARGET_5S = open("test/5s/5s.fa", 'rb').read().split('\n')[1]
class TestMultiple(unittest.TestCase):
def test_multiple(self):
target = Targets()
target.addTarget("SRP", TARGET_SRP)
target.addTarget("5S", TARGET_5S)
target.index()
self.assertEqual(10, target.longest_self_match())
tgt, s, l, i = target.find_partial("GGATGCCTTTTTTTTTTTTTTTTTTTTTTTTTTTT")
self.assertEqual(("5S", 0, 8, 0), (tgt.name, s, l, i))
tgt, s, l, i = target.find_partial('ATGGGGGGCTCTGTTGGTT')
self.assertEqual(("SRP", 3, 16, 3), (tgt.name, s, l, i))
def test_multiple_parse(self):
target = Targets()
for name, seq in fasta_parse("test/panel_RNAs/panel_RNAs_complete.fa"):
target.addTarget(name, seq)
self.assertEqual(14, len(target.targets))
target.index()
self.assertEqual(169, target.longest_self_match())
t, s, l , i = target.find_partial("GGTCGGATGAAGATATGAGGAGAGATTTCATTTT")
self.assertEqual(5, len(t))
t, s, l , i = target.find_partial("CGGCCGTAGCGCGGTGGTCCCACCTGACCCCATGCCGAACTCA")
self.assertEqual("5S", t.name)
t, s, l , i = target.find_partial("CACTCGGGCACCAAGGACGG")
self.assertEqual(8, len(t))
t, s, l , i = target.find_partial("TCTTCCGATCTCACTCGGGCACCAAGGACGG")
self.assertEqual("Glycine_Riboswitch_GG_barcode_T", t.name)
class TestTargetIndexing(unittest.TestCase):
def test_index_srp(self):
target = Targets()
target.addTarget("SRP", TARGET_SRP)
target.index()
self.assertEqual(8, target.longest_self_match())
def test_index_5S(self):
target = Targets()
target.addTarget("5S", TARGET_5S)
target.index()
self.assertEqual(10, target.longest_self_match())
def test_5s_cases(self):
tgt = Targets()
tgt.addTarget("5S", TARGET_5S)
tgt.index()
self.assertEqual([0, 8, 0], tgt.find_partial("GGATGCCTTTTTTTTTTTTTTTTTTTTTTTTTTTT")[1:])
self.assertEqual([0, 8, 135], tgt.find_partial("CCAAGGACTGGAAGATCGGAAGAGCGTCGTGTAGG")[1:])
self.assertEqual([11, 20, 123], tgt.find_partial("CGGGCACCAAGCTGACTCGGGCACCAAGGAC")[1:])
class SRPTargetTest(unittest.TestCase):
def setUp(self):
self.target = Targets()
self.target.addTarget("SRP", TARGET_SRP)
self.target.index()
def tearDown(self):
self.target = None
class TestTarget(SRPTargetTest):
def test_exact(self):
self.assertEqual(self.target.find_exact('ATCGGGGGCT')[1], 0)
self.assertEqual(self.target.find_exact('TCTGTTGGTTCTC')[1], 9)
self.assertEqual(self.target.find_exact('CCCCCCCCCCC')[0], None)
self.assertEqual(self.target.find_exact('CCCCCCCCCCC')[0], None)
self.assertEqual(self.target.find_exact('TCTGTTGGTTCCC')[0], None)
def test_partial(self):
self.assertEqual(self.target.find_partial('ATCGGGGGCTCTGTTGGTT')[1:], [0, 19, 0])
old_min = self.target.minimum_match_length
self.target.minimum_match_length = 12
self.assertEqual(self.target.find_partial('ATGGGGGGCTCTGTTGGTT')[1:], [3, 16, 3])
self.assertEqual(self.target.find_partial('CCCCC' + 'CAGCCAAGGCAGATGA' + 'GGGGG')[1:], [5, 16, 64])
self.target.minimum_match_length = old_min
def test_SRPs(self):
self.assertEqual(self.target.find_partial(reverse_complement("GGGCCTGACTCGGGCACCAAGGACGGGTGGGGGCC"))[1:], [0, 31, 106]) #R1 0_0
self.assertEqual(self.target.find_partial(reverse_complement("CCCGCTGACTCGGGCACCAAGGACGGGTGGGGGCC"))[1:], [0, 31, 106]) #R1 0_1
self.assertEqual(self.target.find_partial(reverse_complement("CCCGCTGACTCGGGCACCAAGGACGGGTGGGGGCC"))[1:], [0, 31, 106]) #R1 106
self.assertEqual(self.target.find_partial(reverse_complement("GGGCCTGACTCGGGCACCAAGGACGGGTGGGGGCA"))[1:], [1, 30, 107]) #R1 107
self.assertEqual(self.target.find_partial(reverse_complement("GGGCCTGACTCGGGCACCAAGGACAGATCGGAAGA"))[1:], [11, 20, 117]) #R1 117
self.assertEqual(self.target.find_partial("ATCGGGGGCTCTGTTGGTTCTCCCGCAACGCTACT")[1:], [0, 35, 0]) #R2 0_0
self.assertEqual(self.target.find_partial("ATCGGGGGCTCTGTTGGTTCTCCCGCAACGCTACT")[1:], [0, 35, 0]) #R2 0_1
self.assertEqual(self.target.find_partial("GCAGGGCCCCCACCCGTCCTTGGTGCCCGAGTCAG")[1:], [0, 35, 102]) #R2 102
self.assertEqual(self.target.find_partial("CAGGGCCCCCACCCGTCCTTGGTGCCCGAGTCAGG")[1:], [0, 34, 103]) #R2 103
self.assertEqual(self.target.find_partial("GGCCCCCACCCGTCCTTGGTGCCCGAGTCAGGCCC")[1:], [0, 31, 106]) #R2 106
self.assertEqual(self.target.find_partial("GCCCCCACCCGTCCTTGGTGCCCGAGTCAGGCCCA")[1:], [0, 30, 107]) #R2 107
self.assertEqual(self.target.find_partial("GTCCTTGGTGCCCGAGTCAGGCCCAGATCGGAAGA")[1:], [0, 20, 117]) #R2 117
| 4,356 | 69 | 354 |
f9f29db4a589dfc14c285569466e70acb1142dbd | 2,014 | py | Python | data/graph.py | freddierice/cdrone | 7e66efa94ebc4d00379d029508a2aea42ee67259 | [
"MIT"
] | 2 | 2018-06-05T14:45:23.000Z | 2018-06-05T20:18:38.000Z | data/graph.py | freddierice/cdrone | 7e66efa94ebc4d00379d029508a2aea42ee67259 | [
"MIT"
] | null | null | null | data/graph.py | freddierice/cdrone | 7e66efa94ebc4d00379d029508a2aea42ee67259 | [
"MIT"
] | null | null | null | import logz
import sys
import matplotlib.pyplot as plt
HEIGHT_FACTOR = 5000
def main(log_filename: str):
"""main."""
with logz.open(log_filename) as l:
rc = l["rc"]
height = l["height"]
height_raw = l["height_raw"]
# imu = l["imu"]
motion = l["camera_motion"]
# apply filter to gyro
gyro = l['gyro']
gyro_x = gyro['x'].copy().astype('i2').astype(float)
gyro_y = gyro['y'].copy().astype('i2').astype(float)
gyro_x /= 4.096
gyro_y /= 4.096
# gyro_x[gyro_x > 140] -= 280
# gyro_y[gyro_y > 140] -= 280
print(l.keys())
has_vrpn = "vrpn" in l.keys()
if has_vrpn:
vrpn = l["vrpn"]
# make plot for roll pitch yaw rc commands
plt.subplot(511)
plt.plot(rc["time"], rc["throttle"])
plt.gca().set_ylim([1350, 1650])
# make plot for roll pitch yaw
plt.subplot(512)
plt.plot(rc["time"], rc["roll"])
plt.plot(rc["time"], rc["pitch"])
plt.plot(rc["time"], rc["yaw"])
# plot for height
plt.subplot(513)
plt.plot(height["time"], height["value"])
plt.plot(height_raw["time"], height_raw["value"])
if has_vrpn:
plt.plot(vrpn["time"], vrpn["y"])
# plot for the angles
plt.subplot(514)
# plt.plot(imu["time"], imu["ang_roll"])
#plt.plot(imu["time"], imu["ang_pitch"])
plt.plot(gyro["time"], gyro_x)
# plt.plot(gyro["time"], gyro_y)
plt.subplot(515)
# plt.plot(imu["time"][1:], imu["dang_roll"][1:])
# plt.plot(imu["time"][1:], imu["dang_pitch"][1:])
# plt.plot(imu["time"], imu["ang_yaw"])
plt.plot(motion['time'], motion['x'])
# plt.plot(motion['time'], motion['y'])
plt.show()
if __name__ == '__main__':
if len(sys.argv) != 2:
print("usage: %s <run>" % (sys.argv[0],))
sys.exit(1)
main(sys.argv[1])
| 29.188406 | 60 | 0.510427 | import logz
import sys
import matplotlib.pyplot as plt
HEIGHT_FACTOR = 5000
def main(log_filename: str):
"""main."""
with logz.open(log_filename) as l:
rc = l["rc"]
height = l["height"]
height_raw = l["height_raw"]
# imu = l["imu"]
motion = l["camera_motion"]
# apply filter to gyro
gyro = l['gyro']
gyro_x = gyro['x'].copy().astype('i2').astype(float)
gyro_y = gyro['y'].copy().astype('i2').astype(float)
gyro_x /= 4.096
gyro_y /= 4.096
# gyro_x[gyro_x > 140] -= 280
# gyro_y[gyro_y > 140] -= 280
print(l.keys())
has_vrpn = "vrpn" in l.keys()
if has_vrpn:
vrpn = l["vrpn"]
# make plot for roll pitch yaw rc commands
plt.subplot(511)
plt.plot(rc["time"], rc["throttle"])
plt.gca().set_ylim([1350, 1650])
# make plot for roll pitch yaw
plt.subplot(512)
plt.plot(rc["time"], rc["roll"])
plt.plot(rc["time"], rc["pitch"])
plt.plot(rc["time"], rc["yaw"])
# plot for height
plt.subplot(513)
plt.plot(height["time"], height["value"])
plt.plot(height_raw["time"], height_raw["value"])
if has_vrpn:
plt.plot(vrpn["time"], vrpn["y"])
# plot for the angles
plt.subplot(514)
# plt.plot(imu["time"], imu["ang_roll"])
#plt.plot(imu["time"], imu["ang_pitch"])
plt.plot(gyro["time"], gyro_x)
# plt.plot(gyro["time"], gyro_y)
plt.subplot(515)
# plt.plot(imu["time"][1:], imu["dang_roll"][1:])
# plt.plot(imu["time"][1:], imu["dang_pitch"][1:])
# plt.plot(imu["time"], imu["ang_yaw"])
plt.plot(motion['time'], motion['x'])
# plt.plot(motion['time'], motion['y'])
plt.show()
if __name__ == '__main__':
if len(sys.argv) != 2:
print("usage: %s <run>" % (sys.argv[0],))
sys.exit(1)
main(sys.argv[1])
| 0 | 0 | 0 |
a7b4e223b58b370c22fb9cb181d200532cc6984e | 798 | py | Python | tests/test_automation_run.py | Files-com/files-sdk-python | 84cedc9be099cd9e4db6249ef7a9d60595487090 | [
"MIT"
] | 14 | 2020-08-05T15:48:06.000Z | 2021-08-18T13:13:39.000Z | tests/test_automation_run.py | Files-com/files-sdk-python | 84cedc9be099cd9e4db6249ef7a9d60595487090 | [
"MIT"
] | 4 | 2020-10-30T14:49:25.000Z | 2021-09-29T17:11:53.000Z | tests/test_automation_run.py | Files-com/files-sdk-python | 84cedc9be099cd9e4db6249ef7a9d60595487090 | [
"MIT"
] | null | null | null | import unittest
import inspect
import files_sdk
from tests.base import TestBase
from files_sdk.models import AutomationRun
from files_sdk import automation_run
if __name__ == '__main__':
unittest.main() | 27.517241 | 118 | 0.669173 | import unittest
import inspect
import files_sdk
from tests.base import TestBase
from files_sdk.models import AutomationRun
from files_sdk import automation_run
class AutomationRunTest(TestBase):
pass
# Instance Methods
# Static Methods
@unittest.skipUnless(TestBase.mock_server_path_exists("GET", "/automation_runs"), "Mock path does not exist")
def test_list(self):
params = {
"automation_id" : 12345,
}
automation_run.list(params)
@unittest.skipUnless(TestBase.mock_server_path_exists("GET", "/automation_runs/{id}"), "Mock path does not exist")
def test_find(self):
id = 12345
params = {
"id" : 12345,
}
automation_run.find(id, params)
if __name__ == '__main__':
unittest.main() | 214 | 354 | 23 |
4be6aface8f054b24a126e27d1cf8ee1600a0385 | 695 | py | Python | scripts/dblog.py | joshand/clientsim | 9651afb8c49e25a8304e00b5addd14fe6e41e59d | [
"MIT"
] | null | null | null | scripts/dblog.py | joshand/clientsim | 9651afb8c49e25a8304e00b5addd14fe6e41e59d | [
"MIT"
] | 7 | 2020-04-15T21:17:18.000Z | 2021-09-22T18:53:14.000Z | scripts/dblog.py | joshand/clientsim | 9651afb8c49e25a8304e00b5addd14fe6e41e59d | [
"MIT"
] | null | null | null | import datetime
from client_sim.models import *
dodebug = False
| 25.740741 | 78 | 0.57554 | import datetime
from client_sim.models import *
dodebug = False
def append_log(log, *data):
if log is None:
log = []
if (isinstance(data, list) or isinstance(data, tuple)) and len(data) > 1:
if dodebug: print(data)
ldata = ""
for ld in data:
ldata += str(ld) + " "
log.append(datetime.datetime.now().isoformat() + " - " + ldata)
else:
if dodebug: print(data[0])
log.append(datetime.datetime.now().isoformat() + " - " + str(data[0]))
def db_log(logtype, logdata):
try:
ld = str("\n".join(logdata))
except:
ld = str(logdata)
t = Task.objects.create(description=logtype, task_data=ld)
| 583 | 0 | 46 |
a31638b6e07aa0fe54d83ce68bf810470a7b3890 | 1,539 | py | Python | fluentxy/__init__.py | bryanwweber/fluentxy | fa4b574255c3752083c418036f8167706e755025 | [
"BSD-3-Clause"
] | null | null | null | fluentxy/__init__.py | bryanwweber/fluentxy | fa4b574255c3752083c418036f8167706e755025 | [
"BSD-3-Clause"
] | null | null | null | fluentxy/__init__.py | bryanwweber/fluentxy | fa4b574255c3752083c418036f8167706e755025 | [
"BSD-3-Clause"
] | null | null | null | """An interface to produce Pandas DataFrames from Fluent XY output files."""
from parse import parse
import numpy as np
import pandas as pd
from typing import List
__version__ = "0.1.0"
def parse_data(lines: List) -> pd.DataFrame:
"""Parse an XY-formatted datafile from Fluent and return a DataFrame."""
axis_labels = parse('(labels "{x}" "{y}")', lines[1])
columns = []
for line in lines[2:]:
if line.startswith("(("):
columns.append(parse('((xy/key/label "{label}")', line)["label"])
index = pd.MultiIndex.from_product([columns, [axis_labels["x"], axis_labels["y"]]])
data = pd.DataFrame(columns=index)
finish = False
this_data = []
for line in lines[2:]:
if line.startswith("(("):
column = parse('((xy/key/label "{label}")', line)["label"]
# Skip blank lines
elif not line.strip() or line.startswith(("(",)):
continue
elif line.startswith(")"):
finish = True
else:
x, y = parse("{:g}\t{:g}", line.strip())
this_data.append([x, y])
if finish:
this_data = np.array(this_data)
data[(column, axis_labels["x"])] = this_data[:, 0]
data[(column, axis_labels["y"])] = this_data[:, 1]
finish = False
this_data = []
return data
def plot_xy(axis, df, column, x_label, y_label):
"""Plot an X-Y line plot from the given column in the df on axis."""
axis.plot(df[(column, x_label)], df[(column, y_label)])
| 32.744681 | 87 | 0.575049 | """An interface to produce Pandas DataFrames from Fluent XY output files."""
from parse import parse
import numpy as np
import pandas as pd
from typing import List
__version__ = "0.1.0"
def parse_data(lines: List) -> pd.DataFrame:
"""Parse an XY-formatted datafile from Fluent and return a DataFrame."""
axis_labels = parse('(labels "{x}" "{y}")', lines[1])
columns = []
for line in lines[2:]:
if line.startswith("(("):
columns.append(parse('((xy/key/label "{label}")', line)["label"])
index = pd.MultiIndex.from_product([columns, [axis_labels["x"], axis_labels["y"]]])
data = pd.DataFrame(columns=index)
finish = False
this_data = []
for line in lines[2:]:
if line.startswith("(("):
column = parse('((xy/key/label "{label}")', line)["label"]
# Skip blank lines
elif not line.strip() or line.startswith(("(",)):
continue
elif line.startswith(")"):
finish = True
else:
x, y = parse("{:g}\t{:g}", line.strip())
this_data.append([x, y])
if finish:
this_data = np.array(this_data)
data[(column, axis_labels["x"])] = this_data[:, 0]
data[(column, axis_labels["y"])] = this_data[:, 1]
finish = False
this_data = []
return data
def plot_xy(axis, df, column, x_label, y_label):
"""Plot an X-Y line plot from the given column in the df on axis."""
axis.plot(df[(column, x_label)], df[(column, y_label)])
| 0 | 0 | 0 |
32bfc281f0f4f9249233d06b02aa1e671c33f1a3 | 21 | py | Python | Magics/__init__.py | old-reliable/magics-python | 141504926562cbe130ed26d69b38d479e400c8b6 | [
"Apache-2.0"
] | null | null | null | Magics/__init__.py | old-reliable/magics-python | 141504926562cbe130ed26d69b38d479e400c8b6 | [
"Apache-2.0"
] | null | null | null | Magics/__init__.py | old-reliable/magics-python | 141504926562cbe130ed26d69b38d479e400c8b6 | [
"Apache-2.0"
] | null | null | null | from Magics import *
| 10.5 | 20 | 0.761905 | from Magics import *
| 0 | 0 | 0 |
514c63c52cadcbbc360b01c1a3fb561b0be9851a | 784 | py | Python | example/tests/admin/test_flag_admin.py | dinoperovic/djangoshop-shopit | b42a2bf0ec319817eb37ef939608b04498fc4ff2 | [
"BSD-3-Clause"
] | 14 | 2016-11-25T16:06:20.000Z | 2018-08-30T19:20:41.000Z | example/tests/admin/test_flag_admin.py | dinoperovic/django-shop | b42a2bf0ec319817eb37ef939608b04498fc4ff2 | [
"BSD-3-Clause"
] | 3 | 2018-11-30T10:47:39.000Z | 2019-10-21T10:21:24.000Z | example/tests/admin/test_flag_admin.py | dinoperovic/django-shop | b42a2bf0ec319817eb37ef939608b04498fc4ff2 | [
"BSD-3-Clause"
] | 6 | 2019-04-07T23:52:54.000Z | 2020-09-20T05:30:07.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib.admin.sites import AdminSite
from shopit.admin.flag import FlagAdmin
from shopit.models.flag import Flag
from ..utils import ShopitTestCase
| 32.666667 | 107 | 0.690051 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib.admin.sites import AdminSite
from shopit.admin.flag import FlagAdmin
from shopit.models.flag import Flag
from ..utils import ShopitTestCase
class FlagAdminTest(ShopitTestCase):
def setUp(self):
self.create_request()
self.site = AdminSite(name="admin")
self.admin = FlagAdmin(Flag, self.site)
def test_get_name(self):
f1 = self.create_flag('F1')
f2 = self.create_flag('F2', parent=f1)
template = '<div style="text-indent:{}px">{}</div>'
self.assertEquals(self.admin.get_name(f1), template.format(0, 'F1'))
self.assertEquals(self.admin.get_name(f2), template.format(1 * self.admin.mptt_level_indent, 'F2'))
| 448 | 15 | 76 |
fe0eff0885d1cdeb6e6ff70e807dd53988d6ffea | 1,461 | py | Python | web-search-engine/search/utils/chk_url_table.py | robertwenquan/nyu-course-assignment | c03a4fb157d385b650c38feb861f283f2e1928fd | [
"Apache-2.0"
] | 1 | 2018-01-22T21:35:42.000Z | 2018-01-22T21:35:42.000Z | web-search-engine/search/utils/chk_url_table.py | robertwenquan/nyu-course-assignment | c03a4fb157d385b650c38feb861f283f2e1928fd | [
"Apache-2.0"
] | null | null | null | web-search-engine/search/utils/chk_url_table.py | robertwenquan/nyu-course-assignment | c03a4fb157d385b650c38feb861f283f2e1928fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
list all the wordid - word mapping
Sample usage:
$ python utils/chk_word_table.py
URL_TABLE_ENTRY(28B)
- docid(4B)
- loc of url(8B)
- url fileid(2B)
- url offset(4B)
- url length(2B)
- loc of doc(16B)
- wet fileid(2B)
- offset in the file(4B)
- length of the file including header(4B)
- content start offset from the doc(2B)
- content length(4B)
URL_ENTRY(VARIABLE LENGTH)
- url(variable length)
"""
from struct import calcsize
from struct import unpack
import os
import sys
BASE_DIR = './test_data'
URL_TABLE_IDX = os.path.join(BASE_DIR, 'tiny30/output/url_table.idx')
URL_TABLE_DATA = os.path.join(BASE_DIR, 'tiny30/output/url_table.data')
def main():
""" main routine """
url_idx_schema = '=IHIHHIIHI'
# record length
idx_len = calcsize(url_idx_schema)
try:
fd_url_idx = open(URL_TABLE_IDX)
fd_url_data = open(URL_TABLE_DATA)
# iterate word index table
# and fetch word string frmo the word data table
while True:
idx_data = fd_url_idx.read(idx_len)
if idx_data == '':
break
docid, _, offset, length, _, _, _, _, _ = unpack(url_idx_schema, idx_data)
fd_url_data.seek(offset)
url_str = fd_url_data.read(length)
print docid, url_str
except IOError:
# to handle the piped output to head
# like check_word_table.py | head
fd_url_idx.close()
fd_url_data.close()
if __name__ == '__main__':
main()
| 19.223684 | 80 | 0.67488 | #!/usr/bin/env python
"""
list all the wordid - word mapping
Sample usage:
$ python utils/chk_word_table.py
URL_TABLE_ENTRY(28B)
- docid(4B)
- loc of url(8B)
- url fileid(2B)
- url offset(4B)
- url length(2B)
- loc of doc(16B)
- wet fileid(2B)
- offset in the file(4B)
- length of the file including header(4B)
- content start offset from the doc(2B)
- content length(4B)
URL_ENTRY(VARIABLE LENGTH)
- url(variable length)
"""
from struct import calcsize
from struct import unpack
import os
import sys
BASE_DIR = './test_data'
URL_TABLE_IDX = os.path.join(BASE_DIR, 'tiny30/output/url_table.idx')
URL_TABLE_DATA = os.path.join(BASE_DIR, 'tiny30/output/url_table.data')
def main():
""" main routine """
url_idx_schema = '=IHIHHIIHI'
# record length
idx_len = calcsize(url_idx_schema)
try:
fd_url_idx = open(URL_TABLE_IDX)
fd_url_data = open(URL_TABLE_DATA)
# iterate word index table
# and fetch word string frmo the word data table
while True:
idx_data = fd_url_idx.read(idx_len)
if idx_data == '':
break
docid, _, offset, length, _, _, _, _, _ = unpack(url_idx_schema, idx_data)
fd_url_data.seek(offset)
url_str = fd_url_data.read(length)
print docid, url_str
except IOError:
# to handle the piped output to head
# like check_word_table.py | head
fd_url_idx.close()
fd_url_data.close()
if __name__ == '__main__':
main()
| 0 | 0 | 0 |
d94edc6a7ee39c3a0c82295a7c20703f46ca28db | 2,180 | py | Python | StudentRep/main.py | davidp-ro/ai-ml | 616225fb34761bcd9c8477044bc3ba955ba8b549 | [
"MIT"
] | null | null | null | StudentRep/main.py | davidp-ro/ai-ml | 616225fb34761bcd9c8477044bc3ba955ba8b549 | [
"MIT"
] | null | null | null | StudentRep/main.py | davidp-ro/ai-ml | 616225fb34761bcd9c8477044bc3ba955ba8b549 | [
"MIT"
] | null | null | null | # Imports:
import pandas as pd
import numpy as np
import pickle
# sklearn:
import sklearn
from sklearn import linear_model
from sklearn.utils import shuffle
# Matplotlib:
import matplotlib.pyplot as pyplot
from matplotlib import style
# File paths:
DATA_PATH = '../Dataset/student-mat.csv'
SAVE_PATH = '../SavedModels/studentmodel.pickle'
data = pd.read_csv(DATA_PATH, sep=';')
data = data[['G1', 'G2', 'G3', 'studytime', 'failures', 'absences']]
predict = 'G3'
x = np.array(data.drop([predict], 1))
y = np.array(data[predict])
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)
# Uncomment to train/calculate models:
"""
best_accuracy = 0
accuracy = 0
best_run_number = -1
for run in range(50):
# Run 30 times and keep the best model that was generated.
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)
linear = linear_model.LinearRegression()
linear.fit(x_train, y_train)
accuracy = linear.score(x_test, y_test)
print(f'Accuracy for run {run + 1}: {round(accuracy, 2)}')
if accuracy > best_accuracy:
best_accuracy = accuracy
best_run_number = run
with open(SAVE_PATH, 'wb') as model_save_file:
# Save the generated model
pickle.dump(linear, model_save_file)
print(f"The best model was generated in run {best_run_number} with the accuracy {round(best_accuracy, 2)}")
"""
pickle_in = open(SAVE_PATH, 'rb')
linear = pickle.load(pickle_in)
"""
print(f"Coeficient: {linear.coef_}")
print(f"Intercept: {linear.intercept_}")
"""
predictions = linear.predict(x_test)
for x in range(len(predictions)):
res = round(predictions[x])
close = 'no'
if abs(y_test[x] - res) <= 2:
close = 'yes'
print(round(predictions[x]), x_test[x], y_test[x], f"Accurate? {close}")
style.use("ggplot")
x = 'absences' # This will be X for the graph <-- this is the 'comparison' between G3 and G1 or G2
y = 'G3' # This will be Y for the graph
pyplot.scatter(data[y], data[x])
pyplot.xlabel(f'Comparsion between the {x} grade and the {y} grade')
pyplot.ylabel(f'Compared grade ({y})')
pyplot.show()
| 27.25 | 107 | 0.693119 | # Imports:
import pandas as pd
import numpy as np
import pickle
# sklearn:
import sklearn
from sklearn import linear_model
from sklearn.utils import shuffle
# Matplotlib:
import matplotlib.pyplot as pyplot
from matplotlib import style
# File paths:
DATA_PATH = '../Dataset/student-mat.csv'
SAVE_PATH = '../SavedModels/studentmodel.pickle'
data = pd.read_csv(DATA_PATH, sep=';')
data = data[['G1', 'G2', 'G3', 'studytime', 'failures', 'absences']]
predict = 'G3'
x = np.array(data.drop([predict], 1))
y = np.array(data[predict])
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)
# Uncomment to train/calculate models:
"""
best_accuracy = 0
accuracy = 0
best_run_number = -1
for run in range(50):
# Run 30 times and keep the best model that was generated.
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)
linear = linear_model.LinearRegression()
linear.fit(x_train, y_train)
accuracy = linear.score(x_test, y_test)
print(f'Accuracy for run {run + 1}: {round(accuracy, 2)}')
if accuracy > best_accuracy:
best_accuracy = accuracy
best_run_number = run
with open(SAVE_PATH, 'wb') as model_save_file:
# Save the generated model
pickle.dump(linear, model_save_file)
print(f"The best model was generated in run {best_run_number} with the accuracy {round(best_accuracy, 2)}")
"""
pickle_in = open(SAVE_PATH, 'rb')
linear = pickle.load(pickle_in)
"""
print(f"Coeficient: {linear.coef_}")
print(f"Intercept: {linear.intercept_}")
"""
predictions = linear.predict(x_test)
for x in range(len(predictions)):
res = round(predictions[x])
close = 'no'
if abs(y_test[x] - res) <= 2:
close = 'yes'
print(round(predictions[x]), x_test[x], y_test[x], f"Accurate? {close}")
style.use("ggplot")
x = 'absences' # This will be X for the graph <-- this is the 'comparison' between G3 and G1 or G2
y = 'G3' # This will be Y for the graph
pyplot.scatter(data[y], data[x])
pyplot.xlabel(f'Comparsion between the {x} grade and the {y} grade')
pyplot.ylabel(f'Compared grade ({y})')
pyplot.show()
| 0 | 0 | 0 |
e6076ef3167f5d0557a7db18ca70aa92a6189f21 | 2,622 | py | Python | smtpdev/cli.py | asyncee/smtpdev | 1b92085b4069457bf724a16e20270b99b9098255 | [
"MIT"
] | 2 | 2019-06-19T09:03:23.000Z | 2020-06-19T18:19:48.000Z | smtpdev/cli.py | asyncee/smtpdev | 1b92085b4069457bf724a16e20270b99b9098255 | [
"MIT"
] | 1 | 2019-06-07T06:36:45.000Z | 2019-06-07T06:36:45.000Z | smtpdev/cli.py | asyncee/smtpdev | 1b92085b4069457bf724a16e20270b99b9098255 | [
"MIT"
] | 1 | 2019-06-04T17:29:21.000Z | 2019-06-04T17:29:21.000Z | import logging
import pathlib
from contextlib import nullcontext
from mailbox import Maildir
from tempfile import TemporaryDirectory
import click
from aiosmtpd.controller import Controller
from .config import Configuration
from .smtp_handlers import MailboxHandler
from .web_server import WebServer
logger = logging.getLogger(__name__)
@click.command()
@click.option(
"--smtp-host",
envvar="SMTPDEV_SMTP_HOST",
default="localhost",
help="Smtp server host (default localhost).",
)
@click.option(
"--smtp-port", envvar="SMTPDEV_SMTP_PORT", default=2500, help="Smtp server port (default 2500)."
)
@click.option(
"--web-host",
envvar="SMTPDEV_WEB_HOST",
default="localhost",
help="Web server host (default localhost).",
)
@click.option(
"--web-port", envvar="SMTPDEV_WEB_PORT", default=8080, help="Web server port (default 8080)."
)
@click.option(
"--develop",
envvar="SMTPDEV_DEVELOP",
default=False,
is_flag=True,
help="Run in developer mode.",
)
@click.option(
"--debug",
envvar="SMTPDEV_DEBUG",
default=False,
is_flag=True,
help="Whether to use debug loglevel.",
)
@click.option(
"--maildir",
envvar="SMTPDEV_MAILDIR",
default=None,
help="Full path to emails directory, temporary directory if not set.",
)
| 27.6 | 100 | 0.680397 | import logging
import pathlib
from contextlib import nullcontext
from mailbox import Maildir
from tempfile import TemporaryDirectory
import click
from aiosmtpd.controller import Controller
from .config import Configuration
from .smtp_handlers import MailboxHandler
from .web_server import WebServer
logger = logging.getLogger(__name__)
@click.command()
@click.option(
"--smtp-host",
envvar="SMTPDEV_SMTP_HOST",
default="localhost",
help="Smtp server host (default localhost).",
)
@click.option(
"--smtp-port", envvar="SMTPDEV_SMTP_PORT", default=2500, help="Smtp server port (default 2500)."
)
@click.option(
"--web-host",
envvar="SMTPDEV_WEB_HOST",
default="localhost",
help="Web server host (default localhost).",
)
@click.option(
"--web-port", envvar="SMTPDEV_WEB_PORT", default=8080, help="Web server port (default 8080)."
)
@click.option(
"--develop",
envvar="SMTPDEV_DEVELOP",
default=False,
is_flag=True,
help="Run in developer mode.",
)
@click.option(
"--debug",
envvar="SMTPDEV_DEBUG",
default=False,
is_flag=True,
help="Whether to use debug loglevel.",
)
@click.option(
"--maildir",
envvar="SMTPDEV_MAILDIR",
default=None,
help="Full path to emails directory, temporary directory if not set.",
)
def main(smtp_host, smtp_port, web_host, web_port, develop, debug, maildir):
if debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logger.info("SMTP server is running on %s:%s", smtp_host, smtp_port)
logger.info("Web server is running on %s:%s", web_host, web_port)
if develop:
logger.info("Running in developer mode")
dir_context = TemporaryDirectory if maildir is None else lambda: nullcontext(maildir)
with dir_context() as maildir_path:
maildir_path = pathlib.Path(maildir_path)
maildir_path.mkdir(parents=True, exist_ok=True)
logger.info("Mail directory: %s", maildir_path)
config = Configuration(
smtp_host=smtp_host,
smtp_port=smtp_port,
web_host=web_host,
web_port=web_port,
develop=develop,
debug=debug,
)
maildir = Maildir(maildir_path / "maildir")
mailbox = MailboxHandler(maildir_path / "maildir")
controller = Controller(mailbox, hostname=config.smtp_host, port=config.smtp_port)
web_server = WebServer(config, maildir)
mailbox.register_message_observer(web_server)
controller.start()
web_server.start()
controller.stop()
| 1,293 | 0 | 22 |
34c178e47df7b723231427d71156f2e94fd3ed20 | 313 | py | Python | ctutils/PSR.py | Combustion-Zhen/pyutils | dc675f2087d531fbd0ac5477dadbb5cebb9ccf79 | [
"MIT"
] | null | null | null | ctutils/PSR.py | Combustion-Zhen/pyutils | dc675f2087d531fbd0ac5477dadbb5cebb9ccf79 | [
"MIT"
] | null | null | null | ctutils/PSR.py | Combustion-Zhen/pyutils | dc675f2087d531fbd0ac5477dadbb5cebb9ccf79 | [
"MIT"
] | null | null | null | # %%
import cantera as ct
# %%
gas = ct.Solution('h2_Burke_n2.cti')
r = ct.IdealGasConstPressureReactor(gas)
# %%
dt = 1e-5
t = 0.
# %%
r.T
t += dt
sim = ct.ReactorNet([r])
sim.advance(t)
r.T
# %%
gas.TPX = 300, 101325, {'H':1.}
r.syncState()
r.T
# %%
sim.reinitialize()
# %%
t += dt
sim.advance(t)
r.T
# %% | 10.096774 | 40 | 0.571885 | # %%
import cantera as ct
# %%
gas = ct.Solution('h2_Burke_n2.cti')
r = ct.IdealGasConstPressureReactor(gas)
# %%
dt = 1e-5
t = 0.
# %%
r.T
t += dt
sim = ct.ReactorNet([r])
sim.advance(t)
r.T
# %%
gas.TPX = 300, 101325, {'H':1.}
r.syncState()
r.T
# %%
sim.reinitialize()
# %%
t += dt
sim.advance(t)
r.T
# %% | 0 | 0 | 0 |
2d74716a061b9c90a72957614d5e209d6f7bcfd9 | 689 | py | Python | Unit_4/Data_Analysis.py | coffeelabor/CIS289_Reed_James | 6d1bc126d6b50411f2bad1d65cfeebd47b68a6ff | [
"MIT"
] | null | null | null | Unit_4/Data_Analysis.py | coffeelabor/CIS289_Reed_James | 6d1bc126d6b50411f2bad1d65cfeebd47b68a6ff | [
"MIT"
] | null | null | null | Unit_4/Data_Analysis.py | coffeelabor/CIS289_Reed_James | 6d1bc126d6b50411f2bad1d65cfeebd47b68a6ff | [
"MIT"
] | null | null | null | '''
/***************************************************************
* Name: Pandas Data Analysis
* Author: Reed James
* Created: 22 Sept 2021
* Course: CIS 289 - Python
* Version: Python 3.8.2
* OS: Windows 10
* Copyright: This is my own original work based on
* specifications issued by our instructor
* Description:
* Input:
* Output:
* Academic Honesty: I attest that this is my original work.
* I have not used unauthorized source code, either modified or
* unmodified. I have not given other fellow student(s) access to my program.
***************************************************************/
'''
if __name__ == "__main__":
pass | 31.318182 | 76 | 0.539913 | '''
/***************************************************************
* Name: Pandas Data Analysis
* Author: Reed James
* Created: 22 Sept 2021
* Course: CIS 289 - Python
* Version: Python 3.8.2
* OS: Windows 10
* Copyright: This is my own original work based on
* specifications issued by our instructor
* Description:
* Input:
* Output:
* Academic Honesty: I attest that this is my original work.
* I have not used unauthorized source code, either modified or
* unmodified. I have not given other fellow student(s) access to my program.
***************************************************************/
'''
if __name__ == "__main__":
pass | 0 | 0 | 0 |
2dc5f02dd8c7980771551c3a8f2cf90faab3d060 | 15,111 | py | Python | Dados/Events/Evento/Effect/Scroll.py | On0n0k1/2020ASSEditor | fd61595696683b440eb8a030163020100e070ed5 | [
"MIT"
] | null | null | null | Dados/Events/Evento/Effect/Scroll.py | On0n0k1/2020ASSEditor | fd61595696683b440eb8a030163020100e070ed5 | [
"MIT"
] | null | null | null | Dados/Events/Evento/Effect/Scroll.py | On0n0k1/2020ASSEditor | fd61595696683b440eb8a030163020100e070ed5 | [
"MIT"
] | null | null | null | """
Extends Effect. Contains Scroll, which is one the 3 effects employed by the event.
Copyright 2020 Lucas Alessandro do Carmo Lemos
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# from Dados.ErrorEditorSSA import ErrorEditorSSA
# from Dados.ErrorPackage.ErrorPackage import ErrorPackage
__author__ = "Lucas Alessandro do Carmo Lemos"
__copyright__ = "Copyright (C) 2020 Lucas Alessandro do Carmo Lemos"
__license__ = "MIT"
__credits__ = []
__version__ = "0.2.1"
__maintainer__ = "Lucas Alessandro do Carmo Lemos"
__email__ = "stiltztinkerstein@gmail.com"
__status__ = (["Prototype", "Development", "Production"])[2]
from typing import Union
class Scroll:
""" Scroll makes the event (image or message) scroll vertically through the screen.
Extends 'Dados.Events.Evento.Effect.Effect'.
Methods:
__init__(entrada = None, subtitlerplugin = None): Create the object with no parameters, a String, or another
Scroll object to copy. String reading format changes according to subtitlerplugin.
gety1(): returns y1. Non-negative Integer.
gety2(): returns y2. Non-negative Integer.
getdelay(): returns delay. Integer, value from 0 to 100.
getfadeawayheight(): returns fadeawayheight. Non-negative Integer.
issubtitlerplugin(): returns subtitlerplugin. True or False.
getdirection(): returns direction. String. "up" or "down".
sety1(y1): set y1. Non-negative integer.
sety2(y2) set y2. Non-negative integer.
setdelay(delay) set delay. Integer. From 0 to 100, inclusive.
setfadeawayheight(fadeawayheight): set fadeawayheight. Non-negative integer.
setsubtitlerplugin(subtitlerplugin) set subtitlerplygin. Which decides how to read and print this object.
setdirection(direction): set direction. "up" or "down".
__repr__(): String format changes according to subtitlerplugin.
"""
# self.direction: 'up' or 'down'
# self.subtitlerplugin: True or False. If it is using Avery Lee's Subtitler plugin order or not.
# self.y1: Integer. First height. In pixels.
# self.y2: Integer. Second height. In pixels.
# (doesn't matter the order between y1 and y2)
# self.delay: Integer. From 0 to 100. How much will the scrolling be delayed.
# self.fadeawayheight: Integer. Optional. The documentation doesn't explain well how to treat this value,
# so I won't care much about it.
def __init__(self, entrada: Union[str, 'Scroll', None] = None, subtitlerplugin: Union[bool, None] = None):
""" Constructs the object. Can use a string or copy from a similar object.
:param entrada: String, Dados.Events.Evento.Effect.Scroll.Scroll object, or None. String is used for loading
a SSA file. Scroll object will have copied values. None will start with direction, y1, y2, delay,
fadeawayheight and subtitlerplugin as "up", 0, 0, 0, Non e and False, respectively.
:param subtitlerplugin: True, False or None. True means the format will be read and written as
f"Scroll {direction}; {delay}; {y1}; {y2}; {fadeawayheight}". False means the format will be read and
written as f"Scroll {direction}; {y1}; {y2}; {delay}; {fadeawayheight}". If None, the constructor will
try to guess it.
"""
if entrada is None:
self.direction, self.y1, self.y2, self.delay, self.fadeawayheight = ["up", 0, 0, 0, None]
self.subtitlerplugin = False
elif isinstance(entrada, Scroll):
self.direction, self.y1, self.y2 = [entrada.getdirection(), entrada.gety1(), entrada.gety2()]
self.delay, self.fadeawayheight = [entrada.getdelay(), entrada.getfadeawayheight()]
self.setsubtitlerplugin(entrada.issubtitlerplugin())
else:
if isinstance(entrada, str) is False:
raise TypeError(f"{entrada} has to be a string or Scroll object.")
# assert(isinstance(entrada, str)), f"{entrada} has to be a string or Scroll object."
_ = f"{subtitlerplugin} must be a boolean or omitted."
if subtitlerplugin is not None:
if isinstance(subtitlerplugin, bool) is False:
raise TypeError(_)
# assert((subtitlerplugin is None) or isinstance(subtitlerplugin, bool)), _
texto = (entrada.strip()).lower()
if texto.startswith("scroll"):
texto = texto[6:].strip()
if texto.startswith("up"):
self.direction = "up"
texto = texto[2:]
elif texto.startswith("down"):
self.direction = "down"
texto = texto[4:]
else:
_ = f"{entrada} : 'up' or 'down' Not Found after Scroll."
raise ValueError(f"{_}")
parameters = texto.split(";")
# since the section starts with ';', the first value of the list must be removed
del parameters[0]
if len(parameters) < 3:
# Line too long
_ = f"{entrada} : too few arguments after Scroll {self.direction}({len(parameters)} "
raise ValueError(_)
if len(parameters) > 4:
# Line too long
_ = f"{entrada} : too many arguments after Scroll {self.direction}({len(parameters)}) "
raise ValueError(_)
try:
if len(parameters) == 3:
parameters = [int(parameters[0]), int(parameters[1]), int(parameters[2])]
else:
parameters = [int(parameters[0]), int(parameters[1]), int(parameters[2]), int(parameters[3])]
except ValueError:
raise ValueError(f"{entrada} the arguments aren't integers.")
# Here comes another messup of this format
# SSA reads Scroll up/down parameters as y1;y2;delay[;fadeawayheight]
# But 'Avery Lee's "Subtitler" plugin' reads the parameters as 'delay;y1;y2[;fadeawayheight]'
# The reader will try to guess which of the styles is being used based on the values. It will focus on using
# ';y1;y2;delay' normally though.
# if subtitlerplugin was not defined
if subtitlerplugin is not None:
self.subtitlerplugin = subtitlerplugin
else:
# try to guess if the order is ';delay;y1;y2' based on constraints
# delay has to be a value that goes from 0 to 100, so at least one of the values will be in that range
if (0 <= parameters[0]) and (parameters[0] <= 100) and parameters[2] > 100:
self.subtitlerplugin = True
else:
self.subtitlerplugin = False
if self.subtitlerplugin:
if len(parameters) == 3:
self.delay, self.y1, self.y2 = parameters
self.fadeawayheight = None
else:
self.delay, self.y1, self.y2, self.fadeawayheight = parameters
else:
if len(parameters) == 3:
self.y1, self.y2, self.delay = parameters
self.fadeawayheight = None
else:
self.y1, self.y2, self.delay, self.fadeawayheight = parameters
def gety1(self) -> int:
""" Y1 and Y2 are the height values where the text will scroll.
There's no respective order for both values. Any of the two can be the highest or lowest.
:return: Non-negative integer.
"""
return int(self.y1)
def gety2(self) -> int:
""" Y1 and Y2 are the height values where the text will scroll.
There's no respective order for both values. Any of the two can be the highest or lowest.
:return: Non-negative integer.
"""
return int(self.y2)
def getdelay(self) -> int:
""" Return the delay value of this object.
Integer from 0 to 100. The higher the value, the slower it scrolls.
Calculated as 1000/delay second/pixel.
0: no delay.
100: 0.1 second per pixel.
:return: Integer. From 0 to 100.
"""
return int(self.delay)
# Not sure if fadeawayheight is the distance that the scroll has to cover before fading,
# or the position on the screen where it starts fading.
# should return 0 be ok?
def getfadeawayheight(self) -> int:
""" Get fadeawayheight value of this object.
:return: Integer. Non-negative value.
"""
return self.fadeawayheight
def issubtitlerplugin(self) -> bool:
""" Get subtitlerplugin value.
True: f"Scroll {direction}; {delay}; {y1}; {y2}; {fadeawayheight}"
False: f"Scroll {direction}; {y1}; {y2}; {delay}; {fadeawayheight}"
:return: True or False.
"""
return self.subtitlerplugin
def getdirection(self) -> str:
""" Get direction.
:return: String. "up" or "down" only.
"""
return self.direction
def sety1(self, y1: int) -> 'Scroll':
""" Set y1 value of this object.
:param y1: Integer. Non-negative value.
:return: self.
"""
if isinstance(y1, int) is False:
raise TypeError(f"{y1} must be an integer.")
if y1 < 0:
raise ValueError(f"{y1} must be a non-negative value")
# assert (isinstance(y1, int)), f"{y1} must be an integer."
# assert (y1 >= 0), f"{y1} must be a non-negative value"
self.y1 = y1
return self
def sety2(self, y2: int) -> 'Scroll':
""" Set y2 value of this object.
:param y2: Integer. Non-negative value.
:return: self.
"""
if isinstance(y2, int) is False:
raise TypeError(f"{y2} must be an integer.")
# assert (isinstance(y2, int)), f"{y2} must be an integer."
if y2 < 0:
raise ValueError(f"{y2} must be a non-negative value")
# assert (y2 >= 0), f"{y2} must be a non-negative value"
self.y2 = y2
return self
def setdelay(self, delay: int) -> 'Scroll':
""" Set delay value of this object.
Integer from 0 to 100. The higher the value, the slower it scrolls.
Calculated as 1000/delay second/pixel.
0: no delay.
100: 0.1 second per pixel.
:param delay: Integer. From 0 to 100.
:return: self.
"""
if isinstance(delay, int) is False:
raise TypeError(f"{delay} must be an integer")
# assert (isinstance(delay, int)), f"{delay} must be an integer"
if (delay < 0) or (delay > 100):
raise ValueError(f"{delay} must be a value from 0 to 100")
# assert (0 <= delay) and (delay <= 100), f"{delay} must be a value from 0 to 100"
self.delay = delay
return self
def setfadeawayheight(self, fadeawayheight: int) -> 'Scroll':
""" Set fadeawayheight value of this object.
:param fadeawayheight: Integer. Non-negative value.
:return: self.
"""
if isinstance(fadeawayheight, int) is False:
raise TypeError(f"{fadeawayheight} must be an integer")
# assert(isinstance(fadeawayheight, int)), f"{fadeawayheight} must be an integer"
if fadeawayheight < 0:
raise ValueError(f"{fadeawayheight} must be a positive value")
# assert (fadeawayheight >= 0), f"{fadeawayheight} must be a positive value"
self.fadeawayheight = fadeawayheight
return self
def setsubtitlerplugin(self, subtitlerplugin: bool) -> "Scroll":
""" Set subtitlerplugin value.
If True:
f"Scroll {direction}; {delay}; {y1}; {y2}; {fadeawayheight}"
if False:
f"Scroll {direction}; {y1}; {y2}; {delay}; {fadeawayheight}"
:param subtitlerplugin: True or False.
:return: self.
"""
if isinstance(subtitlerplugin, bool) is False:
raise TypeError(f"{subtitlerplugin} must be True or False")
# assert(isinstance(subtitlerplugin, bool)), f"{subtitlerplugin} must be True or False"
self.subtitlerplugin = subtitlerplugin
return self
def setdirection(self, direction: str) -> 'Scroll':
""" Set Scroll direction.
:param direction: String. "up" or "down" only.
:return: self.
"""
if isinstance(direction, str) is False:
raise TypeError(f"{direction} must be 'up' or 'down'.")
# assert(isinstance(direction, str)), f"{direction} must be 'up' or 'down'."
if (direction.lower() != "up") and (direction.lower() != "down"):
raise ValueError(f"{direction} must be 'up' or 'down'.")
# assert(direction.lower() == "up") or (direction.lower() == "down"), f"{direction} must be 'up' or 'down'."
self.direction = direction.lower()
return self
def __repr__(self) -> str:
""" Returns this object string format.
If subtitlerplugin is set to true. The return will be:
f"Scroll {direction}; {delay}; {y1}; {y2}; {fadeawayheight}"
if subtitlerplugin is set to false. The return will be:
f"Scroll {direction}; {y1}; {y2}; {delay}; {fadeawayheight}"
:return: This object string in SSA format.
"""
saida = f"Scroll {self.direction.lower()};"
if self.subtitlerplugin:
saida = f"{saida} {self.delay}; {self.y1}; {self.y2}"
else:
saida = f"{saida} {self.y1}; {self.y2}; {self.delay}"
if self.fadeawayheight is None:
return saida
else:
return f"{saida}; {self.fadeawayheight}"
| 40.296 | 121 | 0.599232 | """
Extends Effect. Contains Scroll, which is one the 3 effects employed by the event.
Copyright 2020 Lucas Alessandro do Carmo Lemos
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# from Dados.ErrorEditorSSA import ErrorEditorSSA
# from Dados.ErrorPackage.ErrorPackage import ErrorPackage
__author__ = "Lucas Alessandro do Carmo Lemos"
__copyright__ = "Copyright (C) 2020 Lucas Alessandro do Carmo Lemos"
__license__ = "MIT"
__credits__ = []
__version__ = "0.2.1"
__maintainer__ = "Lucas Alessandro do Carmo Lemos"
__email__ = "stiltztinkerstein@gmail.com"
__status__ = (["Prototype", "Development", "Production"])[2]
from typing import Union
class Scroll:
""" Scroll makes the event (image or message) scroll vertically through the screen.
Extends 'Dados.Events.Evento.Effect.Effect'.
Methods:
__init__(entrada = None, subtitlerplugin = None): Create the object with no parameters, a String, or another
Scroll object to copy. String reading format changes according to subtitlerplugin.
gety1(): returns y1. Non-negative Integer.
gety2(): returns y2. Non-negative Integer.
getdelay(): returns delay. Integer, value from 0 to 100.
getfadeawayheight(): returns fadeawayheight. Non-negative Integer.
issubtitlerplugin(): returns subtitlerplugin. True or False.
getdirection(): returns direction. String. "up" or "down".
sety1(y1): set y1. Non-negative integer.
sety2(y2) set y2. Non-negative integer.
setdelay(delay) set delay. Integer. From 0 to 100, inclusive.
setfadeawayheight(fadeawayheight): set fadeawayheight. Non-negative integer.
setsubtitlerplugin(subtitlerplugin) set subtitlerplygin. Which decides how to read and print this object.
setdirection(direction): set direction. "up" or "down".
__repr__(): String format changes according to subtitlerplugin.
"""
# self.direction: 'up' or 'down'
# self.subtitlerplugin: True or False. If it is using Avery Lee's Subtitler plugin order or not.
# self.y1: Integer. First height. In pixels.
# self.y2: Integer. Second height. In pixels.
# (doesn't matter the order between y1 and y2)
# self.delay: Integer. From 0 to 100. How much will the scrolling be delayed.
# self.fadeawayheight: Integer. Optional. The documentation doesn't explain well how to treat this value,
# so I won't care much about it.
def __init__(self, entrada: Union[str, 'Scroll', None] = None, subtitlerplugin: Union[bool, None] = None):
""" Constructs the object. Can use a string or copy from a similar object.
:param entrada: String, Dados.Events.Evento.Effect.Scroll.Scroll object, or None. String is used for loading
a SSA file. Scroll object will have copied values. None will start with direction, y1, y2, delay,
fadeawayheight and subtitlerplugin as "up", 0, 0, 0, Non e and False, respectively.
:param subtitlerplugin: True, False or None. True means the format will be read and written as
f"Scroll {direction}; {delay}; {y1}; {y2}; {fadeawayheight}". False means the format will be read and
written as f"Scroll {direction}; {y1}; {y2}; {delay}; {fadeawayheight}". If None, the constructor will
try to guess it.
"""
if entrada is None:
self.direction, self.y1, self.y2, self.delay, self.fadeawayheight = ["up", 0, 0, 0, None]
self.subtitlerplugin = False
elif isinstance(entrada, Scroll):
self.direction, self.y1, self.y2 = [entrada.getdirection(), entrada.gety1(), entrada.gety2()]
self.delay, self.fadeawayheight = [entrada.getdelay(), entrada.getfadeawayheight()]
self.setsubtitlerplugin(entrada.issubtitlerplugin())
else:
if isinstance(entrada, str) is False:
raise TypeError(f"{entrada} has to be a string or Scroll object.")
# assert(isinstance(entrada, str)), f"{entrada} has to be a string or Scroll object."
_ = f"{subtitlerplugin} must be a boolean or omitted."
if subtitlerplugin is not None:
if isinstance(subtitlerplugin, bool) is False:
raise TypeError(_)
# assert((subtitlerplugin is None) or isinstance(subtitlerplugin, bool)), _
texto = (entrada.strip()).lower()
if texto.startswith("scroll"):
texto = texto[6:].strip()
if texto.startswith("up"):
self.direction = "up"
texto = texto[2:]
elif texto.startswith("down"):
self.direction = "down"
texto = texto[4:]
else:
_ = f"{entrada} : 'up' or 'down' Not Found after Scroll."
raise ValueError(f"{_}")
parameters = texto.split(";")
# since the section starts with ';', the first value of the list must be removed
del parameters[0]
if len(parameters) < 3:
# Line too long
_ = f"{entrada} : too few arguments after Scroll {self.direction}({len(parameters)} "
raise ValueError(_)
if len(parameters) > 4:
# Line too long
_ = f"{entrada} : too many arguments after Scroll {self.direction}({len(parameters)}) "
raise ValueError(_)
try:
if len(parameters) == 3:
parameters = [int(parameters[0]), int(parameters[1]), int(parameters[2])]
else:
parameters = [int(parameters[0]), int(parameters[1]), int(parameters[2]), int(parameters[3])]
except ValueError:
raise ValueError(f"{entrada} the arguments aren't integers.")
# Here comes another messup of this format
# SSA reads Scroll up/down parameters as y1;y2;delay[;fadeawayheight]
# But 'Avery Lee's "Subtitler" plugin' reads the parameters as 'delay;y1;y2[;fadeawayheight]'
# The reader will try to guess which of the styles is being used based on the values. It will focus on using
# ';y1;y2;delay' normally though.
# if subtitlerplugin was not defined
if subtitlerplugin is not None:
self.subtitlerplugin = subtitlerplugin
else:
# try to guess if the order is ';delay;y1;y2' based on constraints
# delay has to be a value that goes from 0 to 100, so at least one of the values will be in that range
if (0 <= parameters[0]) and (parameters[0] <= 100) and parameters[2] > 100:
self.subtitlerplugin = True
else:
self.subtitlerplugin = False
if self.subtitlerplugin:
if len(parameters) == 3:
self.delay, self.y1, self.y2 = parameters
self.fadeawayheight = None
else:
self.delay, self.y1, self.y2, self.fadeawayheight = parameters
else:
if len(parameters) == 3:
self.y1, self.y2, self.delay = parameters
self.fadeawayheight = None
else:
self.y1, self.y2, self.delay, self.fadeawayheight = parameters
def gety1(self) -> int:
""" Y1 and Y2 are the height values where the text will scroll.
There's no respective order for both values. Any of the two can be the highest or lowest.
:return: Non-negative integer.
"""
return int(self.y1)
def gety2(self) -> int:
""" Y1 and Y2 are the height values where the text will scroll.
There's no respective order for both values. Any of the two can be the highest or lowest.
:return: Non-negative integer.
"""
return int(self.y2)
def getdelay(self) -> int:
""" Return the delay value of this object.
Integer from 0 to 100. The higher the value, the slower it scrolls.
Calculated as 1000/delay second/pixel.
0: no delay.
100: 0.1 second per pixel.
:return: Integer. From 0 to 100.
"""
return int(self.delay)
# Not sure if fadeawayheight is the distance that the scroll has to cover before fading,
# or the position on the screen where it starts fading.
# should return 0 be ok?
def getfadeawayheight(self) -> int:
""" Get fadeawayheight value of this object.
:return: Integer. Non-negative value.
"""
return self.fadeawayheight
def issubtitlerplugin(self) -> bool:
""" Get subtitlerplugin value.
True: f"Scroll {direction}; {delay}; {y1}; {y2}; {fadeawayheight}"
False: f"Scroll {direction}; {y1}; {y2}; {delay}; {fadeawayheight}"
:return: True or False.
"""
return self.subtitlerplugin
def getdirection(self) -> str:
""" Get direction.
:return: String. "up" or "down" only.
"""
return self.direction
def sety1(self, y1: int) -> 'Scroll':
""" Set y1 value of this object.
:param y1: Integer. Non-negative value.
:return: self.
"""
if isinstance(y1, int) is False:
raise TypeError(f"{y1} must be an integer.")
if y1 < 0:
raise ValueError(f"{y1} must be a non-negative value")
# assert (isinstance(y1, int)), f"{y1} must be an integer."
# assert (y1 >= 0), f"{y1} must be a non-negative value"
self.y1 = y1
return self
def sety2(self, y2: int) -> 'Scroll':
""" Set y2 value of this object.
:param y2: Integer. Non-negative value.
:return: self.
"""
if isinstance(y2, int) is False:
raise TypeError(f"{y2} must be an integer.")
# assert (isinstance(y2, int)), f"{y2} must be an integer."
if y2 < 0:
raise ValueError(f"{y2} must be a non-negative value")
# assert (y2 >= 0), f"{y2} must be a non-negative value"
self.y2 = y2
return self
def setdelay(self, delay: int) -> 'Scroll':
""" Set delay value of this object.
Integer from 0 to 100. The higher the value, the slower it scrolls.
Calculated as 1000/delay second/pixel.
0: no delay.
100: 0.1 second per pixel.
:param delay: Integer. From 0 to 100.
:return: self.
"""
if isinstance(delay, int) is False:
raise TypeError(f"{delay} must be an integer")
# assert (isinstance(delay, int)), f"{delay} must be an integer"
if (delay < 0) or (delay > 100):
raise ValueError(f"{delay} must be a value from 0 to 100")
# assert (0 <= delay) and (delay <= 100), f"{delay} must be a value from 0 to 100"
self.delay = delay
return self
def setfadeawayheight(self, fadeawayheight: int) -> 'Scroll':
""" Set fadeawayheight value of this object.
:param fadeawayheight: Integer. Non-negative value.
:return: self.
"""
if isinstance(fadeawayheight, int) is False:
raise TypeError(f"{fadeawayheight} must be an integer")
# assert(isinstance(fadeawayheight, int)), f"{fadeawayheight} must be an integer"
if fadeawayheight < 0:
raise ValueError(f"{fadeawayheight} must be a positive value")
# assert (fadeawayheight >= 0), f"{fadeawayheight} must be a positive value"
self.fadeawayheight = fadeawayheight
return self
def setsubtitlerplugin(self, subtitlerplugin: bool) -> "Scroll":
    """Choose the field order used when rendering this effect.

    When True the string form is
        f"Scroll {direction}; {delay}; {y1}; {y2}; {fadeawayheight}"
    and when False it is
        f"Scroll {direction}; {y1}; {y2}; {delay}; {fadeawayheight}"

    :param subtitlerplugin: True or False.
    :return: self, so calls can be chained.
    :raises TypeError: when the argument is not a bool.
    """
    if not isinstance(subtitlerplugin, bool):
        raise TypeError(f"{subtitlerplugin} must be True or False")
    self.subtitlerplugin = subtitlerplugin
    return self
def setdirection(self, direction: str) -> 'Scroll':
    """Assign the scroll direction.

    :param direction: String, "up" or "down" (case-insensitive);
        stored lower-cased.
    :return: self, so calls can be chained.
    :raises TypeError: when direction is not a string.
    :raises ValueError: when direction is neither "up" nor "down".
    """
    if not isinstance(direction, str):
        raise TypeError(f"{direction} must be 'up' or 'down'.")
    lowered = direction.lower()  # lower-case once, reuse below
    if lowered not in ("up", "down"):
        raise ValueError(f"{direction} must be 'up' or 'down'.")
    self.direction = lowered
    return self
def __repr__(self) -> str:
    """Render this Scroll effect in SSA format.

    With subtitlerplugin the delay precedes the coordinates:
        f"Scroll {direction}; {delay}; {y1}; {y2}; {fadeawayheight}"
    otherwise it follows them:
        f"Scroll {direction}; {y1}; {y2}; {delay}; {fadeawayheight}"
    The fadeawayheight field is omitted when it is None.

    :return: This object string in SSA format.
    """
    if self.subtitlerplugin:
        fields = (self.delay, self.y1, self.y2)
    else:
        fields = (self.y1, self.y2, self.delay)
    rendered = f"Scroll {self.direction.lower()}; {fields[0]}; {fields[1]}; {fields[2]}"
    if self.fadeawayheight is not None:
        rendered = f"{rendered}; {self.fadeawayheight}"
    return rendered
| 0 | 0 | 0 |
5ca478338d5f22d1c242e56ababc6292ace19d3b | 362 | py | Python | ai_reader_demo/autoencoder/utils.py | bbueno5000/ai_reader_demo | 4c159d6455cd0f7a76e8277a589afe7514b9cbc4 | [
"Apache-2.0"
] | null | null | null | ai_reader_demo/autoencoder/utils.py | bbueno5000/ai_reader_demo | 4c159d6455cd0f7a76e8277a589afe7514b9cbc4 | [
"Apache-2.0"
] | null | null | null | ai_reader_demo/autoencoder/utils.py | bbueno5000/ai_reader_demo | 4c159d6455cd0f7a76e8277a589afe7514b9cbc4 | [
"Apache-2.0"
] | null | null | null | """
DOCSTRING
"""
import numpy
import tensorflow
def xavier_init(fan_in, fan_out, constant = 1):
    """Create a TF1 tensor sampled with the Xavier/Glorot uniform scheme.

    Samples uniformly from [-b, b] with b = constant * sqrt(6 / (fan_in +
    fan_out)), the bound of Glorot & Bengio's (2010) uniform initializer
    (the formula visible below); the placeholder "DOCSTRING" is replaced.

    :param fan_in: number of input units of the layer.
    :param fan_out: number of output units of the layer.
    :param constant: optional scaling factor applied to the bound.
    :return: a float32 tensor of shape (fan_in, fan_out).
    """
    # Hoisted: the bound was previously computed twice (once for each end
    # of the interval); -bound/+bound is the identical range.
    bound = constant * numpy.sqrt(6.0 / (fan_in + fan_out))
    return tensorflow.random_uniform(
        (fan_in, fan_out), minval=-bound, maxval=bound, dtype=tensorflow.float32)
| 24.133333 | 77 | 0.651934 | """
DOCSTRING
"""
import numpy
import tensorflow
def xavier_init(fan_in, fan_out, constant = 1):
    """Return a TF1 tensor of shape (fan_in, fan_out) sampled uniformly from
    [low, high] with bound constant * sqrt(6 / (fan_in + fan_out)) — the
    Xavier/Glorot uniform initialisation range (per the function's name and
    the formula below).

    :param fan_in: number of input units of the layer.
    :param fan_out: number of output units of the layer.
    :param constant: scaling factor applied to the bound.
    """
    low = -constant * numpy.sqrt(6.0 / (fan_in + fan_out))
    high = constant * numpy.sqrt(6.0 / (fan_in + fan_out))
    return tensorflow.random_uniform(
        (fan_in, fan_out), minval=low, maxval=high, dtype=tensorflow.float32)
| 0 | 0 | 0 |
22026e61153287b58445150128902a1f2d928f9e | 2,488 | py | Python | server/newconnectionwhodis/tests/testCommentView.py | dylandeco/cmput404-group-project | ddbfba1b2778b9efe916d3e98cc8177ed7f0791b | [
"Apache-2.0"
] | 4 | 2021-10-20T20:50:55.000Z | 2021-12-04T03:26:56.000Z | server/newconnectionwhodis/tests/testCommentView.py | dylandeco/cmput404-group-project | ddbfba1b2778b9efe916d3e98cc8177ed7f0791b | [
"Apache-2.0"
] | 69 | 2021-10-22T01:51:14.000Z | 2021-12-09T02:57:57.000Z | server/newconnectionwhodis/tests/testCommentView.py | dylandeco/cmput404-group-project | ddbfba1b2778b9efe916d3e98cc8177ed7f0791b | [
"Apache-2.0"
] | 1 | 2022-03-13T20:45:12.000Z | 2022-03-13T20:45:12.000Z | from django.test import TestCase
from rest_framework.test import APIClient
from . import util
from .. import serializers
| 36.057971 | 101 | 0.60209 | from django.test import TestCase
from rest_framework.test import APIClient
from . import util
from .. import serializers
class CommentViewTests(TestCase):
    """Endpoint tests for listing, paginating, and creating comments."""

    def setUp(self):
        """
        Create a new post from a new author
        """
        AUTHOR_NAME, AUTHOR_GITHUB, POST_CONTENT = "Muhammad", "Exanut", 'Placeholder'
        self.client = APIClient()
        self.author = util.create_author(AUTHOR_NAME, AUTHOR_GITHUB)
        self.client.force_authenticate(self.author.user)
        self.author_id = self.author.id
        self.post = util.create_post(self.author, POST_CONTENT)
        self.post_id = self.post.id

    def test_no_comments(self):
        """
        Tests that a db with a single author and post has no comments
        """
        response = self.client.get(
            f'/api/v1/author/{self.author_id}/posts/{self.post_id}/comments/'
        )
        self.assertEqual(response.status_code, 200)
        d = util.response_to_json(response)
        self.assertEqual(d['type'], 'comments')
        self.assertListEqual(d['comments'], [])

    # TODO: More comment tests including pagination tests
    def test_comments_pagination(self):
        """
        Test that the optional page and size query parameters work
        """
        NUM_COMMENTS = 20
        PAGE, SIZE = 4, 3
        post_response = self.client.get(
            f"/api/v1/author/{self.author.id}/posts/{self.post.id}/"
        )
        author = util.response_to_json(post_response)["author"]
        for i in range(NUM_COMMENTS):
            util.create_comment(author, self.post, f"Comment_{i}")
        response = self.client.get(
            f'/api/v1/author/{self.author_id}/posts/{self.post_id}/comments/?page={PAGE}&size={SIZE}'
        )
        d = util.response_to_json(response)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual throughout for forward compatibility.
        self.assertEqual(d['type'], 'comments')
        comments = d['comments']
        self.assertEqual(len(comments), SIZE)
        # The expected-value formula indexes from the end of the created
        # range, i.e. pages run from the most recently created comment.
        for i in range(SIZE):
            self.assertEqual(
                comments[i]["comment"], f"Comment_{(NUM_COMMENTS - (PAGE - 1) * SIZE - i) - 1}")

    def test_comment_push(self):
        """
        Test that POSTing a comment to the comments endpoint is rejected
        with 405 (method not allowed).
        """
        data = {
            'author': self.author_id,
            'post': self.post_id,
            'comment': 'very post much wow',
        }
        response = self.client.post(
            f'/api/v1/author/{self.author_id}/posts/{self.post_id}/comments/',
            data,
            format='json'
        )
        self.assertEqual(response.status_code, 405)
| 374 | 1,968 | 23 |
ee4500b57720947963eb5889eda0adf1bc435433 | 10,365 | py | Python | mysite/scisheets/core/helpers/cell_types.py | ScienceStacks/JViz | c8de23d90d49d4c9bc10da25f4a87d6f44aab138 | [
"Artistic-2.0",
"Apache-2.0"
] | 31 | 2016-11-16T22:34:35.000Z | 2022-03-22T22:16:11.000Z | mysite/scisheets/core/helpers/cell_types.py | ScienceStacks/JViz | c8de23d90d49d4c9bc10da25f4a87d6f44aab138 | [
"Artistic-2.0",
"Apache-2.0"
] | 6 | 2017-06-24T06:29:36.000Z | 2022-01-23T06:30:01.000Z | mysite/scisheets/core/helpers/cell_types.py | ScienceStacks/JViz | c8de23d90d49d4c9bc10da25f4a87d6f44aab138 | [
"Artistic-2.0",
"Apache-2.0"
] | 4 | 2017-07-27T16:23:50.000Z | 2022-03-12T06:36:13.000Z | '''Utilities used in core scitable code.'''
from extended_array import ExtendedArray
from CommonUtil.prune_nulls import pruneNulls
import collections
import math
import numpy as np
import warnings
THRESHOLD = 0.000001 # Threshold for value comparisons
################### Classes ############################
# Used to define a DataClass
# cls is the data type that can be tested in isinstance
# cons is a function that constructs an instance of cls
# taking as an argument a list
# Usage: data_class = DataClass(cls=ExtendedArray,
# cons=(lambda(x: ExtendedArray(x))))
# Note: Classes must have a public property name that is the
# name of the column
DataClass = collections.namedtuple('DataClass', 'cls cons')
########### CONSTANTS ################
def makeArray(aList):
    """Constructor for DATACLASS_ARRAY: wraps a list in an ExtendedArray.

    Restored: DATACLASS_ARRAY below references makeArray, so without this
    definition the module raises NameError at import time.

    :param list aList: values to wrap
    :return ExtendedArray:
    """
    return ExtendedArray(values=aList)
DATACLASS_ARRAY = DataClass(cls=ExtendedArray,
                            cons=makeArray)
################ Internal Classes ################
class XType(object):
    """Behaviour shared by all extended scalar types."""

    @classmethod
    def needsCoercion(cls, types):
        """Return True when every entry of *types* is coercible to cls.

        :param types: iterable of type objects to inspect
        :return bool: True if coercion should be invoked
        """
        for candidate in types:
            if not cls.isCoercible(candidate):
                return False
        return True
class XInt(XType):
    """
    Extended int: accepts plain ints, int-valued strings, and
    int-valued floats. Note that in python 1 is equivalent to True
    and 0 is equivalent to False.
    """

    @classmethod
    def isBaseType(cls, val):
        """Return True when *val* is a plain int (bools included, since
        bool is a subclass of int)."""
        return isinstance(val, int)

    @classmethod
    def isXType(cls, val):
        """Return True when *val* belongs to the extended int type.

        A string counts when int(val) parses (a non-numeric string makes
        int() raise ValueError, which callers such as getType() swallow);
        a float counts only when it has no fractional part.
        """
        if cls.isBaseType(val):
            return True
        if isStr(val):
            return isinstance(int(val), int)
        return isinstance(val, float) and int(val) == val

    @classmethod
    def isCoercible(cls, a_type):
        """Return True when *a_type* can be coerced to int."""
        return a_type in (XInt, int, XBool, bool)

    @classmethod
    def coerce(cls, val):
        """Convert a coercible value to int.

        :raises ValueError: when *val* cannot be converted
        """
        try:
            return int(val)
        except ValueError:
            raise ValueError("%s is not a %s" % (str(val), str(cls)))
class XFloat(XType):
    """
    Extended float: accepts floats, float-valued strings, and None
    (None is coerced to np.nan).
    """

    @classmethod
    def isBaseType(cls, val):
        """Return True when *val* is a plain float.

        :param val: value to check if it's a base type
        :return: True if base type; otherwise False
        """
        return isinstance(val, float)

    @classmethod
    def isXType(cls, val):
        """Return True when *val* belongs to the extended float type.

        A string counts when float(val) parses; a non-numeric string makes
        float() raise ValueError, which callers such as getType() swallow.
        Fix: previously this method fell through and implicitly returned
        None for values that are neither floats nor strings; it now
        returns an explicit False (equivalent in every boolean context).
        """
        if cls.isBaseType(val):
            return True
        if isStr(val):
            return isinstance(float(val), float)
        return False

    @classmethod
    def isCoercible(cls, a_type):
        """Return True when *a_type* can be coerced to float."""
        return a_type in [XFloat, float, int, XInt, None, bool, XBool]

    @classmethod
    def coerce(cls, val):
        """Convert a coercible value to float; None becomes np.nan.

        :raises ValueError: when *val* cannot be converted
        """
        try:
            if val is None:
                return np.nan
            return float(val)
        except ValueError:
            raise ValueError("%s is not a %s" % (str(val), str(cls)))
class XBool(XType):
    """
    Extended bool: accepts True/False and the strings 'True'/'False'.
    """

    @classmethod
    def isBaseType(cls, val):
        """Return True when *val* is a plain bool.

        Floats are excluded explicitly because python can treat 1.0 as
        True; note the int 1 still matches, since 1 == True.
        """
        if isinstance(val, float):
            return False
        return val in [True, False]

    @classmethod
    def isXType(cls, val):
        """Return True when *val* belongs to the extended bool type.

        Non-string iterables are rejected outright.
        """
        if isinstance(val, collections.Iterable) and not isStr(val):
            return False
        return cls.isBaseType(val) or val in ['True', 'False']

    @classmethod
    def isCoercible(cls, a_type):
        """Return True when *a_type* can be coerced to bool."""
        return a_type in [bool, XBool]

    @classmethod
    def coerce(cls, val):
        """Convert an extended bool to a plain bool.

        :raises ValueError: for anything outside the accepted values
        """
        if val in [False, 'False']:
            return False
        if val in [True, 'True']:
            return True
        raise ValueError("Input is not %s." % str(cls))
################ Functions ################
def isEquivalentFloats(val1, val2):
    """
    Determines whether two floats are close enough to count as equal.

    Two NaNs compare as equivalent; a NaN never matches a number.
    Otherwise the relative difference is compared against THRESHOLD,
    with a zero denominator treated as equality.

    :param float val1, val2:
    :return bool:
    """
    try:
        nan1, nan2 = np.isnan(val1), np.isnan(val2)
        if nan1 and nan2:
            return True
        if nan1 or nan2:
            return False
        denom = max(abs(val1), abs(val2))
        if denom == 0:
            return True
        return 1.0 * abs(val1 - val2) / denom < THRESHOLD
    except ValueError:
        return False
def isFloat(value):
    """
    :param object value: value whose inferred type is inspected
    :return: True when getType() classifies *value* as XFloat.
    """
    return getType(value) == XFloat
def isFloats(values):
    """
    :param values: single value or iterable of values
    :return: True when the shared inferred type is XFloat.
    """
    as_list = makeIterable(values)
    return getIterableType(as_list) == XFloat
def getType(val):
    """
    Finds the most restrictive type for the value.

    Checkers run from most to least restrictive; a checker that raises
    ValueError simply means "not this type" and is skipped.

    :param val: value to interrogate
    :return: one of XBool, XInt, XFloat, None, str, unicode, object
    """
    checkers = (
        (XBool, XBool.isXType),
        (XInt, XInt.isXType),
        (XFloat, XFloat.isXType),
        (None, lambda x: x is None),
        (str, lambda x: isinstance(x, str)),
        (unicode, lambda x: isinstance(x, unicode)),  # must stay last
    )
    for typ, check in checkers:
        try:
            if check(val):
                return typ
        except ValueError:
            pass
    return object
def getIterableType(values):
    """
    Finds the most restrictive type shared by a set of values.

    :param values: iterable
    :return: one of object, unicode, str, XFloat, XInt, XBool, None
        (falls off the end — i.e. returns None — when nothing matches)
    """
    observed = [getType(x) for x in values]
    for candidate in (object, unicode, str, XFloat, XInt, XBool, None):
        if candidate in observed:
            return candidate
def coerceData(data):
    """
    Coerces the values in *data* to the most restrictive common type so
    that the resulting list is treated correctly when constructing a
    numpy array.

    :param data: single value or iterable
    :return list: coerced values, or a plain copy when no coercion applies
    """
    data = makeIterable(data)
    observed = [getType(item) for item in data]
    # Most restrictive first: bool, then int, then float.
    for x_type in (XBool, XInt, XFloat):
        if x_type.needsCoercion(observed):
            return [x_type.coerce(item) for item in data]
    return list(data)
def isIterable(val):
    """
    Verifies that the value truly is iterable (strings are excluded).

    :return bool: True if iterable
    """
    return (not isStr(val)) and isinstance(val, collections.Iterable)
def isStr(val):
    """
    :param object val:
    :return bool: True when *val* is a byte or unicode string.
    """
    return isinstance(val, (str, unicode))
def isStrs(vals):
    """
    :param iterable vals: single value or iterable
    :return bool: True when every element is a string
    """
    return all(isStr(x) for x in makeIterable(vals))
def makeIterable(val):
    """
    Wraps *val* in a list when it is not already iterable; otherwise
    returns its elements as a new list (note: strings are iterable, so
    they are split into characters here, matching the original).

    :param object val:
    :return list:
    """
    if isinstance(val, collections.Iterable):
        return list(val)
    return [val]
def isEquivalentData(val1, val2):
    """
    Determines if two objects are equivalent. Recursively inspects
    iterables; nulls are pruned before element-wise comparison, and
    scalars are additionally compared after type coercion.

    :param object val1, val2:
    :return bool:
    """
    warnings.filterwarnings('error')
    try:
        if isStr(val1) and isStr(val2):
            return val1 == val2  # Catch where this becomes a warning
    except Warning:
        # Comparison raised a warning; fall through to the generic
        # checks below. (A leftover "import pdb; pdb.set_trace()"
        # debugging breakpoint was removed from this handler.)
        pass
    if isIterable(val1):
        try:
            pruned_val1 = pruneNulls(val1)
            pruned_val2 = pruneNulls(val2)
            if len(pruned_val1) != len(pruned_val2):
                return False
            for idx in range(len(pruned_val1)):
                if not isEquivalentData(pruned_val1[idx], pruned_val2[idx]):
                    return False
            return True
        except TypeError:  # was "as err" with err unused
            return False
    elif isinstance(val2, collections.Iterable) and not isStr(val2):
        # val1 is scalar but val2 is a (non-string) iterable.
        return False
    else:
        if isFloat(val1) and isEquivalentFloats(val1, val2):
            return True
        try:
            if val1 == val2:
                return True
        except Exception:  # narrowed from a bare except; keep best-effort
            pass
        values = coerceData([val1, val2])
        coerced_val1 = values[0]
        coerced_val2 = values[1]
        if isFloat(coerced_val1):
            return isEquivalentFloats(coerced_val1, coerced_val2)
        return coerced_val1 == coerced_val2
| 26.922078 | 75 | 0.64602 | '''Utilities used in core scitable code.'''
from extended_array import ExtendedArray
from CommonUtil.prune_nulls import pruneNulls
import collections
import math
import numpy as np
import warnings
THRESHOLD = 0.000001 # Threshold for value comparisons
################### Classes ############################
# Used to define a DataClass
# cls is the data type that can be tested in isinstance
# cons is a function that constructs an instance of cls
# taking as an argument a list
# Usage: data_class = DataClass(cls=ExtendedArray,
# cons=(lambda(x: ExtendedArray(x))))
# Note: Classes must have a public property name that is the
# name of the column
DataClass = collections.namedtuple('DataClass', 'cls cons')
########### CONSTANTS ################
def makeArray(aList):
    """Constructor used by DATACLASS_ARRAY: wraps a list of values in an
    ExtendedArray."""
    return ExtendedArray(values=aList)
DATACLASS_ARRAY = DataClass(cls=ExtendedArray,
                            cons=makeArray)
################ Internal Classes ################
class XType(object):
    """Behaviour shared by all extended scalar types."""

    @classmethod
    def needsCoercion(cls, types):
        """Return True when every entry of *types* is coercible to cls.

        :param types: iterable of type objects to inspect
        :return bool: True if coercion should be invoked
        """
        for candidate in types:
            if not cls.isCoercible(candidate):
                return False
        return True
class XInt(XType):
    """
    Extended int: accepts plain ints, int-valued strings, and
    int-valued floats. Note that in python 1 is equivalent to True
    and 0 is equivalent to False.
    """

    @classmethod
    def isBaseType(cls, val):
        """Return True when *val* is a plain int (bools included, since
        bool is a subclass of int)."""
        return isinstance(val, int)

    @classmethod
    def isXType(cls, val):
        """Return True when *val* belongs to the extended int type.

        A string counts when int(val) parses (a non-numeric string makes
        int() raise ValueError, which callers such as getType() swallow);
        a float counts only when it has no fractional part.
        """
        if cls.isBaseType(val):
            return True
        if isStr(val):
            return isinstance(int(val), int)
        return isinstance(val, float) and int(val) == val

    @classmethod
    def isCoercible(cls, a_type):
        """Return True when *a_type* can be coerced to int."""
        return a_type in (XInt, int, XBool, bool)

    @classmethod
    def coerce(cls, val):
        """Convert a coercible value to int.

        :raises ValueError: when *val* cannot be converted
        """
        try:
            return int(val)
        except ValueError:
            raise ValueError("%s is not a %s" % (str(val), str(cls)))
class XFloat(XType):
    """
    Extended float: accepts floats, float-valued strings, and None
    (None is coerced to np.nan).
    """

    @classmethod
    def isBaseType(cls, val):
        """Return True when *val* is a plain float.

        :param val: value to check if it's a base type
        :return: True if base type; otherwise False
        """
        return isinstance(val, float)

    @classmethod
    def isXType(cls, val):
        """Return True when *val* belongs to the extended float type.

        A string counts when float(val) parses; a non-numeric string makes
        float() raise ValueError, which callers such as getType() swallow.
        Fix: previously this method fell through and implicitly returned
        None for values that are neither floats nor strings; it now
        returns an explicit False (equivalent in every boolean context).
        """
        if cls.isBaseType(val):
            return True
        if isStr(val):
            return isinstance(float(val), float)
        return False

    @classmethod
    def isCoercible(cls, a_type):
        """Return True when *a_type* can be coerced to float."""
        return a_type in [XFloat, float, int, XInt, None, bool, XBool]

    @classmethod
    def coerce(cls, val):
        """Convert a coercible value to float; None becomes np.nan.

        :raises ValueError: when *val* cannot be converted
        """
        try:
            if val is None:
                return np.nan
            return float(val)
        except ValueError:
            raise ValueError("%s is not a %s" % (str(val), str(cls)))
class XBool(XType):
    """
    Extended bool: accepts True/False and the strings 'True'/'False'.
    """

    @classmethod
    def isBaseType(cls, val):
        """Return True when *val* is a plain bool.

        Floats are excluded explicitly because python can treat 1.0 as
        True; note the int 1 still matches, since 1 == True.
        """
        if isinstance(val, float):
            return False
        return val in [True, False]

    @classmethod
    def isXType(cls, val):
        """Return True when *val* belongs to the extended bool type.

        Non-string iterables are rejected outright.
        """
        if isinstance(val, collections.Iterable) and not isStr(val):
            return False
        return cls.isBaseType(val) or val in ['True', 'False']

    @classmethod
    def isCoercible(cls, a_type):
        """Return True when *a_type* can be coerced to bool."""
        return a_type in [bool, XBool]

    @classmethod
    def coerce(cls, val):
        """Convert an extended bool to a plain bool.

        :raises ValueError: for anything outside the accepted values
        """
        if val in [False, 'False']:
            return False
        if val in [True, 'True']:
            return True
        raise ValueError("Input is not %s." % str(cls))
################ Functions ################
def isEquivalentFloats(val1, val2):
    """
    Determines whether two floats are close enough to count as equal.

    Two NaNs compare as equivalent; a NaN never matches a number.
    Otherwise the relative difference is compared against THRESHOLD,
    with a zero denominator treated as equality.

    :param float val1, val2:
    :return bool:
    """
    try:
        nan1, nan2 = np.isnan(val1), np.isnan(val2)
        if nan1 and nan2:
            return True
        if nan1 or nan2:
            return False
        denom = max(abs(val1), abs(val2))
        if denom == 0:
            return True
        return 1.0 * abs(val1 - val2) / denom < THRESHOLD
    except ValueError:
        return False
def isFloat(value):
    """
    :param object value: value whose inferred type is inspected
    :return: True when getType() classifies *value* as XFloat.
    """
    return getType(value) == XFloat
def isFloats(values):
    """
    :param values: single value or iterable of values
    :return: True when the shared inferred type is XFloat.
    """
    as_list = makeIterable(values)
    return getIterableType(as_list) == XFloat
def getType(val):
    """
    Finds the most restrictive type for the value.

    Checkers run from most to least restrictive; a checker that raises
    ValueError simply means "not this type" and is skipped.

    :param val: value to interrogate
    :return: one of XBool, XInt, XFloat, None, str, unicode, object
    """
    checkers = (
        (XBool, XBool.isXType),
        (XInt, XInt.isXType),
        (XFloat, XFloat.isXType),
        (None, lambda x: x is None),
        (str, lambda x: isinstance(x, str)),
        (unicode, lambda x: isinstance(x, unicode)),  # must stay last
    )
    for typ, check in checkers:
        try:
            if check(val):
                return typ
        except ValueError:
            pass
    return object
def getIterableType(values):
    """
    Finds the most restrictive type shared by a set of values.

    :param values: iterable
    :return: one of object, unicode, str, XFloat, XInt, XBool, None
        (falls off the end — i.e. returns None — when nothing matches)
    """
    observed = [getType(x) for x in values]
    for candidate in (object, unicode, str, XFloat, XInt, XBool, None):
        if candidate in observed:
            return candidate
def coerceData(data):
    """
    Coerces the values in *data* to the most restrictive common type so
    that the resulting list is treated correctly when constructing a
    numpy array.

    :param data: single value or iterable
    :return list: coerced values, or a plain copy when no coercion applies
    """
    data = makeIterable(data)
    observed = [getType(item) for item in data]
    # Most restrictive first: bool, then int, then float.
    for x_type in (XBool, XInt, XFloat):
        if x_type.needsCoercion(observed):
            return [x_type.coerce(item) for item in data]
    return list(data)
def isIterable(val):
    """
    Verifies that the value truly is iterable (strings are excluded).

    :return bool: True if iterable
    """
    return (not isStr(val)) and isinstance(val, collections.Iterable)
def isStr(val):
    """
    :param object val:
    :return bool: True when *val* is a byte or unicode string.
    """
    return isinstance(val, (str, unicode))
def isStrs(vals):
    """
    :param iterable vals: single value or iterable
    :return bool: True when every element is a string
    """
    return all(isStr(x) for x in makeIterable(vals))
def makeIterable(val):
    """
    Wraps *val* in a list when it is not already iterable; otherwise
    returns its elements as a new list (note: strings are iterable, so
    they are split into characters here, matching the original).

    :param object val:
    :return list:
    """
    if isinstance(val, collections.Iterable):
        return list(val)
    return [val]
def isEquivalentData(val1, val2):
    """
    Determines if two objects are equivalent. Recursively inspects
    iterables; nulls are pruned before element-wise comparison, and
    scalars are additionally compared after type coercion.

    :param object val1, val2:
    :return bool:
    """
    warnings.filterwarnings('error')
    try:
        if isStr(val1) and isStr(val2):
            return val1 == val2  # Catch where this becomes a warning
    except Warning:
        # Comparison raised a warning; fall through to the generic
        # checks below. (A leftover "import pdb; pdb.set_trace()"
        # debugging breakpoint was removed from this handler.)
        pass
    if isIterable(val1):
        try:
            pruned_val1 = pruneNulls(val1)
            pruned_val2 = pruneNulls(val2)
            if len(pruned_val1) != len(pruned_val2):
                return False
            for idx in range(len(pruned_val1)):
                if not isEquivalentData(pruned_val1[idx], pruned_val2[idx]):
                    return False
            return True
        except TypeError:  # was "as err" with err unused
            return False
    elif isinstance(val2, collections.Iterable) and not isStr(val2):
        # val1 is scalar but val2 is a (non-string) iterable.
        return False
    else:
        if isFloat(val1) and isEquivalentFloats(val1, val2):
            return True
        try:
            if val1 == val2:
                return True
        except Exception:  # narrowed from a bare except; keep best-effort
            pass
        values = coerceData([val1, val2])
        coerced_val1 = values[0]
        coerced_val2 = values[1]
        if isFloat(coerced_val1):
            return isEquivalentFloats(coerced_val1, coerced_val2)
        return coerced_val1 == coerced_val2
| 37 | 0 | 22 |
0865c3e22a02807f9e34c3b751ce4c4140441666 | 124 | py | Python | insanic/choices.py | crazytruth/insanic | f9b61611317d873fe7688a5fd13eecb9a496ead5 | [
"MIT"
] | 4 | 2020-10-13T04:34:21.000Z | 2022-02-18T05:34:03.000Z | insanic/choices.py | crazytruth/insanic | f9b61611317d873fe7688a5fd13eecb9a496ead5 | [
"MIT"
] | 1 | 2020-09-29T06:59:36.000Z | 2020-09-29T06:59:36.000Z | insanic/choices.py | crazytruth/insanic | f9b61611317d873fe7688a5fd13eecb9a496ead5 | [
"MIT"
] | null | null | null | from enum import IntEnum
| 13.777778 | 26 | 0.653226 | from enum import IntEnum
class UserLevels(IntEnum):
    """Integer account levels used as user-status values.

    Values appear ordered from most restricted (BANNED=0) to most
    privileged (STAFF=1000) — NOTE(review): confirm against call sites
    before relying on numeric comparisons between levels.
    """
    BANNED = 0
    DEACTIVATED = 10
    ACTIVE = 100
    STAFF = 1000
| 0 | 75 | 23 |
07cb4152931f9f69118c1d75abf236e9d13d4533 | 1,762 | py | Python | rbc/externals/__init__.py | brenocfg/rbc | 7274504ff6c72ff50467eaaab83e9611f446ea40 | [
"BSD-3-Clause"
] | 21 | 2019-05-21T14:44:01.000Z | 2021-12-09T21:48:36.000Z | rbc/externals/__init__.py | brenocfg/rbc | 7274504ff6c72ff50467eaaab83e9611f446ea40 | [
"BSD-3-Clause"
] | 349 | 2019-07-31T17:48:21.000Z | 2022-03-31T06:57:52.000Z | rbc/externals/__init__.py | brenocfg/rbc | 7274504ff6c72ff50467eaaab83e9611f446ea40 | [
"BSD-3-Clause"
] | 10 | 2020-01-23T20:14:17.000Z | 2022-02-08T20:43:08.000Z | import types as py_types
from rbc.targetinfo import TargetInfo
from rbc.typesystem import Type
from numba.core import funcdesc, typing
| 26.69697 | 90 | 0.662883 | import types as py_types
from rbc.targetinfo import TargetInfo
from rbc.typesystem import Type
from numba.core import funcdesc, typing
def gen_codegen(fn_name):
    """Build a numba lowering callback that emits a call to the external
    function named *fn_name* with the lowered signature."""
    def codegen(context, builder, sig, args):
        # Re-describe the external symbol so it can be declared in the
        # module currently being built.
        desc = funcdesc.ExternalFunctionDescriptor(fn_name, sig.return_type, sig.args)
        fn = context.declare_external_function(builder.module, desc)
        return builder.call(fn, args)
    return codegen
def dispatch_codegen(cpu, gpu):
    """Return a codegen that picks the *cpu* or *gpu* implementation at
    lowering time, based on whether the active target is a CPU."""
    def inner(context, builder, sig, args):
        selected = cpu if TargetInfo().is_cpu else gpu
        return selected(context, builder, sig, args)
    return inner
def sanitize(name):
    """Append an underscore to *name* when it collides with a reserved word.

    Fix: ``('in')`` is just the string ``'in'`` (a one-element tuple needs a
    trailing comma), so ``name in ('in')`` was a substring test that also
    mangled 'i' and 'n'. A real tuple restores exact-match behaviour.

    :param name: candidate identifier (used as a lambda argument name).
    :return: *name*, with a trailing underscore when it is forbidden.
    """
    forbidden_names = ('in',)
    if name in forbidden_names:
        return f"{name}_"
    return name
def register_external(
    fname,
    retty,
    argtys,
    module_name,
    module_globals,
    typing_registry,
    lowering_registry,
    doc,
):
    """Create and register a typed external-function stub named *fname*.

    Builds a placeholder Python function whose signature mirrors *argtys*,
    registers a numba typing template for it, publishes it into
    *module_globals* under *fname*, and returns it.
    """
    # expose
    # Build a no-op function with the right argument names; sanitize()
    # renames arguments that would otherwise be invalid identifiers.
    fn = eval(f'lambda {",".join(map(lambda x: sanitize(x.name), argtys))}: None', {}, {})
    _key = py_types.FunctionType(fn.__code__, {}, fname)
    _key.__module__ = __name__
    # Temporarily expose the stub in this module's globals; it is deleted
    # again just before returning (see the `del` below).
    globals()[fname] = _key
    # typing
    @typing_registry.register_global(_key)
    class ExternalTemplate(typing.templates.AbstractTemplate):
        key = _key
        def generic(self, args, kws):
            retty_ = Type.fromobject(retty).tonumba()
            argtys_ = tuple(map(lambda x: Type.fromobject(x.ty).tonumba(), argtys))
            # The lowering is installed here, i.e. when the template's
            # generic() runs during typing.
            codegen = gen_codegen(fname)
            lowering_registry.lower(_key, *argtys_)(codegen)
            return retty_(*argtys_)
    module_globals[fname] = _key
    _key.__module__ = module_name
    _key.__doc__ = doc
    del globals()[fname]
    return _key
| 1,531 | 0 | 92 |
2b31a2d424e0ce4d6bb1fb83ca523a0c4bfeb68a | 312 | py | Python | backend/apps/mails/utils.py | KuanWeiLee/froggy-service | 0db6cd90c1641a98c1e06638f8e9591c2daf39e0 | [
"MIT"
] | 174 | 2019-02-19T11:35:45.000Z | 2021-12-20T03:20:28.000Z | backend/apps/mails/utils.py | KuanWeiLee/froggy-service | 0db6cd90c1641a98c1e06638f8e9591c2daf39e0 | [
"MIT"
] | 56 | 2019-01-02T06:49:13.000Z | 2021-03-23T09:31:18.000Z | backend/apps/mails/utils.py | KuanWeiLee/froggy-service | 0db6cd90c1641a98c1e06638f8e9591c2daf39e0 | [
"MIT"
] | 36 | 2018-12-28T02:10:06.000Z | 2021-09-02T03:06:35.000Z | from django.conf import settings
from django.core.mail import send_mail
| 24 | 79 | 0.762821 | from django.conf import settings
from django.core.mail import send_mail
def sendgrid_system_mail(message):
    """Send *message* to every configured admin address.

    Uses settings.SERVER_EMAIL as the sender and the email part of each
    settings.ADMINS entry as recipients; fail_silently=False, so delivery
    errors propagate to the caller.
    """
    subject = "選服系統系統通知"
    recipients = [email for name, email in settings.ADMINS]
    return send_mail(subject, message, settings.SERVER_EMAIL, recipients, fail_silently=False)
| 232 | 0 | 23 |
0016809fbb9a5b580b379426d473750d858fb887 | 888 | py | Python | templates/scale.py | diramazioni/blend-sonic | 71f8782aea9e743383324f079dd1b49b31528f44 | [
"RSA-MD"
] | 2 | 2022-01-06T14:47:22.000Z | 2022-02-02T01:55:49.000Z | templates/scale.py | diramazioni/blend-sonic | 71f8782aea9e743383324f079dd1b49b31528f44 | [
"RSA-MD"
] | null | null | null | templates/scale.py | diramazioni/blend-sonic | 71f8782aea9e743383324f079dd1b49b31528f44 | [
"RSA-MD"
] | null | null | null | {% extends "note.py" %}
{# Child template of note.py: adds a scale selector (scaleList EnumProperty
   built from scale_names) alongside the inherited note selector, draws it
   in the UI, and appends the tonic/scale values to args_ when their
   sockets are unused. Jinja comments emit no output. #}
{% block imports %}
{{ super() }}
from {{ menu_levels[2:] }} constant_def import scale_names
scale_items = [(k,k,'') for k in scale_names ]
{% endblock %}
{%- block classMembers %}
{{ super() }}
scaleList : EnumProperty(name="Scale", items = scale_items, default = ':major', update=propertyChanged)
{%- endblock %}
{%- block draw %}
{{ super() }}
layout.prop(self, "scaleList")
{%- endblock %}
{%- block extra_input %}{% endblock %}
{%- block post_create %}
{{ macro.hideInput(count_args, post_args ) }}
{{ super() }}
{%- endblock %}
{%- block checkEnum %}
if not s["tonic"].isUsed:
yield "args_.append(str(self.noteList))"
if not s["scale"].isUsed:
yield "args_.append(str(self.scaleList ))"
{%- endblock %}
{%- block execode %}
{{ macro.arg_join() }}
{{ macro.inline_send() }}
{%- endblock -%} | 23.368421 | 108 | 0.586712 | {% extends "note.py" %}
{# Child template of note.py: adds a scale selector (scaleList EnumProperty
   built from scale_names) alongside the inherited note selector, draws it
   in the UI, and appends the tonic/scale values to args_ when their
   sockets are unused. Jinja comments emit no output. #}
{% block imports %}
{{ super() }}
from {{ menu_levels[2:] }} constant_def import scale_names
scale_items = [(k,k,'') for k in scale_names ]
{% endblock %}
{%- block classMembers %}
{{ super() }}
scaleList : EnumProperty(name="Scale", items = scale_items, default = ':major', update=propertyChanged)
{%- endblock %}
{%- block draw %}
{{ super() }}
layout.prop(self, "scaleList")
{%- endblock %}
{%- block extra_input %}{% endblock %}
{%- block post_create %}
{{ macro.hideInput(count_args, post_args ) }}
{{ super() }}
{%- endblock %}
{%- block checkEnum %}
if not s["tonic"].isUsed:
yield "args_.append(str(self.noteList))"
if not s["scale"].isUsed:
yield "args_.append(str(self.scaleList ))"
{%- endblock %}
{%- block execode %}
{{ macro.arg_join() }}
{{ macro.inline_send() }}
{%- endblock -%} | 0 | 0 | 0 |
11aa3369de078a964d9fe5cc1767b393eecbc2c0 | 1,011 | py | Python | api/migrations/0001_initial.py | City-of-Turku/munpalvelut_backend | 9baa530f2f3405322f74ccc145641148f253341b | [
"MIT"
] | null | null | null | api/migrations/0001_initial.py | City-of-Turku/munpalvelut_backend | 9baa530f2f3405322f74ccc145641148f253341b | [
"MIT"
] | null | null | null | api/migrations/0001_initial.py | City-of-Turku/munpalvelut_backend | 9baa530f2f3405322f74ccc145641148f253341b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-03 13:22
from __future__ import unicode_literals
from django.db import migrations, models
| 30.636364 | 114 | 0.579624 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-03 13:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ApiKey and AuthToken tables.

    Auto-generated by Django 1.9.2 (see the file header comment). Avoid
    editing the field definitions by hand; later migrations depend on them.
    """
    # First migration of this app, so it has no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ApiKey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('key', models.CharField(max_length=128, unique=True)),
                ('active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='AuthToken',
            fields=[
                # The token string itself is the primary key (no auto id column).
                ('key', models.CharField(max_length=40, primary_key=True, serialize=False, verbose_name='Key')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
            ],
        ),
    ]
| 0 | 833 | 23 |
6a5db8c16fcab6275810b911e49f796fc3a324e1 | 2,844 | py | Python | erp_app/models.py | bryanrios/Django-ERP-Apache-License | 51ae1ee15e8816e0f726850f5d143330ef1d4b42 | [
"Apache-2.0"
] | null | null | null | erp_app/models.py | bryanrios/Django-ERP-Apache-License | 51ae1ee15e8816e0f726850f5d143330ef1d4b42 | [
"Apache-2.0"
] | null | null | null | erp_app/models.py | bryanrios/Django-ERP-Apache-License | 51ae1ee15e8816e0f726850f5d143330ef1d4b42 | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
| 38.958904 | 77 | 0.737342 | from django.db import models
# Create your models here.
class Customers(models.Model):
    """A customer record with separate billing and shipping addresses."""
    # Name components and identification as printed on documents.
    title = models.CharField(max_length=200)
    first_name = models.CharField(max_length=200)
    middle_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    suffix = models.CharField(max_length=200)
    email = models.CharField(max_length=200)
    company = models.CharField(max_length=200)
    display_name = models.CharField(max_length=200)
    print_on_check_as = models.CharField(max_length=200)
    # Billing address (2-letter state code, up to 10-char ZIP).
    billing_street = models.CharField(max_length=200)
    billing_city = models.CharField(max_length=200)
    billing_state = models.CharField(max_length=2)
    billing_zip = models.CharField(max_length=10)
    billing_country = models.CharField(max_length=200)
    # Shipping address, same shape as the billing address above.
    shipping_street = models.CharField(max_length=200)
    shipping_city = models.CharField(max_length=200)
    shipping_state = models.CharField(max_length=2)
    shipping_zip = models.CharField(max_length=10)
    shipping_country = models.CharField(max_length=200)
    other_details = models.CharField(max_length=500)
    def __unicode__(self):
        # NOTE(review): __unicode__ is only honoured on Python 2; add __str__
        # (or @python_2_unicode_compatible) if Python 3 support is needed.
        return self.first_name + " " + self.last_name
class Products(models.Model):
    """A sellable product with a unit price."""
    name = models.CharField(max_length=500)
    description = models.CharField(max_length=500)
    # Unit price; DecimalField avoids float rounding in money arithmetic.
    price = models.DecimalField(max_digits=20, decimal_places=2)
    def __unicode__(self):
        # Python 2 string representation (shown in the admin, shells, etc.).
        return self.name
class Orders(models.Model):
    """An invoice/order placed by a customer, with its key dates."""
    customer = models.ForeignKey(Customers)
    invoice_number = models.IntegerField()
    invoice_creation_date = models.DateField('Invoice Created Date')
    delivery_due_date = models.DateField('Delivery Due Date')
    payment_due_date = models.DateField('Payment Due Date')
    custom_message = models.TextField()
    # Line items are modelled explicitly through Orders_Products rows instead
    # of this many-to-many shortcut, which was left disabled.
    # purchases = models.ManyToManyField(Products, through='Orders_Products')
class Orders_Products(models.Model):
    """Join table linking an order to a product with a quantity (a line item)."""
    order = models.ForeignKey(Orders)
    product = models.ForeignKey(Products)
    quantity = models.IntegerField(default=0)
    def cost(self):
        """Return the total cost of this line item (quantity * unit price).

        Bug fix: the FK field is named ``product``, so ``self.product_id`` is
        the raw integer primary key and has no ``price`` attribute; follow the
        relation to the related Products row instead.
        """
        costs = self.quantity * self.product.price
        return costs
class General_Settings(models.Model):
    """Application-wide settings: the business owner's name and address.

    NOTE(review): presumably only a single row of this model is expected --
    confirm against the views that read it.
    """
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    company = models.CharField(max_length=200)
    street = models.CharField(max_length=200)
    city = models.CharField(max_length=200)
    state = models.CharField(max_length=2)
    zip_code = models.CharField(max_length=10)
class Expenses(models.Model):
    """A single business expense payment."""
    expense_name = models.CharField(max_length=200)
    description = models.CharField(max_length=500)
    date_paid = models.DateField('Expenses Paid Date')
    # Decimal for exact money arithmetic.
    amount_paid = models.DecimalField(max_digits=20, decimal_places=2)
    def __unicode__(self):
        return self.expense_name
| 190 | 2,446 | 151 |
98260fe09209a487e2122cf77b7cd06ba72652f7 | 13,712 | py | Python | astviewer/tree.py | titusjan/astviewer | 3e3954c3deeba94cf12c6749bd29e0518bfcd974 | [
"MIT"
] | 105 | 2015-02-22T09:13:09.000Z | 2021-12-20T05:09:48.000Z | astviewer/tree.py | titusjan/astviewer | 3e3954c3deeba94cf12c6749bd29e0518bfcd974 | [
"MIT"
] | 2 | 2016-10-30T12:31:39.000Z | 2017-09-05T17:09:16.000Z | astviewer/tree.py | titusjan/astviewer | 3e3954c3deeba94cf12c6749bd29e0518bfcd974 | [
"MIT"
] | 13 | 2015-03-28T13:07:34.000Z | 2021-08-03T01:30:44.000Z | """ Contains the tree widgdet
"""
from __future__ import print_function
import ast, logging
import os.path
from astviewer.iconfactory import IconFactory
from astviewer.misc import class_name, check_class
from astviewer.qtpy import QtCore, QtGui, QtWidgets
from astviewer.toggle_column_mixin import ToggleColumnTreeWidget
from astviewer.version import DEBUGGING
logger = logging.getLogger(__name__)
# Indices into the (line, column) position tuples used throughout this module.
IDX_LINE, IDX_COL = 0, 1
# Custom Qt item-data roles. ROLE_POS and ROLE_START_POS share the same role id,
# but they are stored in different columns (COL_POS vs COL_HIGHLIGHT), so they
# never clash.
ROLE_POS = QtCore.Qt.UserRole
ROLE_START_POS = QtCore.Qt.UserRole
ROLE_END_POS = QtCore.Qt.UserRole + 1
# The widget inherits from a Qt class, therefore it has many
# ancestors public methods and attributes.
# pylint: disable=R0901, R0902, R0904, W0201, R0913
def cmpIdx(idx0, idx1):
    """ Three-way comparison of two, possibly open-ended, indices.

        Returns negative if idx0 < idx1, zero if idx0 == idx1 and strictly positive
        if idx0 > idx1. If idx0 or idx1 equals -1 or None, it is interpreted as the
        last element in a list and thus larger than any positive integer.

        :param idx0: positive int, -1 or None
        :param idx1: positive int, -1 or None
        :return: int
    """
    assert idx0 is None or idx0 == -1 or idx0 >= 0, \
        "Idx0 should be None, -1 or >= 0. Got: {!r}".format(idx0)
    assert idx1 is None or idx1 == -1 or idx1 >= 0, \
        "Idx1 should be None, -1 or >= 0. Got: {!r}".format(idx1)

    # Normalize: -1 is handled the same way as None (i.e. "end of list").
    if idx0 == -1:
        idx0 = None
    if idx1 == -1:
        idx1 = None

    if idx0 == idx1:
        return 0
    elif idx1 is None:
        # idx0 is finite, idx1 is open-ended, so idx0 is smaller.
        return -1
    elif idx0 is None:
        return 1
    else:
        return -1 if idx0 < idx1 else 1
def cmpPos(pos0, pos1):
    """ Three-way comparison of two (line, column) positions.

        Returns negative if pos0 < pos1, zero if pos0 == pos1 and strictly positive
        if pos0 > pos1. Line numbers are compared first; columns only break ties.
        A line or column value of -1 or None is interpreted as the last element in
        a list and therefore larger than any positive integer (see cmpIdx).

        :param pos0: (line, col) tuple; each element a positive int, -1 or None
        :param pos1: (line, col) tuple; each element a positive int, -1 or None
        :return: int
    """
    # Compare the line numbers first; fall back to the columns on a tie.
    cmpLineNr = cmpIdx(pos0[IDX_LINE], pos1[IDX_LINE])
    if cmpLineNr != 0:
        return cmpLineNr
    return cmpIdx(pos0[IDX_COL], pos1[IDX_COL])
class SyntaxTreeWidget(ToggleColumnTreeWidget):
    """ Tree widget that holds the AST.
    """
    # Column labels; the COL_* constants below are the matching column indices.
    HEADER_LABELS = ["Node", "Field", "Class", "Value", "Line : Col", "Highlight"]
    (COL_NODE, COL_FIELD, COL_CLASS, COL_VALUE, COL_POS, COL_HIGHLIGHT) = range(len(HEADER_LABELS))
    def __init__(self, parent=None):
        """ Constructor
        """
        super(SyntaxTreeWidget, self).__init__(parent=parent)
        self.setAlternatingRowColors(True)
        self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.setUniformRowHeights(True)
        self.setAnimated(False)
        self.setHeaderLabels(SyntaxTreeWidget.HEADER_LABELS)
        tree_header = self.header()
        self.add_header_context_menu(checked={'Node': True}, checkable={'Node': True},
                                     enabled={'Node': False})
        # Don't stretch last column, it doesn't play nice when columns hidden and then shown again.
        tree_header.setStretchLastSection(False)
        self.icon_factory = IconFactory.singleton()
        self.row_size_hint = QtCore.QSize()
        self.row_size_hint.setHeight(20)
        self.setIconSize(QtCore.QSize(20, 20))
    def sizeHint(self):
        """ The recommended size for the widget.
        """
        size = QtCore.QSize()
        size.setWidth(600)
        size.setHeight(700)
        return size
    @QtCore.Slot()
    def expand_reset(self, tree_item=None):
        """ Expands/collapses all nodes as they were at program start up.
        """
        if tree_item is None:
            tree_item = self.invisibleRootItem()
        field = tree_item.text(SyntaxTreeWidget.COL_FIELD)
        klass = tree_item.text(SyntaxTreeWidget.COL_CLASS)
        # Only 'body' fields and Module/ClassDef nodes start out expanded.
        tree_item.setExpanded(field == 'body' or
                              klass in ('Module', 'ClassDef'))
        # Expand children recursively
        for childIdx in range(tree_item.childCount()):
            self.expand_reset(tree_item.child(childIdx))
    @QtCore.Slot(int, int)
    def select_node(self, line_nr, column_nr):
        """ Selects the node given a line and column number.
        """
        found_item = self.find_item(self.invisibleRootItem(), (line_nr, column_nr))
        self.setCurrentItem(found_item) # Unselects if found_item is None
    def get_item_span(self, tree_item):
        """ Returns (start_pos, end_pos) tuple where start_pos and end_pos, in turn, are (line, col)
            tuples
        """
        start_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS)
        end_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS)
        return (start_pos, end_pos)
    def find_item(self, tree_item, position):
        """ Finds the deepest node item that highlights the position at line_nr column_nr, and
            has a position defined itself.
            :param tree_item: look within this QTreeWidgetItem and its child items
            :param position: (line_nr, column_nr) tuple
        """
        check_class(position, tuple)
        item_pos = tree_item.data(SyntaxTreeWidget.COL_POS, ROLE_POS)
        # NOTE(review): tuple() raises TypeError if the stored data is None, which
        # would make the 'is not None' guards below unreachable -- confirm that
        # populate() always assigns a highlight span before this is called.
        item_start_pos = tuple(tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS))
        item_end_pos = tuple(tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS))
        # See if one of the children matches (prefer the deepest match)
        for childIdx in range(tree_item.childCount()):
            child_item = tree_item.child(childIdx)
            found_node = self.find_item(child_item, position)
            if found_node is not None:
                return found_node
        # If start_pos < position < end_pos the current node matches.
        if item_start_pos is not None and item_end_pos is not None:
            if item_pos is not None and item_start_pos < position < item_end_pos:
                return tree_item
        # No matching node found in this subtree
        return None
    def populate(self, syntax_tree, last_pos, root_label=''):
        """ Populates the tree widget.
            :param syntax_tree: result of the ast.parse() function
            :param last_pos: (line, col) position just past the end of the parsed source
            :param root_label: used to set the label and tooltip of the root node
        """
        self.clear()
        def add_node(ast_node, parent_item, field_label):
            """ Helper function that recursively adds nodes.
                :param parent_item: The parent QTreeWidgetItem to which this node will be added
                :param field_label: Labels how this node is known to the parent
                :return: the QTreeWidgetItem that corresponds to the root item of the AST
            """
            node_item = QtWidgets.QTreeWidgetItem(parent_item)
            # Recursively descent the AST
            if isinstance(ast_node, ast.AST):
                value_str = ''
                node_str = "{} = {}".format(field_label, class_name(ast_node))
                node_item.setIcon(SyntaxTreeWidget.COL_NODE,
                                  self.icon_factory.getIcon(IconFactory.AST_NODE))
                if hasattr(ast_node, 'lineno'):
                    node_item.setData(SyntaxTreeWidget.COL_POS, ROLE_POS,
                                      (ast_node.lineno, ast_node.col_offset))
                for key, val in ast.iter_fields(ast_node):
                    add_node(val, node_item, key)
            elif isinstance(ast_node, (list, tuple)):
                value_str = ''
                node_str = "{} = {}".format(field_label, class_name(ast_node))
                node_item.setIcon(SyntaxTreeWidget.COL_NODE,
                                  self.icon_factory.getIcon(IconFactory.LIST_NODE))
                for idx, elem in enumerate(ast_node):
                    add_node(elem, node_item, "{}[{:d}]".format(field_label, idx))
            else:
                # Leaf value (identifier, constant, ...): show its repr.
                value_str = repr(ast_node)
                node_str = "{} = {}".format(field_label, value_str)
                node_item.setIcon(SyntaxTreeWidget.COL_NODE,
                                  self.icon_factory.getIcon(IconFactory.PY_NODE))
            node_item.setText(SyntaxTreeWidget.COL_NODE, node_str)
            node_item.setText(SyntaxTreeWidget.COL_FIELD, field_label)
            node_item.setText(SyntaxTreeWidget.COL_CLASS, class_name(ast_node))
            node_item.setText(SyntaxTreeWidget.COL_VALUE, value_str)
            node_item.setToolTip(SyntaxTreeWidget.COL_NODE, node_str)
            node_item.setToolTip(SyntaxTreeWidget.COL_FIELD, field_label)
            node_item.setToolTip(SyntaxTreeWidget.COL_CLASS, class_name(ast_node))
            node_item.setToolTip(SyntaxTreeWidget.COL_VALUE, value_str)
            # To force icon size in Python 2 (not needed)
            #node_item.setSizeHint(SyntaxTreeWidget.COL_NODE, self.row_size_hint)
            return node_item
        # End of helper function
        root_item = add_node(syntax_tree, self, root_label)
        root_item.setToolTip(SyntaxTreeWidget.COL_NODE, os.path.realpath(root_label))
        self._populate_highlighting_pass_1(self.invisibleRootItem(), last_pos)
        self._populate_highlighting_pass_2(self.invisibleRootItem())
        self._populate_text_from_data(self.invisibleRootItem())
        return root_item
    def _populate_highlighting_pass_1(self, tree_item, last_pos):
        """ Fills the highlight span for items that have a position defined. (pass 1)
            Walk depth-first and backwards through the nodes, so that we can keep track of the
            end of the span (last_pos)
        """
        max_last_pos = last_pos # The maximum last_pos at this level of recursion.
        for childIdx in range(tree_item.childCount(), 0, -1):
            child_item = tree_item.child(childIdx-1)
            children_last_pos = self._populate_highlighting_pass_1(child_item, last_pos)
            # Decorator nodes seem to be out-of order in the tree. They occur after the body but
            # their line number is smaller. This messes up the highlight spans so we don't
            # propagate their value
            if tree_item.text(SyntaxTreeWidget.COL_FIELD) != u'decorator_list':
                last_pos = children_last_pos
        pos = tree_item.data(SyntaxTreeWidget.COL_POS, ROLE_POS)
        if pos is not None:
            last_pos = pos
        assert last_pos is not None
        assert max_last_pos is not None
        cmp = cmpPos(last_pos, max_last_pos)
        if cmp < 0:
            # Normal case: this node spans from its own position to the end seen so far.
            tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS, last_pos)
            tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS, max_last_pos)
        elif cmp > 0:
            # The node positions (line-nr, col) are not always in increasing order when traversing
            # the tree. This may result in highlight spans where the start pos is larger than the
            # end pos.
            logger.info("Nodes out of order. Invalid highlighting {}:{} : {}:{} ({})"
                        .format(last_pos[0], last_pos[1], max_last_pos[0], max_last_pos[1],
                                tree_item.text(SyntaxTreeWidget.COL_NODE)))
            tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS, last_pos)
            tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS, max_last_pos)
            if DEBUGGING:
                tree_item.setForeground(SyntaxTreeWidget.COL_HIGHLIGHT,
                                        QtGui.QBrush(QtGui.QColor('red')))
        else:
            pass # No new position found in the children. These nodes will be filled in later.
        return last_pos
    @QtCore.Slot()
    def _populate_highlighting_pass_2(self, tree_item, parent_start_pos=None, parent_end_pos=None):
        """ Fill in the nodes that don't have a highlighting from their parent
        """
        start_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS)
        end_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS)
        # If the highlight span is still undefined use the value from the parent.
        if start_pos is None and end_pos is None:
            start_pos = parent_start_pos
            end_pos = parent_end_pos
            tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS, start_pos)
            tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS, end_pos)
        # Populate children recursively
        for childIdx in range(tree_item.childCount()):
            self._populate_highlighting_pass_2(tree_item.child(childIdx), start_pos, end_pos)
    def _populate_text_from_data(self, tree_item):
        """ Fills the pos and highlight columns given the underlying data.
        """
        # Update the pos column
        pos = tree_item.data(SyntaxTreeWidget.COL_POS, ROLE_POS)
        if pos is None:
            text = ""
        else:
            text = "{0[0]}:{0[1]}".format(pos)
        tree_item.setText(SyntaxTreeWidget.COL_POS, text)
        # Update the highlight column
        start_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS)
        end_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS)
        text = ""
        if start_pos is not None:
            text += "{0[0]}:{0[1]}".format(start_pos)
        if end_pos is not None:
            text += " : {0[0]}:{0[1]}".format(end_pos)
        tree_item.setText(SyntaxTreeWidget.COL_HIGHLIGHT, text)
        # Recursively populate
        for childIdx in range(tree_item.childCount()):
            child_item = tree_item.child(childIdx)
            self._populate_text_from_data(child_item)
| 39.744928 | 100 | 0.641044 | """ Contains the tree widgdet
"""
from __future__ import print_function
import ast, logging
import os.path
from astviewer.iconfactory import IconFactory
from astviewer.misc import class_name, check_class
from astviewer.qtpy import QtCore, QtGui, QtWidgets
from astviewer.toggle_column_mixin import ToggleColumnTreeWidget
from astviewer.version import DEBUGGING
logger = logging.getLogger(__name__)
IDX_LINE, IDX_COL = 0, 1
ROLE_POS = QtCore.Qt.UserRole
ROLE_START_POS = QtCore.Qt.UserRole
ROLE_END_POS = QtCore.Qt.UserRole + 1
# The widget inherits from a Qt class, therefore it has many
# ancestors public methods and attributes.
# pylint: disable=R0901, R0902, R0904, W0201, R0913
def cmpIdx(idx0, idx1):
""" Returns negative if idx0 < idx1, zero if idx0 == idx1 and strictly positive if idx0 > idx1.
If an idx0 or idx1 equals -1 or None, it is interpreted as the last element in a list
and thus larger than a positive integer
:param idx0: positive int, -1 or None
:param idx2: positive int, -1 or None
:return: int
"""
assert idx0 is None or idx0 == -1 or idx0 >=0, \
"Idx0 should be None, -1 or >= 0. Got: {!r}".format(idx0)
assert idx1 is None or idx1 == -1 or idx1 >=0, \
"Idx1 should be None, -1 or >= 0. Got: {!r}".format(idx1)
# Handle -1 the same way as None
if idx0 == -1:
idx0 = None
if idx1 == -1:
idx1 = None
if idx0 == idx1:
return 0
elif idx1 is None:
return -1
elif idx0 is None:
return 1
else:
return -1 if idx0 < idx1 else 1
def cmpPos(pos0, pos1):
""" Returns negative if pos0 < pos1, zero if pos0 == pos1 and strictly positive if pos0 > pos1.
If an index equals -1 or None, it is interpreted as the last element in a list and
therefore larger than a positive integer
:param pos0: positive int, -1 or None
:param pos2: positive int, -1 or None
:return: int
"""
cmpLineNr = cmpIdx(pos0[0], pos1[0])
if cmpLineNr != 0:
return cmpLineNr
else:
return cmpIdx(pos0[1], pos1[1])
class SyntaxTreeWidget(ToggleColumnTreeWidget):
""" Tree widget that holds the AST.
"""
HEADER_LABELS = ["Node", "Field", "Class", "Value", "Line : Col", "Highlight"]
(COL_NODE, COL_FIELD, COL_CLASS, COL_VALUE, COL_POS, COL_HIGHLIGHT) = range(len(HEADER_LABELS))
def __init__(self, parent=None):
""" Constructor
"""
super(SyntaxTreeWidget, self).__init__(parent=parent)
self.setAlternatingRowColors(True)
self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.setUniformRowHeights(True)
self.setAnimated(False)
self.setHeaderLabels(SyntaxTreeWidget.HEADER_LABELS)
tree_header = self.header()
self.add_header_context_menu(checked={'Node': True}, checkable={'Node': True},
enabled={'Node': False})
# Don't stretch last column, it doesn't play nice when columns hidden and then shown again.
tree_header.setStretchLastSection(False)
self.icon_factory = IconFactory.singleton()
self.row_size_hint = QtCore.QSize()
self.row_size_hint.setHeight(20)
self.setIconSize(QtCore.QSize(20, 20))
def sizeHint(self):
""" The recommended size for the widget.
"""
size = QtCore.QSize()
size.setWidth(600)
size.setHeight(700)
return size
@QtCore.Slot()
def expand_reset(self, tree_item=None):
""" Expands/collapses all nodes as they were at program start up.
"""
if tree_item is None:
tree_item = self.invisibleRootItem()
field = tree_item.text(SyntaxTreeWidget.COL_FIELD)
klass = tree_item.text(SyntaxTreeWidget.COL_CLASS)
tree_item.setExpanded(field == 'body' or
klass in ('Module', 'ClassDef'))
# Expand children recursively
for childIdx in range(tree_item.childCount()):
self.expand_reset(tree_item.child(childIdx))
@QtCore.Slot(int, int)
def select_node(self, line_nr, column_nr):
""" Selects the node given a line and column number.
"""
found_item = self.find_item(self.invisibleRootItem(), (line_nr, column_nr))
self.setCurrentItem(found_item) # Unselects if found_item is None
def get_item_span(self, tree_item):
""" Returns (start_pos, end_pos) tuple where start_pos and end_pos, in turn, are (line, col)
tuples
"""
start_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS)
end_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS)
return (start_pos, end_pos)
def find_item(self, tree_item, position):
""" Finds the deepest node item that highlights the position at line_nr column_nr, and
has a position defined itself.
:param tree_item: look within this QTreeWidgetItem and its child items
:param position: (line_nr, column_nr) tuple
"""
check_class(position, tuple)
item_pos = tree_item.data(SyntaxTreeWidget.COL_POS, ROLE_POS)
item_start_pos = tuple(tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS))
item_end_pos = tuple(tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS))
# See if one of the children matches
for childIdx in range(tree_item.childCount()):
child_item = tree_item.child(childIdx)
found_node = self.find_item(child_item, position)
if found_node is not None:
return found_node
# If start_pos < position < end_pos the current node matches.
if item_start_pos is not None and item_end_pos is not None:
if item_pos is not None and item_start_pos < position < item_end_pos:
return tree_item
# No matching node found in this subtree
return None
def populate(self, syntax_tree, last_pos, root_label=''):
""" Populates the tree widget.
:param syntax_tree: result of the ast.parse() function
:param file_name: used to set the label of the root_node
"""
self.clear()
def add_node(ast_node, parent_item, field_label):
""" Helper function that recursively adds nodes.
:param parent_item: The parent QTreeWidgetItem to which this node will be added
:param field_label: Labels how this node is known to the parent
:return: the QTreeWidgetItem that corresonds to the root item of the AST
"""
node_item = QtWidgets.QTreeWidgetItem(parent_item)
# Recursively descent the AST
if isinstance(ast_node, ast.AST):
value_str = ''
node_str = "{} = {}".format(field_label, class_name(ast_node))
node_item.setIcon(SyntaxTreeWidget.COL_NODE,
self.icon_factory.getIcon(IconFactory.AST_NODE))
if hasattr(ast_node, 'lineno'):
node_item.setData(SyntaxTreeWidget.COL_POS, ROLE_POS,
(ast_node.lineno, ast_node.col_offset))
for key, val in ast.iter_fields(ast_node):
add_node(val, node_item, key)
elif isinstance(ast_node, (list, tuple)):
value_str = ''
node_str = "{} = {}".format(field_label, class_name(ast_node))
node_item.setIcon(SyntaxTreeWidget.COL_NODE,
self.icon_factory.getIcon(IconFactory.LIST_NODE))
for idx, elem in enumerate(ast_node):
add_node(elem, node_item, "{}[{:d}]".format(field_label, idx))
else:
value_str = repr(ast_node)
node_str = "{} = {}".format(field_label, value_str)
node_item.setIcon(SyntaxTreeWidget.COL_NODE,
self.icon_factory.getIcon(IconFactory.PY_NODE))
node_item.setText(SyntaxTreeWidget.COL_NODE, node_str)
node_item.setText(SyntaxTreeWidget.COL_FIELD, field_label)
node_item.setText(SyntaxTreeWidget.COL_CLASS, class_name(ast_node))
node_item.setText(SyntaxTreeWidget.COL_VALUE, value_str)
node_item.setToolTip(SyntaxTreeWidget.COL_NODE, node_str)
node_item.setToolTip(SyntaxTreeWidget.COL_FIELD, field_label)
node_item.setToolTip(SyntaxTreeWidget.COL_CLASS, class_name(ast_node))
node_item.setToolTip(SyntaxTreeWidget.COL_VALUE, value_str)
# To force icon size in Python 2 (not needed)
#node_item.setSizeHint(SyntaxTreeWidget.COL_NODE, self.row_size_hint)
return node_item
# End of helper function
root_item = add_node(syntax_tree, self, root_label)
root_item.setToolTip(SyntaxTreeWidget.COL_NODE, os.path.realpath(root_label))
self._populate_highlighting_pass_1(self.invisibleRootItem(), last_pos)
self._populate_highlighting_pass_2(self.invisibleRootItem())
self._populate_text_from_data(self.invisibleRootItem())
return root_item
def _populate_highlighting_pass_1(self, tree_item, last_pos):
""" Fills the highlight span for items that have a position defined. (pass 1)
Walk depth-first and backwards through the nodes, so that we can keep track of the
end of the span (last_pos)
"""
max_last_pos = last_pos # The maximum last_pos at this level of recursion.
for childIdx in range(tree_item.childCount(), 0, -1):
child_item = tree_item.child(childIdx-1)
children_last_pos = self._populate_highlighting_pass_1(child_item, last_pos)
# Decorator nodes seem to be out-of order in the tree. They occur after the body but
# their line number is smaller. This messes up the highlight spans so we don't
# propagate their value
if tree_item.text(SyntaxTreeWidget.COL_FIELD) != u'decorator_list':
last_pos = children_last_pos
pos = tree_item.data(SyntaxTreeWidget.COL_POS, ROLE_POS)
if pos is not None:
last_pos = pos
assert last_pos is not None
assert max_last_pos is not None
cmp = cmpPos(last_pos, max_last_pos)
if cmp < 0:
tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS, last_pos)
tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS, max_last_pos)
elif cmp > 0:
# The node positions (line-nr, col) are not always in increasing order when traversing
# the tree. This may result in highlight spans where the start pos is larger than the
# end pos.
logger.info("Nodes out of order. Invalid highlighting {}:{} : {}:{} ({})"
.format(last_pos[0], last_pos[1], max_last_pos[0], max_last_pos[1],
tree_item.text(SyntaxTreeWidget.COL_NODE)))
tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS, last_pos)
tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS, max_last_pos)
if DEBUGGING:
tree_item.setForeground(SyntaxTreeWidget.COL_HIGHLIGHT,
QtGui.QBrush(QtGui.QColor('red')))
else:
pass # No new position found in the children. These nodes will be filled in later.
return last_pos
@QtCore.Slot()
def _populate_highlighting_pass_2(self, tree_item, parent_start_pos=None, parent_end_pos=None):
""" Fill in the nodes that don't have a highlighting from their parent
"""
start_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS)
end_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS)
# If the highlight span is still undefined use the value from the parent.
if start_pos is None and end_pos is None:
start_pos = parent_start_pos
end_pos = parent_end_pos
tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS, start_pos)
tree_item.setData(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS, end_pos)
# Populate children recursively
for childIdx in range(tree_item.childCount()):
self._populate_highlighting_pass_2(tree_item.child(childIdx), start_pos, end_pos)
def _populate_text_from_data(self, tree_item):
""" Fills the pos and highlight columns given the underlying data.
"""
# Update the pos column
pos = tree_item.data(SyntaxTreeWidget.COL_POS, ROLE_POS)
if pos is None:
text = ""
else:
text = "{0[0]}:{0[1]}".format(pos)
tree_item.setText(SyntaxTreeWidget.COL_POS, text)
# Update the highlight column
start_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_START_POS)
end_pos = tree_item.data(SyntaxTreeWidget.COL_HIGHLIGHT, ROLE_END_POS)
text = ""
if start_pos is not None:
text += "{0[0]}:{0[1]}".format(start_pos)
if end_pos is not None:
text += " : {0[0]}:{0[1]}".format(end_pos)
tree_item.setText(SyntaxTreeWidget.COL_HIGHLIGHT, text)
# Recursively populate
for childIdx in range(tree_item.childCount()):
child_item = tree_item.child(childIdx)
self._populate_text_from_data(child_item)
| 0 | 0 | 0 |
ba9317a90817824e8fff0f9568251496292de06c | 44,174 | py | Python | venv/lib/python2.7/site-packages/jinja/parser.py | mallika2011/Recreating-VLabs-CSO-Experiment | 307849d66072b3504b9f1bf914007eaf9a03a1dc | [
"MIT"
] | 7 | 2016-05-08T22:32:57.000Z | 2019-07-16T09:54:45.000Z | venv/lib/python2.7/site-packages/jinja/parser.py | mallika2011/Recreating-VLabs-CSO-Experiment | 307849d66072b3504b9f1bf914007eaf9a03a1dc | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/jinja/parser.py | mallika2011/Recreating-VLabs-CSO-Experiment | 307849d66072b3504b9f1bf914007eaf9a03a1dc | [
"MIT"
] | 2 | 2017-01-22T14:43:33.000Z | 2020-11-02T14:35:19.000Z | # -*- coding: utf-8 -*-
"""
jinja.parser
~~~~~~~~~~~~
Implements the template parser.
The Jinja template parser is not a real parser but a combination of the
python compiler package and some postprocessing. The tokens yielded by
the lexer are used to separate template data and expressions. The
expression tokens are then converted into strings again and processed
by the python parser.
:copyright: 2007 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from jinja import nodes
from jinja.datastructure import StateTest
from jinja.exceptions import TemplateSyntaxError
from jinja.utils import set
__all__ = ['Parser']
# general callback functions for the parser
# (StateTest instances used as terminators when subparsing nested sections)
end_of_block = StateTest.expect_token('block_end',
                                      msg='expected end of block tag')
end_of_variable = StateTest.expect_token('variable_end',
                                         msg='expected end of variable')
end_of_comment = StateTest.expect_token('comment_end',
                                        msg='expected end of comment')
# internal tag callbacks
# (the "switch_*" tests accept the intermediate tags too, e.g. {% else %})
switch_for = StateTest.expect_token('else', 'endfor')
end_of_for = StateTest.expect_token('endfor')
switch_if = StateTest.expect_token('else', 'elif', 'endif')
end_of_if = StateTest.expect_token('endif')
end_of_filter = StateTest.expect_token('endfilter')
end_of_macro = StateTest.expect_token('endmacro')
end_of_call = StateTest.expect_token('endcall')
end_of_block_tag = StateTest.expect_token('endblock')
end_of_trans = StateTest.expect_token('endtrans')
# this ends a tuple
tuple_edge_tokens = set(['rparen', 'block_end', 'variable_end', 'in',
                         'recursive'])
class Parser(object):
"""
The template parser class.
Transforms sourcecode into an abstract syntax tree.
"""
def parse_raw_directive(self):
"""
Handle fake raw directive. (real raw directives are handled by
the lexer. But if there are arguments to raw or the end tag
is missing the parser tries to resolve this directive. In that
case present the user a useful error message.
"""
if self.stream:
raise TemplateSyntaxError('raw directive does not support '
'any arguments.', self.stream.lineno,
self.filename)
raise TemplateSyntaxError('missing end tag for raw directive.',
self.stream.lineno, self.filename)
def parse_extends_directive(self):
"""
Handle the extends directive used for inheritance.
"""
raise TemplateSyntaxError('mispositioned extends tag. extends must '
'be the first tag of a template.',
self.stream.lineno, self.filename)
    def parse_for_loop(self):
        """
        Handle a for directive and return a ForLoop node.

        Grammar: {% for <target> in <sequence> [recursive] %} body
                 [{% else %} else-body] {% endfor %}
        """
        token = self.stream.expect('for')
        # The assignment target, e.g. ``item`` or ``key, value``.
        item = self.parse_tuple_expression(simplified=True)
        if not item.allows_assignments():
            raise TemplateSyntaxError('cannot assign to expression',
                                      token.lineno, self.filename)
        self.stream.expect('in')
        # The iterable the loop runs over.
        seq = self.parse_tuple_expression()
        # Optional ``recursive`` modifier after the sequence.
        if self.stream.current.type == 'recursive':
            self.stream.next()
            recursive = True
        else:
            recursive = False
        self.stream.expect('block_end')
        body = self.subparse(switch_for)
        # do we have an else section?
        if self.stream.current.type == 'else':
            self.stream.next()
            self.stream.expect('block_end')
            else_ = self.subparse(end_of_for, True)
        else:
            # No else part: consume the 'endfor' token instead.
            self.stream.next()
            else_ = None
        self.stream.expect('block_end')
        return nodes.ForLoop(item, seq, body, else_, recursive,
                             token.lineno, self.filename)
    def parse_if_condition(self):
        """
        Handle if/else blocks.

        Collects (condition, body) pairs for the ``if`` and every ``elif``
        branch into ``tests``, plus an optional ``else`` body.
        """
        token = self.stream.expect('if')
        expr = self.parse_expression()
        self.stream.expect('block_end')
        tests = [(expr, self.subparse(switch_if))]
        else_ = None
        # do we have an else section?
        while True:
            if self.stream.current.type == 'else':
                self.stream.next()
                self.stream.expect('block_end')
                else_ = self.subparse(end_of_if, True)
            elif self.stream.current.type == 'elif':
                self.stream.next()
                expr = self.parse_expression()
                self.stream.expect('block_end')
                tests.append((expr, self.subparse(switch_if)))
                # Keep scanning: more elif/else branches may follow.
                continue
            else:
                # Current token is 'endif': consume it and stop.
                self.stream.next()
                break
            self.stream.next()
            break
        self.stream.expect('block_end')
        return nodes.IfCondition(tests, else_, token.lineno, self.filename)
def parse_cycle_directive(self):
"""
Handle {% cycle foo, bar, baz %}.
"""
token = self.stream.expect('cycle')
expr = self.parse_tuple_expression()
self.stream.expect('block_end')
return nodes.Cycle(expr, token.lineno, self.filename)
    def parse_set_directive(self):
        """
        Handle {% set foo = 'value of foo' %}.

        A trailing ``!`` (bang token) after the value marks the assignment
        as non-local instead of template-local.
        """
        token = self.stream.expect('set')
        name = self.stream.expect('name')
        # Reject reserved/invalid identifiers.
        self.test_name(name.value)
        self.stream.expect('assign')
        value = self.parse_expression()
        if self.stream.current.type == 'bang':
            self.stream.next()
            scope_local = False
        else:
            scope_local = True
        self.stream.expect('block_end')
        return nodes.Set(name.value, value, scope_local,
                         token.lineno, self.filename)
def parse_filter_directive(self):
    """
    Handle {% filter foo|bar %} directives.
    """
    token = self.stream.expect('filter')
    filters = []
    # collect (name, args) pairs until the opening tag ends; note that
    # `token` is rebound to the last filter name inside the loop, so
    # the Filter node reports that token's line number
    while self.stream.current.type != 'block_end':
        if filters:
            self.stream.expect('pipe')
        token = self.stream.expect('name')
        args = []
        if self.stream.current.type == 'lparen':
            self.stream.next()
            while self.stream.current.type != 'rparen':
                if args:
                    self.stream.expect('comma')
                args.append(self.parse_expression())
            self.stream.expect('rparen')
        filters.append((token.value, args))
    self.stream.expect('block_end')
    body = self.subparse(end_of_filter, True)
    self.stream.expect('block_end')
    return nodes.Filter(body, filters, token.lineno, self.filename)
def parse_print_directive(self):
    """
    Handle {% print foo %}.

    The block form of a variable expression; the tuple expression
    after ``print`` is wrapped in a Print node.
    """
    print_token = self.stream.expect('print')
    value = self.parse_tuple_expression()
    rv = nodes.Print(value, print_token.lineno, self.filename)
    self.stream.expect('block_end')
    return rv
def parse_macro_directive(self):
    """
    Handle {% macro foo bar, baz %} as well as
    {% macro foo(bar, baz) %}.
    """
    token = self.stream.expect('macro')
    macro_name = self.stream.expect('name')
    self.test_name(macro_name.value)
    # both argument list forms are allowed; the closing token we scan
    # for depends on whether a parenthesis opened the list
    if self.stream.current.type == 'lparen':
        self.stream.next()
        needle_token = 'rparen'
    else:
        needle_token = 'block_end'
    args = []
    while self.stream.current.type != needle_token:
        if args:
            self.stream.expect('comma')
        name = self.stream.expect('name').value
        self.test_name(name)
        if self.stream.current.type == 'assign':
            self.stream.next()
            default = self.parse_expression()
        else:
            # argument without a default value
            default = None
        args.append((name, default))
    # consume the needle token (rparen or block_end)
    self.stream.next()
    if needle_token == 'rparen':
        self.stream.expect('block_end')
    body = self.subparse(end_of_macro, True)
    self.stream.expect('block_end')
    return nodes.Macro(macro_name.value, args, body, token.lineno,
                       self.filename)
def parse_call_directive(self):
    """
    Handle {% call foo() %}...{% endcall %}.

    Parses the call expression from the opening tag and the enclosed
    body up to the endcall tag.
    """
    call_token = self.stream.expect('call')
    call_expr = self.parse_call_expression()
    self.stream.expect('block_end')
    call_body = self.subparse(end_of_call, True)
    self.stream.expect('block_end')
    return nodes.Call(call_expr, call_body, call_token.lineno,
                      self.filename)
def parse_block_directive(self):
    """
    Handle block directives used for inheritance.
    """
    token = self.stream.expect('block')
    name = self.stream.expect('name').value
    # check if this block does not exist by now.
    if name in self.blocks:
        raise TemplateSyntaxError('block %r defined twice' %
                                  name, token.lineno,
                                  self.filename)
    self.blocks.add(name)
    if self.stream.current.type != 'block_end':
        # short form: {% block name expr %} -- the expression becomes
        # the printed body and no {% endblock %} follows
        lineno = self.stream.lineno
        expr = self.parse_tuple_expression()
        node = nodes.Print(expr, lineno, self.filename)
        body = nodes.NodeList([node], lineno, self.filename)
        self.stream.expect('block_end')
    else:
        # otherwise parse the body and attach it to the block
        self.stream.expect('block_end')
        body = self.subparse(end_of_block_tag, True)
        self.stream.expect('block_end')
    return nodes.Block(name, body, token.lineno, self.filename)
def parse_include_directive(self):
    """
    Handle the include directive used for template inclusion.

    Only a plain string literal is accepted as the template name.
    """
    include_token = self.stream.expect('include')
    target = self.stream.expect('string').value
    self.stream.expect('block_end')
    return nodes.Include(target, include_token.lineno, self.filename)
def parse_trans_directive(self):
    """
    Handle translatable sections.

    Supports the string form ({% trans "foo" %}) and the block form
    with variable assignments, an optional {% pluralize %} divider and
    a closing {% endtrans %}.
    """
    trans_token = self.stream.expect('trans')

    # string based translations {% trans "foo" %}
    if self.stream.current.type == 'string':
        text = self.stream.expect('string')
        self.stream.expect('block_end')
        return nodes.Trans(text.value, None, None, None,
                           trans_token.lineno, self.filename)

    # block based translations: parse the optional variable
    # assignments in the opening tag first
    replacements = {}
    plural_var = None
    while self.stream.current.type != 'block_end':
        if replacements:
            self.stream.expect('comma')
        name = self.stream.expect('name')
        if self.stream.current.type == 'assign':
            self.stream.next()
            value = self.parse_expression()
        else:
            # shortcut: {% trans foo %} binds foo to the variable foo
            value = nodes.NameExpression(name.value, name.lineno,
                                         self.filename)
        if name.value in replacements:
            raise TemplateSyntaxError('translation variable %r '
                                      'is defined twice' % name.value,
                                      name.lineno, self.filename)
        replacements[name.value] = value
        # the first declared variable doubles as the default
        # pluralization variable
        if plural_var is None:
            plural_var = name.value
    self.stream.expect('block_end')

    def process_variable():
        # Restored helper -- the nested function was stripped from this
        # copy of the file.  Inside a trans block only bare names that
        # were registered in `replacements` may be interpolated; they
        # are emitted as python %(name)s placeholders because the
        # literal text is %%-escaped below.  Message text reconstructed;
        # confirm against upstream Jinja 1 if exact wording matters.
        token = self.stream.current
        if token.type != 'name' or token.value not in replacements:
            raise TemplateSyntaxError('unregistered translation '
                                      'variable %r' % token.value,
                                      self.stream.lineno, self.filename)
        buf.append('%%(%s)s' % token.value)
        self.stream.next()

    # `buf` is rebound to `plural` once a pluralize tag is seen; the
    # closure above always sees the current binding
    buf = singular = []
    plural = None

    while True:
        token = self.stream.current
        if token.type == 'data':
            # escape python string formatting signs in literal data
            buf.append(token.value.replace('%', '%%'))
            self.stream.next()
        elif token.type == 'variable_begin':
            self.stream.next()
            process_variable()
            self.stream.expect('variable_end')
        elif token.type == 'block_begin':
            self.stream.next()
            if plural is None and self.stream.current.type == 'pluralize':
                self.stream.next()
                # an optional name selects the pluralization variable
                if self.stream.current.type == 'name':
                    plural_var = self.stream.expect('name').value
                plural = buf = []
            elif self.stream.current.type == 'endtrans':
                self.stream.next()
                self.stream.expect('block_end')
                break
            else:
                if self.no_variable_block:
                    # lexers without a variable block deliver variables
                    # as block tags instead
                    process_variable()
                else:
                    raise TemplateSyntaxError('blocks are not allowed '
                                              'in trans tags',
                                              self.stream.lineno,
                                              self.filename)
            self.stream.expect('block_end')
        else:
            assert False, 'something very strange happened'

    singular = u''.join(singular)
    if plural is not None:
        plural = u''.join(plural)
    return nodes.Trans(singular, plural, plural_var, replacements,
                       trans_token.lineno, self.filename)
def parse_expression(self):
    """
    Parse one full expression from the stream.

    The expression grammar is a precedence chain; its lowest
    precedence entry point is the conditional expression.
    """
    return self.parse_conditional_expression()
def parse_subscribed_expression(self):
    """
    Like parse_expression but parses slices too. Because this
    parsing function requires a border the two tokens rbracket
    and comma mark the end of the expression in some situations.
    """
    lineno = self.stream.lineno
    if self.stream.current.type == 'colon':
        # slice with omitted start, e.g. [:stop]
        self.stream.next()
        args = [None]
    else:
        node = self.parse_expression()
        if self.stream.current.type != 'colon':
            # plain subscript, not a slice
            return node
        self.stream.next()
        args = [node]
    # optional stop part of the slice
    if self.stream.current.type == 'colon':
        args.append(None)
    elif self.stream.current.type not in ('rbracket', 'comma'):
        args.append(self.parse_expression())
    else:
        args.append(None)
    # optional step part (after a second colon)
    if self.stream.current.type == 'colon':
        self.stream.next()
        if self.stream.current.type not in ('rbracket', 'comma'):
            args.append(self.parse_expression())
        else:
            args.append(None)
    else:
        args.append(None)
    # args is [start, stop, step] with None for omitted parts
    return nodes.SliceExpression(*(args + [lineno, self.filename]))
def parse_conditional_expression(self):
    """
    Parse a conditional expression (foo if bar else baz)
    """
    lineno = self.stream.lineno
    expr1 = self.parse_or_expression()
    while self.stream.current.type == 'if':
        self.stream.next()
        expr2 = self.parse_or_expression()
        self.stream.expect('else')
        # the else branch recurses, making `a if b else c if d else e`
        # group to the right
        expr3 = self.parse_conditional_expression()
        # note the argument order: (condition, then-value, else-value)
        # -- presumably nodes.ConditionalExpression takes the test
        # first; confirm against the nodes module
        expr1 = nodes.ConditionalExpression(expr2, expr1, expr3,
                                            lineno, self.filename)
        lineno = self.stream.lineno
    return expr1
def parse_or_expression(self):
    """
    Parse a left associative chain of ``or`` operators,
    e.g. {{ foo or bar }}.
    """
    pos = self.stream.lineno
    node = self.parse_and_expression()
    while self.stream.current.type == 'or':
        self.stream.next()
        node = nodes.OrExpression(node, self.parse_and_expression(),
                                  pos, self.filename)
        pos = self.stream.lineno
    return node
def parse_and_expression(self):
    """
    Parse a left associative chain of ``and`` operators,
    e.g. {{ foo and bar }}.
    """
    pos = self.stream.lineno
    node = self.parse_compare_expression()
    while self.stream.current.type == 'and':
        self.stream.next()
        node = nodes.AndExpression(node, self.parse_compare_expression(),
                                   pos, self.filename)
        pos = self.stream.lineno
    return node
def parse_compare_expression(self):
    """
    Parse something like {{ foo == bar }}.
    """
    known_operators = set(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq', 'in'])
    lineno = self.stream.lineno
    expr = self.parse_add_expression()
    ops = []
    # chained comparisons (a < b < c) are collected as a list of
    # (operator, operand) pairs on one CompareExpression node
    while True:
        if self.stream.current.type in known_operators:
            op = self.stream.current.type
            self.stream.next()
            ops.append([op, self.parse_add_expression()])
        elif self.stream.current.type == 'not' and \
             self.stream.look().type == 'in':
            # the two token operator `not in` needs one lookahead
            self.stream.skip(2)
            ops.append(['not in', self.parse_add_expression()])
        else:
            break
    if not ops:
        # no comparison operator at all: pass the expression through
        return expr
    return nodes.CompareExpression(expr, ops, lineno, self.filename)
def parse_add_expression(self):
    """
    Parse a left associative chain of ``+`` operators,
    e.g. {{ foo + bar }}.
    """
    pos = self.stream.lineno
    node = self.parse_sub_expression()
    while self.stream.current.type == 'add':
        self.stream.next()
        node = nodes.AddExpression(node, self.parse_sub_expression(),
                                   pos, self.filename)
        pos = self.stream.lineno
    return node
def parse_sub_expression(self):
    """
    Parse a left associative chain of ``-`` operators,
    e.g. {{ foo - bar }}.
    """
    pos = self.stream.lineno
    node = self.parse_concat_expression()
    while self.stream.current.type == 'sub':
        self.stream.next()
        node = nodes.SubExpression(node, self.parse_concat_expression(),
                                   pos, self.filename)
        pos = self.stream.lineno
    return node
def parse_concat_expression(self):
    """
    Parse a chain of ``~`` concatenations, e.g. {{ foo ~ bar }}.

    A single operand is passed through unchanged; two or more are
    collected into one ConcatExpression node.
    """
    pos = self.stream.lineno
    operands = [self.parse_mul_expression()]
    while self.stream.current.type == 'tilde':
        self.stream.next()
        operands.append(self.parse_mul_expression())
    if len(operands) == 1:
        return operands[0]
    return nodes.ConcatExpression(operands, pos, self.filename)
def parse_mul_expression(self):
    """
    Parse a left associative chain of ``*`` operators,
    e.g. {{ foo * bar }}.
    """
    pos = self.stream.lineno
    node = self.parse_div_expression()
    while self.stream.current.type == 'mul':
        self.stream.next()
        node = nodes.MulExpression(node, self.parse_div_expression(),
                                   pos, self.filename)
        pos = self.stream.lineno
    return node
def parse_div_expression(self):
    """
    Parse a left associative chain of ``/`` operators,
    e.g. {{ foo / bar }}.
    """
    pos = self.stream.lineno
    node = self.parse_floor_div_expression()
    while self.stream.current.type == 'div':
        self.stream.next()
        node = nodes.DivExpression(node,
                                   self.parse_floor_div_expression(),
                                   pos, self.filename)
        pos = self.stream.lineno
    return node
def parse_floor_div_expression(self):
    """
    Parse a left associative chain of ``//`` operators,
    e.g. {{ foo // bar }}.
    """
    pos = self.stream.lineno
    node = self.parse_mod_expression()
    while self.stream.current.type == 'floordiv':
        self.stream.next()
        node = nodes.FloorDivExpression(node,
                                        self.parse_mod_expression(),
                                        pos, self.filename)
        pos = self.stream.lineno
    return node
def parse_mod_expression(self):
    """
    Parse a left associative chain of ``%`` operators,
    e.g. {{ foo % bar }}.
    """
    pos = self.stream.lineno
    node = self.parse_pow_expression()
    while self.stream.current.type == 'mod':
        self.stream.next()
        node = nodes.ModExpression(node, self.parse_pow_expression(),
                                   pos, self.filename)
        pos = self.stream.lineno
    return node
def parse_pow_expression(self):
    """
    Parse a chain of ``**`` operators, e.g. {{ foo ** bar }}.
    """
    pos = self.stream.lineno
    node = self.parse_unary_expression()
    while self.stream.current.type == 'pow':
        self.stream.next()
        node = nodes.PowExpression(node, self.parse_unary_expression(),
                                   pos, self.filename)
        pos = self.stream.lineno
    return node
def parse_unary_expression(self):
    """
    Parse all kinds of unary expressions (``not``, ``-`` and ``+``);
    anything else falls through to a primary expression.
    """
    dispatch = {
        'not': self.parse_not_expression,
        'sub': self.parse_neg_expression,
        'add': self.parse_pos_expression
    }
    handler = dispatch.get(self.stream.current.type)
    if handler is not None:
        return handler()
    return self.parse_primary_expression()
def parse_not_expression(self):
    """
    Parse something like {{ not foo }}.
    """
    not_token = self.stream.expect('not')
    operand = self.parse_unary_expression()
    return nodes.NotExpression(operand, not_token.lineno, self.filename)
def parse_neg_expression(self):
    """
    Parse something like {{ -foo }}.
    """
    sub_token = self.stream.expect('sub')
    operand = self.parse_unary_expression()
    return nodes.NegExpression(operand, sub_token.lineno, self.filename)
def parse_pos_expression(self):
    """
    Parse something like {{ +foo }}.
    """
    add_token = self.stream.expect('add')
    operand = self.parse_unary_expression()
    return nodes.PosExpression(operand, add_token.lineno, self.filename)
def parse_primary_expression(self, parse_postfix=True):
    """
    Parse a primary expression such as a name or literal.

    With parse_postfix enabled (the default) any trailing subscripts,
    calls, filters and tests are attached to the parsed node as well.
    """
    current = self.stream.current
    if current.type == 'name':
        # names need further dispatch because the special constants
        # and the gettext shorthand share the name token type
        if current.value in ('true', 'false'):
            node = self.parse_bool_expression()
        elif current.value == 'none':
            node = self.parse_none_expression()
        elif current.value == 'undefined':
            node = self.parse_undefined_expression()
        elif current.value == '_':
            node = self.parse_gettext_call()
        else:
            node = self.parse_name_expression()
    elif current.type in ('integer', 'float'):
        node = self.parse_number_expression()
    elif current.type == 'string':
        node = self.parse_string_expression()
    elif current.type == 'regex':
        node = self.parse_regex_expression()
    elif current.type == 'lparen':
        node = self.parse_paren_expression()
    elif current.type == 'lbracket':
        node = self.parse_list_expression()
    elif current.type == 'lbrace':
        node = self.parse_dict_expression()
    elif current.type == 'at':
        node = self.parse_set_expression()
    else:
        raise TemplateSyntaxError("unexpected token '%s'" %
                                  self.stream.current,
                                  self.stream.current.lineno,
                                  self.filename)
    if parse_postfix:
        node = self.parse_postfix_expression(node)
    return node
def parse_tuple_expression(self, enforce=False, simplified=False):
    """
    Parse multiple expressions into a tuple. This can also return
    just one expression which is not a tuple. If you want to enforce
    a tuple, pass it enforce=True.

    With simplified=True only primary expressions are accepted as
    items (used e.g. for for-loop targets).
    """
    lineno = self.stream.lineno
    if simplified:
        parse = self.parse_primary_expression
    else:
        parse = self.parse_expression
    args = []
    is_tuple = False
    while True:
        if args:
            self.stream.expect('comma')
        # a border token right after the comma means a trailing comma
        # ended the tuple (tuple_edge_tokens is module level)
        if self.stream.current.type in tuple_edge_tokens:
            break
        args.append(parse())
        if self.stream.current.type == 'comma':
            # at least one comma seen: this is a real tuple
            is_tuple = True
        else:
            break
    if not is_tuple and args:
        if enforce:
            raise TemplateSyntaxError('tuple expected', lineno,
                                      self.filename)
        # single expression without comma: unwrap it
        return args[0]
    return nodes.TupleExpression(args, lineno, self.filename)
def parse_bool_expression(self):
    """
    Parse a boolean literal (``true`` or ``false``).
    """
    token = self.stream.expect('name')
    try:
        value = {'true': True, 'false': False}[token.value]
    except KeyError:
        raise TemplateSyntaxError("expected boolean literal",
                                  token.lineno, self.filename)
    return nodes.ConstantExpression(value, token.lineno, self.filename)
def parse_none_expression(self):
    """
    Parse a ``none`` literal into a constant None expression.
    """
    none_token = self.stream.expect('name', 'none')
    return nodes.ConstantExpression(None, none_token.lineno,
                                    self.filename)
def parse_undefined_expression(self):
    """
    Parse an ``undefined`` literal.
    """
    undef_token = self.stream.expect('name', 'undefined')
    return nodes.UndefinedExpression(undef_token.lineno, self.filename)
def parse_gettext_call(self):
    """
    Parse {{ _('foo') }} style gettext calls.
    """
    # XXX: check if only one argument was passed and if
    # it is a string literal. Maybe that should become a special
    # expression anyway.
    underscore = self.stream.expect('name', '_')
    callee = nodes.NameExpression(underscore.value, underscore.lineno,
                                  self.filename)
    return self.parse_call_expression(callee)
def parse_name_expression(self):
    """
    Parse any plain name (rejecting the special constants).
    """
    name_token = self.stream.expect('name')
    self.test_name(name_token.value)
    return nodes.NameExpression(name_token.value, name_token.lineno,
                                self.filename)
def parse_number_expression(self):
    """
    Parse an integer or float literal.
    """
    token = self.stream.current
    if token.type == 'integer' or token.type == 'float':
        self.stream.next()
        return nodes.ConstantExpression(token.value, token.lineno,
                                        self.filename)
    raise TemplateSyntaxError('integer or float literal expected',
                              token.lineno, self.filename)
def parse_string_expression(self):
    """
    Parse a string literal into a constant expression.
    """
    string_token = self.stream.expect('string')
    return nodes.ConstantExpression(string_token.value,
                                    string_token.lineno, self.filename)
def parse_regex_expression(self):
    """
    Parse a regex literal.
    """
    regex_token = self.stream.expect('regex')
    return nodes.RegexExpression(regex_token.value, regex_token.lineno,
                                 self.filename)
def parse_paren_expression(self):
    """
    Parse a parenthized expression.
    """
    self.stream.expect('lparen')
    try:
        return self.parse_tuple_expression()
    finally:
        # NOTE(review): the finally clause runs on failure too, so a
        # parse error inside the parentheses can be replaced by the
        # rparen expectation error raised here
        self.stream.expect('rparen')
def parse_list_expression(self):
    """
    Parse a list literal such as {{ [1, 2, "three"] }}.  A trailing
    comma before the closing bracket is allowed.
    """
    open_token = self.stream.expect('lbracket')
    elements = []
    while self.stream.current.type != 'rbracket':
        if elements:
            self.stream.expect('comma')
        # support for trailing comma
        if self.stream.current.type == 'rbracket':
            break
        elements.append(self.parse_expression())
    self.stream.expect('rbracket')
    return nodes.ListExpression(elements, open_token.lineno,
                                self.filename)
def parse_dict_expression(self):
    """
    Parse a dict literal such as {{ {1: 2, 3: 4} }}.  A trailing
    comma before the closing brace is allowed.
    """
    open_token = self.stream.expect('lbrace')
    pairs = []
    while self.stream.current.type != 'rbrace':
        if pairs:
            self.stream.expect('comma')
        # support for trailing comma
        if self.stream.current.type == 'rbrace':
            break
        key = self.parse_expression()
        self.stream.expect('colon')
        pairs.append((key, self.parse_expression()))
    self.stream.expect('rbrace')
    return nodes.DictExpression(pairs, open_token.lineno, self.filename)
def parse_set_expression(self):
    """
    Parse a set literal such as {{ @(1, 2, 3) }}.  A trailing comma
    before the closing parenthesis is allowed.
    """
    at_token = self.stream.expect('at')
    self.stream.expect('lparen')
    elements = []
    while self.stream.current.type != 'rparen':
        if elements:
            self.stream.expect('comma')
        # support for trailing comma
        if self.stream.current.type == 'rparen':
            break
        elements.append(self.parse_expression())
    self.stream.expect('rparen')
    return nodes.SetExpression(elements, at_token.lineno, self.filename)
def parse_postfix_expression(self, node):
    """
    Wrap an already parsed node with any number of postfix
    constructs: attribute/item access, calls, filters and ``is``
    tests.
    """
    while True:
        token_type = self.stream.current.type
        if token_type in ('dot', 'lbracket'):
            node = self.parse_subscript_expression(node)
        elif token_type == 'lparen':
            node = self.parse_call_expression(node)
        elif token_type == 'pipe':
            node = self.parse_filter_expression(node)
        elif token_type == 'is':
            node = self.parse_test_expression(node)
        else:
            return node
def parse_subscript_expression(self, node):
    """
    Parse a subscript statement. Gets attributes and items from an
    object.
    """
    lineno = self.stream.lineno
    if self.stream.current.type == 'dot':
        # attribute style access: foo.bar or foo.0
        self.stream.next()
        token = self.stream.current
        if token.type in ('name', 'integer'):
            arg = nodes.ConstantExpression(token.value, token.lineno,
                                           self.filename)
        else:
            raise TemplateSyntaxError('expected name or number',
                                      token.lineno, self.filename)
        self.stream.next()
    elif self.stream.current.type == 'lbracket':
        # item / slice access: foo[...]; multiple comma separated
        # subscripts collapse into one tuple argument
        self.stream.next()
        args = []
        while self.stream.current.type != 'rbracket':
            if args:
                self.stream.expect('comma')
            args.append(self.parse_subscribed_expression())
        self.stream.expect('rbracket')
        if len(args) == 1:
            arg = args[0]
        else:
            arg = nodes.TupleExpression(args, lineno, self.filename)
    else:
        # bugfix: the parser object has no `lineno` attribute -- the
        # current position lives on the token stream.  The old code
        # would have raised AttributeError instead of a syntax error.
        raise TemplateSyntaxError('expected subscript expression',
                                  self.stream.lineno, self.filename)
    return nodes.SubscriptExpression(node, arg, lineno, self.filename)
def parse_call_expression(self, node=None):
    """
    Parse a call.

    If *node* is None the callable itself is parsed first (without
    postfix handling so the lparen is still on the stream).
    """
    if node is None:
        node = self.parse_primary_expression(parse_postfix=False)
    token = self.stream.expect('lparen')
    args = []
    kwargs = []
    dyn_args = None
    dyn_kwargs = None
    require_comma = False

    def ensure(expr):
        # Restored helper -- the nested function was stripped from
        # this copy of the file.  All argument ordering rules funnel
        # through here: *args/**kwargs may appear once and must come
        # last, keyword arguments may not precede positional ones.
        if not expr:
            raise TemplateSyntaxError('invalid syntax for function '
                                      'call expression', token.lineno,
                                      self.filename)

    while self.stream.current.type != 'rparen':
        if require_comma:
            self.stream.expect('comma')
            # support for trailing comma
            if self.stream.current.type == 'rparen':
                break
        if self.stream.current.type == 'mul':
            ensure(dyn_args is None and dyn_kwargs is None)
            self.stream.next()
            dyn_args = self.parse_expression()
        elif self.stream.current.type == 'pow':
            ensure(dyn_kwargs is None)
            self.stream.next()
            dyn_kwargs = self.parse_expression()
        else:
            ensure(dyn_args is None and dyn_kwargs is None)
            if self.stream.current.type == 'name' and \
               self.stream.look().type == 'assign':
                # keyword argument: name=expr
                key = self.stream.current.value
                self.stream.skip(2)
                kwargs.append((key, self.parse_expression()))
            else:
                # positional argument; forbidden after keywords
                ensure(not kwargs)
                args.append(self.parse_expression())
        require_comma = True
    self.stream.expect('rparen')
    return nodes.CallExpression(node, args, kwargs, dyn_args,
                                dyn_kwargs, token.lineno,
                                self.filename)
def parse_filter_expression(self, node):
    """
    Parse filter calls.
    """
    lineno = self.stream.lineno
    filters = []
    # each iteration consumes one `|name` or `|name(args)` segment
    while self.stream.current.type == 'pipe':
        self.stream.next()
        token = self.stream.expect('name')
        args = []
        if self.stream.current.type == 'lparen':
            self.stream.next()
            while self.stream.current.type != 'rparen':
                if args:
                    self.stream.expect('comma')
                args.append(self.parse_expression())
            self.stream.expect('rparen')
        filters.append((token.value, args))
    return nodes.FilterExpression(node, filters, lineno, self.filename)
def parse_test_expression(self, node):
    """
    Parse test calls.
    """
    token = self.stream.expect('is')
    if self.stream.current.type == 'not':
        # `is not foo` negates the whole test expression
        self.stream.next()
        negated = True
    else:
        negated = False
    name = self.stream.expect('name').value
    args = []
    if self.stream.current.type == 'lparen':
        # explicit argument list, e.g. is divisibleby(3)
        self.stream.next()
        while self.stream.current.type != 'rparen':
            if args:
                self.stream.expect('comma')
            args.append(self.parse_expression())
        self.stream.expect('rparen')
    elif self.stream.current.type in ('name', 'string', 'integer',
                                      'float', 'lparen', 'lbracket',
                                      'lbrace', 'regex'):
        # implicit single argument form, e.g. is divisibleby 3
        args.append(self.parse_expression())
    node = nodes.TestExpression(node, name, args, token.lineno,
                                self.filename)
    if negated:
        node = nodes.NotExpression(node, token.lineno, self.filename)
    return node
def test_name(self, name):
"""
Test if a name is not a special constant
"""
if name in ('true', 'false', 'none', 'undefined', '_'):
raise TemplateSyntaxError('expected name not special constant',
self.stream.lineno, self.filename)
def subparse(self, test, drop_needle=False):
    """
    Helper function used to parse the sourcecode until the test
    function which is passed a tuple in the form (lineno, token, data)
    returns True. In that case the current token is pushed back to
    the stream and the generator ends.

    The test function is only called for the first token after a
    block tag. Variable tags are *not* aliases for {% print %} in
    that case.

    If drop_needle is True the needle_token is removed from the
    stream.
    """
    if self.closed:
        raise RuntimeError('parser is closed')
    result = []
    buffer = []
    next = self.stream.next
    lineno = self.stream.lineno

    # The following nested helpers were stripped from this copy of the
    # file (they are called below but were missing); restored here.

    def push_variable():
        # queue a {{ ... }} expression for the current text run
        buffer.append((True, self.parse_tuple_expression()))

    def push_data():
        # queue a literal template data token
        buffer.append((False, self.stream.expect('data')))

    def push_buffer():
        # collapse the queued (is_variable, data) pairs into a single
        # Text node: literal data is %%-escaped and variables become
        # %s placeholders.  NOTE(review): confirm the nodes.Text
        # signature (text, variables, lineno, filename) against the
        # nodes module.
        if not buffer:
            return
        template = []
        variables = []
        for is_var, data in buffer:
            if is_var:
                template.append('%s')
                variables.append(data)
            else:
                template.append(data.value.replace('%', '%%'))
        result.append(nodes.Text(u''.join(template), variables,
                                 lineno, self.filename))
        del buffer[:]

    def push_node(node):
        # flush pending text before appending a structural node
        push_buffer()
        result.append(node)

    def assemble_list():
        push_buffer()
        return nodes.NodeList(result, lineno, self.filename)

    while self.stream:
        token_type = self.stream.current.type
        if token_type == 'variable_begin':
            next()
            push_variable()
            self.stream.expect('variable_end')
        elif token_type == 'raw_begin':
            next()
            push_data()
            self.stream.expect('raw_end')
        elif token_type == 'block_begin':
            next()
            if test is not None and test(self.stream.current):
                if drop_needle:
                    next()
                return assemble_list()
            handler = self.directives.get(self.stream.current.type)
            if handler is None:
                if self.no_variable_block:
                    # without a variable block, unknown block tags are
                    # treated as variable expressions
                    push_variable()
                    self.stream.expect('block_end')
                elif self.stream.current.type in self.context_directives:
                    raise TemplateSyntaxError('unexpected directive %r.' %
                                              self.stream.current.type,
                                              lineno, self.filename)
                else:
                    name = self.stream.current.value
                    raise TemplateSyntaxError('unknown directive %r.' %
                                              name, lineno, self.filename)
            else:
                node = handler()
                if node is not None:
                    push_node(node)
        elif token_type == 'data':
            push_data()

        # this should be unreachable code
        else:
            assert False, "unexpected token %r" % self.stream.current

    if test is not None:
        msg = isinstance(test, StateTest) and ': ' + test.msg or ''
        raise TemplateSyntaxError('unexpected end of stream' + msg,
                                  self.stream.lineno, self.filename)

    return assemble_list()
def _sanitize_tree(self, nodelist, stack, extends, body):
    """
    This is not a closure because python leaks memory if it is. It's used
    by `parse()` to make sure blocks do not trigger unexpected behavior.
    """
    for node in nodelist:
        # in a child template (extends is set) a Block node must be
        # either at the template's top level (stack[-1] is body) or
        # nested inside another Block somewhere up the stack
        if extends is not None and \
           node.__class__ is nodes.Block and \
           stack[-1] is not body:
            for n in stack:
                if n.__class__ is nodes.Block:
                    break
            else:
                raise TemplateSyntaxError('misplaced block %r, '
                                          'blocks in child '
                                          'templates must be '
                                          'either top level or '
                                          'located in a block '
                                          'tag.' % node.name,
                                          node.lineno,
                                          self.filename)
        # depth-first walk with an explicit ancestor stack
        stack.append(node)
        self._sanitize_tree(node.get_child_nodes(), stack, extends, body)
        stack.pop()
def parse(self):
    """
    Parse the template and return a Template node. This also does some
    post processing sanitizing and parses for an extends tag.
    """
    if self.closed:
        raise RuntimeError('parser is closed')
    try:
        # get the leading whitespace, if we are not in a child
        # template we push that back to the stream later.
        leading_whitespace = self.stream.read_whitespace()

        # parse an optional extends which *must* be the first node
        # of a template.
        if self.stream.current.type == 'block_begin' and \
           self.stream.look().type == 'extends':
            self.stream.skip(2)
            extends = self.stream.expect('string').value
            self.stream.expect('block_end')
        else:
            extends = None
            if leading_whitespace:
                self.stream.shift(leading_whitespace)

        # bugfix: the sanitizer is called `_sanitize_tree` and walks
        # the tree in place with an explicit (nodelist, stack,
        # extends, body) signature; the previous code called a
        # nonexistent `sanitize_tree(body, extends)` method.
        body = self.subparse(None)
        self._sanitize_tree([body], [body], extends, body)
        return nodes.Template(extends, body, 1, self.filename)
    finally:
        self.close()
def close(self):
    """
    Drop all references to the stream, directive table, block set and
    environment so the parser (and its token stream) can be garbage
    collected.  After this the parser refuses further use.
    """
    self.closed = True
    # the original chain assigned self.stream twice; once is enough
    self.stream = self.directives = self.blocks = \
        self.environment = None
| 37.183502 | 81 | 0.539005 | # -*- coding: utf-8 -*-
"""
jinja.parser
~~~~~~~~~~~~
Implements the template parser.
The Jinja template parser is not a real parser but a combination of the
python compiler package and some postprocessing. The tokens yielded by
the lexer are used to separate template data and expressions. The
expression tokens are then converted into strings again and processed
by the python parser.
:copyright: 2007 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from jinja import nodes
from jinja.datastructure import StateTest
from jinja.exceptions import TemplateSyntaxError
from jinja.utils import set
__all__ = ['Parser']
# general callback functions for the parser.  StateTest.expect_token
# builds a predicate over the current token; `subparse` calls it with
# the first token after a block tag to find the end of a construct.
end_of_block = StateTest.expect_token('block_end',
                                      msg='expected end of block tag')
end_of_variable = StateTest.expect_token('variable_end',
                                         msg='expected end of variable')
end_of_comment = StateTest.expect_token('comment_end',
                                        msg='expected end of comment')

# internal tag callbacks: the `switch_*` tests also stop at branch
# keywords (else/elif) while `end_of_*` only match the closing tag
switch_for = StateTest.expect_token('else', 'endfor')
end_of_for = StateTest.expect_token('endfor')
switch_if = StateTest.expect_token('else', 'elif', 'endif')
end_of_if = StateTest.expect_token('endif')
end_of_filter = StateTest.expect_token('endfilter')
end_of_macro = StateTest.expect_token('endmacro')
end_of_call = StateTest.expect_token('endcall')
end_of_block_tag = StateTest.expect_token('endblock')
end_of_trans = StateTest.expect_token('endtrans')

# this ends a tuple (border tokens for parse_tuple_expression)
tuple_edge_tokens = set(['rparen', 'block_end', 'variable_end', 'in',
                         'recursive'])
class Parser(object):
"""
The template parser class.
Transforms sourcecode into an abstract syntax tree.
"""
def __init__(self, environment, source, filename=None):
    """
    Wire the parser up to *environment* and tokenize *source*.

    Python 2 era code: byte string sources are decoded using the
    environment's template charset and unicode filenames are encoded
    to UTF-8 bytes.
    """
    self.environment = environment
    if isinstance(source, str):
        source = source.decode(environment.template_charset, 'ignore')
    if isinstance(filename, unicode):
        filename = filename.encode('utf-8')
    self.source = source
    self.filename = filename
    self.closed = False

    #: set for blocks in order to keep them unique
    self.blocks = set()

    #: mapping of directives that require special treatment
    self.directives = {
        # "fake" directives that just trigger errors
        'raw': self.parse_raw_directive,
        'extends': self.parse_extends_directive,

        # real directives
        'for': self.parse_for_loop,
        'if': self.parse_if_condition,
        'cycle': self.parse_cycle_directive,
        'call': self.parse_call_directive,
        'set': self.parse_set_directive,
        'filter': self.parse_filter_directive,
        'print': self.parse_print_directive,
        'macro': self.parse_macro_directive,
        'block': self.parse_block_directive,
        'include': self.parse_include_directive,
        'trans': self.parse_trans_directive
    }

    #: set of directives that are only available in a certain
    #: context.
    self.context_directives = set([
        'elif', 'else', 'endblock', 'endfilter', 'endfor', 'endif',
        'endmacro', 'endraw', 'endtrans', 'pluralize'
    ])

    #: get the `no_variable_block` flag
    self.no_variable_block = self.environment.lexer.no_variable_block

    # tokenize eagerly; all parse_* methods consume self.stream
    self.stream = environment.lexer.tokenize(source, filename)
def parse_raw_directive(self):
    """
    Handle fake raw directive. (real raw directives are handled by
    the lexer. But if there are arguments to raw or the end tag
    is missing the parser tries to resolve this directive. In that
    case present the user a useful error message.
    """
    # reaching this handler always means a syntax error; the message
    # depends on whether tokens are left on the stream
    if self.stream:
        raise TemplateSyntaxError('raw directive does not support '
                                  'any arguments.', self.stream.lineno,
                                  self.filename)
    raise TemplateSyntaxError('missing end tag for raw directive.',
                              self.stream.lineno, self.filename)
def parse_extends_directive(self):
    """
    Handle the extends directive used for inheritance.
    """
    # `parse()` consumes a leading extends tag itself; if this handler
    # runs the tag was found anywhere else, which is always an error
    raise TemplateSyntaxError('mispositioned extends tag. extends must '
                              'be the first tag of a template.',
                              self.stream.lineno, self.filename)
def parse_for_loop(self):
    """
    Handle a for directive and return a ForLoop node
    """
    token = self.stream.expect('for')
    # simplified=True restricts the loop target to primary
    # expressions; only assignable items may appear before `in`
    item = self.parse_tuple_expression(simplified=True)
    if not item.allows_assignments():
        raise TemplateSyntaxError('cannot assign to expression',
                                  token.lineno, self.filename)
    self.stream.expect('in')
    seq = self.parse_tuple_expression()
    if self.stream.current.type == 'recursive':
        self.stream.next()
        recursive = True
    else:
        recursive = False
    self.stream.expect('block_end')
    body = self.subparse(switch_for)

    # do we have an else section?
    if self.stream.current.type == 'else':
        self.stream.next()
        self.stream.expect('block_end')
        else_ = self.subparse(end_of_for, True)
    else:
        # consume the endfor name token
        self.stream.next()
        else_ = None
    self.stream.expect('block_end')
    return nodes.ForLoop(item, seq, body, else_, recursive,
                         token.lineno, self.filename)
def parse_if_condition(self):
    """
    Handle if/else blocks.

    Collects (condition, body) pairs for the ``if`` branch and every
    ``elif`` branch, plus an optional ``else`` body.
    """
    token = self.stream.expect('if')
    expr = self.parse_expression()
    self.stream.expect('block_end')
    # each entry is a (condition, body) pair
    tests = [(expr, self.subparse(switch_if))]
    else_ = None
    # do we have an else section?
    while True:
        if self.stream.current.type == 'else':
            self.stream.next()
            self.stream.expect('block_end')
            else_ = self.subparse(end_of_if, True)
        elif self.stream.current.type == 'elif':
            self.stream.next()
            expr = self.parse_expression()
            self.stream.expect('block_end')
            tests.append((expr, self.subparse(switch_if)))
            continue
        else:
            # plain endif: consume the tag name token
            self.stream.next()
        break
    self.stream.expect('block_end')
    return nodes.IfCondition(tests, else_, token.lineno, self.filename)
def parse_cycle_directive(self):
    """
    Handle {% cycle foo, bar, baz %}.
    """
    token = self.stream.expect('cycle')
    # the comma separated expressions are the values cycled through
    expr = self.parse_tuple_expression()
    self.stream.expect('block_end')
    return nodes.Cycle(expr, token.lineno, self.filename)
def parse_set_directive(self):
    """
    Handle {% set foo = 'value of foo' %}.

    A trailing bang token marks the assignment as non local
    (scope_local=False).
    """
    token = self.stream.expect('set')
    name = self.stream.expect('name')
    # reject special constants as assignment targets
    self.test_name(name.value)
    self.stream.expect('assign')
    value = self.parse_expression()
    if self.stream.current.type == 'bang':
        self.stream.next()
        scope_local = False
    else:
        scope_local = True
    self.stream.expect('block_end')
    return nodes.Set(name.value, value, scope_local,
                     token.lineno, self.filename)
def parse_filter_directive(self):
    """
    Handle {% filter foo|bar %} directives.
    """
    token = self.stream.expect('filter')
    filters = []
    # collect (name, args) pairs; `token` is rebound to the last
    # filter name, so its line number ends up on the Filter node
    while self.stream.current.type != 'block_end':
        if filters:
            self.stream.expect('pipe')
        token = self.stream.expect('name')
        args = []
        if self.stream.current.type == 'lparen':
            self.stream.next()
            while self.stream.current.type != 'rparen':
                if args:
                    self.stream.expect('comma')
                args.append(self.parse_expression())
            self.stream.expect('rparen')
        filters.append((token.value, args))
    self.stream.expect('block_end')
    body = self.subparse(end_of_filter, True)
    self.stream.expect('block_end')
    return nodes.Filter(body, filters, token.lineno, self.filename)
def parse_print_directive(self):
    """
    Handle {% print foo %}.
    """
    token = self.stream.expect('print')
    expr = self.parse_tuple_expression()
    # block form of a variable expression
    node = nodes.Print(expr, token.lineno, self.filename)
    self.stream.expect('block_end')
    return node
def parse_macro_directive(self):
    """
    Handle {% macro foo bar, baz %} as well as
    {% macro foo(bar, baz) %}.
    """
    token = self.stream.expect('macro')
    macro_name = self.stream.expect('name')
    self.test_name(macro_name.value)
    # the closing token of the argument list depends on whether a
    # parenthesis opened it
    if self.stream.current.type == 'lparen':
        self.stream.next()
        needle_token = 'rparen'
    else:
        needle_token = 'block_end'
    args = []
    while self.stream.current.type != needle_token:
        if args:
            self.stream.expect('comma')
        name = self.stream.expect('name').value
        self.test_name(name)
        if self.stream.current.type == 'assign':
            self.stream.next()
            default = self.parse_expression()
        else:
            # argument without a default
            default = None
        args.append((name, default))
    # consume the needle token (rparen or block_end)
    self.stream.next()
    if needle_token == 'rparen':
        self.stream.expect('block_end')
    body = self.subparse(end_of_macro, True)
    self.stream.expect('block_end')
    return nodes.Macro(macro_name.value, args, body, token.lineno,
                       self.filename)
def parse_call_directive(self):
"""
Handle {% call foo() %}...{% endcall %}
"""
token = self.stream.expect('call')
expr = self.parse_call_expression()
self.stream.expect('block_end')
body = self.subparse(end_of_call, True)
self.stream.expect('block_end')
return nodes.Call(expr, body, token.lineno, self.filename)
def parse_block_directive(self):
"""
Handle block directives used for inheritance.
"""
token = self.stream.expect('block')
name = self.stream.expect('name').value
# check if this block does not exist by now.
if name in self.blocks:
raise TemplateSyntaxError('block %r defined twice' %
name, token.lineno,
self.filename)
self.blocks.add(name)
if self.stream.current.type != 'block_end':
lineno = self.stream.lineno
expr = self.parse_tuple_expression()
node = nodes.Print(expr, lineno, self.filename)
body = nodes.NodeList([node], lineno, self.filename)
self.stream.expect('block_end')
else:
# otherwise parse the body and attach it to the block
self.stream.expect('block_end')
body = self.subparse(end_of_block_tag, True)
self.stream.expect('block_end')
return nodes.Block(name, body, token.lineno, self.filename)
def parse_include_directive(self):
"""
Handle the include directive used for template inclusion.
"""
token = self.stream.expect('include')
template = self.stream.expect('string').value
self.stream.expect('block_end')
return nodes.Include(template, token.lineno, self.filename)
    def parse_trans_directive(self):
        """
        Handle translatable sections.

        Supports both the string form ``{% trans "foo" %}`` and the block
        form which may bind replacement variables and contain an optional
        ``{% pluralize %}`` section.
        """
        trans_token = self.stream.expect('trans')
        # string based translations {% trans "foo" %}
        if self.stream.current.type == 'string':
            text = self.stream.expect('string')
            self.stream.expect('block_end')
            return nodes.Trans(text.value, None, None, None,
                               trans_token.lineno, self.filename)
        # block based translations: first collect the replacement variables
        # given in the opening tag
        replacements = {}
        plural_var = None
        while self.stream.current.type != 'block_end':
            if replacements:
                self.stream.expect('comma')
            name = self.stream.expect('name')
            if self.stream.current.type == 'assign':
                self.stream.next()
                value = self.parse_expression()
            else:
                # bare variable: refers to a context name of the same name
                value = nodes.NameExpression(name.value, name.lineno,
                                             self.filename)
            if name.value in replacements:
                raise TemplateSyntaxError('translation variable %r '
                                          'is defined twice' % name.value,
                                          name.lineno, self.filename)
            replacements[name.value] = value
            # the first bound variable becomes the default pluralization
            # variable; {% pluralize var %} below may override it
            if plural_var is None:
                plural_var = name.value
        self.stream.expect('block_end')
        def process_variable():
            # helper: read one variable reference and append a %-style
            # placeholder to whatever list `buf` currently aliases
            var_name = self.stream.expect('name')
            if var_name.value not in replacements:
                raise TemplateSyntaxError('unregistered translation variable'
                                          " '%s'." % var_name.value,
                                          var_name.lineno, self.filename)
            buf.append('%%(%s)s' % var_name.value)
        # `buf` aliases `singular` first; after {% pluralize %} it is
        # rebound to the `plural` list
        buf = singular = []
        plural = None
        while True:
            token = self.stream.current
            if token.type == 'data':
                # literal text; escape raw percent signs for %-formatting
                buf.append(token.value.replace('%', '%%'))
                self.stream.next()
            elif token.type == 'variable_begin':
                self.stream.next()
                process_variable()
                self.stream.expect('variable_end')
            elif token.type == 'block_begin':
                self.stream.next()
                if plural is None and self.stream.current.type == 'pluralize':
                    # switch to collecting the plural message
                    self.stream.next()
                    if self.stream.current.type == 'name':
                        plural_var = self.stream.expect('name').value
                    plural = buf = []
                elif self.stream.current.type == 'endtrans':
                    self.stream.next()
                    self.stream.expect('block_end')
                    break
                else:
                    if self.no_variable_block:
                        # {% name %} acts as a variable reference in
                        # no-variable-block mode
                        process_variable()
                    else:
                        raise TemplateSyntaxError('blocks are not allowed '
                                                  'in trans tags',
                                                  self.stream.lineno,
                                                  self.filename)
                    self.stream.expect('block_end')
            else:
                assert False, 'something very strange happened'
        singular = u''.join(singular)
        if plural is not None:
            plural = u''.join(plural)
        return nodes.Trans(singular, plural, plural_var, replacements,
                           trans_token.lineno, self.filename)
    def parse_expression(self):
        """
        Parse one expression from the stream.
        """
        # conditional expressions sit at the lowest precedence level and
        # therefore serve as the entry point of the expression grammar
        return self.parse_conditional_expression()
    def parse_subscribed_expression(self):
        """
        Like parse_expression but parses slices too. Because this
        parsing function requires a border the two tokens rbracket
        and comma mark the end of the expression in some situations.
        """
        lineno = self.stream.lineno
        # a leading colon means a slice with no start value
        if self.stream.current.type == 'colon':
            self.stream.next()
            args = [None]
        else:
            node = self.parse_expression()
            # without a colon this is a plain (non-slice) expression
            if self.stream.current.type != 'colon':
                return node
            self.stream.next()
            args = [node]
        # second slice component (stop); None when omitted
        if self.stream.current.type == 'colon':
            args.append(None)
        elif self.stream.current.type not in ('rbracket', 'comma'):
            args.append(self.parse_expression())
        else:
            args.append(None)
        # optional third slice component (step); None when omitted
        if self.stream.current.type == 'colon':
            self.stream.next()
            if self.stream.current.type not in ('rbracket', 'comma'):
                args.append(self.parse_expression())
            else:
                args.append(None)
        else:
            args.append(None)
        return nodes.SliceExpression(*(args + [lineno, self.filename]))
def parse_conditional_expression(self):
"""
Parse a conditional expression (foo if bar else baz)
"""
lineno = self.stream.lineno
expr1 = self.parse_or_expression()
while self.stream.current.type == 'if':
self.stream.next()
expr2 = self.parse_or_expression()
self.stream.expect('else')
expr3 = self.parse_conditional_expression()
expr1 = nodes.ConditionalExpression(expr2, expr1, expr3,
lineno, self.filename)
lineno = self.stream.lineno
return expr1
def parse_or_expression(self):
"""
Parse something like {{ foo or bar }}.
"""
lineno = self.stream.lineno
left = self.parse_and_expression()
while self.stream.current.type == 'or':
self.stream.next()
right = self.parse_and_expression()
left = nodes.OrExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_and_expression(self):
"""
Parse something like {{ foo and bar }}.
"""
lineno = self.stream.lineno
left = self.parse_compare_expression()
while self.stream.current.type == 'and':
self.stream.next()
right = self.parse_compare_expression()
left = nodes.AndExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_compare_expression(self):
"""
Parse something like {{ foo == bar }}.
"""
known_operators = set(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq', 'in'])
lineno = self.stream.lineno
expr = self.parse_add_expression()
ops = []
while True:
if self.stream.current.type in known_operators:
op = self.stream.current.type
self.stream.next()
ops.append([op, self.parse_add_expression()])
elif self.stream.current.type == 'not' and \
self.stream.look().type == 'in':
self.stream.skip(2)
ops.append(['not in', self.parse_add_expression()])
else:
break
if not ops:
return expr
return nodes.CompareExpression(expr, ops, lineno, self.filename)
def parse_add_expression(self):
"""
Parse something like {{ foo + bar }}.
"""
lineno = self.stream.lineno
left = self.parse_sub_expression()
while self.stream.current.type == 'add':
self.stream.next()
right = self.parse_sub_expression()
left = nodes.AddExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_sub_expression(self):
"""
Parse something like {{ foo - bar }}.
"""
lineno = self.stream.lineno
left = self.parse_concat_expression()
while self.stream.current.type == 'sub':
self.stream.next()
right = self.parse_concat_expression()
left = nodes.SubExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_concat_expression(self):
"""
Parse something like {{ foo ~ bar }}.
"""
lineno = self.stream.lineno
args = [self.parse_mul_expression()]
while self.stream.current.type == 'tilde':
self.stream.next()
args.append(self.parse_mul_expression())
if len(args) == 1:
return args[0]
return nodes.ConcatExpression(args, lineno, self.filename)
def parse_mul_expression(self):
"""
Parse something like {{ foo * bar }}.
"""
lineno = self.stream.lineno
left = self.parse_div_expression()
while self.stream.current.type == 'mul':
self.stream.next()
right = self.parse_div_expression()
left = nodes.MulExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_div_expression(self):
"""
Parse something like {{ foo / bar }}.
"""
lineno = self.stream.lineno
left = self.parse_floor_div_expression()
while self.stream.current.type == 'div':
self.stream.next()
right = self.parse_floor_div_expression()
left = nodes.DivExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_floor_div_expression(self):
"""
Parse something like {{ foo // bar }}.
"""
lineno = self.stream.lineno
left = self.parse_mod_expression()
while self.stream.current.type == 'floordiv':
self.stream.next()
right = self.parse_mod_expression()
left = nodes.FloorDivExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_mod_expression(self):
"""
Parse something like {{ foo % bar }}.
"""
lineno = self.stream.lineno
left = self.parse_pow_expression()
while self.stream.current.type == 'mod':
self.stream.next()
right = self.parse_pow_expression()
left = nodes.ModExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_pow_expression(self):
"""
Parse something like {{ foo ** bar }}.
"""
lineno = self.stream.lineno
left = self.parse_unary_expression()
while self.stream.current.type == 'pow':
self.stream.next()
right = self.parse_unary_expression()
left = nodes.PowExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_unary_expression(self):
"""
Parse all kinds of unary expressions.
"""
if self.stream.current.type == 'not':
return self.parse_not_expression()
elif self.stream.current.type == 'sub':
return self.parse_neg_expression()
elif self.stream.current.type == 'add':
return self.parse_pos_expression()
return self.parse_primary_expression()
def parse_not_expression(self):
"""
Parse something like {{ not foo }}.
"""
token = self.stream.expect('not')
node = self.parse_unary_expression()
return nodes.NotExpression(node, token.lineno, self.filename)
def parse_neg_expression(self):
"""
Parse something like {{ -foo }}.
"""
token = self.stream.expect('sub')
node = self.parse_unary_expression()
return nodes.NegExpression(node, token.lineno, self.filename)
def parse_pos_expression(self):
"""
Parse something like {{ +foo }}.
"""
token = self.stream.expect('add')
node = self.parse_unary_expression()
return nodes.PosExpression(node, token.lineno, self.filename)
def parse_primary_expression(self, parse_postfix=True):
"""
Parse a primary expression such as a name or literal.
"""
current = self.stream.current
if current.type == 'name':
if current.value in ('true', 'false'):
node = self.parse_bool_expression()
elif current.value == 'none':
node = self.parse_none_expression()
elif current.value == 'undefined':
node = self.parse_undefined_expression()
elif current.value == '_':
node = self.parse_gettext_call()
else:
node = self.parse_name_expression()
elif current.type in ('integer', 'float'):
node = self.parse_number_expression()
elif current.type == 'string':
node = self.parse_string_expression()
elif current.type == 'regex':
node = self.parse_regex_expression()
elif current.type == 'lparen':
node = self.parse_paren_expression()
elif current.type == 'lbracket':
node = self.parse_list_expression()
elif current.type == 'lbrace':
node = self.parse_dict_expression()
elif current.type == 'at':
node = self.parse_set_expression()
else:
raise TemplateSyntaxError("unexpected token '%s'" %
self.stream.current,
self.stream.current.lineno,
self.filename)
if parse_postfix:
node = self.parse_postfix_expression(node)
return node
    def parse_tuple_expression(self, enforce=False, simplified=False):
        """
        Parse multiple expressions into a tuple. This can also return
        just one expression which is not a tuple. If you want to enforce
        a tuple, pass it enforce=True.
        """
        lineno = self.stream.lineno
        # simplified tuples only allow primary expressions as items
        if simplified:
            parse = self.parse_primary_expression
        else:
            parse = self.parse_expression
        args = []
        is_tuple = False
        while True:
            if args:
                self.stream.expect('comma')
            # a comma followed by an edge token means a trailing comma,
            # which ends the tuple
            if self.stream.current.type in tuple_edge_tokens:
                break
            args.append(parse())
            if self.stream.current.type == 'comma':
                is_tuple = True
            else:
                break
        # a single expression without trailing comma is unwrapped unless
        # the caller insisted on a tuple
        if not is_tuple and args:
            if enforce:
                raise TemplateSyntaxError('tuple expected', lineno,
                                          self.filename)
            return args[0]
        return nodes.TupleExpression(args, lineno, self.filename)
def parse_bool_expression(self):
"""
Parse a boolean literal.
"""
token = self.stream.expect('name')
if token.value == 'true':
value = True
elif token.value == 'false':
value = False
else:
raise TemplateSyntaxError("expected boolean literal",
token.lineno, self.filename)
return nodes.ConstantExpression(value, token.lineno, self.filename)
def parse_none_expression(self):
"""
Parse a none literal.
"""
token = self.stream.expect('name', 'none')
return nodes.ConstantExpression(None, token.lineno, self.filename)
def parse_undefined_expression(self):
"""
Parse an undefined literal.
"""
token = self.stream.expect('name', 'undefined')
return nodes.UndefinedExpression(token.lineno, self.filename)
def parse_gettext_call(self):
"""
parse {{ _('foo') }}.
"""
# XXX: check if only one argument was passed and if
# it is a string literal. Maybe that should become a special
# expression anyway.
token = self.stream.expect('name', '_')
node = nodes.NameExpression(token.value, token.lineno, self.filename)
return self.parse_call_expression(node)
def parse_name_expression(self):
"""
Parse any name.
"""
token = self.stream.expect('name')
self.test_name(token.value)
return nodes.NameExpression(token.value, token.lineno, self.filename)
def parse_number_expression(self):
"""
Parse a number literal.
"""
token = self.stream.current
if token.type not in ('integer', 'float'):
raise TemplateSyntaxError('integer or float literal expected',
token.lineno, self.filename)
self.stream.next()
return nodes.ConstantExpression(token.value, token.lineno, self.filename)
def parse_string_expression(self):
"""
Parse a string literal.
"""
token = self.stream.expect('string')
return nodes.ConstantExpression(token.value, token.lineno, self.filename)
def parse_regex_expression(self):
"""
Parse a regex literal.
"""
token = self.stream.expect('regex')
return nodes.RegexExpression(token.value, token.lineno, self.filename)
    def parse_paren_expression(self):
        """
        Parse a parenthized expression.
        """
        self.stream.expect('lparen')
        try:
            return self.parse_tuple_expression()
        finally:
            # the closing parenthesis is consumed on the way out, after
            # the tuple expression has been evaluated for the return
            self.stream.expect('rparen')
def parse_list_expression(self):
"""
Parse something like {{ [1, 2, "three"] }}
"""
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.ListExpression(items, token.lineno, self.filename)
def parse_dict_expression(self):
"""
Parse something like {{ {1: 2, 3: 4} }}
"""
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append((key, value))
self.stream.expect('rbrace')
return nodes.DictExpression(items, token.lineno, self.filename)
def parse_set_expression(self):
"""
Parse something like {{ @(1, 2, 3) }}.
"""
token = self.stream.expect('at')
self.stream.expect('lparen')
items = []
while self.stream.current.type != 'rparen':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rparen':
break
items.append(self.parse_expression())
self.stream.expect('rparen')
return nodes.SetExpression(items, token.lineno, self.filename)
def parse_postfix_expression(self, node):
"""
Parse a postfix expression such as a filter statement or a
function call.
"""
while True:
current = self.stream.current.type
if current == 'dot' or current == 'lbracket':
node = self.parse_subscript_expression(node)
elif current == 'lparen':
node = self.parse_call_expression(node)
elif current == 'pipe':
node = self.parse_filter_expression(node)
elif current == 'is':
node = self.parse_test_expression(node)
else:
break
return node
def parse_subscript_expression(self, node):
"""
Parse a subscript statement. Gets attributes and items from an
object.
"""
lineno = self.stream.lineno
if self.stream.current.type == 'dot':
self.stream.next()
token = self.stream.current
if token.type in ('name', 'integer'):
arg = nodes.ConstantExpression(token.value, token.lineno,
self.filename)
else:
raise TemplateSyntaxError('expected name or number',
token.lineno, self.filename)
self.stream.next()
elif self.stream.current.type == 'lbracket':
self.stream.next()
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed_expression())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.TupleExpression(args, lineno, self.filename)
else:
raise TemplateSyntaxError('expected subscript expression',
self.lineno, self.filename)
return nodes.SubscriptExpression(node, arg, lineno, self.filename)
    def parse_call_expression(self, node=None):
        """
        Parse a call.

        If `node` is None the callee itself is parsed first (as a primary
        expression without postfixes).  Supports positional arguments,
        keyword arguments, ``*args`` and ``**kwargs``, with the usual
        ordering restrictions enforced via `ensure`.
        """
        if node is None:
            node = self.parse_primary_expression(parse_postfix=False)
        token = self.stream.expect('lparen')
        args = []
        kwargs = []
        dyn_args = None
        dyn_kwargs = None
        require_comma = False
        def ensure(expr):
            # raise a syntax error when an argument-ordering invariant
            # is violated
            if not expr:
                raise TemplateSyntaxError('invalid syntax for function '
                                          'call expression', token.lineno,
                                          self.filename)
        while self.stream.current.type != 'rparen':
            if require_comma:
                self.stream.expect('comma')
                # support for trailing comma
                if self.stream.current.type == 'rparen':
                    break
            if self.stream.current.type == 'mul':
                # *args must come before **kwargs and appear only once
                ensure(dyn_args is None and dyn_kwargs is None)
                self.stream.next()
                dyn_args = self.parse_expression()
            elif self.stream.current.type == 'pow':
                # **kwargs may appear only once
                ensure(dyn_kwargs is None)
                self.stream.next()
                dyn_kwargs = self.parse_expression()
            else:
                # plain arguments are not allowed after *args/**kwargs
                ensure(dyn_args is None and dyn_kwargs is None)
                if self.stream.current.type == 'name' and \
                        self.stream.look().type == 'assign':
                    key = self.stream.current.value
                    self.stream.skip(2)
                    kwargs.append((key, self.parse_expression()))
                else:
                    # positional arguments may not follow keyword ones
                    ensure(not kwargs)
                    args.append(self.parse_expression())
            require_comma = True
        self.stream.expect('rparen')
        return nodes.CallExpression(node, args, kwargs, dyn_args,
                                    dyn_kwargs, token.lineno,
                                    self.filename)
def parse_filter_expression(self, node):
"""
Parse filter calls.
"""
lineno = self.stream.lineno
filters = []
while self.stream.current.type == 'pipe':
self.stream.next()
token = self.stream.expect('name')
args = []
if self.stream.current.type == 'lparen':
self.stream.next()
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
args.append(self.parse_expression())
self.stream.expect('rparen')
filters.append((token.value, args))
return nodes.FilterExpression(node, filters, lineno, self.filename)
def parse_test_expression(self, node):
"""
Parse test calls.
"""
token = self.stream.expect('is')
if self.stream.current.type == 'not':
self.stream.next()
negated = True
else:
negated = False
name = self.stream.expect('name').value
args = []
if self.stream.current.type == 'lparen':
self.stream.next()
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
args.append(self.parse_expression())
self.stream.expect('rparen')
elif self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace', 'regex'):
args.append(self.parse_expression())
node = nodes.TestExpression(node, name, args, token.lineno,
self.filename)
if negated:
node = nodes.NotExpression(node, token.lineno, self.filename)
return node
def test_name(self, name):
"""
Test if a name is not a special constant
"""
if name in ('true', 'false', 'none', 'undefined', '_'):
raise TemplateSyntaxError('expected name not special constant',
self.stream.lineno, self.filename)
    def subparse(self, test, drop_needle=False):
        """
        Helper function used to parse the sourcecode until the test
        function which is passed a tuple in the form (lineno, token, data)
        returns True. In that case the current token is pushed back to
        the stream and the generator ends.

        The test function is only called for the first token after a
        block tag. Variable tags are *not* aliases for {% print %} in
        that case.

        If drop_needle is True the needle_token is removed from the
        stream.
        """
        if self.closed:
            raise RuntimeError('parser is closed')
        result = []
        buffer = []
        next = self.stream.next
        lineno = self.stream.lineno
        def assemble_list():
            # flush any pending text/variables and wrap everything parsed
            # so far into a NodeList
            push_buffer()
            return nodes.NodeList(result, lineno, self.filename)
        def push_variable():
            # buffered as (is_variable, payload) tuples
            buffer.append((True, self.parse_tuple_expression()))
        def push_data():
            buffer.append((False, self.stream.expect('data')))
        def push_buffer():
            # collapse the buffered data/variable runs into a single
            # nodes.Text with a %-style format template
            if not buffer:
                return
            template = []
            variables = []
            for is_var, data in buffer:
                if is_var:
                    template.append('%s')
                    variables.append(data)
                else:
                    # escape literal percent signs for the %-template
                    template.append(data.value.replace('%', '%%'))
            result.append(nodes.Text(u''.join(template), variables,
                                     buffer[0][1].lineno, self.filename))
            del buffer[:]
        def push_node(node):
            # a directive node ends the current text run
            push_buffer()
            result.append(node)
        while self.stream:
            token_type = self.stream.current.type
            if token_type == 'variable_begin':
                next()
                push_variable()
                self.stream.expect('variable_end')
            elif token_type == 'raw_begin':
                next()
                push_data()
                self.stream.expect('raw_end')
            elif token_type == 'block_begin':
                next()
                # the needle test only applies to the first token of a
                # block tag
                if test is not None and test(self.stream.current):
                    if drop_needle:
                        next()
                    return assemble_list()
                handler = self.directives.get(self.stream.current.type)
                if handler is None:
                    if self.no_variable_block:
                        # unknown block tags act as variable tags in
                        # no-variable-block mode
                        push_variable()
                        self.stream.expect('block_end')
                    elif self.stream.current.type in self.context_directives:
                        # directive exists but only inside another tag
                        raise TemplateSyntaxError('unexpected directive %r.' %
                                                  self.stream.current.type,
                                                  lineno, self.filename)
                    else:
                        name = self.stream.current.value
                        raise TemplateSyntaxError('unknown directive %r.' %
                                                  name, lineno, self.filename)
                else:
                    # a handler may return None for tags that produce no
                    # output node (e.g. {% set %} variants)
                    node = handler()
                    if node is not None:
                        push_node(node)
            elif token_type == 'data':
                push_data()
            # this should be unreachable code
            else:
                assert False, "unexpected token %r" % self.stream.current
        # the stream ran dry while a needle was still expected
        if test is not None:
            msg = isinstance(test, StateTest) and ': ' + test.msg or ''
            raise TemplateSyntaxError('unexpected end of stream' + msg,
                                      self.stream.lineno, self.filename)
        return assemble_list()
    def sanitize_tree(self, body, extends):
        """
        Check the parsed tree for misplaced blocks (see `_sanitize_tree`)
        and return `body` unchanged.
        """
        self._sanitize_tree([body], [body], extends, body)
        return body
    def _sanitize_tree(self, nodelist, stack, extends, body):
        """
        This is not a closure because python leaks memory if it is. It's used
        by `parse()` to make sure blocks do not trigger unexpected behavior.

        Recursively walks `nodelist`, keeping the ancestor chain in `stack`,
        and raises when a child template (`extends` is set) places a block
        neither at the top level nor inside another block tag.
        """
        for node in nodelist:
            if extends is not None and \
               node.__class__ is nodes.Block and \
               stack[-1] is not body:
                # for/else: the loop only falls through to the error when
                # no ancestor on the stack is itself a Block
                for n in stack:
                    if n.__class__ is nodes.Block:
                        break
                else:
                    raise TemplateSyntaxError('misplaced block %r, '
                                              'blocks in child '
                                              'templates must be '
                                              'either top level or '
                                              'located in a block '
                                              'tag.' % node.name,
                                              node.lineno,
                                              self.filename)
            stack.append(node)
            self._sanitize_tree(node.get_child_nodes(), stack, extends, body)
            stack.pop()
    def parse(self):
        """
        Parse the template and return a Template node. This also does some
        post processing sanitizing and parses for an extends tag.

        :raises RuntimeError: if the parser was already closed.
        """
        if self.closed:
            raise RuntimeError('parser is closed')
        try:
            # get the leading whitespace, if we are not in a child
            # template we push that back to the stream later.
            leading_whitespace = self.stream.read_whitespace()
            # parse an optional extends which *must* be the first node
            # of a template.
            if self.stream.current.type == 'block_begin' and \
               self.stream.look().type == 'extends':
                self.stream.skip(2)
                extends = self.stream.expect('string').value
                self.stream.expect('block_end')
            else:
                extends = None
            if leading_whitespace:
                self.stream.shift(leading_whitespace)
            body = self.sanitize_tree(self.subparse(None), extends)
            return nodes.Template(extends, body, 1, self.filename)
        finally:
            # release parser state even when parsing failed
            self.close()
def close(self):
"""Clean up soon."""
self.closed = True
self.stream = self.directives = self.stream = self.blocks = \
self.environment = None
| 3,287 | 0 | 271 |
7ecb076b0a9bdbdbc22619e20b5da7f2efecaa80 | 21,747 | py | Python | spans/settypes.py | MichiK/spans | 1bab1c5ee9f27b698b5b1e1f849dd61641d12cfd | [
"MIT"
] | 1 | 2018-01-02T18:39:47.000Z | 2018-01-02T18:39:47.000Z | spans/settypes.py | MichiK/spans | 1bab1c5ee9f27b698b5b1e1f849dd61641d12cfd | [
"MIT"
] | null | null | null | spans/settypes.py | MichiK/spans | 1bab1c5ee9f27b698b5b1e1f849dd61641d12cfd | [
"MIT"
] | null | null | null | from itertools import chain
from ._compat import add_metaclass, fix_timedelta_repr
from ._utils import PartialOrderingMixin
from .types import Range
from .types import *
from .types import DiscreteRange, OffsetableRangeMixin
# Imports needed for doctests in date range sets
from datetime import *
__all__ = [
"intrangeset",
"floatrangeset",
"strrangeset",
"daterangeset",
"datetimerangeset",
"timedeltarangeset",
]
class MetaRangeSet(type):
    """
    A meta class for RangeSets. The purpose is to automatically add relevant
    mixins to the range set class based on what mixins and base classes the
    range class has.

    All subclasses of :class:`~spans.settypes.RangeSet` uses this class as its
    metaclass

    .. versionchanged:: 0.5.0
       Changed name from ``metarangeset`` to ``MetaRangeSet``
    """

    # Maps range mixin classes to their corresponding range set mixin classes
    mixin_map = {}

    @classmethod
    def add(cls, range_mixin, range_set_mixin):
        """
        Register a range set mixin for a range mixin.

        :param range_mixin: Range mixin class
        :param range_set_mixin: Range set mixin class
        """
        cls.mixin_map[range_mixin] = range_set_mixin

    @classmethod
    def register(cls, range_mixin):
        """
        Decorator for registering range set mixins for global use. This works
        the same as :meth:`~spans.settypes.MetaRangeSet.add`

        :param range_mixin: A :class:`~spans.types.Range` mixin class to
                            register a decorated range set mixin class for
        :return: A decorator to use on a range set mixin class
        """
        # bug fix: the inner closure was missing, leaving `decorator`
        # undefined when register() was invoked
        def decorator(range_set_mixin):
            cls.add(range_mixin, range_set_mixin)
            # return the class unchanged so it stays bound to its name
            return range_set_mixin
        return decorator
@MetaRangeSet.register(DiscreteRange)
class DiscreteRangeSetMixin(object):
    """
    Mixin providing operations that only make sense for sets of discrete
    ranges. :class:`~spans.settypes.RangeSet` picks this mixin up
    automatically when its range type inherits
    :class:`~spans.types.DiscreteRange`.

    .. versionchanged:: 0.5.0
       Changed name from ``discreterangeset`` to ``DiscreteRangeSetMixin``
    """

    __slots__ = ()

    def values(self):
        """
        Returns an iterator over each value in this range set.

        >>> list(intrangeset([intrange(1, 5), intrange(10, 15)]).values())
        [1, 2, 3, 4, 10, 11, 12, 13, 14]
        """
        # every range in the set is itself iterable; flatten them in order
        return chain.from_iterable(iter(self))
@MetaRangeSet.register(OffsetableRangeMixin)
class OffsetableRangeSetMixin(object):
    """
    Mixin providing offset support for range sets whose range type inherits
    :class:`~spans.types.OffsetableRangeMixin`. Picked up automatically by
    :class:`~spans.settypes.RangeSet`.

    .. versionchanged:: 0.5.0
       Changed name from ``offsetablerangeset`` to ``OffsetableRangeSetMixin``
    """

    __slots__ = ()

    def offset(self, offset):
        """
        Shift the range set to the left or right with the given offset

        >>> intrangeset([intrange(0, 5), intrange(10, 15)]).offset(5)
        intrangeset([intrange([5,10)), intrange([15,20))])
        >>> intrangeset([intrange(5, 10), intrange(15, 20)]).offset(-5)
        intrangeset([intrange([0,5)), intrange([10,15))])

        This function returns an offset copy of the original set, i.e. updating
        is not done in place.
        """
        # offset every member range and build a fresh set of the same type
        shifted = (member.offset(offset) for member in self)
        return self.__class__(shifted)
@add_metaclass(MetaRangeSet)
class RangeSet(PartialOrderingMixin):
"""
A range set works a lot like a range with some differences:
- All range sets supports ``len()``. Cardinality for a range set means the
number of distinct ranges required to represent this set. See
:meth:`~spans.settypes.RangeSet.__len__`.
- All range sets are iterable. The iterator returns a range for each
iteration. See :meth:`~spans.settypes.RangeSet.__iter__` for more details.
- All range sets are invertible using the ``~`` operator. The result is a
new range set that does not intersect the original range set at all.
>>> ~intrangeset([intrange(1, 5)])
intrangeset([intrange((,1)), intrange([5,))])
- Contrary to ranges. A range set may be split into multiple ranges when
performing set operations such as union, difference or intersection.
.. tip::
The ``RangeSet`` constructor supports any iterable sequence as argument.
:param ranges: A sequence of ranges to add to this set.
:raises TypeError: If any of the given ranges are of incorrect type.
.. versionchanged:: 0.5.0
Changed name from ``rangeset`` to ``RangeSet``
"""
__slots__ = ("_list",)
# Support pickling using the default ancient pickling protocol for Python 2.7
def __nonzero__(self):
"""
Returns False if the only thing in this set is the empty set, otherwise
it returns True.
>>> bool(intrangeset([]))
False
>>> bool(intrangeset([intrange(1, 5)]))
True
"""
return bool(self._list)
    def __iter__(self):
        """
        Returns an iterator over all ranges within this set. Note that this
        iterates over the normalized version of the range set:

        >>> list(intrangeset(
        ...     [intrange(1, 5), intrange(5, 10), intrange(15, 20)]))
        [intrange([1,10)), intrange([15,20))]

        If the set is empty an empty iterator is returned.

        >>> list(intrangeset([]))
        []

        .. versionchanged:: 0.3.0
           This method used to return an empty range when the RangeSet was
           empty.
        """
        # _list is kept normalized (merged and sorted) by add()/remove()
        return iter(self._list)
    def __len__(self):
        """
        Returns the cardinality of the set which is 0 for the empty set or else
        the number of ranges used to represent this range set.

        >>> len(intrangeset([]))
        0
        >>> len(intrangeset([intrange(1,5)]))
        1
        >>> len(intrangeset([intrange(1,5),intrange(10,20)]))
        2

        .. versionadded:: 0.2.0
        """
        return len(self._list)
def __invert__(self):
"""
Returns an inverted version of this set. The inverted set contains no
values this contains.
>>> ~intrangeset([intrange(1, 5)])
intrangeset([intrange((,1)), intrange([5,))])
"""
return self.__class__([self.type()]).difference(self)
@classmethod
@classmethod
@classmethod
def copy(self):
"""
Makes a copy of this set. This copy is not deep since ranges are
immutable.
>>> rs = intrangeset([intrange(1, 5)])
>>> rs_copy = rs.copy()
>>> rs == rs_copy
True
>>> rs is rs_copy
False
:return: A new range set with the same ranges as this range set.
"""
return self.__class__(self)
def contains(self, item):
"""
Test if this range
Return True if one range within the set contains elem, which may be
either a range of the same type or a scalar of the same type as the
ranges within the set.
>>> intrangeset([intrange(1, 5)]).contains(3)
True
>>> intrangeset([intrange(1, 5), intrange(10, 20)]).contains(7)
False
>>> intrangeset([intrange(1, 5)]).contains(intrange(2, 3))
True
>>> intrangeset(
... [intrange(1, 5), intrange(8, 9)]).contains(intrange(4, 6))
False
Contains can also be called using the ``in`` operator.
>>> 3 in intrangeset([intrange(1, 5)])
True
This operation is `O(n)` where `n` is the number of ranges within this
range set.
:param item: Range or scalar to test for.
:return: True if element is contained within this set.
.. versionadded:: 0.2.0
"""
# Verify the type here since contains does not validate the type unless
# there are items in self._list
if not self.is_valid_range(item) and not self.is_valid_scalar(item):
msg = "Unsupported item type provided '{}'"
raise ValueError(msg.format(item.__class__.__name__))
# All range sets contain the empty range
if not item:
return True
return any(r.contains(item) for r in self._list)
    def add(self, item):
        """
        Adds a range to the set.

        >>> rs = intrangeset([])
        >>> rs.add(intrange(1, 10))
        >>> rs
        intrangeset([intrange([1,10))])
        >>> rs.add(intrange(5, 15))
        >>> rs
        intrangeset([intrange([1,15))])
        >>> rs.add(intrange(20, 30))
        >>> rs
        intrangeset([intrange([1,15)), intrange([20,30))])

        This operation updates the set in place.

        :param item: Range to add to this set.
        :raises TypeError: If any of the given ranges are of incorrect type.
        """
        self._test_range_type(item)
        # If item is empty, do not add it
        if not item:
            return
        i = 0
        buffer = []
        while i < len(self._list):
            r = self._list[i]
            if r.overlap(item) or r.adjacent(item):
                # collect mergeable neighbors; pop() keeps `i` pointing at
                # the next element, so no increment here
                buffer.append(self._list.pop(i))
                continue
            elif item.left_of(r):
                # If there are buffered items we must break here for the buffer
                # to be inserted
                if not buffer:
                    self._list.insert(i, item)
                break
            i += 1
        else:
            # while/else: the list was exausted and the range should be
            # appended unless there are ranges in the buffer
            if not buffer:
                self._list.append(item)
        # Process the buffer
        if buffer:
            # Unify the buffer into a single range, then re-add the merged
            # result so it lands in its sorted position
            for r in buffer:
                item = item.union(r)
            self.add(item)
    def remove(self, item):
        """
        Remove a range from the set. This operation updates the set in place.
        >>> rs = intrangeset([intrange(1, 15)])
        >>> rs.remove(intrange(5, 10))
        >>> rs
        intrangeset([intrange([1,5)), intrange([10,15))])
        :param item: Range to remove from this set.
        """
        self._test_range_type(item)
        # If the list currently only have an empty range do nothing since an
        # empty RangeSet can't be removed from anyway.
        if not self:
            return
        i = 0
        while i < len(self._list):
            r = self._list[i]
            if item.left_of(r):
                # The list is sorted, so no later range can intersect ``item``
                break
            elif item.overlap(r):
                try:
                    self._list[i] = r.difference(item)
                    # If the element becomes empty remove it entirely
                    if not self._list[i]:
                        del self._list[i]
                        continue
                except ValueError:
                    # The range was within the range, causing it to be split so
                    # we do this split manually
                    del self._list[i]
                    self._list.insert(
                        i, r.replace(lower=item.upper, lower_inc=not item.upper_inc))
                    self._list.insert(
                        i, r.replace(upper=item.lower, upper_inc=not item.lower_inc))
                    # When this happens we know we are done
                    break
            i += 1
def span(self):
"""
Return a range that spans from the first point to the last point in this
set. This means the smallest range containing all elements of this set
with no gaps.
>>> intrangeset([intrange(1, 5), intrange(30, 40)]).span()
intrange([1,40))
This method can be used to implement the PostgreSQL function
``range_merge(a, b)``:
>>> a = intrange(1, 5)
>>> b = intrange(10, 15)
>>> intrangeset([a, b]).span()
intrange([1,15))
:return: A new range the contains this entire range set.
"""
# If the set is empty we treat it specially by returning an empty range
if not self:
return self.type.empty()
return self._list[0].replace(
upper=self._list[-1].upper,
upper_inc=self._list[-1].upper_inc)
def union(self, *others):
"""
Returns this set combined with every given set into a super set for each
given set.
>>> intrangeset([intrange(1, 5)]).union(
... intrangeset([intrange(5, 10)]))
intrangeset([intrange([1,10))])
:param other: Range set to merge with.
:return: A new range set that is the union of this and `other`.
"""
# Make a copy of self and add all its ranges to the copy
union = self.copy()
for other in others:
self._test_rangeset_type(other)
for r in other:
union.add(r)
return union
def difference(self, *others):
"""
Returns this set stripped of every subset that are in the other given
sets.
>>> intrangeset([intrange(1, 15)]).difference(
... intrangeset([intrange(5, 10)]))
intrangeset([intrange([1,5)), intrange([10,15))])
:param other: Range set to compute difference against.
:return: A new range set that is the difference between this and `other`.
"""
# Make a copy of self and remove all its ranges from the copy
difference = self.copy()
for other in others:
self._test_rangeset_type(other)
for r in other:
difference.remove(r)
return difference
def intersection(self, *others):
"""
Returns a new set of all subsets that exist in this and every given set.
>>> intrangeset([intrange(1, 15)]).intersection(
... intrangeset([intrange(5, 10)]))
intrangeset([intrange([5,10))])
:param other: Range set to intersect this range set with.
:return: A new range set that is the intersection between this and
`other`.
"""
# Initialize output with a reference to this RangeSet. When
# intersecting against multiple RangeSets at once this will be replaced
# after each iteration.
output = self
for other in others:
self._test_rangeset_type(other)
# Intermediate RangeSet containing intersection for this current
# iteration.
intersection = self.__class__([])
# Intersect every range within the current output with every range
# within the currently processed other RangeSet. All intersecting
# parts are added to the intermediate intersection set.
for a in output:
for b in other:
intersection.add(a.intersection(b))
# If the intermediate intersection RangeSet is still empty, there
# where no intersections with at least one of the arguments and
# we can quit early, since any intersection with the empty set will
# always be empty.
if not intersection:
return intersection
# Update output with intersection for the current iteration.
output = intersection
return output
# ``in`` operator support
__contains__ = contains
# Python 3 support
__bool__ = __nonzero__
class intrangeset(RangeSet):
    """
    Range set that operates on :class:`~spans.types.intrange`.
    >>> intrangeset([intrange(1, 5), intrange(10, 15)])
    intrangeset([intrange([1,5)), intrange([10,15))])
    Inherits methods from :class:`~spans.settypes.RangeSet`,
    :class:`~spans.settypes.DiscreteRangeSetMixin` and
    :class:`~spans.settypes.OffsetableRangeSetMixin`.
    """
    __slots__ = ()
    type = intrange
class floatrangeset(RangeSet):
    """
    Range set that operates on :class:`~spans.types.floatrange`.
    >>> floatrangeset([floatrange(1.0, 5.0), floatrange(10.0, 15.0)])
    floatrangeset([floatrange([1.0,5.0)), floatrange([10.0,15.0))])
    Inherits methods from :class:`~spans.settypes.RangeSet`,
    :class:`~spans.settypes.DiscreteRangeSetMixin` and
    :class:`~spans.settypes.OffsetableRangeSetMixin`.
    """
    __slots__ = ()
    type = floatrange
class strrangeset(RangeSet):
    """
    Range set that operates on :class:`~spans.types.strrange`.
    >>> strrangeset([
    ...     strrange(u"a", u"f", upper_inc=True),
    ...     strrange(u"0", u"9", upper_inc=True)])
    strrangeset([strrange([u'0',u':')), strrange([u'a',u'g'))])
    Inherits methods from :class:`~spans.settypes.RangeSet` and
    :class:`~spans.settypes.DiscreteRangeSetMixin`.
    """
    __slots__ = ()
    type = strrange
class daterangeset(RangeSet):
    """
    Range set that operates on :class:`~spans.types.daterange`.
    >>> month = daterange(date(2000, 1, 1), date(2000, 2, 1))
    >>> daterangeset([month, month.offset(timedelta(366))]) # doctest: +NORMALIZE_WHITESPACE
    daterangeset([daterange([datetime.date(2000, 1, 1),datetime.date(2000, 2, 1))),
        daterange([datetime.date(2001, 1, 1),datetime.date(2001, 2, 1)))])
    Inherits methods from :class:`~spans.settypes.RangeSet`,
    :class:`~spans.settypes.DiscreteRangeSetMixin` and
    :class:`~spans.settypes.OffsetableRangeSetMixin`.
    """
    __slots__ = ()
    type = daterange
class datetimerangeset(RangeSet):
    """
    Range set that operates on :class:`~spans.types.datetimerange`.
    >>> month = datetimerange(datetime(2000, 1, 1), datetime(2000, 2, 1))
    >>> datetimerangeset([month, month.offset(timedelta(366))]) # doctest: +NORMALIZE_WHITESPACE
    datetimerangeset([datetimerange([datetime.datetime(2000, 1, 1, 0, 0),datetime.datetime(2000, 2, 1, 0, 0))),
        datetimerange([datetime.datetime(2001, 1, 1, 0, 0),datetime.datetime(2001, 2, 1, 0, 0)))])
    Inherits methods from :class:`~spans.settypes.RangeSet` and
    :class:`~spans.settypes.OffsetableRangeSetMixin`.
    """
    __slots__ = ()
    type = datetimerange
@fix_timedelta_repr
class timedeltarangeset(RangeSet):
    """
    Range set that operates on :class:`~spans.types.timedeltarange`.
    >>> week = timedeltarange(timedelta(0), timedelta(7))
    >>> timedeltarangeset([week, week.offset(timedelta(7))])
    timedeltarangeset([timedeltarange([datetime.timedelta(0),datetime.timedelta(14)))])
    Inherits methods from :class:`~spans.settypes.RangeSet` and
    :class:`~spans.settypes.OffsetableRangeSetMixin`.
    """
    __slots__ = ()
    type = timedeltarange
# Legacy names
#: This alias exists for legacy reasons. It is considered deprecated but will
#: not likely be removed.
#:
#: .. versionadded:: 0.5.0
metarangeset = MetaRangeSet
#: This alias exists for legacy reasons. It is considered deprecated but will
#: not likely be removed.
#:
#: .. versionadded:: 0.5.0
rangeset = RangeSet
| 31.022825 | 115 | 0.585644 | from itertools import chain
from ._compat import add_metaclass, fix_timedelta_repr
from ._utils import PartialOrderingMixin
from .types import Range
from .types import *
from .types import DiscreteRange, OffsetableRangeMixin
# Imports needed for doctests in date range sets
from datetime import *
__all__ = [
"intrangeset",
"floatrangeset",
"strrangeset",
"daterangeset",
"datetimerangeset",
"timedeltarangeset",
]
class MetaRangeSet(type):
"""
A meta class for RangeSets. The purpose is to automatically add relevant
mixins to the range set class based on what mixins and base classes the
range class has.
All subclasses of :class:`~spans.settypes.RangeSet` uses this class as its
metaclass
.. versionchanged:: 0.5.0
Changed name from ``metarangeset`` to ``MetaRangeSet``
"""
mixin_map = {}
def __new__(cls, name, bases, attrs):
parents = list(bases)
if "type" in attrs:
for rangemixin, RangeSetmixin in cls.mixin_map.items():
if issubclass(attrs["type"], rangemixin):
parents.append(RangeSetmixin)
return super(MetaRangeSet, cls).__new__(cls, name, tuple(parents), attrs)
@classmethod
def add(cls, range_mixin, range_set_mixin):
"""
Register a range set mixin for a range mixin.
:param range_mixin: Range mixin class
:param range_set_mixin: Range set mixin class
"""
cls.mixin_map[range_mixin] = range_set_mixin
@classmethod
def register(cls, range_mixin):
"""
Decorator for registering range set mixins for global use. This works
the same as :meth:`~spans.settypes.MetaRangeSet.add`
:param range_mixin: A :class:`~spans.types.Range` mixin class to
to register a decorated range set mixin class for
:return: A decorator to use on a range set mixin class
"""
def decorator(range_set_mixin):
cls.add(range_mixin, range_set_mixin)
return range_set_mixin
return decorator
@MetaRangeSet.register(DiscreteRange)
class DiscreteRangeSetMixin(object):
"""
Mixin that adds support for discrete range set operations. Automatically used
by :class:`~spans.settypes.RangeSet` when :class:`~spans.types.Range` type
inherits :class:`~spans.types.DiscreteRange`.
.. versionchanged:: 0.5.0
Changed name from ``discreterangeset`` to ``DiscreteRangeSetMixin``
"""
__slots__ = ()
def values(self):
"""
Returns an iterator over each value in this range set.
>>> list(intrangeset([intrange(1, 5), intrange(10, 15)]).values())
[1, 2, 3, 4, 10, 11, 12, 13, 14]
"""
return chain(*self)
@MetaRangeSet.register(OffsetableRangeMixin)
class OffsetableRangeSetMixin(object):
"""
Mixin that adds support for offsetable range set operations. Automatically
used by :class:`~spans.settypes.RangeSet` when range type inherits
:class:`~spans.settypes.OffsetableRangeMixin`.
.. versionchanged:: 0.5.0
Changed name from ``offsetablerangeset`` to ``OffsetableRangeSetMixin``
"""
__slots__ = ()
def offset(self, offset):
"""
Shift the range set to the left or right with the given offset
>>> intrangeset([intrange(0, 5), intrange(10, 15)]).offset(5)
intrangeset([intrange([5,10)), intrange([15,20))])
>>> intrangeset([intrange(5, 10), intrange(15, 20)]).offset(-5)
intrangeset([intrange([0,5)), intrange([10,15))])
This function returns an offset copy of the original set, i.e. updating
is not done in place.
"""
return self.__class__(r.offset(offset) for r in self)
@add_metaclass(MetaRangeSet)
class RangeSet(PartialOrderingMixin):
"""
A range set works a lot like a range with some differences:
- All range sets supports ``len()``. Cardinality for a range set means the
number of distinct ranges required to represent this set. See
:meth:`~spans.settypes.RangeSet.__len__`.
- All range sets are iterable. The iterator returns a range for each
iteration. See :meth:`~spans.settypes.RangeSet.__iter__` for more details.
- All range sets are invertible using the ``~`` operator. The result is a
new range set that does not intersect the original range set at all.
>>> ~intrangeset([intrange(1, 5)])
intrangeset([intrange((,1)), intrange([5,))])
- Contrary to ranges. A range set may be split into multiple ranges when
performing set operations such as union, difference or intersection.
.. tip::
The ``RangeSet`` constructor supports any iterable sequence as argument.
:param ranges: A sequence of ranges to add to this set.
:raises TypeError: If any of the given ranges are of incorrect type.
.. versionchanged:: 0.5.0
Changed name from ``rangeset`` to ``RangeSet``
"""
__slots__ = ("_list",)
def __init__(self, ranges):
self._list = []
for r in ranges:
self.add(r)
def __repr__(self):
return "{instance.__class__.__name__}({list!r})".format(
instance=self,
list=self._list)
# Support pickling using the default ancient pickling protocol for Python 2.7
def __getstate__(self):
return self._list
def __setstate__(self, state):
self._list = state
def __nonzero__(self):
"""
Returns False if the only thing in this set is the empty set, otherwise
it returns True.
>>> bool(intrangeset([]))
False
>>> bool(intrangeset([intrange(1, 5)]))
True
"""
return bool(self._list)
def __iter__(self):
"""
Returns an iterator over all ranges within this set. Note that this
iterates over the normalized version of the range set:
>>> list(intrangeset(
... [intrange(1, 5), intrange(5, 10), intrange(15, 20)]))
[intrange([1,10)), intrange([15,20))]
If the set is empty an empty iterator is returned.
>>> list(intrangeset([]))
[]
.. versionchanged:: 0.3.0
This method used to return an empty range when the RangeSet was
empty.
"""
return iter(self._list)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._list == other._list
def __lt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._list < other._list
def __len__(self):
"""
Returns the cardinality of the set which is 0 for the empty set or else
the number of ranges used to represent this range set.
>>> len(intrangeset([]))
0
>>> len(intrangeset([intrange(1,5)]))
1
>>> len(intrangeset([intrange(1,5),intrange(10,20)]))
2
.. versionadded:: 0.2.0
"""
return len(self._list)
def __invert__(self):
"""
Returns an inverted version of this set. The inverted set contains no
values this contains.
>>> ~intrangeset([intrange(1, 5)])
intrangeset([intrange((,1)), intrange([5,))])
"""
return self.__class__([self.type()]).difference(self)
@classmethod
def is_valid_rangeset(cls, obj):
return isinstance(obj, cls)
@classmethod
def is_valid_range(cls, obj):
return cls.type.is_valid_range(obj)
@classmethod
def is_valid_scalar(cls, obj):
return cls.type.is_valid_scalar(obj)
def _test_rangeset_type(self, item):
if not self.is_valid_rangeset(item):
raise TypeError((
"Invalid range type '{range_type.__name__}' expected "
"'{expected_type.__name__}'").format(
expected_type=self.type,
range_type=item.__class__))
def _test_range_type(self, item):
if not self.is_valid_range(item):
raise TypeError((
"Invalid range type '{range_type.__name__}' expected "
"'{expected_type.__name__}'").format(
expected_type=self.type,
range_type=item.__class__))
def copy(self):
"""
Makes a copy of this set. This copy is not deep since ranges are
immutable.
>>> rs = intrangeset([intrange(1, 5)])
>>> rs_copy = rs.copy()
>>> rs == rs_copy
True
>>> rs is rs_copy
False
:return: A new range set with the same ranges as this range set.
"""
return self.__class__(self)
def contains(self, item):
"""
Test if this range
Return True if one range within the set contains elem, which may be
either a range of the same type or a scalar of the same type as the
ranges within the set.
>>> intrangeset([intrange(1, 5)]).contains(3)
True
>>> intrangeset([intrange(1, 5), intrange(10, 20)]).contains(7)
False
>>> intrangeset([intrange(1, 5)]).contains(intrange(2, 3))
True
>>> intrangeset(
... [intrange(1, 5), intrange(8, 9)]).contains(intrange(4, 6))
False
Contains can also be called using the ``in`` operator.
>>> 3 in intrangeset([intrange(1, 5)])
True
This operation is `O(n)` where `n` is the number of ranges within this
range set.
:param item: Range or scalar to test for.
:return: True if element is contained within this set.
.. versionadded:: 0.2.0
"""
# Verify the type here since contains does not validate the type unless
# there are items in self._list
if not self.is_valid_range(item) and not self.is_valid_scalar(item):
msg = "Unsupported item type provided '{}'"
raise ValueError(msg.format(item.__class__.__name__))
# All range sets contain the empty range
if not item:
return True
return any(r.contains(item) for r in self._list)
def add(self, item):
"""
Adds a range to the set.
>>> rs = intrangeset([])
>>> rs.add(intrange(1, 10))
>>> rs
intrangeset([intrange([1,10))])
>>> rs.add(intrange(5, 15))
>>> rs
intrangeset([intrange([1,15))])
>>> rs.add(intrange(20, 30))
>>> rs
intrangeset([intrange([1,15)), intrange([20,30))])
This operation updates the set in place.
:param item: Range to add to this set.
:raises TypeError: If any of the given ranges are of incorrect type.
"""
self._test_range_type(item)
# If item is empty, do not add it
if not item:
return
i = 0
buffer = []
while i < len(self._list):
r = self._list[i]
if r.overlap(item) or r.adjacent(item):
buffer.append(self._list.pop(i))
continue
elif item.left_of(r):
# If there are buffered items we must break here for the buffer
# to be inserted
if not buffer:
self._list.insert(i, item)
break
i += 1
else:
# The list was exausted and the range should be appended unless there
# are ranges in the buffer
if not buffer:
self._list.append(item)
# Process the buffer
if buffer:
# Unify the buffer
for r in buffer:
item = item.union(r)
self.add(item)
def remove(self, item):
"""
Remove a range from the set. This operation updates the set in place.
>>> rs = intrangeset([intrange(1, 15)])
>>> rs.remove(intrange(5, 10))
>>> rs
intrangeset([intrange([1,5)), intrange([10,15))])
:param item: Range to remove from this set.
"""
self._test_range_type(item)
# If the list currently only have an empty range do nothing since an
# empty RangeSet can't be removed from anyway.
if not self:
return
i = 0
while i < len(self._list):
r = self._list[i]
if item.left_of(r):
break
elif item.overlap(r):
try:
self._list[i] = r.difference(item)
# If the element becomes empty remove it entirely
if not self._list[i]:
del self._list[i]
continue
except ValueError:
# The range was within the range, causing it to be split so
# we do this split manually
del self._list[i]
self._list.insert(
i, r.replace(lower=item.upper, lower_inc=not item.upper_inc))
self._list.insert(
i, r.replace(upper=item.lower, upper_inc=not item.lower_inc))
# When this happens we know we are done
break
i += 1
def span(self):
"""
Return a range that spans from the first point to the last point in this
set. This means the smallest range containing all elements of this set
with no gaps.
>>> intrangeset([intrange(1, 5), intrange(30, 40)]).span()
intrange([1,40))
This method can be used to implement the PostgreSQL function
``range_merge(a, b)``:
>>> a = intrange(1, 5)
>>> b = intrange(10, 15)
>>> intrangeset([a, b]).span()
intrange([1,15))
:return: A new range the contains this entire range set.
"""
# If the set is empty we treat it specially by returning an empty range
if not self:
return self.type.empty()
return self._list[0].replace(
upper=self._list[-1].upper,
upper_inc=self._list[-1].upper_inc)
def union(self, *others):
"""
Returns this set combined with every given set into a super set for each
given set.
>>> intrangeset([intrange(1, 5)]).union(
... intrangeset([intrange(5, 10)]))
intrangeset([intrange([1,10))])
:param other: Range set to merge with.
:return: A new range set that is the union of this and `other`.
"""
# Make a copy of self and add all its ranges to the copy
union = self.copy()
for other in others:
self._test_rangeset_type(other)
for r in other:
union.add(r)
return union
def difference(self, *others):
"""
Returns this set stripped of every subset that are in the other given
sets.
>>> intrangeset([intrange(1, 15)]).difference(
... intrangeset([intrange(5, 10)]))
intrangeset([intrange([1,5)), intrange([10,15))])
:param other: Range set to compute difference against.
:return: A new range set that is the difference between this and `other`.
"""
# Make a copy of self and remove all its ranges from the copy
difference = self.copy()
for other in others:
self._test_rangeset_type(other)
for r in other:
difference.remove(r)
return difference
def intersection(self, *others):
"""
Returns a new set of all subsets that exist in this and every given set.
>>> intrangeset([intrange(1, 15)]).intersection(
... intrangeset([intrange(5, 10)]))
intrangeset([intrange([5,10))])
:param other: Range set to intersect this range set with.
:return: A new range set that is the intersection between this and
`other`.
"""
# Initialize output with a reference to this RangeSet. When
# intersecting against multiple RangeSets at once this will be replaced
# after each iteration.
output = self
for other in others:
self._test_rangeset_type(other)
# Intermediate RangeSet containing intersection for this current
# iteration.
intersection = self.__class__([])
# Intersect every range within the current output with every range
# within the currently processed other RangeSet. All intersecting
# parts are added to the intermediate intersection set.
for a in output:
for b in other:
intersection.add(a.intersection(b))
# If the intermediate intersection RangeSet is still empty, there
# where no intersections with at least one of the arguments and
# we can quit early, since any intersection with the empty set will
# always be empty.
if not intersection:
return intersection
# Update output with intersection for the current iteration.
output = intersection
return output
def __or__(self, other):
try:
return self.union(other)
except TypeError:
return NotImplemented
def __and__(self, other):
try:
return self.intersection(other)
except TypeError:
return NotImplemented
def __sub__(self, other):
try:
return self.difference(other)
except TypeError:
return NotImplemented
# ``in`` operator support
__contains__ = contains
# Python 3 support
__bool__ = __nonzero__
class intrangeset(RangeSet):
"""
Range set that operates on :class:`~spans.types.intrange`.
>>> intrangeset([intrange(1, 5), intrange(10, 15)])
intrangeset([intrange([1,5)), intrange([10,15))])
Inherits methods from :class:`~spans.settypes.RangeSet`,
:class:`~spans.settypes.DiscreteRangeset` and
:class:`~spans.settypes.OffsetableRangeMixinset`.
"""
__slots__ = ()
type = intrange
class floatrangeset(RangeSet):
"""
Range set that operates on :class:`~spans.types.floatrange`.
>>> floatrangeset([floatrange(1.0, 5.0), floatrange(10.0, 15.0)])
floatrangeset([floatrange([1.0,5.0)), floatrange([10.0,15.0))])
Inherits methods from :class:`~spans.settypes.RangeSet`,
:class:`~spans.settypes.DiscreteRangeset` and
:class:`~spans.settypes.OffsetableRangeMixinset`.
"""
__slots__ = ()
type = floatrange
class strrangeset(RangeSet):
"""
Range set that operates on .. seealso:: :class:`~spans.types.strrange`.
>>> strrangeset([
... strrange(u"a", u"f", upper_inc=True),
... strrange(u"0", u"9", upper_inc=True)])
strrangeset([strrange([u'0',u':')), strrange([u'a',u'g'))])
Inherits methods from :class:`~spans.settypes.RangeSet` and
:class:`~spans.settypes.DiscreteRangeset`.
"""
__slots__ = ()
type = strrange
class daterangeset(RangeSet):
"""
Range set that operates on :class:`~spans.types.daterange`.
>>> month = daterange(date(2000, 1, 1), date(2000, 2, 1))
>>> daterangeset([month, month.offset(timedelta(366))]) # doctest: +NORMALIZE_WHITESPACE
daterangeset([daterange([datetime.date(2000, 1, 1),datetime.date(2000, 2, 1))),
daterange([datetime.date(2001, 1, 1),datetime.date(2001, 2, 1)))])
Inherits methods from :class:`~spans.settypes.RangeSet`,
:class:`~spans.settypes.DiscreteRangeset` and
:class:`~spans.settypes.OffsetableRangeMixinset`.
"""
__slots__ = ()
type = daterange
class datetimerangeset(RangeSet):
"""
Range set that operates on :class:`~spans.types.datetimerange`.
>>> month = datetimerange(datetime(2000, 1, 1), datetime(2000, 2, 1))
>>> datetimerangeset([month, month.offset(timedelta(366))]) # doctest: +NORMALIZE_WHITESPACE
datetimerangeset([datetimerange([datetime.datetime(2000, 1, 1, 0, 0),datetime.datetime(2000, 2, 1, 0, 0))),
datetimerange([datetime.datetime(2001, 1, 1, 0, 0),datetime.datetime(2001, 2, 1, 0, 0)))])
Inherits methods from :class:`~spans.settypes.RangeSet` and
:class:`~spans.settypes.OffsetableRangeMixinset`.
"""
__slots__ = ()
type = datetimerange
@fix_timedelta_repr
class timedeltarangeset(RangeSet):
"""
Range set that operates on :class:`~spans.types.timedeltarange`.
>>> week = timedeltarange(timedelta(0), timedelta(7))
>>> timedeltarangeset([week, week.offset(timedelta(7))])
timedeltarangeset([timedeltarange([datetime.timedelta(0),datetime.timedelta(14)))])
Inherits methods from :class:`~spans.settypes.RangeSet` and
:class:`~spans.settypes.OffsetableRangeMixinset`.
"""
__slots__ = ()
type = timedeltarange
# Legacy names
#: This alias exist for legacy reasons. It is considered deprecated but will not
#: likely be removed.
#:
#: .. versionadded:: 0.5.0
metarangeset = MetaRangeSet
#: This alias exist for legacy reasons. It is considered deprecated but will not
#: likely be removed.
#:
#: .. versionadded:: 0.5.0
rangeset = RangeSet
| 2,063 | 0 | 432 |
00b339c8fe21cab0883a4fbca8f802dcb584e12a | 2,767 | py | Python | ui/etc/page2.py | mizcos/SS-3 | 3d023df6f9c3915b78638044d878775677a284ab | [
"MIT"
] | 1 | 2021-09-20T11:54:57.000Z | 2021-09-20T11:54:57.000Z | ui/etc/page2.py | mizcos/SS-3 | 3d023df6f9c3915b78638044d878775677a284ab | [
"MIT"
] | null | null | null | ui/etc/page2.py | mizcos/SS-3 | 3d023df6f9c3915b78638044d878775677a284ab | [
"MIT"
] | 1 | 2021-04-13T12:16:21.000Z | 2021-04-13T12:16:21.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'page2.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
| 45.360656 | 102 | 0.713408 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'page2.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """
    Auto-generated (pyuic5, from ``page2.ui``) UI definition for a settings
    page: a picture "return" button plus buttons that open the address,
    meal-time and volume settings. Regenerating the file discards edits.
    """
    def setupUi(self, Form):
        # Create the widgets, lay them out with fixed geometry, and connect
        # each button's ``pressed`` signal to the corresponding slot that the
        # host ``Form`` is expected to provide (set_adress, return_top,
        # set_mealtime, set_volume).
        Form.setObjectName("Form")
        Form.resize(480, 320)
        self.pushButton_return = QtWidgets.QPushButton(Form)
        self.pushButton_return.setGeometry(QtCore.QRect(10, 10, 121, 71))
        self.pushButton_return.setText("")
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("image/capsule_return.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_return.setIcon(icon)
        self.pushButton_return.setIconSize(QtCore.QSize(100, 100))
        self.pushButton_return.setFlat(True)
        self.pushButton_return.setObjectName("pushButton_return")
        self.pushButton_setadress = QtWidgets.QPushButton(Form)
        self.pushButton_setadress.setGeometry(QtCore.QRect(150, 10, 311, 81))
        font = QtGui.QFont()
        font.setPointSize(30)
        self.pushButton_setadress.setFont(font)
        self.pushButton_setadress.setObjectName("pushButton_setadress")
        self.pushButton_set_mealtime = QtWidgets.QPushButton(Form)
        self.pushButton_set_mealtime.setGeometry(QtCore.QRect(20, 100, 261, 201))
        font = QtGui.QFont()
        font.setPointSize(27)
        self.pushButton_set_mealtime.setFont(font)
        self.pushButton_set_mealtime.setObjectName("pushButton_set_mealtime")
        self.pushButton_set_volume = QtWidgets.QPushButton(Form)
        self.pushButton_set_volume.setGeometry(QtCore.QRect(310, 100, 151, 201))
        font = QtGui.QFont()
        font.setFamily("Apple SD Gothic Neo")
        font.setPointSize(30)
        font.setKerning(True)
        self.pushButton_set_volume.setFont(font)
        self.pushButton_set_volume.setObjectName("pushButton_set_volume")
        self.retranslateUi(Form)
        self.pushButton_setadress.pressed.connect(Form.set_adress)
        self.pushButton_return.pressed.connect(Form.return_top)
        self.pushButton_set_mealtime.pressed.connect(Form.set_mealtime)
        self.pushButton_set_volume.pressed.connect(Form.set_volume)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # Install the user-visible (Japanese) captions via Qt's translation
        # layer so they can be swapped by locale.
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.pushButton_setadress.setText(_translate("Form", "連絡先設定"))
        self.pushButton_set_mealtime.setText(_translate("Form", "お食事時間設定"))
        self.pushButton_set_volume.setText(_translate("Form", "音量設定"))
| 2,390 | 1 | 76 |
08453069f642cd08cf76bbd5d6e98308dedbfc19 | 1,513 | py | Python | examples/example07.py | robertsj/pypgapack | c24b4a58f347ec02c20929aaaec25010fa603eb8 | [
"MIT"
] | 4 | 2015-12-16T09:44:32.000Z | 2021-05-23T23:52:33.000Z | examples/example07.py | robertsj/pypgapack | c24b4a58f347ec02c20929aaaec25010fa603eb8 | [
"MIT"
] | null | null | null | examples/example07.py | robertsj/pypgapack | c24b4a58f347ec02c20929aaaec25010fa603eb8 | [
"MIT"
] | 1 | 2022-01-01T17:44:21.000Z | 2022-01-01T17:44:21.000Z | """
pypgapack/examples/example07.py -- maxbit with end-of-generation hill climb
"""
from pypgapack import PGA
import sys
class MyPGA(PGA):
    """
    PGA subclass supplying a maxbit objective and a simple end-of-generation
    hill-climbing heuristic.
    """
    def maxbit(self, p, pop):
        """
        Objective function: the number of 1-alleles in string ``p`` of
        population ``pop``. The maximum is the string length, reached when
        every allele is 1.
        """
        length = self.GetStringLength()
        ones = sum(
            1 for idx in range(length)
            if self.GetBinaryAllele(p, pop, idx)
        )
        # PGAPack requires fitness evaluations to return a float
        return float(ones)
    def climb(self):
        """
        End-of-generation heuristic: set one randomly chosen allele to 1 in
        every string of the new population.
        """
        length = self.GetStringLength()
        for member in range(self.GetPopSize()):
            chosen = self.RandomInterval(0, length - 1)
            self.SetBinaryAllele(member, PGA.NEWPOP, chosen, 1)
# (Command line arguments, 1's and 0's, string length, and maximize it)
opt = MyPGA(sys.argv, PGA.DATATYPE_BINARY, 100, PGA.MAXIMIZE)
opt.SetRandomSeed(1) # Fixed random seed so runs are reproducible.
opt.SetMaxGAIterValue(50) # 50 generations (default 1000) for short output.
opt.SetEndOfGen(opt.climb) # Run the hill-climbing heuristic each generation.
opt.SetUp() # Internal allocations, etc.
opt.Run(opt.maxbit) # Run the GA with maxbit as the objective.
opt.Destroy() # Clean up PGAPack internals
| 36.02381 | 77 | 0.573695 | """
pypgapack/examples/example07.py -- maxbit with end-of-generation hill climb
"""
from pypgapack import PGA
import sys
class MyPGA(PGA) :
"""
Derive our own class from PGA.
"""
def maxbit(self, p, pop) :
"""
Maximum when all alleles are 1's, and that maximum is n.
"""
val = 0
# Size of the problem
n = self.GetStringLength()
for i in range(0, n) :
# Check whether ith allele in string p is 1
if self.GetBinaryAllele(p, pop, i) :
val = val + 1
# Remember that fitness evaluations must return a float
return float(val)
def climb(self):
"""
Randomly set a bit to 1 in each string
"""
popsize = self.GetPopSize()
n = self.GetStringLength()
for p in range(0, popsize) :
i = self.RandomInterval(0, n - 1)
self.SetBinaryAllele(p, PGA.NEWPOP, i, 1)
# (Command line arguments, 1's and 0's, string length, and maximize it)
opt = MyPGA(sys.argv, PGA.DATATYPE_BINARY, 100, PGA.MAXIMIZE)
opt.SetRandomSeed(1) # Set random seed for verification.
opt.SetMaxGAIterValue(50) # 50 generations (default 1000) for short output.
opt.SetEndOfGen(opt.climb) # Set a hill climbing heuristic
opt.SetUp() # Internal allocations, etc.
opt.Run(opt.maxbit) # Set the objective.
opt.Destroy() # Clean up PGAPack internals
| 0 | 0 | 0 |
e813f611e691c058492667f535db043e5ad97375 | 2,028 | py | Python | experiment-mininet-wifi/Cls_topology.py | phoophoo187/Privacy_SDN_Edge_IoT | 3ee6e0fb36c6d86cf8caf4599a35c04b0ade9a8c | [
"MIT"
] | null | null | null | experiment-mininet-wifi/Cls_topology.py | phoophoo187/Privacy_SDN_Edge_IoT | 3ee6e0fb36c6d86cf8caf4599a35c04b0ade9a8c | [
"MIT"
] | null | null | null | experiment-mininet-wifi/Cls_topology.py | phoophoo187/Privacy_SDN_Edge_IoT | 3ee6e0fb36c6d86cf8caf4599a35c04b0ade9a8c | [
"MIT"
] | null | null | null | """
" The Topology class responed for
" - creating a network topology which specified in a JSON format file
" - adding and removing nodes and edges from a network
" - showing a current network topology
"""
import networkx as nx
from networkx.readwrite import json_graph
import json
import matplotlib.pyplot as plt
| 36.214286 | 101 | 0.591223 | """
" The Topology class responed for
" - creating a network topology which specified in a JSON format file
" - adding and removing nodes and edges from a network
" - showing a current network topology
"""
import networkx as nx
from networkx.readwrite import json_graph
import json
import matplotlib.pyplot as plt
class Cls_topology:
    """
    Helpers for building and inspecting a network topology stored as a
    node-link formatted JSON file (networkx ``json_graph`` format).
    """
    def __init__(self):
        # Stateless helper class; nothing to initialise.
        pass
    def create_g_from_json(self, jsonfile):
        """
        Read a JSON formatted (node-link) graph file.
        :param jsonfile: Path of the JSON file to read.
        :return: The networkx graph described by the file.
        """
        # Use a context manager so the file handle is closed deterministically
        # (the previous ``json.load(open(jsonfile))`` leaked the handle).
        with open(jsonfile) as fp:
            gnl = json.load(fp)
        return json_graph.node_link_graph(gnl)
    def show_network(self, G):
        """Print nodes and links of ``G``, first bare, then with data."""
        # Show basic node and link info
        print("Network nodes: {}".format(G.nodes()))
        print("Network links: {}".format(G.edges()))
        # Node and Link with extra data properties
        print("Network nodes: {}".format(G.nodes(data=True)))
        print("Network links: {}".format(G.edges(data=True)))
    def draw_graph(self, G):
        """
        Draw ``G`` using each node's fixed ``x``/``y`` position and label
        every edge with its weight/cost attributes.
        """
        edge_labels = {}  # {(src, dst): {label: value, ...}}
        node_pos = {}     # {node: (x, y)}
        for n, data in G.nodes(data=True):
            node_pos[n] = data['x'], data['y']
        for u, v, data in G.edges(data=True):
            edge_labels[u, v] = {'w': data['weight'], 'c': data['cost'], 'c1': data['c1'], 'c2': data['c2']}
        nx.draw_networkx(G, node_pos)
        nx.draw_networkx_edge_labels(G, node_pos, edge_labels=edge_labels)
    def show_node_position(self, G):
        """Print the ``{node: (x, y)}`` position mapping of ``G``."""
        node_pos = {n: (data['x'], data['y']) for n, data in G.nodes(data=True)}
        print(node_pos)
    def show_edge_labels(self, G):
        """Print the per-edge weight/cost label mapping of ``G``."""
        edge_labels = {}
        for u, v, data in G.edges(data=True):
            edge_labels[u, v] = {'w': data['weight'], 'c': data['cost'], 'c1': data['c1'], 'c2': data['c2']}
        print(edge_labels)
9f6700e87fa0ea57093fb7809b358292954f90e0 | 3,958 | py | Python | options/options_train_executor.py | aluo-x/shape2prog | 1177e5205b99bb293e353688b564c94a14211c75 | [
"BSD-2-Clause"
] | 109 | 2019-01-10T03:16:21.000Z | 2022-02-10T07:39:22.000Z | options/options_train_executor.py | aluo-x/shape2prog | 1177e5205b99bb293e353688b564c94a14211c75 | [
"BSD-2-Clause"
] | 6 | 2019-06-11T13:30:08.000Z | 2020-11-19T17:42:12.000Z | options/options_train_executor.py | aluo-x/shape2prog | 1177e5205b99bb293e353688b564c94a14211c75 | [
"BSD-2-Clause"
] | 16 | 2019-01-16T08:08:18.000Z | 2021-11-11T02:52:40.000Z | from __future__ import print_function
import os
import argparse
import socket
import torch
from programs.label_config import max_param, stop_id
def get_parser():
    """
    a parser for training the program executor

    Argument groups (see the inline section comments): optimization,
    print/save frequency, model architecture, data loading, loss
    weighting, and the randomization files used for validation.
    """
    parser = argparse.ArgumentParser(description="arguments for training program executor")
    # optimization
    parser.add_argument('--learning_rate', type=float, default=1e-3, help='learning rate')
    parser.add_argument('--lr_decay_epochs', type=str, default='20,25', help='where to decay lr, can be a list')
    parser.add_argument('--lr_decay_rate', type=float, default=0.2, help='decay rate for learning rate')
    parser.add_argument('--weight_decay', type=float, default=0, help='weight decay')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam')
    parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam')
    parser.add_argument('--grad_clip', type=float, default=0.1, help='threshold for gradient clipping')
    parser.add_argument('--epochs', type=int, default=30, help='number of training epochs')
    # print and save
    parser.add_argument('--info_interval', type=int, default=10, help='freq for printing info')
    parser.add_argument('--save_interval', type=int, default=1, help='freq for saving model')
    # model parameters (program_size / max_param defaults are derived from
    # the dataset's label configuration: stop_id and max_param)
    parser.add_argument('--program_size', type=int, default=stop_id-1, help='number of programs')
    parser.add_argument('--input_encoding_size', type=int, default=128, help='dim of input encoding')
    parser.add_argument('--program_vector_size', type=int, default=128, help='dim of program encoding')
    parser.add_argument('--nc', type=int, default=2, help='number of output channels')
    parser.add_argument('--rnn_size', type=int, default=128, help='core dim of aggregation LSTM')
    parser.add_argument('--num_layers', type=int, default=1, help='number of LSTM layers')
    parser.add_argument('--drop_prob_lm', type=float, default=0, help='dropout prob of LSTM')
    parser.add_argument('--seq_length', type=int, default=3, help='sequence length')
    parser.add_argument('--max_param', type=int, default=max_param-1, help='maximum number of parameters')
    # data parameter
    parser.add_argument('--batch_size', type=int, default=64, help='batch size of training and validating')
    parser.add_argument('--num_workers', type=int, default=8, help='num of threads for data loader')
    parser.add_argument('--train_file', type=str, default='./data/train_blocks.h5', help='path to training file')
    parser.add_argument('--val_file', type=str, default='./data/val_blocks.h5', help='path to val file')
    parser.add_argument('--model_name', type=str, default='program_executor', help='folder name to save model')
    # weighted loss
    parser.add_argument('--n_weight', type=int, default=1, help='weight for negative voxels')
    parser.add_argument('--p_weight', type=int, default=5, help='weight for positive voxels')
    # randomization file for validation
    parser.add_argument('--rand1', type=str, default='./data/rand1.npy', help='directory to rand file 1')
    parser.add_argument('--rand2', type=str, default='./data/rand2.npy', help='directory to rand file 2')
    parser.add_argument('--rand3', type=str, default='./data/rand3.npy', help='directory to rand file 3')
    return parser
if __name__ == '__main__':
    # Smoke test: parse the CLI arguments and echo them back.
    opt = parse()
    print('===== arguments: training program executor =====')
    for key, val in vars(opt).items():
        print("{:20} {}".format(key, val))
| 45.494253 | 113 | 0.704144 | from __future__ import print_function
import os
import argparse
import socket
import torch
from programs.label_config import max_param, stop_id
def get_parser():
    """
    a parser for training the program executor

    Argument groups (see the inline section comments): optimization,
    print/save frequency, model architecture, data loading, loss
    weighting, and the randomization files used for validation.
    """
    parser = argparse.ArgumentParser(description="arguments for training program executor")
    # optimization
    parser.add_argument('--learning_rate', type=float, default=1e-3, help='learning rate')
    parser.add_argument('--lr_decay_epochs', type=str, default='20,25', help='where to decay lr, can be a list')
    parser.add_argument('--lr_decay_rate', type=float, default=0.2, help='decay rate for learning rate')
    parser.add_argument('--weight_decay', type=float, default=0, help='weight decay')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam')
    parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam')
    parser.add_argument('--grad_clip', type=float, default=0.1, help='threshold for gradient clipping')
    parser.add_argument('--epochs', type=int, default=30, help='number of training epochs')
    # print and save
    parser.add_argument('--info_interval', type=int, default=10, help='freq for printing info')
    parser.add_argument('--save_interval', type=int, default=1, help='freq for saving model')
    # model parameters (program_size / max_param defaults are derived from
    # the dataset's label configuration: stop_id and max_param)
    parser.add_argument('--program_size', type=int, default=stop_id-1, help='number of programs')
    parser.add_argument('--input_encoding_size', type=int, default=128, help='dim of input encoding')
    parser.add_argument('--program_vector_size', type=int, default=128, help='dim of program encoding')
    parser.add_argument('--nc', type=int, default=2, help='number of output channels')
    parser.add_argument('--rnn_size', type=int, default=128, help='core dim of aggregation LSTM')
    parser.add_argument('--num_layers', type=int, default=1, help='number of LSTM layers')
    parser.add_argument('--drop_prob_lm', type=float, default=0, help='dropout prob of LSTM')
    parser.add_argument('--seq_length', type=int, default=3, help='sequence length')
    parser.add_argument('--max_param', type=int, default=max_param-1, help='maximum number of parameters')
    # data parameter
    parser.add_argument('--batch_size', type=int, default=64, help='batch size of training and validating')
    parser.add_argument('--num_workers', type=int, default=8, help='num of threads for data loader')
    parser.add_argument('--train_file', type=str, default='./data/train_blocks.h5', help='path to training file')
    parser.add_argument('--val_file', type=str, default='./data/val_blocks.h5', help='path to val file')
    parser.add_argument('--model_name', type=str, default='program_executor', help='folder name to save model')
    # weighted loss
    parser.add_argument('--n_weight', type=int, default=1, help='weight for negative voxels')
    parser.add_argument('--p_weight', type=int, default=5, help='weight for positive voxels')
    # randomization file for validation
    parser.add_argument('--rand1', type=str, default='./data/rand1.npy', help='directory to rand file 1')
    parser.add_argument('--rand2', type=str, default='./data/rand2.npy', help='directory to rand file 2')
    parser.add_argument('--rand3', type=str, default='./data/rand3.npy', help='directory to rand file 3')
    return parser
def parse():
    """Parse the CLI arguments from get_parser() and derive extra fields:
    the lr-decay epoch list, the checkpoint folder, and CUDA availability."""
    parser = get_parser()
    opt = parser.parse_args()
    # '20,25' -> [20, 25]
    iterations = opt.lr_decay_epochs.split(',')
    opt.lr_decay_epochs = list([])
    for it in iterations:
        opt.lr_decay_epochs.append(int(it))
    # checkpoint directory derived from the model name
    opt.save_folder = os.path.join('./model', 'ckpts_{}'.format(opt.model_name))
    opt.is_cuda = torch.cuda.is_available()
    opt.num_gpu = torch.cuda.device_count()
    return opt
if __name__ == '__main__':
    # Smoke test: parse the CLI arguments and echo them back.
    opt = parse()
    print('===== arguments: training program executor =====')
    for key, val in vars(opt).items():
        print("{:20} {}".format(key, val))
| 389 | 0 | 23 |
6877dcd0c8c82bb4b4b41cce88c1868d8a6f7a56 | 339 | py | Python | links/admin.py | n2o/dpb | 9e44ef91dc25782a12150e1001983aeee62bc566 | [
"MIT"
] | 3 | 2020-11-05T10:09:04.000Z | 2021-03-13T11:27:05.000Z | links/admin.py | n2o/dpb | 9e44ef91dc25782a12150e1001983aeee62bc566 | [
"MIT"
] | 31 | 2015-07-26T13:53:26.000Z | 2020-09-28T06:08:03.000Z | links/admin.py | n2o/dpb | 9e44ef91dc25782a12150e1001983aeee62bc566 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Link, LinkCategory
from dpb.admin import PageDownAdmin
admin.site.register(Link, LinkAdmin)
admin.site.register(LinkCategory)
| 22.6 | 60 | 0.740413 | from django.contrib import admin
from .models import Link, LinkCategory
from dpb.admin import PageDownAdmin
class LinkAdmin(PageDownAdmin):
    """Admin configuration for Link entries."""
    # Columns shown in the change list.
    list_display = ('title', 'state', 'category', 'website')
    # Sidebar filter and searchable fields.
    list_filter = ('category',)
    search_fields = ['title']
admin.site.register(Link, LinkAdmin)
admin.site.register(LinkCategory)
| 0 | 133 | 23 |
cc8442a5c0c0fcab704e88eeb677b48927c7ac7a | 7,884 | py | Python | KalmanMachine/KDataGenerator.py | marc-h-lambert/L-RVGA | e7cd0c9745c87fb68828f28b1856a9616da933b1 | [
"CC0-1.0"
] | null | null | null | KalmanMachine/KDataGenerator.py | marc-h-lambert/L-RVGA | e7cd0c9745c87fb68828f28b1856a9616da933b1 | [
"CC0-1.0"
] | null | null | null | KalmanMachine/KDataGenerator.py | marc-h-lambert/L-RVGA | e7cd0c9745c87fb68828f28b1856a9616da933b1 | [
"CC0-1.0"
] | null | null | null | ###################################################################################
# THE KALMAN MACHINE LIBRARY #
# Code supported by Marc Lambert #
###################################################################################
# Generate N synthetic noisy observations in dimension d for : #
# - the linear regression problem (with Gaussian inputs and an ouput noise) #
# - the logistic regression problem (with two Gaussian inputs for Y=0 and Y=1) #
# The Gaussian covariance on inputs are parametrized by #
# c, scale, rotate and normalize #
###################################################################################
import numpy.linalg as LA
import numpy as np
from scipy.stats import special_ortho_group
from .KUtils import graphix,sigmoid
import math
from mpl_toolkits.mplot3d import Axes3D
| 37.542857 | 159 | 0.581558 | ###################################################################################
# THE KALMAN MACHINE LIBRARY #
# Code supported by Marc Lambert #
###################################################################################
# Generate N synthetic noisy observations in dimension d for : #
# - the linear regression problem (with Gaussian inputs and an ouput noise) #
# - the logistic regression problem (with two Gaussian inputs for Y=0 and Y=1) #
# The Gaussian covariance on inputs are parametrized by #
# c, scale, rotate and normalize #
###################################################################################
import numpy.linalg as LA
import numpy as np
from scipy.stats import special_ortho_group
from .KUtils import graphix,sigmoid
import math
class observations(object):
    """Base class for synthetic observation generators.

    Stores the generation parameters and builds the input covariance
    matrix; subclasses generate the actual (inputs, outputs) pairs and
    implement the `datas` / `optim` / `covInputs` properties.
    """

    def __init__(self, N, d, c, scale, rotate, normalize, seed):
        self._N = N                  # the number of observations
        self._d = d                  # the dimension of inputs
        self._c = c                  # drives the covariance condition number
        self._scale = scale          # the inputs scale (1 by default)
        self._rotate = rotate        # rotate the input covariance if True
        self._normalize = normalize  # normalize the covariance spectrum if True
        self._seed = seed            # random seed (to reproduce results)

    @property
    def N(self):
        """Number of observations."""
        return self._N

    @property
    def d(self):
        """Dimension of the inputs."""
        return self._d

    def covariance(self, normalize=True):
        """Build the input covariance: a power-law spectrum 1/k**c scaled
        by scale**2, optionally normalized, optionally rotated by a random
        special-orthogonal matrix (seeded for reproducibility)."""
        spectrum = (1 / np.arange(1, self._d + 1) ** self._c) * self._scale ** 2
        if normalize:
            spectrum = spectrum / LA.norm(spectrum) ** 2
        cov = np.diag(spectrum)
        if self._d > 1 and self._rotate:
            np.random.seed(self._seed)
            rotation = special_ortho_group.rvs(dim=self._d)
            cov = np.transpose(rotation).dot(cov).dot(rotation)
        return cov

    @property
    def datas(self):
        # abstract: (outputs, inputs) supplied by subclasses
        pass

    @property
    def optim(self):
        # abstract: ground-truth parameter supplied by subclasses
        pass

    @property
    def covInputs(self):
        # abstract: generating covariance supplied by subclasses
        pass
from mpl_toolkits.mplot3d import Axes3D
class LinearRegObservations(observations):
    """Synthetic observations for linear regression: zero-mean Gaussian
    inputs with the parametrised covariance and outputs
    Y = X . thetaOpt (+ Gaussian noise of std *sigma* when sigma > 0)."""

    def __init__(self, sigma, N, d, c, seed, scale=1, rotate=True, normalize=False):
        super().__init__(N, d, c, scale, rotate, normalize, seed)
        self._sigma = sigma  # the outputs noise
        self._CovInputs = self.covariance(normalize=self._normalize)
        self._meanInputs = np.zeros((d,))
        # generate the inputs
        np.random.seed(seed)
        X = np.random.multivariate_normal(self._meanInputs, self._CovInputs, (N))
        # if self._normalize:
        #     Xnorms=LA.norm(X,axis=1)
        #     X=X/np.mean(Xnorms)
        # --> better to normalize before in covariance generation
        self._inputs = X
        # generate a random optimal of norm 1 (ie outputs are normalized
        # if inputs are normalized)
        np.random.seed(seed)
        theta = np.random.uniform(-1, 1, d)
        self._thetaOpt = theta / LA.norm(theta)
        # generate the outputs
        Y = X.dot(self._thetaOpt)
        if self._sigma > 0:  # if sigma <= 0 --> no noise model
            np.random.seed(seed)
            B = np.random.normal(0, self._sigma, N).reshape(N,)
            Y = Y + B
        self._outputs = Y

    @property
    def datas(self):
        """(outputs, inputs) pair."""
        return self._outputs, self._inputs

    @property
    def optim(self):
        """Ground-truth parameter vector (unit norm)."""
        return self._thetaOpt

    @property
    def covInputs(self):
        """Covariance used to generate the inputs."""
        return self._CovInputs

    def plot(self, ax):
        """Scatter the first two input dimensions coloured by output value
        on a 3D axis viewed from above (top view)."""
        Y = self._outputs
        X = self._inputs
        N, d = X.shape
        # plot cloud point
        Y = Y.reshape(N, 1)
        ax.view_init(90, -90)
        ax.set_zticks([])
        ax.scatter(X[:, 0], X[:, 1], Y, c=Y.reshape(N,), cmap='jet', marker='.', s=20)
        ax.set_title('outputs value in function of inputs')
class LogisticRegObservations(observations):
    """Synthetic observations for logistic regression: two Gaussian
    clusters (labels 0 and 1) sharing one covariance, with means
    *meansShift* apart along a random unit direction; _thetaOpt holds the
    optimal separating direction for this equal-covariance model."""

    def __init__(self, meansShift, N, d, c, seed, scale=1, rotate=True, normalize=False):
        super().__init__(N, d, c, scale, rotate, normalize, seed)
        self._meansShift = meansShift  # the distance between the means
        self._CovInputs = self.covariance(normalize=self._normalize)
        # we normalize the means: random unit direction, +/- meansShift/2
        np.random.seed(seed)
        mean_dir = np.random.rand(d,)
        theta = mean_dir / LA.norm(mean_dir)
        self._meanInputs0 = theta * self._meansShift / 2
        self._meanInputs1 = -theta * self._meansShift / 2
        invCov = LA.inv(self._CovInputs)
        # gamma=0.5*self.__meanInputs0.T.dot(invCov).dot(self.__meanInputs0)-0.5*self.__meanInputs1.T.dot(invCov).dot(self.__meanInputs1)
        # print('gamma=(must be 0)',gamma)
        # optimal direction for two Gaussians with equal covariance
        self._thetaOpt = invCov.dot(self._meanInputs1 - self._meanInputs0)
        # generate the inputs (one seeded draw per cluster)
        np.random.seed(seed)
        X0 = np.random.multivariate_normal(self._meanInputs0, self._CovInputs, int(N/2))
        # if normalize: (not used, it is equivalent to normalize Cov)
        #     X0=self.__meanInputs0+(X0-self.__meanInputs0)/LA.norm(np.std(X0,axis=0))
        np.random.seed(seed+1)
        X1 = np.random.multivariate_normal(self._meanInputs1, self._CovInputs, int(N/2))
        # if normalize: (not used, it is equivalent to normalize Cov)
        #     X1=self.__meanInputs1+(X1-self.__meanInputs1)/LA.norm(np.std(X1,axis=0))
        X = np.concatenate((X0, X1))
        # generate the outputs
        Y0 = np.ones((int(N/2), 1)) * 0
        Y1 = np.ones((int(N/2), 1)) * 1
        Y = np.concatenate((Y0, Y1))
        # shuffle inputs and labels jointly
        DataSet = list(zip(Y, X))
        np.random.shuffle(DataSet)
        Y, X = zip(*DataSet)
        self._outputs, self._inputs = np.array(Y), np.array(X)

    def plot(self, ax, plotcov=False, plotNormal=False):
        """Scatter the two classes in the first two input dimensions;
        optionally overlay empirical covariance ellipsoids and the
        separator/normal of the optimal parameter."""
        Y = self._outputs
        X = self._inputs
        N, d = X.shape
        # plot cloud point
        Y = Y.reshape(N, 1)
        ax.scatter(X[np.where(Y==0)[0], 0], X[np.where(Y==0)[0], 1])
        ax.scatter(X[np.where(Y==1)[0], 0], X[np.where(Y==1)[0], 1])
        # plot ellipsoids
        if plotcov:
            X0 = X[np.where(Y==0)[0]]
            Cov = np.cov(X0.T)
            graphix.plot_ellipsoid2d(ax, self._meanInputs0[0:2], Cov[0:2,0:2], linestyle='-', linewidth=2, label='Covariance after normalization')
            # graphix.plot_ellipsoid2d(ax,RegObs2.meanInputs0[0:2],RegObs2.covInputs[0:2,0:2],linestyle='-.',linewidth=2,label='Covariance used for generation')
            X1 = X[np.where(Y==1)[0]]
            Cov = np.cov(X1.T)
            graphix.plot_ellipsoid2d(ax, self._meanInputs1[0:2], Cov[0:2,0:2], linestyle='-', linewidth=2)
            # graphix.plot_ellipsoid2d(ax,RegObs2.meanInputs1[0:2],RegObs2.covInputs[0:2,0:2],linestyle='-.',linewidth=2)
        if plotNormal:
            # plot separator and normal (optimal) --> the norm of the vector show the confidence of classification
            x = np.arange(-1/math.sqrt(d), 1/math.sqrt(d), 0.001)
            y = -self._thetaOpt[0]/self._thetaOpt[1]*x
            ax.plot(x, y, 'b', label='separator', linewidth=2, markeredgewidth=0.1, markeredgecolor='bk')
            ax.arrow(0, 0, self._thetaOpt[0], self._thetaOpt[1], width=0.1, length_includes_head=True, label='Theta')

    def plotOutputs(self, ax):
        """Histogram of predicted class-1 probabilities under the
        normalised optimal parameter."""
        theta = self._thetaOpt / LA.norm(self._thetaOpt)
        MU = sigmoid(self._inputs.dot(theta))
        ax.hist(MU, 50)

    @property
    def datas(self):
        """(labels, inputs) pair."""
        return self._outputs, self._inputs

    @property
    def optim(self):
        """Optimal separating parameter vector."""
        return self._thetaOpt

    @property
    def covInputs(self):
        """Covariance shared by both clusters."""
        return self._CovInputs

    @property
    def meanInputs0(self):
        """Mean of the class-0 cluster."""
        return self._meanInputs0

    @property
    def meanInputs1(self):
        """Mean of the class-1 cluster."""
        return self._meanInputs1
| 5,906 | 854 | 72 |
63775f782cb8e7de2343442b499ed0f15ca44db8 | 1,890 | py | Python | sysupdate.py | mtibbett67/sysupdate | 28d0e78d74f18184efdc54d2d4ec62dd16b351f0 | [
"MIT"
] | null | null | null | sysupdate.py | mtibbett67/sysupdate | 28d0e78d74f18184efdc54d2d4ec62dd16b351f0 | [
"MIT"
] | null | null | null | sysupdate.py | mtibbett67/sysupdate | 28d0e78d74f18184efdc54d2d4ec62dd16b351f0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
NAME:
sysupdate.py
DESCRIPTION:
Update OS using apt
CREATED:
Tue Mar 17 22:17:50 2015
VERSION:
2
AUTHOR:
Mark Tibbett
AUTHOR_EMAIL:
mtibbett67@gmail.com
URL:
N/A
DOWNLOAD_URL:
N/A
INSTALL_REQUIRES:
[]
PACKAGES:
[]
SCRIPTS:
[]
'''
# Standard library imports
import os
import sys
import subprocess
# Related third party imports
# Local application/library specific imports
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
# Section formats
SEPARATOR = B + '=' * 80 + W
NL = '\n'
# Clear the terminal
os.system('clear')
# Check for root or sudo. Remove if not needed.
UID = os.getuid()
if UID != 0:
print R + ' [!]' + O + ' ERROR:' + G + ' sysupdate' + O + \
' must be run as ' + R + 'root' + W
# print R + ' [!]' + O + ' login as root (' + W + 'su root' + O + ') \
# or try ' + W + 'sudo ./wifite.py' + W
os.execvp('sudo', ['sudo'] + sys.argv)
else:
print NL
print G + 'You are running this script as ' + R + 'root' + W
print NL + SEPARATOR + NL
def apt(arg1, arg2):
    '''Run apt to update system.

    arg1 -- colourised status message printed before the step runs
    arg2 -- the apt-get sub-command to execute (e.g. 'update', 'upgrade')
    '''
    print arg1 + NL
    subprocess.call(['apt-get', arg2])
apt(G + 'Retrieving new lists of packages' + W, 'update')
print NL + SEPARATOR + NL
apt(G + 'Performing dist-upgrade' + W, 'dist-upgrade')
print NL + SEPARATOR + NL
apt(G + 'Performing upgrades' + W, 'upgrade')
print NL + SEPARATOR + NL
apt(G + 'Erasing downloaded archive files' + W, 'clean')
print NL + SEPARATOR + NL
apt(G + 'Erasing old downladed archive files' + W, 'autoclean')
print NL + SEPARATOR + NL
apt(G + 'Removing all unused packages' + W, 'autoremove')
print NL + SEPARATOR + NL
| 18.712871 | 73 | 0.591534 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
NAME:
sysupdate.py
DESCRIPTION:
Update OS using apt
CREATED:
Tue Mar 17 22:17:50 2015
VERSION:
2
AUTHOR:
Mark Tibbett
AUTHOR_EMAIL:
mtibbett67@gmail.com
URL:
N/A
DOWNLOAD_URL:
N/A
INSTALL_REQUIRES:
[]
PACKAGES:
[]
SCRIPTS:
[]
'''
# Standard library imports
import os
import sys
import subprocess
# Related third party imports
# Local application/library specific imports
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
# Section formats
SEPARATOR = B + '=' * 80 + W
NL = '\n'
# Clear the terminal
os.system('clear')
# Check for root or sudo. Remove if not needed.
UID = os.getuid()
if UID != 0:
print R + ' [!]' + O + ' ERROR:' + G + ' sysupdate' + O + \
' must be run as ' + R + 'root' + W
# print R + ' [!]' + O + ' login as root (' + W + 'su root' + O + ') \
# or try ' + W + 'sudo ./wifite.py' + W
os.execvp('sudo', ['sudo'] + sys.argv)
else:
print NL
print G + 'You are running this script as ' + R + 'root' + W
print NL + SEPARATOR + NL
def apt(arg1, arg2):
    '''Run apt to update system.

    arg1 -- colourised status message printed before the step runs
    arg2 -- the apt-get sub-command to execute (e.g. 'update', 'upgrade')
    '''
    print arg1 + NL
    subprocess.call(['apt-get', arg2])
apt(G + 'Retrieving new lists of packages' + W, 'update')
print NL + SEPARATOR + NL
apt(G + 'Performing dist-upgrade' + W, 'dist-upgrade')
print NL + SEPARATOR + NL
apt(G + 'Performing upgrades' + W, 'upgrade')
print NL + SEPARATOR + NL
apt(G + 'Erasing downloaded archive files' + W, 'clean')
print NL + SEPARATOR + NL
apt(G + 'Erasing old downladed archive files' + W, 'autoclean')
print NL + SEPARATOR + NL
apt(G + 'Removing all unused packages' + W, 'autoremove')
print NL + SEPARATOR + NL
| 0 | 0 | 0 |
134fbfefdb0c396b7402416c2809bf816a232596 | 4,762 | py | Python | practice/hw3/support.py | ixlan/Information-retrieval-I | 98e9cdea328e0c20e6e754ed0849b10f3d43fc3c | [
"MIT"
] | null | null | null | practice/hw3/support.py | ixlan/Information-retrieval-I | 98e9cdea328e0c20e6e754ed0849b10f3d43fc3c | [
"MIT"
] | null | null | null | practice/hw3/support.py | ixlan/Information-retrieval-I | 98e9cdea328e0c20e6e754ed0849b10f3d43fc3c | [
"MIT"
] | 1 | 2018-11-30T10:52:09.000Z | 2018-11-30T10:52:09.000Z | import numpy as np
import glob
import os
from query import load_queries
import copy
import random
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['legend.loc'] = 'best'
from timeit import default_timer as timer
# notice that this is a modified version of NDCG with relative normalization
# we score documents in the collection
# then sort by the score
# and return back the actual relevance list
| 26.752809 | 94 | 0.606468 | import numpy as np
import glob
import os
from query import load_queries
import copy
import random
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['legend.loc'] = 'best'
from timeit import default_timer as timer
# notice that this is a modified version of NDCG with relative normalization
class NDCG():
    """Normalised Discounted Cumulative Gain with *relative* normalisation:
    the ideal DCG is computed from the number of maximally relevant
    documents actually available (max_c), not from a fixed cutoff."""

    # shared cache of normalisation constants: {list_length: {max_c: Z}}
    __cache = {}
    __max_rel = None

    def __init__(self, max_rel):
        self.__max_rel = max_rel  # highest possible relevance grade

    # dc: document collection returned by a query, each document being
    # represented by its relevance, e.g. [1, 4, 3, 2]
    # max_c: number of maximally relevant documents (size of the ideal
    # prefix used for normalisation)
    def run(self, dc, max_c):
        k = len(dc)
        max_c = int(max_c)  # making sure that max_c is int
        if max_c == 0:
            return 0  # nothing relevant -> NDCG is 0 by convention
        Z = self.__getNorm(k, max_c)
        res = 0
        for r in range(k):
            res += self.__score(dc[r], r + 1)
        return Z * res

    # computes ndcg on multiple collections of documents;
    # returns an array of values
    def runOnCol(self, dcs):
        n = len(dcs)
        res = np.zeros(n)
        for i in range(n):
            # BUG FIX: run() requires max_c; it was previously called with
            # a single argument and raised a TypeError.  With 0/1
            # relevances the count of relevant documents is their sum.
            res[i] = self.run(dcs[i], int(np.sum(dcs[i])))
        return res

    # k: length of the returned list; max_c: number of maximum relevances
    def __getNorm(self, k, max_c):
        if k in self.__cache and max_c in self.__cache[k]:
            return self.__cache[k][max_c]
        Z = 0
        for r in range(1, max_c + 1):
            Z += self.__score(self.__max_rel, r)
        Z = 1 / Z
        # FIX: update the per-length dict instead of replacing it, so
        # previously cached (k, max_c') entries survive.
        self.__cache.setdefault(k, {})[max_c] = Z
        return Z

    def __score(self, rel, rank):
        # gain 2^rel - 1, logarithmic position discount
        return (math.pow(2, rel) - 1) / math.log(1 + rank, 2)
# we score documents in the collection
# then sort by the score
# and return back the actual relevance list
def getRankedList(model, query, justRel=True):
    """Score every document of *query* with *model* and rank by score.

    Returns the relevance labels in descending-score order, or, when
    justRel is False, a (doc_ids, relevances) tuple in that order.
    """
    doc_scores = model.score(query)
    relevances = query.get_labels()
    if justRel:
        # sort (score, label) pairs by descending score, keep the labels
        ranked = sorted(zip(doc_scores, relevances), reverse=True)
        return [label for (_, label) in ranked]
    indexed = zip(doc_scores, relevances, range(len(relevances)))
    ranked = sorted(indexed, reverse=True, key=lambda triple: triple[0])
    doc_ids = [triple[2] for triple in ranked]
    ranked_rels = [triple[1] for triple in ranked]
    return (doc_ids, ranked_rels)
def sigmoid(z):
    """Logistic function 1/(1+exp(-z)), saturated outside [-6, 6] to
    avoid needless exp() work and overflow."""
    if z < -6:
        return 0.0
    if z > 6:
        return 1.0
    return 1 / (1 + np.exp(-z))
def get_immediate_subdirectories(a_dir):
    """Return the names of the direct subdirectories of *a_dir*."""
    entries = os.listdir(a_dir)
    return [entry for entry in entries
            if os.path.isdir(os.path.join(a_dir, entry))]
class CrossFold():
    """K-fold cross-validation driver: trains every model on each fold's
    train split and evaluates mean NDCG@10 on the test split."""

    models = None  # a list of models
    ndcg = None    # for evaluation

    # models: a list of models
    def __init__(self, models):
        self.models = models
        self.ndcg = NDCG(1)  # binary relevance -> max grade 1

    def run(self, mainFolder, epochs):
        """Cross-validate over every fold directory under *mainFolder*.

        Returns ([mean NDCG per model], [total training seconds per model]).
        """
        folders = get_immediate_subdirectories(mainFolder)
        # folders = random.sample(get_immediate_subdirectories(mainFolder),2)
        ndcgs = [[] for _ in range(len(self.models))]
        elapsed = np.zeros(len(self.models))  # for timing
        for i, folder in enumerate(folders):
            print("fold " + str(i + 1))
            trainFile = mainFolder + folder + "/train.txt"
            train_queries = load_queries(trainFile, 64)
            testFile = mainFolder + folder + "/test.txt"
            testQueries = load_queries(testFile, 64)
            # FIX: the inner loop previously rebound the outer fold index
            # `i`; use a dedicated index for the model loop.
            for m, model in enumerate(self.models):
                now = timer()
                model.train_with_queries(train_queries, epochs)
                elapsed[m] += timer() - now
                # evaluation
                ndcgs[m] += self.__evalaute(testQueries, model)
        return ([np.mean(n) for n in ndcgs], elapsed)

    # returns the list of per-query NDCG@10 values
    def __evalaute(self, queries, model):
        ndcgs = []
        for q in queries:
            l = getRankedList(model, q)[:10]
            ndcgs.append(self.ndcg.run(l, max_c=np.sum(l)))
        return ndcgs

    # just to test models on a single fold
    def OneFoldTest(self, folder, model, epochs):
        """Train *model* on one fold and return the mean NDCG over the
        fold's test queries (full ranked list, no @10 cutoff)."""
        testFile = folder + "/test.txt"
        testQueries = load_queries(testFile, 64)
        trainFile = folder + "/train.txt"
        trainQueries = load_queries(trainFile, 64)
        model.train_with_queries(trainQueries, epochs)
        ndcgs = []
        for q in testQueries:
            # BUG FIX: NDCG.run requires max_c; it was previously called
            # with a single argument and raised a TypeError.
            ranked = getRankedList(model, q)
            ndcgs.append(self.ndcg.run(ranked, max_c=np.sum(ranked)))
        return (np.mean(ndcgs))
def plotBars(bars, x_labels, y_label, title):
    """Render a simple red bar chart of *bars* with the given axis labels
    and save it as '<title_with_underscores>.jpg'."""
    ind = np.arange(len(bars))  # one bar position per value
    width = 0.2
    fig, ax = plt.subplots()
    rec1 = ax.bar(ind, bars, width, color='r', align='center')
    ax.set_xticks(ind)
    plt.ylabel(y_label)
    ax.set_xticklabels(x_labels)
    # title doubles as the output file name (lower-cased, underscored)
    fig.suptitle(title.lower().replace(" ", "_"), fontsize=20)
    plt.grid(True)
    plt.legend()
    plt.savefig(title.lower().replace(" ", "_") + '.jpg')
| 3,372 | 812 | 136 |
10da5dd6dbab70fa9e387e5ac878891a91b320b5 | 414 | py | Python | django-react/django_react/todos/models.py | bogdan-veliscu/react-stack-compare | e7d2020ec8177e12d1b1c41bfaed96c2ee87490c | [
"Apache-2.0"
] | null | null | null | django-react/django_react/todos/models.py | bogdan-veliscu/react-stack-compare | e7d2020ec8177e12d1b1c41bfaed96c2ee87490c | [
"Apache-2.0"
] | 7 | 2021-10-06T14:03:21.000Z | 2022-02-27T02:40:18.000Z | django-react/django_react/todos/models.py | bogdan-veliscu/react-stack-compare | e7d2020ec8177e12d1b1c41bfaed96c2ee87490c | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.conf import settings
from django.db import models
# Create your models here.
from django.contrib.auth import get_user_model
User = get_user_model()
| 25.875 | 56 | 0.719807 | from django.db import models
from django.conf import settings
from django.db import models
# Create your models here.
from django.contrib.auth import get_user_model
User = get_user_model()
class Todo(models.Model):
    """A single todo item owned by a user."""
    # Owner of the item; deleting the user cascades to their todos.
    user = models.ForeignKey(User, related_name="todos",
                             on_delete=models.CASCADE)
    # Short description of the task.
    title = models.CharField(max_length=100)
    # Done flag (no default; must be supplied on creation).
    completed = models.BooleanField()
| 0 | 199 | 23 |
ce2228693b2b5320ab17bf93618d033e3fc599de | 1,618 | py | Python | vericep/payment/migrations/0002_auto_20200827_1759.py | SefaAkdeniz/Vericep-Graduation-Project-Backend-Django-MySQL | e8393ed9137811e5996b3b30faa4293a9aeb338a | [
"Apache-2.0"
] | null | null | null | vericep/payment/migrations/0002_auto_20200827_1759.py | SefaAkdeniz/Vericep-Graduation-Project-Backend-Django-MySQL | e8393ed9137811e5996b3b30faa4293a9aeb338a | [
"Apache-2.0"
] | 3 | 2021-04-08T19:47:06.000Z | 2021-06-10T20:06:49.000Z | vericep/payment/migrations/0002_auto_20200827_1759.py | SefaAkdeniz/Vericep-Graduation-Project-Backend-Django-MySQL | e8393ed9137811e5996b3b30faa4293a9aeb338a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.8 on 2020-08-27 14:59
from django.db import migrations, models
| 39.463415 | 260 | 0.553152 | # Generated by Django 3.0.8 on 2020-08-27 14:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: sets Turkish verbose names for the
    payment models and tightens several field definitions."""

    # Must be applied on top of the initial payment migration.
    dependencies = [
        ('payment', '0001_initial'),
    ]

    operations = [
        # Human-readable admin names (Turkish) for the three models.
        migrations.AlterModelOptions(
            name='balance',
            options={'verbose_name': 'Bakiyeler', 'verbose_name_plural': 'Bakiyeler'},
        ),
        migrations.AlterModelOptions(
            name='creditcard',
            options={'verbose_name': 'Kredi Kartları', 'verbose_name_plural': 'Kredi Kartları'},
        ),
        migrations.AlterModelOptions(
            name='pastpayments',
            options={'verbose_name': 'Geçmiş Ödemeler', 'verbose_name_plural': 'Geçmiş Ödemeler'},
        ),
        # NOTE: 'amaount' matches the (misspelled) model field name and
        # must not be corrected here.
        migrations.AlterField(
            model_name='balance',
            name='amaount',
            field=models.DecimalField(decimal_places=2, max_digits=11, verbose_name='Bakiye'),
        ),
        # Expiry month/year become constrained choice fields.
        migrations.AlterField(
            model_name='creditcard',
            name='expiration_date_month',
            field=models.CharField(choices=[('1', '01'), ('2', '02'), ('3', '03'), ('4', '04'), ('5', '05'), ('6', '06'), ('7', '07'), ('8', '08'), ('9', '09'), ('10', '10'), ('11', '11'), ('12', '12')], max_length=2, verbose_name='Son Geçerlilik Tarih (Ay)'),
        ),
        migrations.AlterField(
            model_name='creditcard',
            name='expiration_date_year',
            field=models.CharField(choices=[('20', '20'), ('21', '21'), ('22', '22'), ('23', '23'), ('24', '24')], max_length=2, verbose_name='Son Geçerlilik Tarih (Yıl)'),
        ),
    ]
9992cd3dda49fe83ec51824d731c774b77b1c5e0 | 4,531 | py | Python | src/SubClassReasoner.py | Weissger/TST.SubClassReasoner | 8fa97ff70136f05594b9eb685df2470ed43b6fd7 | [
"MIT"
] | null | null | null | src/SubClassReasoner.py | Weissger/TST.SubClassReasoner | 8fa97ff70136f05594b9eb685df2470ed43b6fd7 | [
"MIT"
] | null | null | null | src/SubClassReasoner.py | Weissger/TST.SubClassReasoner | 8fa97ff70136f05594b9eb685df2470ed43b6fd7 | [
"MIT"
] | null | null | null | __author__ = 'tmy'
import os
from datetime import datetime
from multiprocessing import Process
from .ProcessManager.ProcessManager import ProcessManager, OccupiedError
from .NTripleLineParser.src.NTripleLineParser import NTripleLineParser
from .SparqlInterface.src import ClientFactory
from .Materializer.Materializer import materialize_to_file, materialize_to_service
from .Utilities.Logger import log
from .Utilities.Utilities import log_progress
import time
| 39.745614 | 121 | 0.556389 | __author__ = 'tmy'
import os
from datetime import datetime
from multiprocessing import Process
from .ProcessManager.ProcessManager import ProcessManager, OccupiedError
from .NTripleLineParser.src.NTripleLineParser import NTripleLineParser
from .SparqlInterface.src import ClientFactory
from .Materializer.Materializer import materialize_to_file, materialize_to_service
from .Utilities.Logger import log
from .Utilities.Utilities import log_progress
import time
class SubClassReasoner(object):
    """Materialize rdfs:subClassOf inferences for RDF types.

    Types to reason over come either from an N-Triples file or from a
    SPARQL service; the actual materialization is delegated to daemon
    worker processes (optionally throttled by a ProcessManager slot pool)
    that write results to a file or back to the service.
    """

    def __init__(self, server, user, password, prop_path, n_processes, log_level):
        """Set up the SPARQL client, line parser and optional process pool.

        :param server: SPARQL endpoint handed to ClientFactory.make_client.
        :param user: service user name.
        :param password: service password.
        :param prop_path: path to a properties file for the client.
        :param n_processes: maximum number of concurrent worker processes;
            a falsy value disables throttling (processes run unmanaged).
        :param log_level: logging level applied to the module logger.
        """
        log.setLevel(log_level)
        self.prop_path = prop_path
        # N-Triples fields are space-separated: <subject> <predicate> <object> .
        self.nt_parser = NTripleLineParser(" ")
        if n_processes:
            # Created only when throttling was requested; the other methods
            # detect its presence via hasattr() checks.
            self.processManager = ProcessManager(n_processes)
        self.__server = ClientFactory.make_client(server=server, user=user, password=password, prop_path=prop_path)

    def reason(self, in_file=None, target="./reasoned/", offset=0):
        """Run the reasoning pass and log the elapsed wall-clock time.

        :param in_file: N-Triples input file; when None, types are fetched
            from the SPARQL service instead.
        :param target: output directory for the materialized triples; an
            empty string means "write back to the service" (no files).
        :param offset: number of already-processed entries to skip (resume).
        """
        if target == "":
            # No file target: results are materialized back to the service.
            target = None
        else:
            # Make sure the output directory exists.
            if not os.path.exists(target):
                os.makedirs(target)
        cur_time = datetime.now()
        if in_file:
            log.info("Reasoning from file")
            self.__reason_from_file(in_file, target, offset=offset)
        else:
            log.info("Reasoning from service")
            self.__reason_from_service(target, offset=offset)
        log.info("Done in: " + str(datetime.now() - cur_time))

    def __reason_from_service(self, target, offset=0):
        """Page through all subclassed types on the service and materialize each.

        Pages of `step` types are fetched with an increasing OFFSET until an
        empty page signals that all types were seen.
        """
        target_file = None
        step = 100000
        while True:
            query = """
                SELECT distinct ?type
                WHERE {{?type rdfs:subClassOf ?x}}
                ORDER BY ?type
                LIMIT {}
                OFFSET {}
                """.format(step, offset)
            log.debug("Running query: {}".format(query))
            rdf_classes = self.__server.query(query)
            log.debug("Number of Query results: {}".format(len(rdf_classes)))
            if len(rdf_classes) < 1:
                # Empty page: every type has been processed.
                break
            log.debug("Step size: {} Offset: {} Starting_type: {}".format(step, offset, rdf_classes[0]["type"]["value"]))
            for t in rdf_classes:
                # Advance the offset per processed binding so resuming works.
                offset += 1
                log_progress(offset, 100)
                t = t["type"]["value"]
                if target:
                    if not target_file:
                        # Derive the output file name from the server URL once.
                        target_file = target + str(self.__server.server).split("/")[-2] + str("_reasoned.nt")
                    self.__spawn_daemon(materialize_to_file, dict(rdf_type=t, target=target_file,
                                                                  server=self.__server))
                else:
                    self.__spawn_daemon(materialize_to_service, dict(rdf_type=t, server=self.__server))

    def __reason_from_file(self, f, target, offset=0):
        """Materialize once per distinct consecutive subject found in file `f`.

        Lines without a parseable subject bump `offset` so that line-number
        based resuming stays consistent.
        """
        target_file = None
        # Iterate through the file line by line.
        with open(f) as input_file:
            tmp_type = ""
            for line_num, line in enumerate(input_file):
                t = self.nt_parser.get_subject(line)
                if not t:
                    offset += 1
                    continue
                if line_num < offset:
                    # Skip lines already handled in a previous run.
                    continue
                log_progress(line_num, 100)
                if not t == tmp_type:
                    # New subject encountered: spawn a materialization job.
                    if target:
                        if not target_file:
                            target_file = target + str(self.__server.server).split("/")[-2] + str("_reasoned.nt")
                        self.__spawn_daemon(materialize_to_file, dict(rdf_type=t, target=target_file,
                                                                      server=self.__server))
                    else:
                        self.__spawn_daemon(materialize_to_service, dict(rdf_type=t, server=self.__server))
                    tmp_type = t

    def __spawn_daemon(self, target, kwargs):
        """Start `target(**kwargs)` in a daemon process, honoring the slot pool.

        Returns 2 when the ProcessManager unexpectedly rejects the process.
        """
        # Todo Event based?
        # Poll every 0.1 seconds until a process slot frees up.
        if hasattr(self, "processManager"):
            while not self.processManager.has_free_process_slot():
                time.sleep(0.1)
        p = Process(target=target, kwargs=kwargs)
        p.daemon = True
        if hasattr(self, "processManager"):
            try:
                self.processManager.add(p)
            except OccupiedError as e:
                # Should not happen after the wait above; log and bail out.
                log.critical(e)
                return 2
            else:
                p.start()
        else:
            p.start()
| 3,900 | 10 | 157 |
b847e8056aeb9aad38178a541f8bc5e9d3a88acf | 2,600 | py | Python | pyspider/database/mongodb/resultdb.py | mayk93/pyspider | 87a1f9227ace2e577492e6ec3ecde0e2d8575411 | [
"Apache-2.0"
] | 5 | 2015-03-31T13:25:25.000Z | 2016-03-14T11:17:02.000Z | pyspider/database/mongodb/resultdb.py | mayk93/pyspider | 87a1f9227ace2e577492e6ec3ecde0e2d8575411 | [
"Apache-2.0"
] | null | null | null | pyspider/database/mongodb/resultdb.py | mayk93/pyspider | 87a1f9227ace2e577492e6ec3ecde0e2d8575411 | [
"Apache-2.0"
] | 1 | 2016-02-17T23:12:47.000Z | 2016-02-17T23:12:47.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-10-13 22:18:36
import json
import time
from pymongo import MongoClient
from pyspider.database.base.resultdb import ResultDB as BaseResultDB
from .mongodbbase import SplitTableMixin
| 32.911392 | 96 | 0.615769 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-10-13 22:18:36
import json
import time
from pymongo import MongoClient
from pyspider.database.base.resultdb import ResultDB as BaseResultDB
from .mongodbbase import SplitTableMixin
class ResultDB(SplitTableMixin, BaseResultDB):
    """MongoDB-backed result store: one collection per project.

    Result payloads are stored as JSON strings (see _stringify/_parse);
    collection names come from SplitTableMixin._collection_name.
    """

    # No prefix: the collection name is the project name itself.
    collection_prefix = ''

    def __init__(self, url, database='resultdb'):
        """Connect to MongoDB and index every known project collection.

        :param url: MongoDB connection URL.
        :param database: database name holding the result collections.
        """
        self.conn = MongoClient(url)
        # Ping the server so a bad URL fails fast instead of on first use.
        self.conn.admin.command("ismaster")
        self.database = self.conn[database]
        self.projects = set()
        self._list_project()
        for project in self.projects:
            collection_name = self._collection_name(project)
            # Lookups by taskid dominate; make sure they are indexed.
            self.database[collection_name].ensure_index('taskid')

    def _parse(self, data):
        """Convert a raw document for callers: stringify _id, decode result."""
        data['_id'] = str(data['_id'])
        if 'result' in data:
            data['result'] = json.loads(data['result'])
        return data

    def _stringify(self, data):
        """Encode the result payload to JSON for storage (in place)."""
        if 'result' in data:
            data['result'] = json.dumps(data['result'])
        return data

    def save(self, project, taskid, url, result):
        """Upsert the result document for `taskid` in the project collection."""
        collection_name = self._collection_name(project)
        obj = {
            'taskid': taskid,
            'url': url,
            'result': result,
            'updatetime': time.time(),
        }
        return self.database[collection_name].update(
            {'taskid': taskid}, {"$set": self._stringify(obj)}, upsert=True
        )

    def select(self, project, fields=None, offset=0, limit=0):
        """Yield parsed result documents for `project` (generator).

        Silently yields nothing when the project is unknown even after a
        refresh of the project list.
        """
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            return
        collection_name = self._collection_name(project)
        for result in self.database[collection_name].find({}, fields, skip=offset, limit=limit):
            yield self._parse(result)

    def count(self, project):
        """Return the number of stored results, or None for unknown projects."""
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            return
        collection_name = self._collection_name(project)
        return self.database[collection_name].count()

    def get(self, project, taskid, fields=None):
        """Return the parsed result document for `taskid`, or None if absent."""
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            return
        collection_name = self._collection_name(project)
        ret = self.database[collection_name].find_one({'taskid': taskid}, fields)
        if not ret:
            return ret
        return self._parse(ret)
| 1,984 | 241 | 23 |
597f27f86a34f76d42a66e98797cbf1e12e589a5 | 932 | py | Python | tests/programs/lists/sequential_tuple.py | astraldawn/pylps | e9964a24bb38657b180d441223b4cdb9e1dadc8a | [
"MIT"
] | 1 | 2018-05-19T18:28:12.000Z | 2018-05-19T18:28:12.000Z | tests/programs/lists/sequential_tuple.py | astraldawn/pylps | e9964a24bb38657b180d441223b4cdb9e1dadc8a | [
"MIT"
] | 12 | 2018-04-26T00:58:11.000Z | 2018-05-13T22:03:39.000Z | tests/programs/lists/sequential_tuple.py | astraldawn/pylps | e9964a24bb38657b180d441223b4cdb9e1dadc8a | [
"MIT"
] | null | null | null | from pylps.core import *
from pylps.lps_data_structures import LPSTuple
initialise(max_time=5)
create_actions('show(_)', 'show_tuple(_, _)')
create_events('handle_list(_)')
create_variables('X', 'Y', 'XS')
reactive_rule(True).then(
handle_list([
('a', 1),
('b', 2),
('c', 3),
('d', 4),
]).frm(T1, T2)
)
goal(handle_list([LPSTuple((X, Y))]).frm(T1, T2)).requires(
show(X).frm(T1, T2),
show(Y).frm(T1, T2)
)
goal(handle_list([LPSTuple((X, Y)) | XS]).frm(T1, T3)).requires(
show_tuple(X, Y).frm(T1, T2),
handle_list(XS).frm(T2, T3)
)
execute(single_clause=False)
show_kb_log()
'''
actions show(_).
if true
then handle_list([a,b,c,d]) from T1 to T2.
handle_list([Single]) from T1 to T2 if show(Single) from T1 to T2.
handle_list([X|Xs]) from T1 to T3 if
show(X) from T1 to T2,
handle_list(Xs) from T2 to T3.
show(a) 1 2
show(b) 2 3
show(c) 3 4
show(d) 4 5
'''
| 18.64 | 66 | 0.613734 | from pylps.core import *
from pylps.lps_data_structures import LPSTuple
initialise(max_time=5)
create_actions('show(_)', 'show_tuple(_, _)')
create_events('handle_list(_)')
create_variables('X', 'Y', 'XS')
reactive_rule(True).then(
handle_list([
('a', 1),
('b', 2),
('c', 3),
('d', 4),
]).frm(T1, T2)
)
goal(handle_list([LPSTuple((X, Y))]).frm(T1, T2)).requires(
show(X).frm(T1, T2),
show(Y).frm(T1, T2)
)
goal(handle_list([LPSTuple((X, Y)) | XS]).frm(T1, T3)).requires(
show_tuple(X, Y).frm(T1, T2),
handle_list(XS).frm(T2, T3)
)
execute(single_clause=False)
show_kb_log()
'''
actions show(_).
if true
then handle_list([a,b,c,d]) from T1 to T2.
handle_list([Single]) from T1 to T2 if show(Single) from T1 to T2.
handle_list([X|Xs]) from T1 to T3 if
show(X) from T1 to T2,
handle_list(Xs) from T2 to T3.
show(a) 1 2
show(b) 2 3
show(c) 3 4
show(d) 4 5
'''
| 0 | 0 | 0 |
794531b10fc3263ae13bf2014886f79ce3cf7b77 | 1,156 | py | Python | interview/leet/778_Swim_in_Rising_Water.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2020-10-12T13:33:29.000Z | 2020-10-12T13:33:29.000Z | interview/leet/778_Swim_in_Rising_Water.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | null | null | null | interview/leet/778_Swim_in_Rising_Water.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2016-11-09T07:28:45.000Z | 2016-11-09T07:28:45.000Z | #!/usr/bin/env python3
# Thinking process
# The animation of spanning with colors black/white/gray
# Like the ones in princeton lecture really helped me
from heapq import heappush, heappop
sol = Solution()
grid = [[0,2],[1,3]]
grid = [[0,1,2,3,4],[24,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]
grid = [[24,1,2,3,4],[0,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]
print(sol.swimInWater(grid))
| 35.030303 | 85 | 0.467993 | #!/usr/bin/env python3
# Thinking process
# The animation of spanning with colors black/white/gray
# Like the ones in princeton lecture really helped me
from heapq import heappush, heappop
class Solution:
    """LeetCode 778 - Swim in Rising Water."""

    def swimInWater(self, grid):
        """Return the least time at which (0, 0) connects to (l-1, l-1).

        A cell is passable once the water level reaches its elevation, so
        the answer is the minimum over all paths of the maximum elevation
        on the path. Dijkstra-style best-first search: always expand the
        reachable cell with the smallest path-maximum; the first time the
        goal is popped, that path-maximum is the answer.

        :param grid: square list-of-lists of elevations (not modified).
        :return: minimum time to reach the bottom-right cell.

        Runs in O(l^2 log l). Unlike a naive version, the input grid is
        left untouched (visited cells are tracked in a separate set) and
        no debug output is produced.
        """
        l = len(grid)
        # Heap entries are (max elevation on path so far, row, col).
        heap = [(grid[0][0], 0, 0)]
        seen = {(0, 0)}
        while heap:
            elev, i, j = heappop(heap)
            if i == j == l - 1:
                return elev
            for x, y in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
                if 0 <= x < l and 0 <= y < l and (x, y) not in seen:
                    seen.add((x, y))
                    # Extending the path can only raise its maximum.
                    heappush(heap, (max(elev, grid[x][y]), x, y))
# Ad-hoc smoke test: the later assignments overwrite `grid`, so only the
# final 5x5 grid is actually run.
sol = Solution()
grid = [[0,2],[1,3]]
grid = [[0,1,2,3,4],[24,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]
grid = [[24,1,2,3,4],[0,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]
print(sol.swimInWater(grid))
| 686 | -6 | 48 |
70e6fa28f56a29cbf2f2d0d7f92baa08728776a4 | 4,987 | py | Python | dask_geomodeling/ipyleaflet_plugin.py | RichardScottOZ/dask-geomodeling | d463af5db4eccdc53be43862fd25e45f9aca4574 | [
"BSD-3-Clause"
] | 24 | 2019-10-10T21:39:36.000Z | 2022-02-20T21:06:33.000Z | dask_geomodeling/ipyleaflet_plugin.py | RichardScottOZ/dask-geomodeling | d463af5db4eccdc53be43862fd25e45f9aca4574 | [
"BSD-3-Clause"
] | 40 | 2019-09-06T09:17:09.000Z | 2022-01-31T11:12:46.000Z | dask_geomodeling/ipyleaflet_plugin.py | RichardScottOZ/dask-geomodeling | d463af5db4eccdc53be43862fd25e45f9aca4574 | [
"BSD-3-Clause"
] | 6 | 2019-10-28T14:13:26.000Z | 2022-03-09T16:02:26.000Z | from datetime import datetime
from io import BytesIO
from urllib.parse import urljoin
import numpy as np
import traitlets
from ipyleaflet import WMSLayer
from matplotlib import cm
from matplotlib.colors import Normalize
from notebook import notebookapp
from notebook.base.handlers import IPythonHandler
from notebook.utils import url_path_join
from PIL import Image
from dask_geomodeling.core import Block
class GeomodelingWMSHandler(IPythonHandler):
"""This Tornado request handler adds a WMS functionality for displaying
dask-geomodeling results in a notebook
See:
https://jupyter-notebook.readthedocs.io/en/stable/extending/handlers.html
"""
class GeomodelingLayer(WMSLayer):
"""Visualize a dask_geomodeling.RasterBlock on a ipyleaflet Map.
:param block: a dask_geomodeling.RasterBlock instance to visualize
:param url: The url of the jupyter server (e.g. https://localhost:8888)
:param style: a valid matplotlib colormap
:param vmin: the minimum value (for the colormap)
:param vmax: the maximum value (for the colormap)
Notes
-----
To use this ipyleaflet extension, you have to include this plugin into your
Jupyter Notebook server by calling::
$ jupyter notebook --NotebookApp.nbserver_extensions="{'dask_geomodeling.ipyleaflet_plugin':True}"
Or, by adding this setting to the config file in ~/.jupyter, which can be
generated by calling::
$ jupyter notebook --generate-config
This plugin extends the Jupyter notebook server with a server that responds
with PNG images for WMS requests generated by ipyleaflet.
"""
format = traitlets.Unicode("image/png").tag(sync=True, o=True)
maxcellsize = traitlets.Float(10.0).tag(sync=True, o=True)
time = traitlets.Unicode("").tag(sync=True, o=True)
vmin = traitlets.Float(0.0).tag(sync=True, o=True)
vmax = traitlets.Float(1.0).tag(sync=True, o=True)
def load_jupyter_server_extension(nb_server_app):
"""
Called when the extension is loaded.
Args:
nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance.
"""
web_app = nb_server_app.web_app
host_pattern = ".*$"
route_pattern = url_path_join(web_app.settings["base_url"], "/wms")
web_app.add_handlers(host_pattern, [(route_pattern, GeomodelingWMSHandler)])
| 35.621429 | 102 | 0.65009 | from datetime import datetime
from io import BytesIO
from urllib.parse import urljoin
import numpy as np
import traitlets
from ipyleaflet import WMSLayer
from matplotlib import cm
from matplotlib.colors import Normalize
from notebook import notebookapp
from notebook.base.handlers import IPythonHandler
from notebook.utils import url_path_join
from PIL import Image
from dask_geomodeling.core import Block
class GeomodelingWMSHandler(IPythonHandler):
    """This Tornado request handler adds a WMS functionality for displaying
    dask-geomodeling results in a notebook

    See:
    https://jupyter-notebook.readthedocs.io/en/stable/extending/handlers.html
    """

    def get(self):
        """Answer a WMS GetMap-style request with a rendered PNG.

        Expected query arguments: layers (serialized Block), styles
        (matplotlib colormap name), vmin/vmax (colormap range), format
        (must be image/png), srs, width/height, bbox, maxcellsize
        (overload guard) and an optional ISO "time".

        Responds 400 for unsupported formats or oversized requests.
        """
        # The "layers" argument carries the serialized geomodeling graph.
        block = Block.from_json(self.get_query_argument("layers"))
        style = self.get_query_argument("styles")
        vmin = float(self.get_query_argument("vmin"))
        vmax = float(self.get_query_argument("vmax"))
        format = self.get_query_argument("format")
        if format.lower() != "image/png":
            self.set_status(400)
            self.finish("Only image/png is supported")
            return
        srs = self.get_query_argument("srs")
        height = int(self.get_query_argument("height"))
        max_cell_size = float(self.get_query_argument("maxcellsize"))
        time_isoformat = self.get_query_argument("time")
        if time_isoformat:
            # Timestamps arrive in ISO-8601 with milliseconds and a Z suffix.
            time = datetime.strptime(time_isoformat, "%Y-%m-%dT%H:%M:%S.%fZ")
        else:
            time = None
        width = int(self.get_query_argument("width"))
        bbox = [float(x) for x in self.get_query_argument("bbox").split(",")]
        # Overload protection: reject requests whose cell size is too coarse.
        cell_size_x = (bbox[2] - bbox[0]) / width
        cell_size_y = (bbox[3] - bbox[1]) / height
        if cell_size_x > max_cell_size or cell_size_y > max_cell_size:
            self.set_status(400)
            self.finish("Too large area requested")
            return
        # Evaluate the block and render it through the requested colormap.
        data = block.get_data(
            mode="vals",
            bbox=bbox,
            height=height,
            width=width,
            projection=srs,
            start=time,
        )
        # Mask the nodata value so it can be rendered fully transparent.
        masked = np.ma.masked_equal(data["values"][0], data["no_data_value"])
        stream = BytesIO()
        normalized = Normalize(vmin=vmin, vmax=vmax, clip=True)(masked)
        img = cm.get_cmap(style)(normalized)
        # Zero the alpha channel wherever the data was masked.
        img[normalized.mask, 3] = 0.0
        img_uint8 = (img * 255).astype(np.uint8)
        Image.fromarray(img_uint8).save(stream, format="png")
        raw = stream.getvalue()
        self.set_header("Content-Length", len(raw))
        self.set_header("Content-Type", "image/png")
        # Aggressively disable caching so map tiles always reflect the
        # current block graph.
        self.set_header("Pragma", "no-cache")
        self.set_header(
            "Cache-Control",
            "no-store, "
            "no-cache=Set-Cookie, "
            "proxy-revalidate, "
            "max-age=0, "
            "post-check=0, pre-check=0",
        )
        self.set_header("Expires", "Wed, 2 Dec 1837 21:00:12 GMT")
        self.write(raw)
        self.finish()
class GeomodelingLayer(WMSLayer):
    """Visualize a dask_geomodeling.RasterBlock on a ipyleaflet Map.

    :param block: a dask_geomodeling.RasterBlock instance to visualize
    :param url: The url of the jupyter server (e.g. https://localhost:8888)
    :param style: a valid matplotlib colormap
    :param vmin: the minimum value (for the colormap)
    :param vmax: the maximum value (for the colormap)

    Notes
    -----
    To use this ipyleaflet extension, you have to include this plugin into your
    Jupyter Notebook server by calling::

      $ jupyter notebook --NotebookApp.nbserver_extensions="{'dask_geomodeling.ipyleaflet_plugin':True}"

    Or, by adding this setting to the config file in ~/.jupyter, which can be
    generated by calling::

      $ jupyter notebook --generate-config

    This plugin extends the Jupyter notebook server with a server that responds
    with PNG images for WMS requests generated by ipyleaflet.
    """

    # Extra WMS query parameters; o=True sends them as URL options to the
    # GeomodelingWMSHandler, sync=True keeps them in sync with the client.
    format = traitlets.Unicode("image/png").tag(sync=True, o=True)
    # Guard against oversized requests (see the handler's maxcellsize check).
    maxcellsize = traitlets.Float(10.0).tag(sync=True, o=True)
    # ISO-formatted timestamp; the empty string means "no time filter".
    time = traitlets.Unicode("").tag(sync=True, o=True)
    # Colormap normalization range forwarded to the renderer.
    vmin = traitlets.Float(0.0).tag(sync=True, o=True)
    vmax = traitlets.Float(1.0).tag(sync=True, o=True)

    def __init__(self, block, url=None, **kwargs):
        """Serialize `block` into the WMS "layers" field and point the layer
        at the plugin's /wms endpoint on the given (or detected) server."""
        if url is None:
            # No URL supplied: just take the first running notebook server.
            url = next(notebookapp.list_running_servers())["url"]
        self.layers = block.to_json()
        super().__init__(url=urljoin(url, "wms"), **kwargs)
def load_jupyter_server_extension(nb_server_app):
    """Register the WMS request handler when Jupyter loads this extension.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook
            webserver instance.
    """
    app = nb_server_app.web_app
    # Mount the handler under <base_url>/wms for any host.
    wms_route = url_path_join(app.settings["base_url"], "/wms")
    app.add_handlers(".*$", [(wms_route, GeomodelingWMSHandler)])
| 2,589 | 0 | 54 |
734dcac8001fc0cf52375eac800e4329cbad010a | 1,218 | py | Python | solutions/0530-minimum-absolute-difference-in-bst/minimum-absolute-difference-in-bst.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | solutions/0530-minimum-absolute-difference-in-bst/minimum-absolute-difference-in-bst.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | solutions/0530-minimum-absolute-difference-in-bst/minimum-absolute-difference-in-bst.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | # Given a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.
#
# Example:
#
#
# Input:
#
# 1
# \
# 3
# /
# 2
#
# Output:
# 1
#
# Explanation:
# The minimum absolute difference is 1, which is the difference between 2 and 1 (or between 2 and 3).
#
#
#
#
# Note: There are at least two nodes in this BST.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| 21 | 125 | 0.541872 | # Given a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.
#
# Example:
#
#
# Input:
#
# 1
# \
# 3
# /
# 2
#
# Output:
# 1
#
# Explanation:
# The minimum absolute difference is 1, which is the difference between 2 and 1 (or between 2 and 3).
#
#
#
#
# Note: There are at least two nodes in this BST.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # An in-order traversal of a BST visits values in ascending order, so
    # the minimum absolute difference is always between two values that are
    # consecutive in that order.
    # NOTE(review): the `root: TreeNode` annotation below references TreeNode,
    # which is only defined in the commented-out scaffold above; this file
    # relies on the judge environment providing TreeNode -- confirm.
    def findMin(self, root, result, tmp):
        """In-order walk that folds the smallest adjacent gap into result[0].

        :param root: current TreeNode (or None).
        :param result: one-element list holding the running minimum; a list
            so the recursion can mutate it in place.
        :param tmp: rolling window of the last one or two visited values;
            tmp[-1] is always the most recently visited value.
        """
        if not root:
            return None
        self.findMin(root.left, result, tmp)
        if len(tmp) > 0:
            if len(tmp) > 1:
                # Shift the window: the previous value moves to slot 0 ...
                tmp[0] = tmp[1]
                # ... and the current value becomes the newest entry.
                tmp[1] = root.val
            else:
                # Second value ever seen: the window is now full.
                tmp.append(root.val)
            # In-order values ascend, so tmp[1] - tmp[0] is non-negative.
            result[0] = min(tmp[1] - tmp[0], result[0])
        else:
            # First visited value: nothing to compare against yet.
            tmp.append(root.val)
        self.findMin(root.right, result, tmp)
        return None

    def getMinimumDifference(self, root: TreeNode) -> int:
        """Return the minimum absolute difference between any two node values.

        The problem guarantees at least two nodes, so the large sentinel is
        always replaced before being returned.
        """
        result = [9999999999999]
        self.findMin(root, result, [])
        return result[0]
| 583 | -6 | 76 |
7acc9ee1bc31164a1469a0d9996ab350cd9b5055 | 1,424 | py | Python | pshychocloud/WAPPMessageAnalyzer.py | partu18/pshychocloud | 208d8d3dd646637719f70a86f9d9e43dd2eed929 | [
"MIT"
] | null | null | null | pshychocloud/WAPPMessageAnalyzer.py | partu18/pshychocloud | 208d8d3dd646637719f70a86f9d9e43dd2eed929 | [
"MIT"
] | 2 | 2021-06-08T19:28:41.000Z | 2021-09-07T23:47:14.000Z | pshychocloud/WAPPMessageAnalyzer.py | partu18/pshychocloud | 208d8d3dd646637719f70a86f9d9e43dd2eed929 | [
"MIT"
] | null | null | null | from MessageAnalyzer import MessageAnalyzer
| 45.935484 | 89 | 0.514045 | from MessageAnalyzer import MessageAnalyzer
class WAPPMessageAnalyzer(MessageAnalyzer):
    """MessageAnalyzer specialization for WhatsApp chat export files.

    Provides the regexes the base class uses to split each exported line
    of the form "d/m/yy, hh:mm - participant: message" into participant
    and message text, and filters WhatsApp's auto-generated lines.
    """

    # e.g. "3/12/17" -- day/month order depends on the phone's locale.
    WAPP_DATE_REGEX = "[0-9]{1,2}/[0-9]{1,2}/[0-9]{1,2}"
    # e.g. "21:05".
    WAPP_TIME_REGEX = "[0-9]{1,2}:[0-9]{1,2}"
    # Captures the participant name between the timestamp dash and the colon.
    WAPP_EXTRACT_PARTICIPANT_REGEX = r"{date}\s*,\s*{time}\s*-\s*(.+?)\s*:\s*"\
        .format(
            date=WAPP_DATE_REGEX,
            time=WAPP_TIME_REGEX
        )
    # Captures the message body; the participant slot is left as a '%s'
    # placeholder to be filled in later with a concrete pattern.
    WAPP_EXTRACT_MESSAGE_REGEX = r"{date}\s*,\s*{time}\s*-\s*{participant}\s*:\s+(.+?)$"\
        .format(
            date=WAPP_DATE_REGEX,
            time=WAPP_TIME_REGEX,
            participant='%s'  # '%s' survives str.format and is filled later -- UGLY UGLY UGLY!
        )
    # Matches any participant name (anything up to the first colon).
    WAPP_PARTICIPANT_WILDCARD = "[^:]+"
    # Hook the WhatsApp-specific patterns into the base-class attributes.
    extract_message_regex = WAPP_EXTRACT_MESSAGE_REGEX
    participant_wildcard = WAPP_PARTICIPANT_WILDCARD
    extract_participant_regex = WAPP_EXTRACT_PARTICIPANT_REGEX

    def _get_clean_words_from_line(self, line):
        '''
        Remove Whatsapp automatic generated messages
        '''
        # Drop "<Archivo omitido>" lines (Spanish-locale "file omitted"
        # placeholder WhatsApp inserts for skipped attachments).
        omitted_file = '<Archivo omitido>'.lower()
        filtered_line = '' if omitted_file in line else line
        return super(WAPPMessageAnalyzer, self)._get_clean_words_from_line(filtered_line)
| 0 | 1,348 | 23 |