max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
public/algorithms/search.py | microwen/cecewsn | 1 | 12757951 | <gh_stars>1-10
# Driver script: run the compiled MATLAB ``LibrarySearch_v1`` package on a
# directory of deconvoluted spectra.
#
# Usage: python search.py <data_path> <adducts_file_name>
import LibrarySearch_v1
import sys
from os.path import join
data_path = sys.argv[1]  # root directory of the data set
# Input locations inside the data directory.
path_MB = join(data_path, 'MassBank_matlab.mat')           # MassBank library
path_adducts = join(data_path, 'tmp', 'adducts', sys.argv[2])
path_to_spec = join(data_path, 'tmp', 'deconv')            # deconvoluted spectra
output_ulsa = join(data_path, 'tmp', 'ULSA')               # result directory
# Ionisation settings passed to the library search.
source = 'ESI'
mode = 'POSITIVE'
# Start the MATLAB runtime, run the search (no return values requested),
# then shut the runtime down.
l = LibrarySearch_v1.initialize()
l.LibrarySearch_v1(path_MB, source, mode, path_adducts, path_to_spec, output_ulsa, nargout=0)
l.terminate()
| 1.921875 | 2 |
src/apollon/segment.py | ifsm/apollon | 0 | 12757952 | """
Licensed under the terms of the BSD-3-Clause license.
Copyright (C) 2019 <NAME>, <EMAIL>
"""
from dataclasses import dataclass
from typing import ClassVar, Generator, Tuple, Union
import numpy as _np
from numpy.lib.stride_tricks import as_strided
from . audio import AudioFile
from . container import Params
from . signal.tools import zero_padding as _zero_padding
from . types import Array, Schema
@dataclass
class LazySegmentParams:
    """Encapsulates segmentation parameters for :class:`LazySegments`."""
    n_perseg: int           # samples per segment
    n_overlap: int          # overlap between consecutive segments, in samples
    norm: bool = False      # normalize each segment separately
    mono: bool = True       # mix all channels down to mono
    expand: bool = True     # start segmentation half a window before frame 0
    dtype: str = 'float64'  # dtype of the arrays read from the audio file
# JSON-schema fragment used to validate SegmentationParams input.
# NOTE(review): the schema covers ``extend``/``pad`` (SegmentationParams
# fields); LazySegmentParams uses ``expand`` and is not described here --
# confirm this asymmetry is intended.
SEGMENTATION_PARAMS = {
    "type": "object",
    "properties": {
        "n_perseg": {"type": "integer"},
        "n_overlap": {"type": "integer"},
        "extend": {"anyOf": [{"type": "boolean"}, {"type": "integer"}]},
        "pad": {"anyOf": [{"type": "boolean"}, {"type": "integer"}]}
    }
}
@dataclass
class SegmentationParams(Params):
    """Parameters for Segmentation."""
    _schema: ClassVar[Schema] = SEGMENTATION_PARAMS  # validation schema (class-level, not a field)
    n_perseg: int = 512               # samples per segment
    n_overlap: int = 256              # samples shared by adjacent segments
    extend: Union[bool, int] = True   # extend half a window at start and end
    pad: Union[bool, int] = True      # zero-pad so all frames are covered
@dataclass
class Segment:
    """Encapsulates audio segment data and meta data."""
    idx: int           # segment index within the segmentation
    start: int         # first frame (inclusive) in the original signal
    stop: int          # last frame (exclusive) in the original signal
    center: int        # center frame of the segment in the original signal
    n_frames: int      # number of frames in the segment
    data: _np.ndarray  # the segment's samples
class Segments:
    """Container for the segments produced by :class:`Segmentation`.

    Wraps the ``(n_perseg, n_segs)`` segment matrix together with the
    parameter set that produced it and offers per-segment access.
    """
    def __init__(self, params: SegmentationParams, segs: _np.ndarray) -> None:
        self._params = params
        self._segs = segs
        # Without edge extension the first segment is centered half a
        # window into the signal; with extension it sits on frame 0.
        if self._params.extend:
            self._offset = 0
        else:
            self._offset = self._params.n_perseg // 2

    @property
    def data(self) -> Array:
        """Return the raw segment data array."""
        return self._segs

    @property
    def n_segs(self) -> int:
        """Number of segments."""
        return self._segs.shape[1]

    @property
    def n_perseg(self) -> int:
        """Samples per segment."""
        return self._params.n_perseg

    @property
    def n_overlap(self) -> int:
        """Samples shared by adjacent segments."""
        return self._params.n_overlap

    @property
    def step(self) -> int:
        """Hop size between successive segment starts."""
        return self._params.n_perseg - self._params.n_overlap

    @property
    def params(self) -> SegmentationParams:
        """Parameter set used to compute this instance."""
        return self._params

    def center(self, seg_idx) -> int:
        """Return the center of segment ``seg_idx`` as frame number
        of the original signal.

        Args:
            seg_idx: Segment index.

        Returns:
            Center frame index.

        Raises:
            IndexError: If ``seg_idx`` is out of range.
        """
        if seg_idx < 0 or seg_idx >= self.n_segs:
            raise IndexError('Requested index out of range.')
        return self._offset + seg_idx * self.step

    def bounds(self, seg_idx) -> Tuple[int, int]:
        """Return the frame numbers of the lower and upper bound of
        segment ``seg_idx``. Lower bound is inclusive, upper exclusive.

        Args:
            seg_idx: Segment index.

        Returns:
            Lower and upper bound frame index.

        Raises:
            IndexError: If ``seg_idx`` is out of range.
        """
        if seg_idx < 0 or seg_idx >= self.n_segs:
            raise IndexError('Requested index out of range.')
        lower = self.center(seg_idx) - self._params.n_perseg // 2
        return lower, lower + self._params.n_perseg

    def get(self, seg_idx) -> Segment:
        """Return segment ``seg_idx`` wrapped in a ``Segment`` object.

        Args:
            seg_idx: Segment index.

        Returns:
            Segment ``seg_idx``.
        """
        lob, upb = self.bounds(seg_idx)
        return Segment(seg_idx, lob, upb, self.center(seg_idx),
                       self._params.n_perseg, self[seg_idx])

    def __iter__(self) -> Generator[_np.ndarray, None, None]:
        # Yield each segment as a (n_perseg, 1) column.
        for idx in range(self.n_segs):
            yield _np.expand_dims(self._segs[:, idx], 1)

    def __getitem__(self, key) -> _np.ndarray:
        selection = self._segs[:, key]
        if selection.ndim >= 2:
            return selection
        # A single segment is returned as a column vector.
        return _np.expand_dims(selection, 1)

    def __repr__(self) -> str:
        return f'Segments(params={self._params!s}, segs={self._segs!s})'

    def __str__(self) -> str:
        return f'<n_segs: {self.n_segs}, len_seg: {self._params.n_perseg}>'
class Segmentation:
    """Segment one-dimensional signals into (overlapping) windows."""
    def __init__(self, n_perseg: int, n_overlap: int, extend: bool = True,
                 pad: bool = True) -> None:
        """Subdivide input array.

        Args:
            n_perseg: Samples per segment.
            n_overlap: Overlap in samples.
            extend: Extend a half window at start and end.
            pad: Pad extension.

        Raises:
            ValueError: If ``n_perseg`` or ``n_overlap`` is out of range.
        """
        if n_perseg > 0:
            self.n_perseg = n_perseg
        else:
            msg = (f'Argument to ``n_perseg`` must be greater than '
                   f'zero.\nFound ``n_perseg`` = {n_perseg}.')
            raise ValueError(msg)
        if 0 < n_overlap < n_perseg:
            self.n_overlap = n_overlap
        else:
            msg = (f'Argument to ``n_overlap`` must be greater than '
                   f'zero and less then ``n_perseg``.\n Found '
                   f'``n_perseg`` = {self.n_perseg} and ``n_overlap`` '
                   f' = {n_overlap}.')
            raise ValueError(msg)
        self._extend = extend
        self._pad = pad
        self._ext_len = 0
        self._pad_len = 0

    def transform(self, data: _np.ndarray) -> 'Segments':
        """Apply segmentation.

        Input array must be either one-, or two-dimensional.
        If ``data`` is two-dimensional, it must be of shape
        (n_elements, 1).

        Args:
            data: Input array.

        Returns:
            ``Segments`` object.
        """
        self._validate_data_shape(data)
        self._validate_nps(data.shape[0])
        n_frames = data.shape[0]
        step = self.n_perseg - self.n_overlap
        if self._extend:
            # Half a window on each side so the first/last frames are
            # covered by a full segment.
            self._ext_len = self.n_perseg // 2
        if self._pad:
            # Zero-pad so the last hop yields a full segment.
            self._pad_len = (-(n_frames-self.n_perseg) % step) % self.n_perseg
        data = _np.pad(data.squeeze(), (self._ext_len, self._ext_len+self._pad_len))
        # Build a zero-copy (n_segs, n_perseg) view using strides, then
        # transpose to (n_perseg, n_segs).
        new_shape = data.shape[:-1] + ((data.shape[-1] - self.n_overlap) // step, self.n_perseg)
        new_strides = data.strides[:-1] + (step * data.strides[-1], data.strides[-1])
        segs = as_strided(data, new_shape, new_strides, writeable=False).T
        params = SegmentationParams(self.n_perseg, self.n_overlap,
                                    self._extend, self._pad)
        return Segments(params, segs)

    def _validate_nps(self, n_frames: int) -> None:
        """Raise ``ValueError`` if the input is shorter than one segment."""
        if self.n_perseg > n_frames:
            # Bug fix: the original formatted only the first line as an
            # f-string, so ``{self.n_perseg}`` was emitted literally.
            msg = (f'Input data length ({n_frames}) incompatible with '
                   f'parameter ``n_perseg`` = {self.n_perseg}. ``n_perseg`` '
                   f'must be less then or equal to input data length.')
            raise ValueError(msg)

    def _validate_data_shape(self, data: _np.ndarray) -> None:
        """Raise ``ValueError`` unless ``data`` is 1-d or a (n, 1) column."""
        if not (0 < data.ndim < 3):
            msg = (f'Input array must have one or two dimensions.\n'
                   f'Found ``data.shape`` = {data.shape}.')
        elif data.ndim == 2 and data.shape[1] != 1:
            msg = (f'Two-dimensional import arrays can only have one '
                   f'column.\nFound ``data.shape``= {data.shape}.')
        else:
            return None
        raise ValueError(msg)
class LazySegments:
    """Read segments from audio file on demand."""
    def __init__(self, snd: 'AudioFile', n_perseg: int, n_overlap: int,
                 norm: bool = False, mono: bool = True,
                 expand: bool = True, dtype: str = 'float64') -> None:
        """Compute equal-sized segments.

        Args:
            snd: Audio source; must expose ``n_frames``, ``fps``
                and ``read``.
            n_perseg: Number of samples per segment.
            n_overlap: Size of segment overlap in samples.
            norm: Normalize each segment separately.
            mono: If ``True`` mixdown all channels.
            expand: Start segmentation at -n_perseg//2.
            dtype: Dtype of output array.
        """
        self._snd = snd
        self.n_perseg = n_perseg
        self.n_overlap = n_overlap
        self.expand = expand
        # Number of hop positions that fit into the file.
        self.n_segs = int(_np.ceil(self._snd.n_frames / n_overlap))
        if expand:
            self.n_segs += 1
            self.offset = -self.n_perseg // 2
        else:
            self.n_segs -= 1
            self.offset = 0
        self.step = self.n_perseg - self.n_overlap
        self.norm = norm
        self.mono = mono
        self.dtype = dtype

    def compute_bounds(self, seg_idx):
        """Return (start, stop) frame indices of segment ``seg_idx``.

        Raises:
            IndexError: If ``seg_idx`` is negative or >= ``n_segs``.
        """
        if seg_idx < 0:
            raise IndexError('Expected positive integer for ``seg_idx``. '
                             f'Got {seg_idx}.')
        if seg_idx >= self.n_segs:
            raise IndexError(f'You requested segment {seg_idx}, but there '
                             f'are only {self.n_segs} segments.')
        start = seg_idx * self.n_overlap + self.offset
        return start, start + self.n_perseg

    def read_segment(self, seg_idx: int, norm: bool = None,
                     mono: bool = None, dtype: str = None):
        """Read segment ``seg_idx`` from the audio source.

        Bug fix: the original used ``norm or self.norm`` (likewise for
        ``mono``/``dtype``), which silently overrode an explicit
        ``False`` argument whenever the instance default was truthy.
        Only ``None`` now falls back to the instance defaults.
        """
        norm = self.norm if norm is None else norm
        mono = self.mono if mono is None else mono
        dtype = self.dtype if dtype is None else dtype
        offset = seg_idx * self.n_overlap + self.offset
        return self._snd.read(self.n_perseg, offset, norm, mono, dtype)

    def loc(self, seg_idx: int, norm: bool = None,
            mono: bool = None, dtype: str = None) -> 'Segment':
        """Locate segment by index.

        Args:
            seg_idx: Segment index.
            norm: If ``True``, normalize each segment separately.
                Falls back to ``self.norm``.
            mono: If ``True`` mixdown all channels.
                Falls back to ``self.mono``.
            dtype: Output dtype. Falls back to ``self.dtype``.

        Returns:
            Segment number ``seg_idx``.
        """
        start, stop = self.compute_bounds(seg_idx)
        data = self.read_segment(seg_idx, norm, mono, dtype)
        # Bug fix: the original passed ``n_perseg`` and ``fps`` into the
        # ``center`` and ``n_frames`` slots of ``Segment``.
        return Segment(seg_idx, start, stop, (start + stop) // 2,
                       self.n_perseg, data)

    def __getitem__(self, key):
        return self.loc(key)

    def __iter__(self):
        for i in range(self.n_segs):
            yield self.__getitem__(i)

    def iter_data(self):
        # NOTE(review): reads ``n_perseg`` frames from the current file
        # position instead of per-segment offsets -- confirm intended.
        for i in range(self.n_segs):
            yield self._snd.read(self.n_perseg)

    def iter_bounds(self):
        """Yield the (start, stop) bounds of every segment in order."""
        for i in range(self.n_segs):
            yield self.compute_bounds(i)
def _by_samples(x: Array, n_perseg: int) -> Array:
    """Split ``x`` into non-overlapping segments of ``n_perseg`` samples.

    Inputs that cannot be split evenly are zero-padded at the end.

    Args:
        x: One-dimensional input array.
        n_perseg: Length of segments in samples.

    Returns:
        Two-dimensional array of segments.

    Raises:
        TypeError: If ``n_perseg`` is not an int.
        ValueError: If ``n_perseg`` is smaller than one.
    """
    if not isinstance(n_perseg, int):
        raise TypeError('Param ``n_perchunk`` must be of type int.')
    if n_perseg < 1:
        raise ValueError('``n_perchunk`` out of range. '
                         'Expected 1 <= n_perchunk.')
    # Round the total length up to a whole number of segments.
    n_segs = int(_np.ceil(x.size / n_perseg))
    padded = _zero_padding(x, n_segs * n_perseg - x.size)
    return padded.reshape(-1, n_perseg)
def _by_samples_with_hop(x: Array, n_perseg: int, hop_size: int) -> Array:
    """Split `x` into segments of lenght `n_perseg` samples. Move the
    extraction window `hop_size` samples.

    This function automatically applies zero padding for inputs that cannot be
    split evenly.

    Args:
        x: One-dimensional input array.
        n_perseg: Length of segments in samples.
        hop_size: Hop size in samples

    Returns:
        Two-dimensional array of segments.

    Raises:
        TypeError: If ``n_perseg`` or ``hop_size`` is not an int.
        ValueError: If ``n_perseg`` or ``hop_size`` is out of range.
    """
    if not (isinstance(n_perseg, int) and isinstance(hop_size, int)):
        raise TypeError('Params must be of type int.')
    if not 1 < n_perseg <= x.size:
        raise ValueError('n_perseg out of range. '
                         'Expected 1 < n_perseg <= len(x).')
    if hop_size < 1:
        raise ValueError('hop_size out of range. Expected 1 < hop_size.')
    # Number of full windows that start inside the signal.
    n_hops = (x.size - n_perseg) // hop_size + 1
    n_segs = n_hops
    # With overlapping windows, a trailing partial window gets one extra
    # (zero-padded) segment.
    if (x.size - n_perseg) % hop_size != 0 and n_perseg > hop_size:
        n_segs += 1
    # Pad so the last segment can be sliced without running off the end.
    fit_size = hop_size * n_hops + n_perseg
    n_ext = fit_size - x.size
    x = _zero_padding(x, n_ext)
    out = _np.empty((n_segs, n_perseg), dtype=x.dtype)
    for i in range(n_segs):
        off = i * hop_size
        out[i] = x[off:off+n_perseg]
    return out
def by_samples(x: Array, n_perseg: int, hop_size: int = 0) -> Array:
    """Segment the input into windows of ``n_perseg`` samples, advancing
    the window by ``hop_size`` samples.

    Zero padding is applied automatically for inputs that cannot be
    split evenly. If ``hop_size`` is less than one, the windows do not
    overlap (hop_size = n_perseg). Overlap in percent is
    ``hop_size / n_perseg * 100``.

    Args:
        x: One-dimensional input array.
        n_perseg: Length of segments in samples.
        hop_size: Hop size in samples. If < 1, hop_size = n_perseg.

    Returns:
        Two-dimensional array of segments.
    """
    if hop_size >= 1:
        return _by_samples_with_hop(x, n_perseg, hop_size)
    return _by_samples(x, n_perseg)
def by_ms(x: Array, fps: int, ms_perseg: int, hop_size: int = 0) -> Array:
    """Segment the input into segments of length ``ms_perseg`` milliseconds
    and move the window ``hop_size`` milliseconds at a time.

    This function automatically applies zero padding for inputs that cannot be
    split evenly.

    If ``hop_size`` is less than one, it is reset to the segment length.

    Args:
        x: One-dimensional input array.
        fps: Sampling frequency in frames per second.
        ms_perseg: Length of segments in milliseconds.
        hop_size: Hop size in milliseconds. If < 1, segments do not overlap.

    Returns:
        Two-dimensional array of segments.
    """
    # Convert millisecond quantities to sample counts (integer floor).
    n_perseg = fps * ms_perseg // 1000
    hop_size = fps * hop_size // 1000
    return by_samples(x, n_perseg, hop_size)
def by_onsets(x: Array, n_perseg: int, ons_idx: Array, off: int = 0
              ) -> Array:
    """Extract one ``n_perseg``-sample window of ``x`` per onset.

    Each window starts at ``ons_idx[i] + off``, clamped to the valid
    index range; windows running past the end of ``x`` are zero-padded.

    Args:
        x: One-dimensional input array.
        n_perseg: Length of segments in samples.
        ons_idx: One-dimensional array of onset positions.
        off: Length of offset.

    Returns:
        Two-dimensional array of shape (len(ons_idx), n_perseg).
    """
    out = _np.empty((ons_idx.size, n_perseg), dtype=x.dtype)
    for row, onset in enumerate(ons_idx):
        # Clamp the window start into [0, x.size - 1].
        pos = min(max(onset + off, 0), x.size - 1)
        if pos + n_perseg >= x.size:
            tail = x[pos:]
            out[row] = _zero_padding(tail, n_perseg - tail.size)
        else:
            out[row] = x[pos:pos + n_perseg]
    return out
| 2.109375 | 2 |
src/rent_hotel/apps.py | pfskiev/lisbon | 0 | 12757953 | from django.apps import AppConfig
class RentCarConfig(AppConfig):
    """Django application configuration for the ``rent_car`` app."""
    # NOTE(review): the file lives under src/rent_hotel/ but registers the
    # 'rent_car' app label -- confirm the app name is intentional.
    name = 'rent_car'
| 1.3125 | 1 |
tests/unit/test_oneview_hypervisor_manager.py | SHANDCRUZ/test-codecov | 10 | 12757954 | <filename>tests/unit/test_oneview_hypervisor_manager.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.hpe.oneview.tests.unit.utils.hpe_test_utils import OneViewBaseTest
from ansible_collections.hpe.oneview.tests.unit.utils.oneview_module_loader import HypervisorManagerModule
FAKE_MSG_ERROR = 'Fake message error'
# Baseline hypervisor-manager resource shared by every test case.
DEFAULT_HYPERVISOR_MANAGER_TEMPLATE = dict(
    name='172.18.13.11',
    hypervisorType='Vmware',
    displayName='vcenter',
    username='dcs',
    password='<PASSWORD>'
)
# Module params asserting the resource exists, unmodified.
PARAMS_FOR_PRESENT = dict(
    config='config.json',
    state='present',
    data=dict(name=DEFAULT_HYPERVISOR_MANAGER_TEMPLATE['name'])
)
# Module params that rename the display name (exercises the update path).
PARAMS_WITH_CHANGES = dict(
    config='config.json',
    state='present',
    data=dict(name=DEFAULT_HYPERVISOR_MANAGER_TEMPLATE['name'],
              displayName="vcenter renamed",
              hypervisorType='Vmware')
)
# Module params requesting deletion of the resource.
PARAMS_FOR_ABSENT = dict(
    config='config.json',
    state='absent',
    data=dict(name=DEFAULT_HYPERVISOR_MANAGER_TEMPLATE['name'])
)
@pytest.mark.resource(TestHypervisorManagerModule='hypervisor_managers')
class TestHypervisorManagerModule(OneViewBaseTest):
    """
    OneViewBaseTestCase provides the mocks used in this test case
    """
    def test_should_create_new_hypervisor_manager(self):
        # No existing resource -> the module should create one and
        # report a change.
        self.resource.get_by_name.return_value = []
        self.resource.data = DEFAULT_HYPERVISOR_MANAGER_TEMPLATE
        self.resource.create.return_value = self.resource
        self.mock_ansible_module.params = PARAMS_FOR_PRESENT
        HypervisorManagerModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=HypervisorManagerModule.MSG_CREATED,
            ansible_facts=dict(hypervisor_manager=DEFAULT_HYPERVISOR_MANAGER_TEMPLATE)
        )
    def test_should_not_update_when_data_is_equals(self):
        # Existing resource already matches the requested data -> no change.
        self.resource.data = DEFAULT_HYPERVISOR_MANAGER_TEMPLATE
        self.mock_ansible_module.params = PARAMS_FOR_PRESENT
        HypervisorManagerModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=HypervisorManagerModule.MSG_ALREADY_PRESENT,
            ansible_facts=dict(hypervisor_manager=DEFAULT_HYPERVISOR_MANAGER_TEMPLATE)
        )
    def test_update_when_data_has_modified_attributes(self):
        # Changed displayName -> the module should update the resource.
        data_merged = DEFAULT_HYPERVISOR_MANAGER_TEMPLATE.copy()
        data_merged['displayName'] = 'vcenter renamed'
        self.resource.data = DEFAULT_HYPERVISOR_MANAGER_TEMPLATE
        self.mock_ansible_module.params = PARAMS_WITH_CHANGES
        HypervisorManagerModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=HypervisorManagerModule.MSG_UPDATED,
            ansible_facts=dict(hypervisor_manager=DEFAULT_HYPERVISOR_MANAGER_TEMPLATE)
        )
    def test_should_remove_hypervisor_manager(self):
        # Resource exists and state=absent -> delete and report a change.
        self.resource.data = DEFAULT_HYPERVISOR_MANAGER_TEMPLATE
        self.mock_ansible_module.params = PARAMS_FOR_ABSENT
        HypervisorManagerModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=HypervisorManagerModule.MSG_DELETED
        )
    def test_should_do_nothing_when_hypervisor_manager_not_exist(self):
        # Resource missing and state=absent -> nothing to do.
        self.resource.get_by_name.return_value = None
        self.mock_ansible_module.params = PARAMS_FOR_ABSENT
        HypervisorManagerModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=HypervisorManagerModule.MSG_ALREADY_ABSENT
        )
    def test_update_scopes_when_different(self):
        # Different scopeUris -> the module should PATCH /scopeUris.
        params_to_scope = PARAMS_FOR_PRESENT.copy()
        params_to_scope['data']['scopeUris'] = ['test']
        self.mock_ansible_module.params = params_to_scope
        resource_data = DEFAULT_HYPERVISOR_MANAGER_TEMPLATE.copy()
        resource_data['scopeUris'] = ['fake']
        resource_data['uri'] = 'rest/hypervisor-managers/fake'
        self.resource.data = resource_data
        patch_return = resource_data.copy()
        patch_return['scopeUris'] = ['test']
        patch_return_obj = self.resource.copy()
        patch_return_obj.data = patch_return
        self.resource.patch.return_value = patch_return_obj
        HypervisorManagerModule().run()
        self.resource.patch.assert_called_once_with(operation='replace',
                                                    path='/scopeUris',
                                                    value=['test'])
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            ansible_facts=dict(hypervisor_manager=patch_return),
            msg=HypervisorManagerModule.MSG_UPDATED
        )
    def test_should_do_nothing_when_scopes_are_the_same(self):
        # Matching scopeUris -> no PATCH call, no change.
        params_to_scope = PARAMS_FOR_PRESENT.copy()
        params_to_scope['data']['scopeUris'] = ['test']
        self.mock_ansible_module.params = params_to_scope
        resource_data = DEFAULT_HYPERVISOR_MANAGER_TEMPLATE.copy()
        resource_data['scopeUris'] = ['test']
        self.resource.data = resource_data
        HypervisorManagerModule().run()
        # NOTE(review): ``not_been_called`` is not a real Mock assertion;
        # this line is itself just a no-op mock call. Confirm
        # ``assert_not_called`` was intended here.
        self.resource.patch.not_been_called()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(hypervisor_manager=resource_data),
            msg=HypervisorManagerModule.MSG_ALREADY_PRESENT
        )
# Allow running this test module directly via ``python <file>``.
if __name__ == '__main__':
    pytest.main([__file__])
| 1.84375 | 2 |
series/migrations/0002_auto_20170818_1006.py | teenoh/tv_series_scraper | 0 | 12757955 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-18 09:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``series`` app.

    Re-declares the ``season`` and ``series`` foreign keys with explicit
    ``related_name`` reverse accessors and CASCADE deletion.
    """
    dependencies = [
        ('series', '0001_initial'),
    ]
    operations = [
        # Episodes.season -> Seasons, reverse accessor ``episodes``.
        migrations.AlterField(
            model_name='episodes',
            name='season',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='episodes', to='series.Seasons'),
        ),
        # Seasons.series -> Series, reverse accessor ``seasons``.
        migrations.AlterField(
            model_name='seasons',
            name='series',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seasons', to='series.Series'),
        ),
    ]
| 1.515625 | 2 |
sph_ap.py | Falfat/SPH_Simulator.github.io | 1 | 12757956 | <gh_stars>1-10
from itertools import count
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
from datetime import datetime
import pickle as pi
import warnings
from animate_results import load_and_set, animate
class SPH_main(object):
"""
A class for Smoothed Particle Hydrodynamics (SPH), a meshless method
used for solving the Navier-Stokes equation to simulate a wave
generation problem.
....
Attributes
----------
h : float -- determined attribute
bin half size (meters). [Deafult value = 1.3]
h_fac : float -- set attribute
bin half size constant (unitless).
dx : float -- set attribute
particle spacing (meters).
mu : float -- set attribute
viscosity (Pa s) [Deafult value = 0.001]
rho0 : integer -- set attribute
initial particle density (kg/m^3). [Deafult value = 1000]
c0 : integer -- set attribute
fit-for-purpose speed of sound in water (m/s). [Deafult value = 20]
t_curr : float -- set attribute
current time of the system (s).
gamma : constant -- set attribute
stiffness value (unitless). [Deafult value = 7]
interval_smooth : integer -- set attribute
number of timesteps to which smooth rho (unitless). [Deafult value = 15]
interval_save : integer -- set attribute
number of timesteps at which the current states are saved (unitless).
[Deafult value = 15]
CFL : float -- set attribute
Scale factor for Courant–Friedrichs–Lewy condition (unitless). [Deafult value = 0.2]
B : float -- determined attribute
pressure constant (Pa).
g : 1D array -- set attribute
body force based 2D vector [gravity value (m/s^2)]. [Deafult value = [0, -9.81] ]
file : file -- determined attribute
a file of results for post processing and visulaization.
min_x : list
lower-left boundary for the domain.
max_x : list
upper-right boundaries for the domain.
max_list : list -- determined attribute
binning grid dimensions.
search_grid : array
binning grid.
t_curr : float -- set attribute
Dyanimc. Stores time at which simulation is being run. Starts at 0.
w_fac1 : float -- set attribute
A constant for the smoothing function W (m^-2).
w_fac2 : float -- set attribute
A constant for the derivative smoothing function dW (m^-3.)
particle_list : list -- determined attribute
A list of particles to be simulated.
search_grid : array -- determined attribute
An array of searched neighbouring particles.
lil_bit : float-- determined attribute
An upper limit to get np arrange working as desired.
P_ref : float -- determined attribute
Boundary reference pressure to prevent leakages (Pa).
d_ref : float -- determined attribute
Reference distance for enforcing boundary pressure (m).
func : list -- set attribute
A list containing O and 1 to distinguish fluid particles from
boundaries.
interval_smooth : int -- set attribute
interval of timesteps at which density smoothing function is called
interval_save : int -- set attribute
interval of timesteps at which data is appended to file
Methods
-------
determine_values():
Aids to determine initial simulation parameters.
initialise_grid():
Intializes the domain for simulation.
add_boundaries():
Adds the boundary points of at least 2h around the edges.
place_points(xmin, xmax):
Place points in a rectangle with a square spacing of specific value.
allocate_to_grid():
Allocates all the points to a grid to aid neighbours' searching.
neighbour_iterate(part):
Finds all the particles within the search range of a specific particle.
plot_current_state():
Plots the current state of the system (i.e. where every particles are)
in space.
W(p_i, p_j_list):
Calculates Smoothing factor for a particle being affected by
neighbouring particles within the specified neighbourhood.
dW(p_i, p_j_list):
Calculates the derivative Smoothing factor for a particle being
affected by neighbouring particles within the specified neighbourhood.
LJ_boundary_force(p):
Enforces boundary force to prevent fluid particles' leaking.
rho_smoothing(p_i, p_j_list):
determines the smoothed density of a particle interest.
timestepping(tf):
Timesteps the physical problem with a set dt until
user-specified time is reached.
set_up_save(name, path):
Saves the initial setup of the system and creates the csv file to
store ongoing results as solution runs.
save_state():
Append the current state of every particle in the system to the
end of the csv file.
R_artificial_pressure(p_i, p_j_list, step) -- sph_ap only:
Determines the R component of the artificial pressure.
dW_artificial_pressure(p_i, p_j_list, step) -- sph_ap only:
Calculates the derivative Smoothing factor component of the artificial
pressure for a particle being affected by neighbouring particles
within the specified neighbourhood.
"""
def __init__(self, x_min=(0.0, 0.0), x_max=(1.0, 1.0), dx=0.02):
# set empty attributes for later
self.h = None
self.B = 0.0
self.w_fac1 = 0.0
self.w_fac2 = 0.0
self.file = None
self.P_ref = 0.0
self.d_ref = 0.0
# set given attributes
self.dx = dx
self.t_curr = 0.0 # current time of the system (s)
self.h_fac = 1.3
self.mu = 0.001 # viscosity (Pa s)
self.rho0 = 1000 # initial particle density (kg/m^3)
self.c0 = 20 # speed of sound in water (m/s)
self.gamma = 7 # stiffness value, dimensionless
self.interval_smooth = 15 # timesteps to smooth rho
self.interval_save = 15 # timesteps to save the state
self.CFL = 0.2 # CFL constant, dimensionless
self.g = 9.81 * np.array([0, -1]) # gravity value (m/s^2)
# set the limits
self.min_x = np.zeros(2)
self.min_x[:] = x_min
self.max_x = np.zeros(2)
self.max_x[:] = x_max
self.max_list = np.zeros(2, int)
# setup the particle lists
self.particle_list = []
self.search_grid = np.empty((0, 0), object)
def determine_values(self):
"""Set simulation parameters."""
self.h = self.dx*self.h_fac # bin half-size
self.lil_bit = self.dx*0.01 # to include upper limits
self.B = self.rho0 * self.c0**2 / self.gamma # pressure constant (Pa)
self.w_fac1 = 10 / (7 * np.pi * self.h ** 2) # constant often used
self.w_fac2 = 10 / (7 * np.pi * self.h ** 3) # constant often used
self.P_ref = self.B*(1.05**self.gamma - 1)
self.d_ref = 0.9 * self.dx
def initialise_grid(self, func):
"""
Initalise simulation grid.
func takes array x and returns 1 for particle in fluid or 0 for no particle
"""
assert self.h is not None, 'must run determine values first'
assert callable(func), 'func must be a function'
# set internal points
for x in np.arange(self.min_x[0], self.max_x[0] + self.lil_bit,
self.dx):
for y in np.arange(self.min_x[1], self.max_x[1] + self.lil_bit,
self.dx):
if func(x, y) == 1:
self.place_point(x, y, bound=0)
self.add_boundaries() # create the boundary points
# check there are no duplicate points
tmp = np.array([p.x for p in self.particle_list])
assert np.unique(tmp, axis=0).shape[0] == len(tmp), \
'there is a duplicate point'
# setup the search array (find size then create array)
self.max_list = np.array((self.max_x-self.min_x)/(2.0*self.h)+1, int)
self.search_grid = np.empty(self.max_list, object)
def add_boundaries(self):
""" Adds the boundary points so at least 2h around the edge """
# create the boundary points
tmp_diff = 0
while tmp_diff < 2.0*self.h:
tmp_diff += self.dx
tmp_min = self.min_x - tmp_diff
tmp_max = self.max_x + tmp_diff
# upper and lower rows
for x in np.arange(tmp_min[0], tmp_max[0] + self.lil_bit, self.dx):
self.place_point(x, tmp_min[1], bound=1)
self.place_point(x, tmp_max[1], bound=1)
# left and right (removing corners)
tmp = np.arange(tmp_min[1], tmp_max[1] + self.lil_bit, self.dx)
for i, y in enumerate(tmp):
if i != 0 and i != len(tmp)-1:
self.place_point(tmp_min[0], y, bound=1)
self.place_point(tmp_max[0], y, bound=1)
# account for the boundary particle changing limits
self.min_x -= tmp_diff
self.max_x += tmp_diff
def place_point(self, x, y, bound=0):
"""Place particle at point given and assigns the particle attribute boundary
x: float
x location of particle assuming positive to the right and negative to the left
y: float
y location of particle assuming positive up and negative down"""
# create particle object and assign index
particle = SPH_particle(self, np.array([x, y]))
particle.calc_index()
# intiialise physical paramteres of particles
particle.rho = self.rho0
particle.m = self.dx**2 * self.rho0
particle.P = 0.
particle.bound = bound
# append particle object to list of particles
self.particle_list.append(particle)
def allocate_to_grid(self):
"""Allocate all the points to a grid to aid neighbour searching"""
for i in range(self.max_list[0]):
for j in range(self.max_list[1]):
self.search_grid[i, j] = []
for cnt in self.particle_list:
self.search_grid[cnt.list_num[0], cnt.list_num[1]].append(cnt)
def neighbour_iterate(self, part):
"""Find all the particles within 2h of the specified particle"""
part.adj = [] # needs to be reseted every time it's called
for i in range(max(0, part.list_num[0] - 1),
min(part.list_num[0] + 2, self.max_list[0])):
for j in range(max(0, part.list_num[1] - 1),
min(part.list_num[1] + 2, self.max_list[1])):
for other_part in self.search_grid[i, j]:
if part is not other_part:
dn = part.x - other_part.x # ####### use this later
dist = np.sqrt(np.sum(dn ** 2))
if dist < 2.0 * self.h:
part.adj.append(other_part)
return None
    def plot_current_state(self):
        """
        Plots the current state of the system (i.e. where every particle is)
        in space.
        """
        # Scatter all particle positions, coloured by the boundary flag
        # (fluid = 0, wall = 1).
        x = np.array([p.x for p in self.particle_list])
        bs = [p.bound for p in self.particle_list]
        plt.scatter(x[:, 0], x[:, 1], c=bs)
        plt.gca().set(xlabel='x', ylabel='y', title='Current State')
def W(self, p_i, p_j_list):
"""
Computes the smoothing parameter for a particle i and all its influencing neighbours
Parameters
----------
p_i: (object)
particle where calculations are being performed
p_j_list: (list of objects)
particles influencing particle i
Returns
--------
j_list:(np array)
smoothing factor for particle i being affected by particles j
"""
xi = p_i.x
xj = np.array([p.x for p in p_j_list])
r = xi - xj
j_list = np.sqrt(np.sum(r ** 2, axis=1)) / self.h
assert ((j_list >= 0).all()), "q must be a positive value"
for i, q in enumerate(j_list):
if 0 <= q < 1:
j_list[i] = self.w_fac1 * (1 - 1.5 * q ** 2 + 0.75 * q ** 3)
elif 1 <= q <= 2:
j_list[i] = self.w_fac1 * (0.25 * (2 - q) ** 3)
else:
j_list[i] = 0
return np.array(j_list)
def dW(self, p_i, p_j_list, step):
"""
Computes the derivative of the smoothing parameter for a particle i and all
its influencing neighbours
Parameters
----------
p_i: (object)
particle where calculations are being performed
p_j_list: (list of objects)
list of particles influencing particle i
step: int
value of 1 or 2 corresponding to the stage of the predictor corrector method
Returns
--------
j_list:(np array)
derivative of smoothing factor for particle i being affected by particles j
"""
xi = 0
xj = 0
if step == 1:
xi = p_i.x
xj = np.array([p.x for p in p_j_list])
if step == 2:
xi = p_i.x_temp
xj = np.array([p.x_temp for p in p_j_list])
r = xi - xj
j_list = np.sqrt(np.sum(r ** 2, axis=1)) / self.h
assert ((j_list >= 0).all()), "q must be a positive value"
for i, q in enumerate(j_list):
if 0 <= q < 1:
j_list[i] = self.w_fac2 * (-3 * q + (9 / 4) * q ** 2)
elif 1 <= q <= 2:
j_list[i] = self.w_fac2 * (-(3 / 4) * (2 - q) ** 2)
else:
j_list[i] = 0
return np.array(j_list)
def R_artificial_pressure(self, p_i, p_j_list, step):
"""
Determines the R component of the artificial pressure inserted.
Parameters
----------
p_i: (object)
particle where calculations are being performed
p_j_list: (list of objects)
list of particles influencing particle i
step: int
value of 1 or 2 corresponding to the stage of the predictor corrector method
Returns
--------
j_list:(np array)
derivative of smoothing factor for particle i being affected by particles j
"""
R_i = np.zeros((len(p_j_list)))
R_j = np.zeros((len(p_j_list)))
rho_i = 0
rho_j = 0
for j, p_j in enumerate(p_j_list):
if step == 1:
rho_i = p_i.rho
rho_j = p_j.rho
if step == 2:
rho_i = p_i.rho_temp
rho_j = p_j.rho_temp
if p_i.P < 0:
R_i[j] = 0.2 * p_i.P/(rho_i**2)
else:
R_i[j] = 0.01*( (p_i.P/(rho_i**2)) + (p_j.P/(rho_j**2)))
if p_j.P < 0:
R_j[j] = 0.2 * p_j.P/(rho_j**2)
else:
R_j[j] = 0.01*( (p_j.P/(rho_j**2)) + (p_i.P/(rho_i**2)))
R = R_i + R_j
return R
def dW_artificial_pressure(self, p_i, p_j_list, step):
    """
    Calculate the derivative smoothing factor component of the artificial
    pressure for a particle being affected by neighbouring particles
    within the specified neighbourhood.

    Parameters
    ----------
    p_i: (object)
        particle where calculations are being performed
    p_j_list: (list of objects)
        list of particles influencing particle i
    step: int
        value of 1 or 2 corresponding to the stage of the predictor
        corrector method (1 uses ``x``, 2 uses ``x_temp``)

    Returns
    --------
    j_list: (np array)
        derivative smoothing factor for particle i against each particle j

    Raises
    ------
    ValueError
        if ``step`` is neither 1 nor 2
    """
    if step == 1:
        xi = p_i.x
        xj = np.array([p.x for p in p_j_list])
    elif step == 2:
        xi = p_i.x_temp
        xj = np.array([p.x_temp for p in p_j_list])
    else:
        raise ValueError("step must be 1 or 2")
    q = np.sqrt(np.sum((xi - xj) ** 2, axis=1)) / self.h
    assert (q >= 0).all(), "q must be a positive value"
    w_fac1 = 40 / (7 * np.pi * self.h ** 3)
    w_fac2 = -30 / (7 * np.pi * self.h ** 3)
    grad = np.zeros_like(q)
    near = q < 1
    mid = (q >= 1) & (q <= 2)
    # BUGFIX: the 0 <= q < 1 branch previously used "(9/4) * q"; the
    # cubic-spline kernel derivative is -3*q + (9/4)*q**2, consistent
    # with dW() above. The quadratic term has been restored.
    grad[near] = (w_fac1
                  * (-3 * q[near] + (9 / 4) * q[near] ** 2)
                  * (1 - (3 / 2) * q[near] ** 2 + (3 / 4) * q[near] ** 3) ** 3)
    grad[mid] = w_fac2 * (2 - q[mid]) ** 3
    return grad
def rho_smoothing(self, p_i, p_j_list):
    """
    Return the kernel-weighted (Shepard-style) smoothed density of
    particle i: sum(W) / sum(W / rho) over the neighbourhood.

    Parameters
    ----------
    p_i: (object)
        particle whose density is being smoothed; must itself be a
        member of ``p_j_list``
    p_j_list: (list of objects)
        particles influencing particle i (including particle i)

    Returns
    --------
    rho: (float)
        smoothed density of particle i
    """
    assert p_i in p_j_list, "must include particle i in this calculation"
    weights = self.W(p_i, p_j_list)
    densities = np.array([p.rho for p in p_j_list])
    assert (densities > 0).all(), "density must be always positive"
    return np.sum(weights) / np.sum(weights / densities)
def LJ_boundary_force(self, p):
    """
    Add a Lennard-Jones style repulsive acceleration to particle p for
    each domain wall it is closer to than the reference distance.

    Parameters
    ----------
    p: (object)
        particle where calculations are being performed; its ``a``
        components are modified in place
    """
    # (axis, wall coordinate, push direction): min walls push in the
    # positive direction, max walls push in the negative direction
    walls = ((0, self.min_x[0], 1.0),
             (1, self.min_x[1], 1.0),
             (0, self.max_x[0], -1.0),
             (1, self.max_x[1], -1.0))
    for axis, wall, sign in walls:
        dist = abs(p.x[axis] - wall)
        if dist == 0:
            # particle exactly on the wall: no finite force can be computed
            continue
        q_ref = self.d_ref / dist
        if q_ref > 1:
            p.a[axis] = p.a[axis] + sign * (self.P_ref
                                            * (q_ref ** 4 - q_ref ** 2)
                                            / (dist * p.rho))
    return None
def timestepping(self, tf):
    """
    Timesteps the physical problem with a set dt
    until user-specified time is reached.
    Uses a Predictor-Corrector timestepping method and adds
    artificial pressure to reduce tensile instability
    in the problem

    Parameters
    ----------
    tf: float
        simulation end time; must be at least one initial time step long
    """
    # initial CFL-style guess for the time step; refined after the first
    # iteration once v_ij_max / a_max / rho_max_condition are known
    dt = 0.1 * self.h / self.c0
    v_ij_max = 0
    a_max = 0
    rho_max_condition = 0
    assert (tf >= dt), "time to short to resolve problem, dt=%.4f" % dt
    count = 1
    while self.t_curr <= tf:
        sys.stdout.write('\rTime: %.3f' % self.t_curr)
        sys.stdout.flush()

        """PART 1 STEP 1"""
        # find all the derivatives for each particle
        # (predictor stage: evaluate a and D at the current state)
        for i, p_i in enumerate(self.particle_list):
            # create list of neighbours for particle i
            self.neighbour_iterate(p_i)
            p_i.a = self.g
            p_i.D = 0
            if p_i.adj != []:
                # calculate smoothing contribution from all neighbouring particles
                dW_i = self.dW(p_i, p_i.adj, 1)
                f = self.dW_artificial_pressure(p_i, p_i.adj, 1)
                R = self.R_artificial_pressure(p_i, p_i.adj, 1)
                # calculate acceleration and rate of change of density, find maximum relative velocity
                # amongst all particles and their neighbours and the maximum acceleration amongst particles
                for j, p_j in enumerate(p_i.adj.copy()):
                    r_vec = p_i.x - p_j.x
                    r_mod = np.sqrt(np.sum(r_vec ** 2))
                    e_ij = r_vec / r_mod
                    v_ij = p_i.v - p_j.v
                    # pressure-gradient term
                    p_i.a = p_i.a - (p_j.m * (p_i.P / p_i.rho ** 2 +
                                              p_j.P / p_j.rho ** 2) * dW_i[j] * e_ij)
                    # viscous term
                    p_i.a = p_i.a + (self.mu * p_j.m * (1 / p_i.rho ** 2 +
                                                        1 / p_j.rho ** 2) * dW_i[j] * v_ij / r_mod)
                    # artificial pressure (tensile instability correction)
                    p_i.a = p_i.a + R[j] * f[j]
                    # continuity equation: rate of change of density
                    p_i.D = p_i.D + p_j.m * dW_i[j] * (v_ij[0] * e_ij[0] + v_ij[1] * e_ij[1])
                    v_ij_max = np.amax((np.linalg.norm(v_ij), v_ij_max))
                # add acceleration due to repulsive boundaries
                self.LJ_boundary_force(p_i)
                # Max values to calculate the time step
                a_max = np.amax((np.linalg.norm(p_i.a), a_max))
                rho_condition = np.sqrt((p_i.rho / self.rho0) ** (self.gamma - 1))
                rho_max_condition = np.amax((rho_max_condition, rho_condition))
            elif ((p_i.x < self.min_x).any() or (p_i.x > self.max_x).any()):
                # remove leaked particles
                warnings.warn("Particle %g has leaked" % (p_i.id))
                self.particle_list.remove(p_i)

        """GET DT"""
        # Updating the time step: minimum of the CFL, force and
        # acoustic stability limits, scaled by self.CFL
        if count > 1:
            cfl_dt = self.h / v_ij_max
            f_dt = np.sqrt(self.h / a_max)
            a_dt = np.amin(self.h / (self.c0 * rho_max_condition))
            dt = self.CFL * np.amin([cfl_dt, f_dt, a_dt])

        """PART 2 STEP 1"""
        # updating each particles values (half-step predictor update)
        for i, p_i in enumerate(self.particle_list.copy()):
            # if particle is not at the boundary
            if not p_i.bound:
                # update position -- needs to be updated before new velocity is computed
                p_i.x_temp = p_i.x + (0.5 * dt * p_i.v)
                # update velocity
                p_i.v_temp = p_i.v + (0.5 * dt * p_i.a)
            # for all particles: update density
            p_i.rho_temp = p_i.rho + (0.5 * dt * p_i.D)
            # update pressure (Tait equation of state)
            p_i.P = self.B * ((p_i.rho_temp / self.rho0) ** self.gamma - 1)

        """PART 1 STEP 2"""
        # find all the derivatives for each particle
        # (corrector stage: re-evaluate a and D at the half-step state)
        for i, p_i in enumerate(self.particle_list):
            # create list of neighbours for particle i
            self.neighbour_iterate(p_i)
            p_i.a = self.g
            p_i.D = 0
            if p_i.adj != []:
                # calculate smoothing contribution from all neighbouring particles
                dW_i = self.dW(p_i, p_i.adj, 2)
                f = self.dW_artificial_pressure(p_i, p_i.adj, 2)
                R = self.R_artificial_pressure(p_i, p_i.adj, 2)
                # calculate acceleration and rate of change of density, find maximum relative velocity
                # amongst all particles and their neighbours and the maximum acceleration amongst particles
                for j, p_j in enumerate(p_i.adj.copy()):
                    r_vec = p_i.x_temp - p_j.x_temp
                    r_mod = np.sqrt(np.sum(r_vec ** 2))
                    e_ij = r_vec / r_mod
                    v_ij = p_i.v - p_j.v
                    p_i.a = p_i.a - (p_j.m * (p_i.P / p_i.rho ** 2 +
                                              p_j.P / p_j.rho ** 2) * dW_i[j] * e_ij)
                    p_i.a = p_i.a + (self.mu * p_j.m * (1 / p_i.rho ** 2 +
                                                        1 / p_j.rho ** 2) * dW_i[j] * v_ij / r_mod)
                    p_i.a = p_i.a + R[j] * f[j]
                    p_i.D = p_i.D + p_j.m * dW_i[j] * (v_ij[0] * e_ij[0] + v_ij[1] * e_ij[1])
                # add acceleration due to repulsive boundaries
                self.LJ_boundary_force(p_i)
            elif ((p_i.x < self.min_x).any() or (p_i.x > self.max_x).any()):
                # removed leaked particles
                warnings.warn("Particle %g has leaked" % (p_i.id))
                self.particle_list.remove(p_i)

        """PART 2 STEP 2"""
        # updating each particles values (half-step corrector update)
        for i, p_i in enumerate(self.particle_list.copy()):
            # if particle is not at the boundary
            if not p_i.bound:
                # update position -- needs to be updated before new velocity is computed
                p_i.x = p_i.x_temp + (0.5 * dt * p_i.v_temp)
                # update velocity
                p_i.v = p_i.v_temp + (0.5 * dt * p_i.a)
            # for all particles: update density
            p_i.rho = p_i.rho_temp + (0.5 * dt * p_i.D)

        """PART 3"""
        # combine predictor and corrector values into the full-step state
        for i, p_i in enumerate(self.particle_list.copy()):
            # if particle is not at the boundary
            if not p_i.bound:
                # update position -- needs to be updated before new velocity is computed
                p_i.x = 2 * p_i.x - p_i.x_temp
                # update velocity
                p_i.v = 2 * p_i.v - p_i.v_temp
            # for all particles: update density, smooths if count is a multiple of smoothing
            p_i.rho = 2 * p_i.rho - p_i.rho_temp
            if count % self.interval_smooth == 0:
                p_j_list = p_i.adj[:]
                p_j_list.append(p_i)
                p_i.rho = self.rho_smoothing(p_i, p_j_list)
            # update pressure
            p_i.P = self.B * ((p_i.rho / self.rho0) ** self.gamma - 1)
            # update particle indices
            p_i.calc_index()

        # re-allocate particles to grid
        self.allocate_to_grid()
        # append the state to file
        if count % self.interval_save == 0:
            self.save_state()
        # update count and t
        count += 1
        self.t_curr += dt

    # close file
    self.file.close()
    return None
def set_up_save(self, name=None, path='raw_data/'):
    """
    Saves the initial setup of the system and creates the csv file to
    store ongoing results as solution runs.
    Files are stored with name in file path (defaults to raw_data folder
    with name given by the time of the simulation).

    Parameters
    ----------
    name: str or None
        base name for the output files; defaults to the current time
    path: str
        existing directory (including trailing separator) in which the
        ``<name>_config.pkl`` and ``<name>.csv`` files are created
    """
    # pick a defualt name if none given
    time = datetime.now().strftime('%Y-%m-%d-%Hhr-%Mm')
    if name is None:
        name = time
    assert type(name) is str, 'Name must be a string'
    assert os.path.isdir(path), path + ' directory does not exist'
    assert self.file is None, "can't run twice as pickling an open file"

    # save the config file; drop attributes that should not be pickled
    to_save = vars(self).copy()
    for key in ('search_grid', 'particle_list'):
        to_save.pop(key)
    # context manager guarantees the pickle file is closed even on error
    with open(path + name + '_config.pkl', 'wb') as config_file:
        pi.dump(to_save, config_file, pi.HIGHEST_PROTOCOL)

    # set up the csv file
    # replace any previous file with same name (truncate then discard)
    open(path + name + '.csv', 'wb').close()
    # open the new file in append mode; kept open for save_state()
    self.file = open(path + name + '.csv', 'a')
    # header comments
    self.file.write('# Created by <NAME> on ' + time + '\n')
    # set add in the column titles
    self.file.write("# [s], , [m], [m], [m/s], [m/s], [m/s^2], [m/s^2]," +
                    " [Pa], [Kg/(m^3)], [bool]\n")
    self.file.write("Time,ID,R_x,R_y,V_x,V_y,a_x,a_y,Pressure," +
                    "Density,Boundary\n")
    print('saving to ' + path + name + '.csv ...')
    # save initial state
    # self.save_state()
    return None
def save_state(self):
    """
    Append one csv row per particle describing the current state of the
    system to the end of the open results file.
    """
    assert self.file is not None, 'set_up_save() has not been run'
    for particle in self.particle_list:
        fields = (self.t_curr, particle.id, particle.x[0], particle.x[1],
                  particle.v[0], particle.v[1], particle.a[0], particle.a[1],
                  particle.P, particle.rho, particle.bound)
        # every field is followed by a comma, including the last one
        self.file.write(','.join(str(field) for field in fields) + ',\n')
class SPH_particle(object):
    """A single smoothed-particle-hydrodynamics particle.

    Attributes
    ----------
    id: int
        unique particle ID, drawn from a class-level counter
    main_data: object or None
        domain object the particle lives in (created from SPH_main);
        used by calc_index for grid placement
    x, x_temp: np.ndarray
        current / half-step [x, y] position on the domain (m)
    v, v_temp: np.ndarray
        current / half-step [V_x, V_y] velocity (m/s)
    a: np.ndarray
        [a_x, a_y] acceleration (m/s^2)
    D: float
        rate of change of density (kg/m^3 s)
    rho, rho_temp: float
        current / half-step density (kg/m^3)
    P: float
        pressure (Pa)
    m: float
        particle mass (kg)
    bound: bool or None
        whether the particle is a fixed boundary particle
    adj: list of SPH_particle
        neighbouring particles that influence this one
    """
    # class-level counter handing out unique ids to every new particle
    _ids = count(0)

    def __init__(self, main_data=None, x=np.zeros(2)):
        self.id = next(self._ids)
        self.main_data = main_data
        # np.array(x) copies, so the shared default array is never mutated
        self.x = np.array(x)
        self.x_temp = np.array(x)
        self.v = np.zeros(2)
        self.v_temp = np.zeros(2)
        self.a = np.zeros(2)
        self.D = 0
        self.rho = 0.0
        self.rho_temp = 0.0
        self.P = 0.0
        self.m = 0.0
        self.bound = None
        self.adj = []

    def calc_index(self):
        """
        Store the particle's 2D integer index into the search grid
        (cells are 2*h wide, anchored at the domain minimum).
        """
        cell = (self.x - self.main_data.min_x) / (2.0 * self.main_data.h)
        self.list_num = np.array(cell, int)
def sph_simulation(x_min, x_max, t_final, dx, func, path_name='./', ani=True,
                   **kwargs):
    """
    Simulates fluid flow from user-specified initial state and timeframe using
    smoothed particle hydrodynamics method.

    Parameters
    ----------
    x_min : list-like
        List with [x,y] coordinates of bottom left boundary, assumed rectangular
    x_max : list-like
        List with [x,y] coordinates of upper right boundary, assumed rectangular
    t_final : float
        Timeframe of simulation.
    dx : float
        Initial distance between particles in domain. Particles assumed to be
        equally spaced on domain specified by func
    func : function
        A function that specifies the initial distribution of particles in the
        domain with a boolean output.
    path_name : string
        Path where files are to be saved.
    ani : boolean
        "True" if animation is to be displayed and "False" if otherwise.

    Other Parameters
    ----------------
    ani_step: int
        frame skipping
    ani_key: string
        header for colorplot. Choose between: ID, Pressure, Density, V_x, and V_y
    h_fac : float -- set attribute
        bin half size constant (unitless).
    mu : float -- set attribute
        viscosity (Pa s) [Default value = 0.001]
    rho0 : integer -- set attribute
        initial particle density (kg/m^3). [Default value = 1000]
    c0 : integer -- set attribute
        fit-for-purpose speed of sound in water (m/s). [Default value = 20]
    gamma : constant -- set attribute
        stiffness value (unitless). [Default value = 7]
    interval_smooth : integer -- set attribute
        number of timesteps to which smooth rho (unitless). [Default value = 15]
    interval_save : integer -- set attribute
        number of timesteps at which the current states are saved (unitless).
        [Default value = 15]
    CFL : float -- set attribute
        Scale factor for Courant-Friedrichs-Lewy condition (unitless). [Default value = 0.2]
    g : 1D array -- set attribute
        body force based 2D vector [gravity value (m/s^2)]. [Default value = [0, -9.81] ]
    tf : float
        Total real time to run simulation.
    P_ref : float (only in the Forward Euler Module)
        Boundary reference pressure to prevent leakages (Pa).
    d_ref : float (only in the Forward Euler Module)
        Reference distance for enforcing boundary pressure (m).
    file_name : string
        Name of file to be saved. If None saves with current time[Default = None]

    Returns
    -------
    system : SPH_main
        the solved simulation object
    """
    # validate kwargs before doing any work
    sim_args = ['h_fac', 'mu', 'rho0', 'c0', 'gamma', 'interval_smooth',
                'interval_save', 'CFL', 'g']
    other_args = ['file_name', 'ani_step', 'ani_key']
    for key in kwargs:
        if key not in sim_args + other_args:
            raise KeyError('Unrecognised key word argument')

    # set the system
    system = SPH_main(x_min, x_max, dx=dx)
    for key in kwargs:
        if key in sim_args:
            # setattr replaces the former exec('system.' + key + '= ...'),
            # which was both unsafe and needlessly slow
            setattr(system, key, kwargs[key])
    system.determine_values()
    system.initialise_grid(func)
    system.allocate_to_grid()
    # name=None triggers set_up_save's time-based default, matching the
    # behaviour when 'file_name' is absent from kwargs
    system.set_up_save(name=kwargs.get('file_name'), path=path_name)

    # solve the system
    system.timestepping(tf=t_final)

    # animate result
    if ani:
        if "ani_key" in kwargs:
            animation = load_and_set(system.file.name, ani_key=kwargs['ani_key'])
        else:
            animation = load_and_set(system.file.name, 'Density')
        if 'ani_step' in kwargs:
            animation.animate(ani_step=kwargs['ani_step'])
        else:
            animation.animate()
        plt.show()
    return system
if __name__ == '__main__' and 1:  # trailing "and 1" is a manual on/off toggle
    def f(x, y):
        """Indicator for the initial fluid region: a shallow base layer
        plus a taller column on the left (1 inside, 0 outside)."""
        in_base = 0 <= y <= 2
        in_column = 0 <= x <= 3 and 0 <= y <= 5
        return 1 if in_base or in_column else 0

    sph_simulation(x_min=[0, 0], x_max=[20, 10], t_final=0.5, dx=1, func=f,
                   path_name='./raw_data/', ani_step=1, ani_key="Pressure",
                   file_name="final_sim")
| 3.015625 | 3 |
pyPDP/sampler/acquisition_function.py | dwoiwode/py-pdp-partitioner | 1 | 12757957 | from abc import ABC, abstractmethod
from typing import Tuple, Union, Optional, Iterable
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
from pyPDP.surrogate_models import SurrogateModel
from pyPDP.utils.plotting import get_ax, check_and_set_axis
from pyPDP.utils.utils import get_hyperparameters, get_selected_idx, ConfigSpaceHolder
class AcquisitionFunction(ConfigSpaceHolder, ABC):
    """Abstract base class for acquisition functions.

    Subclasses implement ``__call__`` to score configurations via the
    surrogate model; the optimum is approximated by uniform random
    sampling of the configuration space.
    """

    def __init__(self,
                 config_space: CS.ConfigurationSpace,
                 surrogate_model: SurrogateModel,
                 samples_for_optimization: int = 100,
                 minimize_objective: bool = True,
                 seed=None):
        super().__init__(config_space, seed=seed)
        self.surrogate_model = surrogate_model
        self.n_samples_for_optimization = samples_for_optimization
        self.minimize_objective = minimize_objective

    @abstractmethod
    def __call__(self, configuration: CS.Configuration) -> Union[float, np.ndarray]:
        """Return the acquisition value(s) for the given configuration(s)."""
        pass

    def update(self, eta: float):
        """Hook for subclasses to receive the current incumbent value; no-op here."""
        pass

    def get_optimum(self) -> CS.Configuration:
        """Return the sampled configuration with the highest acquisition value."""
        return self._get_optimum_uniform_distribution()[0]

    def _get_optimum_uniform_distribution(self) -> Tuple[CS.Configuration, float]:
        # approximate argmax by scoring uniformly sampled configurations;
        # the acquisition value is always maximized, regardless of
        # minimize_objective (subclasses encode the direction)
        configs = self.config_space.sample_configuration(self.n_samples_for_optimization)
        values = self(configs)
        config_value_pairs = [(config, value) for config, value in zip(configs, values)]

        return max(config_value_pairs, key=lambda x: x[1])

    def convert_configs(self, configuration: Union[CS.Configuration, np.ndarray]):
        """Normalize a configuration, list of configurations, or raw array
        into a 2D numpy array suitable for the surrogate model."""
        if isinstance(configuration, CS.Configuration):
            x = np.asarray(configuration.get_array())
            x = x.reshape([1, -1])
        elif isinstance(configuration, list):
            x = []
            for config in configuration:
                if isinstance(config, CS.Configuration):
                    x.append(config.get_array())
                else:
                    # assumed to be an array-like already; copied defensively
                    x.append(config.copy())
            x = np.asarray(x)
        else:
            x = configuration.copy()
        return x

    def plot(self,
             color_acquisition="darkgreen",
             color_optimum="red",
             show_optimum=True,
             x_hyperparameters: Optional[Iterable[CSH.Hyperparameter]] = None,
             ax: Optional[plt.Axes] = None):
        """Plot the acquisition function over the selected hyperparameter.

        Only 1D plotting is implemented; 2D raises NotImplementedError.
        """
        ax = get_ax(ax)
        x_hyperparameters = get_hyperparameters(x_hyperparameters, self.config_space)
        check_and_set_axis(ax, x_hyperparameters, ylabel="Acquisition")

        # Sample configs and get values of acquisition function
        configs = self.config_space.sample_configuration(self.n_samples_for_optimization * len(x_hyperparameters))
        acquisition_y = np.asarray([self(x) for x in configs]).reshape(-1)
        x = np.asarray([[config[hp.name] for hp in x_hyperparameters] for config in configs])

        # Get optimum
        optimum = self.get_optimum()

        # Plot
        n_hyperparameters = len(tuple(x_hyperparameters))
        if n_hyperparameters == 1:  # 1D
            # Sort by x axis
            order = np.argsort(x, axis=0)[:, 0]
            x = x[order, 0]
            acquisition_y = acquisition_y[order]

            ax.fill_between(x, acquisition_y, color=color_acquisition, alpha=0.3)
            ax.plot(x, acquisition_y, color=color_acquisition, label=self.__class__.__name__)

            if show_optimum:
                ax.plot(list(optimum.values())[0], self(optimum), "*", color=color_optimum, label=f"Optimum ({optimum})",
                        markersize=15)
        elif n_hyperparameters == 2:  # 2D
            idx = get_selected_idx(x_hyperparameters, self.config_space)
            raise NotImplementedError("2D currently not implemented (#TODO)")
        else:
            raise NotImplementedError(f"Plotting for {n_hyperparameters} dimensions not implemented. "
                                      "Please select a specific hp by setting `x_hyperparemeters`")
class ExpectedImprovement(AcquisitionFunction):
    """Expected Improvement (EI) acquisition function.

    Scores a configuration by the expected amount it improves on the
    current incumbent value ``eta`` under the surrogate's Gaussian
    posterior. Only minimization is supported.
    """

    def __init__(
            self,
            config_space,
            surrogate_model: SurrogateModel,
            eps: float = 0.0,  # Exploration parameter
            samples_for_optimization=100,
            minimize_objective=True,
            seed=None
    ):
        super().__init__(
            config_space,
            surrogate_model,
            samples_for_optimization,
            minimize_objective, seed=seed
        )
        if not minimize_objective:
            raise NotImplementedError('EI for maximization')
        self.eta = 0
        self.exploration = eps

    def __call__(self, configuration: Union[CS.Configuration, np.ndarray]) -> Union[float, np.ndarray]:
        x = self.convert_configs(configuration)

        mu, std = self.surrogate_model.predict(x)
        # standardized improvement over the incumbent (minus exploration margin)
        z = (self.eta - mu - self.exploration) / std
        ei = std * (z * norm.cdf(z) + norm.pdf(z))
        # where the model is certain, no improvement is expected
        ei[std == 0] = 0
        return ei

    def update(self, eta: float):
        """Record the current incumbent objective value."""
        self.eta = eta
class ProbabilityOfImprovement(AcquisitionFunction):
    """Probability of Improvement (PI) acquisition function.

    Scores a configuration by the posterior probability that it improves
    on the current incumbent value ``eta`` by at least the exploration
    margin. Supports both minimization and maximization.
    """

    def __init__(self,
                 config_space: CS.ConfigurationSpace,
                 surrogate_model: SurrogateModel,
                 eps: float = 0.1,  # Exploration parameter
                 samples_for_optimization: int = 100,
                 minimize_objective=True,
                 seed=None):
        super().__init__(config_space, surrogate_model, samples_for_optimization=samples_for_optimization,
                         minimize_objective=minimize_objective, seed=seed)
        self.eta = 0
        self.exploration = eps

    def __call__(self, configuration: Union[CS.Configuration, np.ndarray]) -> Union[float, np.ndarray]:
        x = self.convert_configs(configuration)
        mu, std = self.surrogate_model.predict(x)
        # signed gap towards improvement, depending on optimization direction
        if self.minimize_objective:
            gap = self.eta - mu
        else:
            gap = mu - self.eta
        poi = norm.cdf((gap - self.exploration) / std)
        # where the model is certain, no improvement is possible
        poi[std == 0] = 0
        return poi

    def update(self, eta: float):
        """Record the current incumbent objective value."""
        self.eta = eta
class LowerConfidenceBound(AcquisitionFunction):
    """LCB acquisition function.

    Trades off exploitation (posterior mean) against exploration
    (posterior standard deviation scaled by ``tau``). The returned score
    is always to be maximized, so for minimization the mean enters with
    a negative sign.
    """

    def __init__(self,
                 config_space: CS.ConfigurationSpace,
                 surrogate_model: SurrogateModel,
                 tau: float = 5,
                 samples_for_optimization=100,
                 minimize_objective=True,
                 seed=None):
        super().__init__(config_space, surrogate_model, samples_for_optimization, minimize_objective=minimize_objective,
                         seed=seed)
        self.tau = tau

    def __call__(self, configuration: Union[CS.Configuration, np.ndarray]) -> Union[float, np.ndarray]:
        x = self.convert_configs(configuration)
        mu, std = self.surrogate_model.predict(x)
        exploration_bonus = self.tau * std
        if self.minimize_objective:
            return exploration_bonus - mu
        return mu + exploration_bonus
| 2.484375 | 2 |
cabot_ui/src/cabot_ui_manager.py | kufusha/cabot | 0 | 12757958 | #!/usr/bin/env python
# Copyright (c) 2020 Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Cabot UI Manager
This class manages the state of the robot.
It serves low level event and maps to high level event which may change state of the robot.
Ideally, this class has plugin architecture to add new UI component but it is not the current goal.
So, all controls which needs to see the current state of the robot are managed by this code.
Low-level (cabot.event) should be mapped into ui-level (cabot_ui.event)
Author: <NAME><<EMAIL>>
"""
import traceback
import rospy
import std_msgs.msg
import std_srvs.srv
import cabot
from cabot import util
import cabot.button
from cabot.event import BaseEvent, ButtonEvent, ClickEvent, JoyButtonEvent, JoyClickEvent
from cabot_ui.event import MenuEvent, NavigationEvent, ExplorationEvent
from cabot_ui.menu import Menu
from cabot_ui.status import State, StatusManager
from cabot_ui.interface import UserInterface
from cabot_ui.navigation import Navigation
from cabot_ui.exploration import Exploration
class CabotUIManager(object):
    """Top-level UI state manager for the CaBot robot.

    Owns the menu system, navigation/exploration controllers, the user
    interface, and the status manager, and acts as the delegate for all
    of them. High-level events arrive on /cabot/event and are dispatched
    through process_event().
    """

    def __init__(self):
        self.main_menu = Menu.create_menu({"menu":"main_menu"}, name_space=rospy.get_name())

        self.speed_menu = None
        if self.main_menu:
            self.main_menu.delegate = self
            self.speed_menu = self.main_menu.get_menu_by_identifier("max_velocity_menu")
        else:
            rospy.logerr("menu is not initialized")

        if self.speed_menu:
            init_speed = self.speed_menu.value
            try:
                # ROS param override; falls back to the menu value on a bad param
                init_speed = float(rospy.get_param("~init_speed", self.speed_menu.value))
            except ValueError:
                pass

            rospy.logdebug("Initial Speed = %.2f", init_speed)
            self.speed_menu.set_value(init_speed)

        self.menu_stack = []
        self.in_navigation = False
        self.destination = None

        self.reset()

        # wire up all collaborators; this object is everyone's delegate
        self._event_mapper = EventMapper()
        self._event_mapper.delegate = self
        self._status_manager = StatusManager.get_instance()
        self._status_manager.delegate = self
        self._interface = UserInterface()
        self._interface.delegate = self
        self._navigation = Navigation()
        self._navigation.delegate = self
        self._exploration = Exploration()
        self._exploration.delegate = self

        self._retry_count = 0

        rospy.Subscriber("/cabot/event", std_msgs.msg.String,
                         self._event_callback, None)
        self._eventPub = rospy.Publisher("/cabot/event", std_msgs.msg.String, queue_size=1)

        # blocks until the handle services are available
        rospy.wait_for_service("set_touch_speed_active_mode")
        self._touchModeProxy = rospy.ServiceProxy("set_touch_speed_active_mode", std_srvs.srv.SetBool)

        rospy.wait_for_service("/cabot/user_speed_enabled")
        self._userSpeedEnabledProxy = rospy.ServiceProxy("/cabot/user_speed_enabled", std_srvs.srv.SetBool)

    ### navigation delegate
    # The following thin wrappers forward navigation callbacks to the
    # user interface so it can announce/act on them.
    def i_am_ready(self):
        self._interface.i_am_ready()

    def start_navigation(self, pose):
        self._interface.start_navigation(pose)

    def notify_turn(self, turn=None, pose=None):
        self._interface.notify_turn(turn=turn, pose=pose)

    def notify_human(self, angle=0, pose=None):
        self._interface.notify_human(angle=angle, pose=pose)

    def goal_canceled(self, goal):
        """Handle a cancelled navigation goal.

        A cancel that happens while we are still in_action was not user
        initiated, so schedule a retry; otherwise it was the user's own
        cancel and nothing more is done.
        """
        # unexpected cancel, may need to retry
        if self._status_manager.state == State.in_action:
            rospy.loginfo("NavigationState: canceled (system)")
            self._status_manager.set_state(State.in_pausing)
            self._retry_navigation()
            return
        rospy.loginfo("NavigationState: canceled (user)")

    @util.setInterval(2, times=1)
    def _retry_navigation(self):
        """Retry the current goal once, two seconds after a system cancel."""
        self._retry_count += 1
        rospy.loginfo("NavigationState: retrying (system)")
        self._navigation.retry_navigation()
        self._status_manager.set_state(State.in_action)
        rospy.loginfo("NavigationState: retried (system)")

    def have_arrived(self, goal):
        #rospy.loginfo("delegate have_arrived called")
        #self._interface.have_arrived(goal)
        rospy.loginfo("NavigationState: arrived")

        # notify external nodes about arrival
        e = NavigationEvent("arrived", None)
        msg = std_msgs.msg.String()
        msg.data = str(e)
        self._eventPub.publish(msg)

    def approaching_to_poi(self, poi=None, pose=None):
        self._interface.approaching_to_poi(poi=poi, pose=pose)

    def approached_to_poi(self, poi=None, pose=None):
        self._interface.approached_to_poi(poi=poi, pose=pose)

    def passed_poi(self, poi=None, pose=None):
        self._interface.passed_poi(poi=poi, pose=pose)

#    def request_action(self, goal=None, pose=None):
#        self._interface.request_action(goal=goal, pose=pose)
#
#    def completed_action(self, goal=None, pose=None):
#        self._interface.completed_action(goal=goal, pose=pose)
#
    # NOTE(review): method name keeps the original typo ("locaion")
    # because external callers depend on it.
    def could_not_get_current_locaion(self):
        self._interface.could_not_get_current_locaion()

    def enter_goal(self, goal):
        self._interface.enter_goal(goal)

    def exit_goal(self, goal):
        self._interface.exit_goal(goal)

    def announce_social(self, message):
        self._interface.announce_social(message)

    def please_call_elevator(self, pos):
        self._interface.please_call_elevator(pos)

    def elevator_opening(self, pose):
        self._interface.elevator_opening(pose)

    def floor_changed(self, floor):
        self._interface.floor_changed(floor)

    def queue_start_arrived(self):
        self._interface.queue_start_arrived()

    def queue_proceed(self, pose=None):
        self._interface.queue_proceed(pose=pose)

    def queue_target_arrived(self):
        self._interface.queue_target_arrived()

    def please_pass_door(self):
        self._interface.please_pass_door()

    def door_passed(self):
        self._interface.door_passed()

    ###
    def _event_callback(self, msg):
        """ROS callback: parse a /cabot/event string and dispatch it."""
        event = BaseEvent.parse(msg.data)
        if event is None:
            rospy.logerr("cabot event %s cannot be parsed", msg.data)
            return
        self.process_event(event)

    def reset(self):
        """reset menu"""
        if self.main_menu:
            self.main_menu.reset()
        self.menu_stack = [self.main_menu]

    # menu delegate method
    def menu_selected(self, menu):
        """React to a confirmed menu selection (destination or exploration)."""
        rospy.logdebug("menu_selected, %s, %s"%(menu.identifier, menu.type))
        if menu.identifier == "destination_menu":
            event = NavigationEvent("destination", menu.value.value)
            self.process_event(event)

        if menu.identifier == "main_menu" and menu.value is not None:
            rospy.loginfo(menu.value)
            rospy.loginfo(menu.value.identifier)
            if menu.value.identifier == "exploration_menu":
                event = ExplorationEvent("start")
                self.process_event(event)

    # event delegate method
    def process_event(self, event):
        '''
        all events go through this method
        '''
        #rospy.loginfo("process_event %s", str(event))
        self._event_mapper.push(event)
        self._process_menu_event(event)
        self._process_navigation_event(event)
        self._process_exploration_event(event)

    def _process_menu_event(self, event):
        '''
        process only menu event
        '''
        if event.type != MenuEvent.TYPE:
            return

        curr_menu = self.menu_stack[-1]

        if event.subtype == "next":
            curr_menu.next()
            self._interface.menu_changed(menu=curr_menu)

        elif event.subtype == "prev":
            curr_menu.prev()
            self._interface.menu_changed(menu=curr_menu)

        elif event.subtype == "select":
            selected = curr_menu.select()
            if selected is None: ## from main menu
                if curr_menu.value is None:
                    curr_menu.next()
                selected = curr_menu
            elif not selected.can_explore:
                # leaf item selected: selection is done, go back to the top
                self.reset()
            elif selected is not curr_menu:
                # descend into a submenu
                self.menu_stack.append(selected)
                if selected.value is None:
                    selected.next()

            self._interface.menu_changed(menu=selected, usage=True)

        elif event.subtype == "back":
            if len(self.menu_stack) > 1:
                self.menu_stack.pop()
                curr_menu = self.menu_stack[-1]

            self._interface.menu_changed(menu=curr_menu, backed=True)

            self.speed = 0
            #self.cancel_pub.publish(True)
            self._navigation.pause_navigation()

    def _process_navigation_event(self, event):
        """Handle navigation subtype events (speed, destination, summons,
        cancel/pause/resume, arrival)."""
        if event.type != NavigationEvent.TYPE:
            return

        if event.subtype == "speedup":
            self.speed_menu.prev()
            self._interface.menu_changed(menu=self.speed_menu)
            e = NavigationEvent("sound", "SpeedUp")
            msg = std_msgs.msg.String()
            msg.data = str(e)
            self._eventPub.publish(msg)

        if event.subtype == "speeddown":
            self.speed_menu.next()
            self._interface.menu_changed(menu=self.speed_menu)
            e = NavigationEvent("sound", "SpeedDown")
            msg = std_msgs.msg.String()
            msg.data = str(e)
            self._eventPub.publish(msg)

        if event.subtype == "destination":
            rospy.loginfo("Destination: "+event.param)
            self._retry_count = 0
            self._navigation.set_destination(event.param)
            self.destination = event.param

            ## change handle mode: touch speed control active when user leads
            try:
                self._touchModeProxy(True)
            except rospy.ServiceException as e:
                rospy.loginfo("Could not set touch mode to True")

            try:
                self._userSpeedEnabledProxy(True)
            except rospy.ServiceException as e:
                rospy.loginfo("Could not set user speed enabled to True")

            ## change state
            # change to waiting_action by using actionlib
            self._status_manager.set_state(State.in_action)

        if event.subtype == "summons":
            rospy.loginfo("Summons Destination: "+event.param)
            self._navigation.set_destination(event.param)
            self.destination = event.param

            ## change handle mode: robot moves alone, so touch control off
            try:
                self._touchModeProxy(False)
            except rospy.ServiceException as e:
                rospy.loginfo("Could not set touch mode to False")

            try:
                self._userSpeedEnabledProxy(False)
            except rospy.ServiceException as e:
                rospy.loginfo("Could not set user speed enabled to False")

            ## change state
            # change to waiting_action by using actionlib
            self._status_manager.set_state(State.in_summons)

        if event.subtype == "event":
            self._navigation.process_event(event)

        if event.subtype == "cancel":
            rospy.loginfo("NavigationState: User Cancel requested")
            if self._status_manager.state == State.in_action or \
               self._status_manager.state == State.in_summons:
                rospy.loginfo("NavigationState: canceling (user)")
                self._interface.cancel_navigation()
                self._navigation.cancel_navigation()
                self.in_navigation = False
                self.destination = None
                self._status_manager.set_state(State.idle)
                rospy.loginfo("NavigationState: canceled (user)")
            else:
                rospy.loginfo("NavigationState: state is not in action state={}".format(self._status_manager.state))

        if event.subtype == "pause":
            rospy.loginfo("NavigationState: User Pause requested")
            if self._status_manager.state == State.in_action or \
               self._status_manager.state == State.in_summons:
                rospy.loginfo("NavigationState: pausing (user)")
                self._status_manager.set_state(State.in_pausing)
                self._interface.pause_navigation()
                self._navigation.pause_navigation()
                self._status_manager.set_state(State.in_pause)
                rospy.loginfo("NavigationState: paused (user)")
            else:
                # force to pause state
                rospy.loginfo("NavigationState: state is not in action state={}".format(self._status_manager.state))
                #self._status_manager.set_state(State.in_pausing)
                #self._navigation.pause_navigation()
                #self._status_manager.set_state(State.in_pause)

        if event.subtype == "resume":
            if self.destination is not None:
                rospy.loginfo("NavigationState: User Resume requested")
                if self._status_manager.state == State.in_pause:
                    rospy.loginfo("NavigationState: resuming (user)")
                    self._interface.resume_navigation()
                    self._navigation.resume_navigation()
                    self._status_manager.set_state(State.in_action)
                    rospy.loginfo("NavigationState: resumed (user)")
                else:
                    rospy.loginfo("NavigationState: state is not in pause state")
            else:
                # no destination set: interpret resume as "next"
                rospy.loginfo("NavigationState: Next")
                e = NavigationEvent("next", None)
                msg = std_msgs.msg.String()
                msg.data = str(e)
                self._eventPub.publish(msg)

        if event.subtype == "arrived":
            self.destination = None

    def _process_exploration_event(self, event):
        """Handle exploration subtype events (currently only 'start')."""
        if event.type != ExplorationEvent.TYPE:
            return

        if event.subtype == "start":
            self._interface.start_exploration()
            self._exploration.start_exploration()
class EventMapper(object):
    """Maps raw button/click input events onto higher-level UI events.

    Mapped events are forwarded to ``self.delegate`` — presumably assigned by
    the owner after construction (it is not set in ``__init__``); verify
    against the caller.
    """
    def __init__(self):
        self._manager = StatusManager.get_instance()
    def push(self, event):
        """Translate *event* and forward the mapped event to the delegate."""
        # NOTE(review): ``state`` is currently unused because the mapping was
        # simplified to always use map_button_to_navigation (see the
        # commented-out state-dependent dispatch below).
        state = self._manager.state
        if event.type != ButtonEvent.TYPE and event.type != ClickEvent.TYPE:
            return
        mevent = None
        # simplify the control
        mevent = self.map_button_to_navigation(event)
        '''
        if state == State.idle:
            mevent = self.map_button_to_menu(event)
        elif state == State.in_action or state == State.waiting_action:
            mevent = self.map_button_to_navigation(event)
        elif state == State.in_pause or state == State.waiting_pause:
            mevent = self.map_button_to_navigation(event)
        '''
        if mevent:
            self.delegate.process_event(mevent)
    def map_button_to_menu(self, event):
        """Map click events to menu events (currently unused code path)."""
        if event.type == "click" and event.count == 1:
            if event.buttons == cabot.button.BUTTON_NEXT:
                return MenuEvent(subtype="next")
            if event.buttons == cabot.button.BUTTON_PREV:
                return MenuEvent(subtype="prev")
            if event.buttons == cabot.button.BUTTON_SELECT:
                return MenuEvent(subtype="select")
        elif event.type == "click" and event.count == 2:
            if event.buttons == cabot.button.BUTTON_SELECT:
                return MenuEvent(subtype="back")
        return None
    def map_button_to_navigation(self, event):
        """Map physical button-down events to navigation control events."""
        if event.type == "button" and event.down:
            if event.button == cabot.button.BUTTON_UP:
                return NavigationEvent(subtype="speedup")
            if event.button == cabot.button.BUTTON_DOWN:
                return NavigationEvent(subtype="speeddown")
            if event.button == cabot.button.BUTTON_LEFT:
                return NavigationEvent(subtype="pause")
            if event.button == cabot.button.BUTTON_RIGHT:
                return NavigationEvent(subtype="resume")
        '''
        if event.button == cabot.button.BUTTON_SELECT:
            return NavigationEvent(subtype="pause")
        if event.type == "click":
            if event.buttons == cabot.button.BUTTON_SELECT and event.count == 2:
                return NavigationEvent(subtype="cancel")
            if event.buttons == cabot.button.BUTTON_NEXT and event.count == 2:
                return NavigationEvent(subtype="resume")
        '''
        return None
if __name__ == "__main__":
    rospy.init_node("cabot_ui_manager", log_level=rospy.DEBUG)
    try:
        CabotUIManager()
    except Exception:
        # Catch Exception instead of a bare ``except:`` so SystemExit and
        # KeyboardInterrupt can still terminate the node; the traceback is
        # still logged for any real failure.
        rospy.logerr(traceback.format_exc())
    rospy.spin()
| 1.648438 | 2 |
jobya/jobs/resources.py | xblzbjs/Jobya | 0 | 12757959 | <reponame>xblzbjs/Jobya
from django.utils.translation import gettext_lazy as _
from import_export import fields, resources
from import_export.widgets import ForeignKeyWidget
from jobya.companies.models import Company
from jobya.core.helper import VerboseExportMixin
from jobya.jobs.models import Category, Job
class CategoryResource(resources.ModelResource):
    """django-import-export resource for MPTT ``Category`` rows.

    Rows are identified by ``name`` (not the auto ``id``), and the parent
    category is resolved by its ``name`` as well.
    """
    parent = fields.Field(
        column_name="parent",
        attribute="parent",
        widget=ForeignKeyWidget(Category, "name"),
    )
    class Meta:
        model = Category
        skip_unchanged = True
        report_skipped = True
        exclude = ("id",)
        import_id_fields = ("name",)
        # lft/rght/tree_id/level are MPTT's internal bookkeeping columns.
        fields = ("parent", "name", "lft", "rght", "tree_id", "level")
class JobExportResource(VerboseExportMixin, resources.ModelResource):
    """Export-only resource for ``Job`` with human-readable columns."""
    category = fields.Field(
        attribute="category",
        widget=ForeignKeyWidget(Category, field="name"),
        column_name=_("Category"),
    )
    company = fields.Field(
        attribute="company",
        widget=ForeignKeyWidget(Company, field="name"),
        column_name=_("Company"),
    )
    type = fields.Field(attribute="get_type_display", column_name=_("Type"))
    # The salary range field is split into two export columns via the
    # dehydrate_* hooks below.
    salary_min = fields.Field(column_name=_("Salary min"))
    salary_max = fields.Field(column_name=_("Salary max"))
    class Meta:
        model = Job
        fields = (
            "uuid",
            "title",
            "category",
            "company",
            "type",
            "description",
            "salary_min",
            "salary_max",
            "redirect_url",
            "support_remote",
        )
        export_order = fields
        exclude = ("id", "creator", "status")
    def dehydrate_salary_min(self, job) -> int:
        """Return the lower bound of the job's salary range."""
        return job.salary.lower
    def dehydrate_salary_max(self, job) -> int:
        """Return the upper bound of the job's salary range."""
        return job.salary.upper
| 2.09375 | 2 |
setup.py | pwj58/cuppa | 25 | 12757960 | <gh_stars>10-100
#          Copyright <NAME> 2014-2020
# Distributed under the Boost Software License, Version 1.0.
#    (See accompanying file LICENSE_1_0.txt or copy at
#          http://www.boost.org/LICENSE_1_0.txt)

#-------------------------------------------------------------------------------
#   setup.py
#-------------------------------------------------------------------------------

from setuptools import setup
import os

import cuppa.version
# ``get_version()`` is called on ``cuppa.utility.version`` below; import that
# module explicitly instead of relying on ``import cuppa.version`` to pull in
# the ``cuppa.utility`` subpackage as a side effect.
import cuppa.utility.version

# Long description for PyPI comes straight from the README.
with open( 'README.rst' ) as readme_file:
    long_description = readme_file.read()

setup(
    name             = 'cuppa',
    version          = cuppa.utility.version.get_version(),
    description      = 'Cuppa, an extension package to simplify and extend Scons',
    author           = 'ja11sop',
    url              = 'https://github.com/ja11sop/cuppa',
    license          = 'Boost Software License 1.0 - http://www.boost.org/LICENSE_1_0.txt',
    long_description = long_description,
    packages         = [
        'cuppa',
        'cuppa.core',
        'cuppa.cpp',
        'cuppa.cpp.templates',
        'cuppa.dependencies',
        'cuppa.dependencies.boost',
        'cuppa.method_helpers',
        'cuppa.methods',
        'cuppa.modules',
        'cuppa.platforms',
        'cuppa.profiles',
        'cuppa.project_generators',
        'cuppa.scms',
        'cuppa.test_report',
        'cuppa.toolchains',
        'cuppa.variants',
        'cuppa.utility',
    ],
    package_data = {
        'cuppa': [
            'VERSION',
            os.path.join( 'dependencies','boost','boost_bug_fix_1.73.0.diff' ),
            os.path.join( 'dependencies','boost','boost_test_patch_1.58.0.diff' ),
            os.path.join( 'dependencies','boost','boost_test_patch_1.67.0.diff' ),
            os.path.join( 'dependencies','boost','boost_test_patch_1.68.0.diff' ),
            os.path.join( 'dependencies','boost','boost_test_patch_1.71.0.diff' ),
            os.path.join( 'dependencies','boost','boost_test_patch_1.72.0.diff' ),
            os.path.join( 'cpp','templates','coverage_index.html' ),
            os.path.join( 'test_report','templates','test_report_index.html' ),
            os.path.join( 'test_report','templates','test_suite_index.html' ),
        ]
    },
    install_requires = [
        'colorama',
        'gcovr',
        'grip',
        'jinja2',
        'lxml',
        'psutil',
        'six',
    ],
    entry_points = {
        'console_scripts': [
            'cuppa = cuppa.__main__:main'
        ],
        'cuppa.method.plugins' : [
            'cuppa.test_report.generate_bitten_report = cuppa.test_report.generate_bitten_report:GenerateBittenReportMethod',
            'cuppa.test_report.html_report.generate_html_report = cuppa.test_report.html_report:GenerateHtmlReportMethod',
            'cuppa.test_report.html_report.collate_test_report = cuppa.test_report.html_report:CollateTestReportIndexMethod',
        ]
    },
    classifiers = [
        "Topic :: Software Development :: Build Tools",
        "Intended Audience :: Developers",
        "Development Status :: 4 - Beta",
        "License :: OSI Approved",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
    ],
    keywords = [
        'scons',
        'build',
        'c++',
    ]
)
| 1.546875 | 2 |
app.py | alimurtadho/ML-web-apps-diabetes-prediction | 0 | 12757961 | <gh_stars>0
import pandas as pd                 # pandas is a dataframe library
import matplotlib.pyplot as plt      # matplotlib.pyplot plot data
import numpy as np                   # numpy provides N-dim object support

df = pd.read_csv("diabetes.csv")   # load Pima data.  Adjust path as necessary
# print(df.shape) => (768,9)
# print(df.isnull().values.any()) # Checking for null values => False
# print(df.head()) #Prints first five records

from sklearn.model_selection import train_test_split

feature_col_names = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']
predicted_class_names = ['Outcome']

X = df[feature_col_names].values     # predictor feature columns (8 X m)
y = df[predicted_class_names].values # predicted class (1=true, 0=false) column (1 X m)
split_test_size = 0.30

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split_test_size, random_state=42)

from sklearn.preprocessing import Imputer
# NOTE(review): ``Imputer`` was removed in scikit-learn 0.22 — migrate to
# ``sklearn.impute.SimpleImputer`` when upgrading.

#Impute with mean all 0 readings
fill_0 = Imputer(missing_values=0, strategy="mean", axis=0)

X_train = fill_0.fit_transform(X_train)
# Fit the imputer on the training split only.  The original code called
# fit_transform() on the test split as well, which leaks test-set statistics
# into preprocessing; transform() applies the training means instead.
X_test = fill_0.transform(X_test)

from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics

rf_model = RandomForestClassifier(random_state = 42)      # Create random forest object
rf_model.fit(X_train, y_train.ravel())

rf_predict_train = rf_model.predict(X_train)
# training metrics
#print("Accuracy: {0:.4f}".format(metrics.accuracy_score(y_train, rf_predict_train)))

rf_predict_test = rf_model.predict(X_test)

# test metrics
print("Accuracy: {0:.4f}".format(metrics.accuracy_score(y_test, rf_predict_test)))
def RFPredict(Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age):
    """Predict the diabetes outcome (0 or 1) for a single patient.

    Uses the module-level ``rf_model`` fitted above; prints and returns the
    integer class label.
    """
    X_new = np.array([Pregnancies, Glucose, BloodPressure, SkinThickness,
                      Insulin, BMI, DiabetesPedigreeFunction, Age]).reshape(1, 8)
    rf_predict = rf_model.predict(X_new)
    # predict() returns a length-1 array; index it explicitly —
    # int(<1-element ndarray>) is deprecated in modern NumPy.
    rf_predict = int(rf_predict[0])
    print(rf_predict)
    return rf_predict
# @app.route('/predict_api',methods=['POST'])
# def predict_api():
# '''
# For direct API calls trought request
# '''
# data = request.get_json(force=True)
# prediction = model.predict([np.array(list(data.values()))])
# output = prediction[0]
# return jsonify(output)
# if __name__ == "__main__":
# app.run(debug=True)
#RFPredict(6, 148, 72, 35, 0, 33.6, 0.627, 50)
| 3.078125 | 3 |
python/5.concurrent/Thread/6.executor/1.tasks.py | dunitian/BaseCode | 25 | 12757962 | import time
import urllib.request
import concurrent.futures
url_list = [
"https://www.baidu.com", "https://www.qq.com", "https://www.sogou.com",
"https://www.cnblogs.com"
]
def get_html(url, timeout=10):
    """Download *url* and return the raw response body as bytes."""
    with urllib.request.urlopen(url, timeout=timeout) as response:
        body = response.read()
    return body
def main():
    """Fetch every URL in ``url_list`` concurrently, printing body sizes."""
    started = time.time()
    with concurrent.futures.ThreadPoolExecutor() as pool:
        # Submit one download task per URL.
        pending = [pool.submit(get_html, link) for link in url_list]
        # Handle results in completion order rather than submission order.
        for done in concurrent.futures.as_completed(pending):
            try:
                page = done.result()
            except Exception as err:
                print(err)
            else:
                print(len(page))
    print(time.time() - started)
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 3.21875 | 3 |
examples/rofi-alt-tab/dbus_window_daemon.py | thecjharries/py-rofi-bus | 0 | 12757963 | <gh_stars>0
# pylint:disable=W,C,R
from xpybutil.ewmh import (
get_client_list,
get_wm_desktop,
request_active_window,
request_current_desktop,
)
from py_rofi_bus.dbus import Daemon
class DbusWindowDaemon(Daemon):
    """D-Bus daemon exposing an MRU-ordered window list plus activation.

    The interface XML below is published verbatim on the bus; method names
    must match the Python methods of this class.
    """
    INTERFACE_NAME = "pro.wizardsoftheweb.pyrofibus.daemon.window_properties"
    dbus = """
    <node>
        <interface name='{}'>
            <method name='is_running'>
                <arg type='b' name='response' direction='out'/>
            </method>
            <method name='start'>
            </method>
            <method name='stop'>
            </method>
            <method name='update_active_window'>
                <arg type='i' name='a' direction='in'/>
            </method>
            <method name='activate_window'>
                <arg type='i' name='a' direction='in'/>
            </method>
            <method name='get_window_list'>
                <arg type='ai' name='response' direction='out'/>
            </method>
        </interface>
    </node>
    """.format(INTERFACE_NAME)
    def __init__(self, *args, **kwargs):
        Daemon.__init__(self, *args, **kwargs)
        # super(DbusWindowDaemon, self).__init__(*args, **kwargs)
        # Seed the MRU list with whatever windows exist right now.
        self.window_ids = self.get_all_window_ids()
    @staticmethod
    def get_all_window_ids():
        """Return the window manager's current client list (EWMH)."""
        return get_client_list().reply()
    def update_active_window(self, active_window_id):
        """Move *active_window_id* to the front and resync with the WM.

        After promoting the active window, new windows reported by the WM are
        appended and windows that no longer exist are removed.
        """
        global_window_ids = self.get_all_window_ids()
        # Promote the active window to the head of the MRU list.
        if active_window_id in self.window_ids:
            self.window_ids.remove(active_window_id)
        self.window_ids.insert(0, active_window_id)
        # ``current_window_ids`` ends up holding only stale (closed) windows.
        current_window_ids = self.window_ids[:]
        for window_id in global_window_ids:
            if window_id in self.window_ids:
                current_window_ids.remove(window_id)
            else:
                self.window_ids.append(window_id)
        for window_id in current_window_ids:
            self.window_ids.remove(window_id)
    @staticmethod
    def activate_window(window_id):
        """Switch to the window's desktop, then focus the window."""
        request_current_desktop(get_wm_desktop(window_id).reply())
        request_active_window(window_id)
    def get_window_list(self):
        """Return the MRU-ordered list of known window ids."""
        return self.window_ids
# Start the daemon when executed directly.
if '__main__' == __name__:
    DbusWindowDaemon.bootstrap()
| 1.984375 | 2 |
quex/output/analyzer/configuration.py | smmckay/quex-mirror | 0 | 12757964 | <filename>quex/output/analyzer/configuration.py
from quex.engine.misc.string_handling import blue_print
# import quex.output.analyzer.adapt as adapt
import quex.blackboard as blackboard
from quex.blackboard import setup as Setup, Lng
import quex.token_db as token_db
from quex.DEFINITIONS import QUEX_VERSION
import time
from itertools import chain
def do(ModeDb):
    """Render the analyzer configuration file from its template.

    Fills in buffer sizes, fallback region size, and engine-encoding
    constants, then substitutes them into the language-specific template.
    """
    assert ModeDb
    txt = Lng.open_template(Lng.analyzer_configuration_file())
    lexatom_loader_seek_buffer_size = 512
    indentation_stack_size = 64
    buffer_size = 131072
    buffer_size_min = 32768
    converter_buffer_size = 65536
    mode_stack_size = 64
    # NOTE(review): this first fallback_n computation is a dead store — the
    # if/else block right below always reassigns fallback_n on every path.
    if buffer_size_min >= 1024: fallback_n = 256
    elif buffer_size_min >= 16: fallback_n = buffer_size >> 4
    else: fallback_n = 0
    if Setup.fallback_optional_f:
        fallback_n = 0
    else:
        # Fallback must cover the longest pre-context of any mode.
        pre_context_lengths = [m.longest_pre_context() for m in ModeDb.itervalues()]
        if None in pre_context_lengths or not pre_context_lengths:
            # Assert: any specification has taken care of constraint:
            assert not Setup.fallback_mandatory_f
            fallback_n = 0
        else:
            fallback_n = "%s" % max(pre_context_lengths)
    # Settings the user may override vs. settings fixed at generation time.
    adaptable_list = [
        ("BUFFER_SIZE",                                  "%s" % buffer_size),
        ("BUFFER_FALLBACK_N",                            "%s" % fallback_n),
        ("BUFFER_SIZE_MIN",                              "QUEX_SETTING_BUFFER_SIZE"),
        ("INDENTATION_STACK_SIZE",                       "%s" % indentation_stack_size),
        ("BUFFER_LEXATOM_LOADER_CONVERTER_BUFFER_SIZE",  "(size_t)%s" % converter_buffer_size),
        ("BUFFER_LEXATOM_LOADER_SEEK_BUFFER_SIZE",       lexatom_loader_seek_buffer_size),
        ("MODE_STACK_SIZE",                              "(size_t)%s" % mode_stack_size),
        ("TOKEN_QUEUE_SIZE",                             "(size_t)%s" % repr(Setup.token_queue_size)),
    ]
    immutable_list = [
        ("VERSION",                           '"%s"' % QUEX_VERSION),
        ("ANALYZER_VERSION",                  '"%s"' % Setup.user_application_version_id),
        ("BUILD_DATE",                        '"%s"' % time.asctime()),
        ("MODE_INITIAL_P",                    '&%s' % Lng.NAME_IN_NAMESPACE_MAIN(blackboard.initial_mode.get_pure_text())),
        ("BUFFER_LEXATOM_BUFFER_BORDER",      "0x%X" % Setup.buffer_limit_code),
        ("BUFFER_LEXATOM_NEWLINE",            _lexatom_newline_in_engine_encoding()),
        ("BUFFER_LEXATOM_PATH_TERMINATION",   "0x%X" % Setup.path_limit_code),
        ("BUFFER_FALLBACK_N",                 "%s" % fallback_n),
        ("FALLBACK_MANDATORY",                {True: Lng.TRUE, False: Lng.FALSE}[Setup.fallback_mandatory_f])
    ]
    adaptable_txt = [ Lng.QUEX_SETTING_DEF(name, value) for name, value in adaptable_list ]
    immutable_txt = [ Lng.QUEX_SETTING_DEF(name, value) for name, value in immutable_list ]
    setting_list  = [ name for name, dummy in chain(adaptable_list, immutable_list) ]
    txt = blue_print(txt, [
          ["$$ADAPTABLE$$",                        "\n".join(adaptable_txt)],
          ["$$IMMUTABLE$$",                        "\n".join(immutable_txt)],
          ["$$TYPE_DEFINITIONS$$",                 _type_definitions()],
          ["$$HELP_IF_CONFIGURATION_BY_CMAKE$$",   Lng.HELP_IF_CONFIGURATION_BY_CMAKE(adaptable_list + immutable_list)],
          ["$$ERROR_IF_NO_CONFIGURATION_BY_MACRO$$", Lng.ERROR_IF_DEFINED_AND_NOT_CONFIGURATION_BY_MACRO(setting_list)],
    ])
    return txt # adapt.do(txt, Setup.output_directory)
def _type_definitions():
    """Render the customized lexer type definitions, framed in the main namespace."""
    token_descr     = token_db.token_type_definition
    type_goto_label = "void*" if Setup.computed_gotos_f else "int32_t"
    type_def_list = [
        ("lexatom_t",         Setup.lexatom.type),
        ("token_id_t",        token_descr.token_id_type),
        ("token_line_n_t",    token_descr.line_number_type.get_pure_text()),
        ("token_column_n_t",  token_descr.column_number_type.get_pure_text()),
        ("acceptance_id_t",   "int"),
        ("indentation_t",     "int"),
        ("stream_position_t", "intmax_t"),
        ("goto_label_t",      type_goto_label)
    ]
    # 'indentation_t' is only emitted when indentation counting is required.
    if blackboard.required_support_indentation_count():
        excluded = ""
    else:
        excluded = "indentation_t"
    lines = [
        Lng.QUEX_TYPE_DEF(original, customized_name)
        for customized_name, original in type_def_list
        if customized_name != excluded
    ]
    def_str = "\n".join(lines) + "\n"
    return Lng.FRAME_IN_NAMESPACE_MAIN(def_str)
def _lexatom_newline_in_engine_encoding():
    """Return the code point of '\\n' in the engine's buffer encoding."""
    if not blackboard.required_support_begin_of_line():
        # Anyway, the value is unused.
        return ord("\n")
    sequence = Setup.buffer_encoding.do_single(ord('\n'))
    # Any misfit must be caught at pattern definition time.
    if len(sequence) != 1:
        return ord("\n")
    return sequence[0]
| 2.125 | 2 |
data_preprocess.py | smlin2000/OmniAnomaly | 344 | 12757965 | <gh_stars>100-1000
import ast
import csv
import os
import sys
from pickle import dump
import numpy as np
from tfsnippet.utils import makedirs
output_folder = 'processed'
makedirs(output_folder, exist_ok=True)
def load_and_save(category, filename, dataset, dataset_folder):
    """Load one CSV split and pickle it as <dataset>_<category>.pkl."""
    source_path = os.path.join(dataset_folder, category, filename)
    data = np.genfromtxt(source_path,
                         dtype=np.float32,
                         delimiter=',')
    print(dataset, category, filename, data.shape)
    target_path = os.path.join(output_folder, dataset + "_" + category + ".pkl")
    with open(target_path, "wb") as file:
        dump(data, file)
def load_data(dataset):
    """Preprocess one dataset ('SMD', 'SMAP' or 'MSL') into pickle files.

    SMD: each machine file is converted split-by-split.  SMAP/MSL: labels are
    reconstructed from ``labeled_anomalies.csv`` and the per-channel ``.npy``
    files are concatenated per split.
    """
    if dataset == 'SMD':
        dataset_folder = 'ServerMachineDataset'
        file_list = os.listdir(os.path.join(dataset_folder, "train"))
        for filename in file_list:
            if filename.endswith('.txt'):
                # Use slicing to drop the '.txt' suffix; str.strip('.txt')
                # removes *characters* and would eat trailing 't'/'x' from
                # machine names as well.
                machine = filename[:-len('.txt')]
                load_and_save('train', filename, machine, dataset_folder)
                load_and_save('test', filename, machine, dataset_folder)
                load_and_save('test_label', filename, machine, dataset_folder)
    elif dataset == 'SMAP' or dataset == 'MSL':
        dataset_folder = 'data'
        with open(os.path.join(dataset_folder, 'labeled_anomalies.csv'), 'r') as file:
            csv_reader = csv.reader(file, delimiter=',')
            res = [row for row in csv_reader][1:]
        res = sorted(res, key=lambda k: k[0])
        label_folder = os.path.join(dataset_folder, 'test_label')
        makedirs(label_folder, exist_ok=True)
        data_info = [row for row in res if row[1] == dataset and row[0] != 'P-2']
        labels = []
        for row in data_info:
            anomalies = ast.literal_eval(row[2])
            length = int(row[-1])
            # ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` is
            # the supported spelling and behaves identically here.
            label = np.zeros([length], dtype=bool)
            for anomaly in anomalies:
                label[anomaly[0]:anomaly[1] + 1] = True
            labels.extend(label)
        labels = np.asarray(labels)
        print(dataset, 'test_label', labels.shape)
        with open(os.path.join(output_folder, dataset + "_" + 'test_label' + ".pkl"), "wb") as file:
            dump(labels, file)

        def concatenate_and_save(category):
            # Concatenate all channel arrays of a split into one big array.
            data = []
            for row in data_info:
                filename = row[0]
                temp = np.load(os.path.join(dataset_folder, category, filename + '.npy'))
                data.extend(temp)
            data = np.asarray(data)
            print(dataset, category, data.shape)
            with open(os.path.join(output_folder, dataset + "_" + category + ".pkl"), "wb") as file:
                dump(data, file)

        for c in ['train', 'test']:
            concatenate_and_save(c)
if __name__ == '__main__':
    # Each command-line argument names a dataset to preprocess; unknown names
    # are silently ignored (matching the original behavior).  The unused
    # ``load`` accumulator was removed.
    datasets = ['SMD', 'SMAP', 'MSL']
    commands = sys.argv[1:]
    if len(commands) > 0:
        for d in commands:
            if d in datasets:
                load_data(d)
    else:
        print("""
        Usage: python data_preprocess.py <datasets>
        where <datasets> should be one of ['SMD', 'SMAP', 'MSL']
        """)
| 2.4375 | 2 |
test/test_recipes.py | jamesbraid/pygrocydm | 3 | 12757966 | <reponame>jamesbraid/pygrocydm
import json
from test.test_const import CONST_BASE_URL, CONST_PORT, CONST_SSL
from unittest import TestCase
from requests.exceptions import HTTPError
from pygrocydm import GrocyAPI
from pygrocydm.recipes import RECIPES_ENDPOINT, Recipe
from pygrocydm.grocy_api_client import GrocyApiClient
class TestRecipe(TestCase):
    """Integration tests for the recipes endpoint.

    These run against a live grocy instance at CONST_BASE_URL in demo mode,
    so network access is required.
    """
    def setUp(self):
        self.grocy_api = GrocyAPI(CONST_BASE_URL, "demo_mode", verify_ssl = CONST_SSL, port = CONST_PORT)
        self.api_client = GrocyApiClient(CONST_BASE_URL, "demo_mode", verify_ssl=CONST_SSL, port=CONST_PORT)
        # Fulfillment info of recipe #1 is used as the reference payload.
        self.endpoint = f"{RECIPES_ENDPOINT}/1/fulfillment"
    def test_recipe_data_diff_valid(self):
        """Server response keys must match the locally mocked schema."""
        recipe = self.api_client.do_request("GET", self.endpoint)
        recipe_keys = recipe.keys()
        moked_recipe_json = """{
            "id": "1",
            "recipe_id": "1",
            "need_fulfilled": "0",
            "need_fulfilled_with_shopping_list": "0",
            "missing_products_count": "4",
            "costs": "24.25",
            "calories": "492.0"
        }"""
        moked_keys = json.loads(moked_recipe_json).keys()
        self.assertCountEqual(list(recipe_keys), list(moked_keys))
    def test_parse_json(self):
        """Fields of the parsed Recipe must be coerced to the right types."""
        recipe = Recipe(self.api_client, self.api_client.do_request("GET", self.endpoint))
        assert isinstance(recipe.id, int)
        assert isinstance(recipe.recipe_id, int)
        assert isinstance(recipe.need_fulfilled, bool)
        assert isinstance(recipe.need_fulfilled_with_shopping_list, bool)
        assert isinstance(recipe.missing_products_count, int)
        assert isinstance(recipe.costs, float)
        assert isinstance(recipe.calories, float)
    def test_add_product(self):
        """Adding missing products of recipe 2 to the shopping list succeeds."""
        recipes = self.grocy_api.recipes().fullfilment_list
        for recipe in recipes:
            if recipe.recipe_id == 2:
                recipe.add_not_fulfilled_products_to_shoppinglist()
                break
    def test_add_product_exclude(self):
        """Same as above, but excluding product id 17."""
        recipes = self.grocy_api.recipes().fullfilment_list
        for recipe in recipes:
            if recipe.recipe_id == 2:
                recipe.add_not_fulfilled_products_to_shoppinglist([17])
                break
    def test_consume_valid(self):
        """Consuming a fulfillable recipe (id 3) must not raise."""
        recipes = self.grocy_api.recipes().fullfilment_list
        for recipe in recipes:
            if recipe.recipe_id == 3:
                recipe.consume()
                break
    def test_consume_error(self):
        """Consuming a non-existent recipe (id 0) must raise HTTPError."""
        recipes = self.grocy_api.recipes().fullfilment_list
        for recipe in recipes:
            if recipe.recipe_id == 0:
                self.assertRaises(HTTPError, recipe.consume)
| 2.59375 | 3 |
docs/source/conf.py | prusse-martin/alfasim-sdk | 0 | 12757967 | # -*- coding: utf-8 -*-
import os
import subprocess
from pathlib import Path
# -- Breathe Configs for ReadTheDocs ------------------------------------------
# On ReadTheDocs the Doxygen XML must be generated at build time; locally it
# is expected under the regular _build tree.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if on_rtd:
    subprocess.run("cd ..; doxygen alfasim_sdk_api.cfg", shell=True)
    breathe_projects = {"alfasim_sdk_api": "../alfasim_sdk_api/xml"}
else:
    breathe_projects = {
        "alfasim_sdk_api": "../_build/breathe/doxygen/alfasim_sdk_api/xml"
    }

# -- Breathe Configs -------------------------------------------------------
breathe_default_project = "alfasim_sdk_api"

# NOTE(review): assumes the docs build runs with cwd two levels below the
# repository root — verify when moving the docs folder.
alfasim_sdk_api_project_folder = (
    Path(os.getcwd()).parents[1] / "src/alfasim_sdk/alfasim_sdk_api"
)

breathe_projects_source = {
    "alfasim_sdk_api": (
        alfasim_sdk_api_project_folder,
        ["common.h", "api.h", "detail/bootstrap_win.h"],
    )
}
breathe_doxygen_config_options = {
    "MACRO_EXPANSION": "YES",
    "EXPAND_ONLY_PREDEF": "YES",
    "PREDEFINED": "DLL_EXPORT= DOXYGEN_SHOULD_SKIP_THIS",
}

# -- Project information -----------------------------------------------------

project = ""
copyright = "2019, ESSS"
author = "ESSS"
version = ""
release = ""

# -- Options for Graphviz -------------------------------------------------
graphviz_dot = "dot"
graphviz_dot_args = ["-Tsvg"]
graphviz_output_format = "svg"

# -- General configuration ---------------------------------------------------

extensions = [
    "breathe",
    "sphinx.ext.autodoc",
    "sphinx_click.ext",
    "sphinx.ext.graphviz",
    "sphinx.ext.napoleon",
    "sphinx.ext.intersphinx",
    "sphinx_inline_tabs",
    "sphinx_copybutton",
    "sphinx_togglebutton",
]

add_module_names = False
autodoc_typehints = "none"
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
# Global substitutions available in every .rst page.
rst_prolog = """
.. |alfasim| replace:: :program:`ALFAsim`
.. |sdk| replace:: :program:`ALFAsim-SDK`
.. |template-command| replace:: :ref:`Template Command <alfasim_sdk_cli_template_section>`
.. |gui_hook| replace:: :py:func:`alfasim_get_data_model_type`
.. |container| replace:: :py:func:`alfasim_sdk.container_model`
.. |model| replace:: :py:func:`alfasim_sdk.data_model`
.. |s_variable| replace:: :py:func:`alfasim_sdk.SecondaryVariable`
.. |s_variable_hook| replace:: :py:func:`alfasim_get_additional_variables`
.. |marker_1| image:: /_static/images/marker_1.png
    :scale: 80%
.. |marker_2| image:: /_static/images/marker_2.png
    :scale: 80%
.. # define a hard line break for HTML
.. |br| raw:: html

   <br />

.. |tracer_warning_text| replace::
    This is advanced customization. We strongly encourage the plugin developer to read the Tracers chapter of
    |alfasim|'s Technical Report available on the `Help` menu at |alfasim| application.

.. |manual| image:: /_static/images/help_menu_technical_manual.png
    :scale: 80%

.. |tracer_warn| replace::
    |tracer_warning_text| |br|
    |manual|

"""
language = None
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "breathe/*"]

# -- Options for HTML output -------------------------------------------------
html_theme = "pydata_sphinx_theme"
html_logo = "_static/images/logo-alfasim.svg"
html_theme_options = {
    "github_url": "https://github.com/esss/alfasim-sdk",
    "google_analytics_id": "UA-149094345-1",
}
html_static_path = ["_static"]
html_css_files = [
    "css/custom.css",
]
html_favicon = "_static/images/alfasim_gui.ico"

# -- Options for intersphinx -------------------------------------------------
intersphinx_mapping = {
    "python": ("http://docs.python.org/3", None),
    "barril": ("https://barril.readthedocs.io/en/latest/", None),
}

# -- Options for Autodoc -----------------------------------------------------
autodoc_member_order = "groupwise"
migration_checker/checks/rename_model.py | kolonialno/django-migrations-checker | 4 | 12757968 | from django.db.migrations import Migration
from django.db.migrations.operations import RenameModel
from django.db.migrations.state import ProjectState
def check_rename_model(*, migration: Migration, state: ProjectState) -> list[str]:
    """Warn when a migration renames a model, which is unsafe while in use."""
    has_rename = any(
        isinstance(operation, RenameModel) for operation in migration.operations
    )
    if not has_rename:
        return []
    return [
        "🚨 Renaming a model is not safe\n"
        "This migration is renaming a model. That is not safe if the model "
        "is in use. Please add a new model, copy data, and remove the old "
        "model instead."
    ]
| 2.203125 | 2 |
fungi/validators.py | pablo-martin/Mushroom-Hikes | 0 | 12757969 | <reponame>pablo-martin/Mushroom-Hikes<gh_stars>0
from cerberus import Validator
paths_schema = {'BASE_DIR': {'type': 'string'},
'DATA_DIR': {'type': 'string'},
'BOTTLENECK_DIR': {'type': 'string'},
'GRAPH_DIR': {'type': 'string'}}
model_schema = {}
data_schema = {}
training_schema = {}
'''
--------------------------------------------------------------------------------
Creating Validators
--------------------------------------------------------------------------------
'''
SCHEMAS = {'paths': paths_schema,
'model': model_schema,
'data': data_schema,
'training': training_schema}
VALIDATORS = {key: Validator(schema) for key, schema in SCHEMAS.items()}
def get_schema(module=None):
    """Return the schema for *module*, or the whole registry when None."""
    if module is None:
        return SCHEMAS
    return SCHEMAS[module]
def get_validator(module=None):
    """Return the validator for *module*, or the whole registry when None."""
    if module is None:
        return VALIDATORS
    return VALIDATORS[module]
| 2.265625 | 2 |
stinkin/lincoln/migrations/0002_auto_20170423_0241.py | NewsNerdsAtCoJMC/ProjectTicoTeam7 | 0 | 12757970 | <reponame>NewsNerdsAtCoJMC/ProjectTicoTeam7
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-04-23 02:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the ``lincoln`` app.

    Replaces ``Pothole.address`` with ``district``/``location``/``source``
    fields, adds a required ``status``, and switches the primary key to a
    CharField.  Do not edit the operations by hand.
    """

    dependencies = [
        ('lincoln', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='pothole',
            name='address',
        ),
        migrations.AddField(
            model_name='pothole',
            name='district',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='pothole',
            name='location',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='pothole',
            name='source',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='pothole',
            name='status',
            field=models.CharField(default='', max_length=255),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='pothole',
            name='id',
            field=models.CharField(max_length=255, primary_key=True, serialize=False),
        ),
    ]
| 1.6875 | 2 |
sidebar/admin.py | renjigr/django-sidebar | 0 | 12757971 | from django.contrib import admin
from sidebar.models import Sidebar
from django import forms
from django.db import models
class SidebarAdmin(admin.ModelAdmin):
    """Admin form for Sidebar — only the ``position`` field is editable."""
    fields = ('position',)
# Register the model with its customized admin class.
admin.site.register(Sidebar, SidebarAdmin)
| 1.335938 | 1 |
students/k3342/practical_works/Reybandt Alexandr/docker/backend/flight_board/apps.py | Reybandt/ITMO_ICT_WebProgramming_2020 | 0 | 12757972 | <gh_stars>0
from django.apps import AppConfig
class FlightBoardConfig(AppConfig):
    """Django application configuration for the ``flight_board`` app."""
    name = 'flight_board'
| 1.179688 | 1 |
app/extensions/providers/openstack.py | subhransusekhar/cloud-core | 0 | 12757973 | # encoding: utf-8
# pylint: disable=no-self-use
"""
OpenStack provider setup.
"""
from datetime import datetime, timedelta
import functools
import logging
import openstack
import openstack.cloud
import types
from shade import *
from novaclient import client
from retrying import retry
from flask_login import current_user
from flask_restplus_patched import Resource
from flask_restplus_patched._http import HTTPStatus
from app.extensions.api import Namespace, abort
import sqlalchemy
import os
from app.extensions import api, db
import requests, ssl, json
from requests.auth import HTTPBasicAuth
log = logging.getLogger(__name__)
openstack.enable_logging()
class OpenStackProvider(object):
    def __init__(self, app=None):
        """Optionally bind to a Flask *app* immediately (see ``init_app``)."""
        if app:
            self.init_app(app)
    def init_app(self, app):
        """Initialize default request headers; returns self for chaining.

        NOTE(review): ``app`` is currently unused — confirm whether the
        Flask-extension contract expects it to be stored.
        """
        self.headers = {
            'Content-Type': "application/json",
            'Cache-Control': "no-cache"
        }
        return self
def connect(self, provider):
if provider.identity_api_version == 3:
conn = openstack.connect(
auth_url=provider.auth_url,
project_name=provider.project_name,
username=provider.username,
password=<PASSWORD>,
region_name=provider.region_name,
app_name='api',
app_version='1.0',
verify=False,
identity_api_version=provider.identity_api_version,
user_domain_name=provider.user_domain_id,
project_domain_id=provider.user_domain_id,
)
elif provider.identity_api_version == 2:
conn = openstack.connect(
auth_url=provider.auth_url,
tenant_name=provider.project_name,
username=provider.username,
password=<PASSWORD>,
region_name=provider.region_name,
app_name='api',
app_version='1.0',
verify=False,
identity_api_version=provider.identity_api_version,
)
return conn
    def create_provider(self, provider):
        """Collect the provider's public images and flavors into one dict.

        Returns ``{'images': [...], 'flavors': [...]}``; on any failure the
        request is aborted with HTTP 422 carrying the exception text.
        """
        try:
            conn = self.connect(provider)
            new_provider = {}
            _images = []
            images = conn.list_images()
            # log.info("images list %s :",images)
            for image in images:
                # log.info("image in image list %s :",image)
                if image.visibility == 'public':
                    # Default every optional attribute first; OpenStack images
                    # do not always carry all of these fields.
                    name = ''
                    size = 0
                    minRam = 0
                    minDisk = 0
                    # NOTE(review): this local ``os`` shadows the imported
                    # ``os`` module inside this method.
                    os = ''
                    os_version = ''
                    os_architecture = ''
                    cost = 0
                    if 'name' in image:
                        name = image.name
                    if 'size' in image:
                        size = image.size
                    if 'minRam' in image:
                        minRam = image.minRam
                    if 'minDisk' in image:
                        minDisk = image.minDisk
                    if 'os' in image:
                        os = image.os
                    if 'os_version' in image:
                        os_version = image.os_version
                    if 'os_architecture' in image:
                        os_architecture = image.os_architecture
                    if 'cost' in image:
                        cost = image.cost
                    temp = {
                        'id': image.id,
                        'name': name,
                        'size': size,
                        'min_ram': minRam,
                        'min_disk': minDisk,
                        'os': os,
                        'os_version': os_version,
                        'os_architecture': os_architecture,
                        'cost':cost
                    }
                    _images.append(temp)
            new_provider['images']=_images
            _flavors = []
            flavors = conn.list_flavors()
            # Fallback weights for well-known flavor names that lack an
            # explicit 'weight' attribute.
            flavor_name_weight_value = {"Small-I":100,"Small-II":200,"Small-III":300,"Small-IV":400,"Small-V":500,"Medium-I":600,"Medium-II":700,"Medium-III":800,"Medium-IV":900,"Medium-V":1000,"Medium-VI":1100,"Large-I":1200,"Large-II":1300,"Large-III":1400,"Large-IV":1500,"Large-V":1600,"Large-VI":1700,"Large-VII":1800,"gateway-flavor":750}
            for flavor in flavors:
                weight_value = 0
                if 'weight' in flavor:
                    weight_value = flavor.weight
                elif flavor.name in flavor_name_weight_value:
                    weight_value = flavor_name_weight_value[flavor.name]
                cost_value = 0
                if 'cost' in flavor:
                    cost_value = flavor.cost
                _flavor = {
                    'id': flavor.id,
                    'name': flavor.name,
                    'ram': flavor.ram,
                    'vcpus': flavor.vcpus,
                    'disk': flavor.disk,
                    'weight': weight_value,
                    'cost': cost_value,
                }
                _flavors.append(_flavor)
            new_provider['flavors']=_flavors
            return new_provider
        except Exception as e:
            log.info("Exception: %s", e)
            abort(
                code=HTTPStatus.UNPROCESSABLE_ENTITY,
                message="%s" % e
            )
def create_flavor(self, provider, **kwargs):
conn =self.connect(provider)
flavor = conn.create_flavor(kwargs['name'], kwargs['ram'], kwargs['vcpus'], kwargs['disk'], flavorid='auto', ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True)
log.info(flavor)
return flavor
def delete_flavor(self, provider, name_or_id):
conn = self.connect(provider)
deleted_falvor = conn.delete_flavor(name_or_id)
log.info(deleted_falvor)
return deleted_falvor
def get_flavors(self, provider):
conn = self.connect(provider)
_flavors = []
flavors = conn.list_flavors()
flavor_name_weight_value = {"Small-I": 100, "Small-II": 200, "Small-III": 300, "Small-IV": 400, "Small-V": 500,
"Medium-I": 600, "Medium-II": 700, "Medium-III": 800, "Medium-IV": 900,
"Medium-V": 1000, "Medium-VI": 1100, "Large-I": 1200, "Large-II": 1300,
"Large-III": 1400, "Large-IV": 1500, "Large-V": 1600, "Large-VI": 1700,
"Large-VII": 1800, "gateway-flavor": 750}
for flavor in flavors:
weight_value = 0
if 'weight' in flavor:
weight_value = flavor.weight
elif flavor.name in flavor_name_weight_value:
weight_value = flavor_name_weight_value[flavor.name]
cost_value = 0
if 'cost' in flavor:
cost_value = flavor.cost
_flavor = {
'id': flavor.id,
'name': flavor.name,
'ram': flavor.ram,
'vcpus': flavor.vcpus,
'disk': flavor.disk,
'weight': weight_value,
'cost': cost_value,
}
_flavors.append(_flavor)
return _flavors
def create_server(self, provider, **kwargs):
try:
log.info("Creating Server for Project ID: %s", kwargs['project_name'])
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
server_meta = { "Image ID" : kwargs['image'], "Image Name": kwargs['image_name']}
log.info(server_meta)
_instance = conn.create_server(wait=True, auto_ip=False,
name=kwargs['name'],
image=kwargs['image'],
flavor=kwargs['flavor'],
network=kwargs['network'],
userdata=kwargs['userdata'],
boot_from_volume=kwargs['boot_from_volume'],
volume_size=kwargs['volume_size'],
timeout = 3600,
terminate_volume = True,
meta = server_meta,
security_groups=[kwargs['security_groups']]
)
# f_ip = conn.available_floating_ip()
# created_instance_details = conn.add_ip_list(_instance, [f_ip['floating_ip_address']])
# server = conn.compute.wait_for_server(_instance)
# log.info("IP list: %s", f_ip)
# log.info("IP: %s", f_ip['floating_ip_address'])
# if server.public_v4 == '':
# server.public_v4 = f_ip['floating_ip_address']
log.info("aaaya")
log.info(_instance)
return _instance
except Exception as e:
log.info("Server Create Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def update_server(self, provider, **kwargs):
#try:
#log.info("Creating Server for Project ID: %s", kwargs['project_name'])
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
_instance = conn.update_server(
kwargs['server_id'],
networks=kwargs['network'],
timeout = 3600,
)
# f_ip = conn.available_floating_ip()
# created_instance_details = conn.add_ip_list(_instance, [f_ip['floating_ip_address']])
# server = conn.compute.wait_for_server(_instance)
# log.info("IP list: %s", f_ip)
# log.info("IP: %s", f_ip['floating_ip_address'])
# if server.public_v4 == '':
# server.public_v4 = f_ip['floating_ip_address']
return _instance
# except Exception as e:
# log.info("Server Create Exception: %s", e)
# abort(
# code=HTTPStatus.UNPROCESSABLE_ENTITY,
# message="%s" % e
# )
def list_servers(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.list_servers(detailed=False, all_projects=False, bare=False, filters=None)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_server(self, provider, name_or_id, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.get_server(name_or_id)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_server_by_id(self, provider, server_id, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.get_server_by_id(server_id)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_server(self, provider, name_or_id, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.delete_server(name_or_id, wait=True, timeout=3600, delete_ips=True, delete_ip_retry=5)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def server_action(self, provider, name_or_id, action, **kwargs):
try:
conn = self.connect(provider)
_e_status = 'ACTIVE'
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
if action == 'reboot':
conn.compute.reboot_server(name_or_id, 'SOFT')
_e_status = 'ACTIVE'
elif action == 'hard_reboot':
conn.compute.reboot_server(name_or_id, 'HARD')
_e_status = 'ACTIVE'
elif action == 'pause':
conn.compute.pause_server(name_or_id)
_e_status = 'PAUSED'
elif action == 'unpause':
conn.compute.unpause_server(name_or_id)
_e_status = 'ACTIVE'
elif action == 'rebuild':
conn.compute.rebuild_server(name_or_id)
_e_status = 'ACTIVE'
elif action == 'start':
conn.compute.start_server(name_or_id)
_e_status = 'ACTIVE'
elif action == 'stop':
conn.compute.stop_server(name_or_id)
_e_status = 'SHUTOFF'
elif action == 'resize_server':
_e_status = 'ACTIVE'
conn.compute.resize_server(name_or_id, kwargs['provider_flavor_id'])
elif action == 'confirm_server_resize':
conn.compute.confirm_server_resize(name_or_id)
_e_status = 'ACTIVE'
elif action == 'revert_server_resize':
conn.compute.revert_server_resize(name_or_id)
_e_status = 'ACTIVE'
elif action == 'status':
_e_status = 'STATUS'
log.info('action to carry out: %s', action)
else:
abort(
code=HTTPStatus.NOT_FOUND,
message="Action does not exist"
)
return _e_status
except Exception as e:
log.info("Exception: %s", e)
# abort(
# code=HTTPStatus.UNPROCESSABLE_ENTITY,
# message="%s" % e
# )
finally:
return _e_status
def get_server_console(self, provider, name_or_id, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.get_server_console(name_or_id, kwargs['length'])
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_server_console_url(self, provider, name_or_id, **kwargs):
try:
if provider.identity_api_version == 3:
conn = client.Client(2, provider.username, provider.password, provider.project_name, provider.auth_url, user_domain_id=provider.user_domain_id)
else:
conn = client.Client(2, provider.username, provider.password, provider.project_name, provider.auth_url)
server = conn.servers.get(name_or_id)
if 'console_type' in kwargs:
console_type = kwargs['console_type']
else :
console_type = 'novnc'
server_console = server.get_console_url(console_type)
return server_console['console']['url']
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def list_images(self, provider, **kwargs):
try:
conn = self.connect(provider)
return conn.list_images()
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def list_flavors(self, provider, **kwargs):
try:
conn = self.connect(provider)
return conn.list_flavors()
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_image_snapshot(self, provider, name, server, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.create_image_snapshot(name, server, wait=True, timeout=3600)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_image(self, provider, name, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.delete_image(name, wait=False, timeout=3600, delete_objects=True)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def list_networks(self, provider, external=False, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
_networks = []
networks = conn.list_networks(filters={'router:external': external})
for network in networks:
_network = {}
_network['id'] = network.id
_network['name'] = network.name
_network['status'] = network.status
_network['external'] = network['router:external']
_subnet_networks = []
for network_subnet in network.subnets:
_network_subnet = {}
subnet = conn.get_subnet(network_subnet)
_network_subnet['subnet_id'] = subnet.id
_network_subnet['subnet_name'] = subnet.name
_network_subnet['subnet_cidr'] = subnet.cidr
_network_subnet['subnet_gateway_ip'] = subnet.gateway_ip
_network_subnet['subnet_ip_version'] = subnet.ip_version
_subnet_networks.append(_network_subnet)
_network['subnet'] = _subnet_networks
_networks.append(_network)
return _networks
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_network(self, provider, name, project_id, external=False, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.create_network(name, external=external, project_id=project_id)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_network(self, provider, name, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.delete_network(name)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_subnet(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.create_subnet(kwargs['network_name_or_id'],
cidr=kwargs['cidr'],
ip_version=kwargs['ip_version'],
enable_dhcp=False,
subnet_name=kwargs['subnet_name'],
tenant_id=kwargs['tenant_id'],
allocation_pools=None,
gateway_ip=None,
disable_gateway_ip=False,
dns_nameservers=None,
host_routes=None,
ipv6_ra_mode=None,
ipv6_address_mode=None,
use_default_subnetpool=False)
except Exception as e:
return e
# abort(
# code=HTTPStatus.UNPROCESSABLE_ENTITY,
# message="%s" % e
# )
def delete_subnet(self, provider, name, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.delete_subnet(name)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_router(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.create_router(name=kwargs['name'],
admin_state_up=True,
ext_gateway_net_id=None,
enable_snat=None,
ext_fixed_ips=None,
project_id=kwargs['project_id'],
availability_zone_hints=None
)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_router(self, provider, name, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.delete_router(name)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_security_group(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.create_security_group(kwargs['name'],
description=kwargs['description'],
project_id=kwargs['project_id'])
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_security_group(self, provider, name, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.delete_security_group(name)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_security_group_by_id(self, provider, name, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.get_security_group_by_id(name)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_security_group_rule(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.create_security_group_rule(kwargs['secgroup_name_or_id'],
port_range_min=kwargs['port_range_min'],
port_range_max=kwargs['port_range_max'],
protocol=kwargs['protocol'],
remote_ip_prefix=kwargs['remote_ip_prefix'],
remote_group_id=kwargs['remote_group_id'],
direction=kwargs['direction'],
ethertype=kwargs['ethertype'],
project_id=kwargs['project_id']
)
except Exception as e:
log.info("Security Group Rule Exception: %s", e)
return None
# abort(
# code=HTTPStatus.UNPROCESSABLE_ENTITY,
# message="%s" % e
# )
def delete_security_group_rule(self, provider, name, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.delete_security_group_rule(name)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
    def create_project(self, provider, **kwargs):
        """Provision a tenant project via a Heat stack and bootstrap it.

        Creates the project stack (tenant/network/subnet/router), polls its
        outputs with a short @retry loop, then — connected as the new
        project — launches a gateway VM stack, resets the project's default
        security group and installs SSH / AV / monitoring / egress rules.
        Returns a SimpleNamespace of the created ids, or None when the stack
        rolled back or any step failed.
        """
        try:
            conn = self.connect(provider)
            parameters = {
                "tenant_name" : kwargs['project_name'],
                "tenant_net_name": kwargs['project_name'] + "_int_net",
                "public_net_name" : kwargs['external_network'],
                "tenant_description" : kwargs['description']
            }
            stack_name = kwargs['project_name']
            template_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates/openstack', "create_project.yaml"))
            _project = conn.create_stack(kwargs['project_name'], tags=None, template_file=template_path, template_url=None, template_object=None, files=None, rollback=True, wait=True, timeout=3600, environment_files=None, **parameters)
            def retry_if_result_none(result):
                """Return True if we should retry (in this case when result is None), False otherwise"""
                return result is None
            # Retry every second for up to 5s while the checker returns None.
            @retry(wait_fixed=1000, stop_max_delay=5000
                , retry_on_result=retry_if_result_none)
            def check_project_status(stack_name):
                # Re-read the stack; a ROLLBACK action means creation failed,
                # signalled by returning None so @retry tries again.
                _project = conn.get_stack(stack_name, filters=None, resolve_outputs=True)
                if _project.action == 'ROLLBACK':
                    return None
                new_project = types.SimpleNamespace()
                # Stack outputs may be attribute-style objects or plain
                # dicts depending on the SDK version — hence the try/except
                # fallback for each output record.
                for resources in _project.outputs:
                    try:
                        if resources.output_key == 'tenant_id':
                            new_project.tenant_id = resources.output_value
                            new_project.id = resources.output_value
                        elif resources.output_key == 'network_id':
                            new_project.network_id = resources.output_value
                        elif resources.output_key == 'subnet_id':
                            new_project.subnet_id = resources.output_value
                        elif resources.output_key == 'router_id':
                            new_project.router_id = resources.output_value
                    except:
                        if resources['output_key'] == 'tenant_id':
                            new_project.tenant_id = resources['output_value']
                            new_project.id = resources['output_value']
                        elif resources['output_key'] == 'network_id':
                            new_project.network_id = resources['output_value']
                        elif resources['output_key'] == 'subnet_id':
                            new_project.subnet_id = resources['output_value']
                        elif resources['output_key'] == 'router_id':
                            new_project.router_id = resources['output_value']
                if new_project.id != '':
                    # Re-connect scoped to the new project for the gateway
                    # stack and security-group work below.
                    new_conn = conn.connect_as_project(kwargs['project_name'])
                    gw_parameters = {
                        "sshgw_vm_name": kwargs['project_getway_name']+"-gateway",
                        "internal_network_id" : kwargs['project_name'] + "_int_net",
                        "public_net_name" : kwargs['external_network'],
                        "docker_registry" : provider.docker_registry
                    }
                    gw_template_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates/openstack', "create_project_gw.yaml"))
                    _gw_vm = new_conn.create_stack(kwargs['project_getway_name'], tags=None, template_file=gw_template_path, template_url=None, template_object=None, files=None, rollback=True, wait=True, timeout=3600, environment_files=None, **gw_parameters)
                    # NOTE(review): '<KEY>' looks like a redacted output key
                    # (presumably the gateway VM id) — confirm against the
                    # create_project_gw.yaml template outputs.
                    for gw_resources in _gw_vm.outputs:
                        try:
                            if gw_resources.output_key == 'internal_security_group_id':
                                new_project.gw_sec_group_id = gw_resources.output_value
                            elif gw_resources.output_key == '<KEY>':
                                new_project.gw_provider_instance_id = gw_resources.output_value
                        except:
                            if gw_resources['output_key'] == 'internal_security_group_id':
                                new_project.gw_sec_group_id = gw_resources['output_value']
                            elif gw_resources['output_key'] == '<KEY>':
                                new_project.gw_provider_instance_id = gw_resources['output_value']
                    _gw_vm_details = new_conn.get_server_by_id(new_project.gw_provider_instance_id)
                    try:
                        new_project.gw_provider_instance_ip = _gw_vm_details.public_v4
                    except:
                        new_project.gw_provider_instance_ip = _gw_vm_details['public_v4']
                    # Wipe the default security group's stock rules before
                    # installing the project-specific ones below.
                    _sec_groups = new_conn.list_security_groups(filters={ "tenant_id" : new_project.id, "name" : "default"})
                    for _sec_group_rule in _sec_groups[0].security_group_rules:
                        new_conn.delete_security_group_rule(_sec_group_rule['id'])
                    new_project.sec_group_id = _sec_groups[0].id
                    # CreateInternal SSH security Group Rule
                    internal_ssh_rule_id = new_conn.create_security_group_rule(new_project.sec_group_id,
                        port_range_min=22,
                        port_range_max=22,
                        protocol="tcp",
                        remote_ip_prefix="192.168.3.0/24",
                        project_id=new_project.id
                    )
                    # Anti-virus agent port (4118) from anywhere.
                    internal_av_firewall_rule_id = new_conn.create_security_group_rule(new_project.sec_group_id,
                        port_range_min=4118,
                        port_range_max=4118,
                        protocol="tcp",
                        remote_ip_prefix="0.0.0.0/0",
                        project_id=new_project.id
                    )
                    # Monitoring/node-exporter port (9100) from anywhere.
                    internal_monitoring_firewall_rule_id = new_conn.create_security_group_rule(new_project.sec_group_id,
                        port_range_min=9100,
                        port_range_max=9100,
                        protocol="tcp",
                        remote_ip_prefix="0.0.0.0/0",
                        project_id=new_project.id
                    )
                    # Allow all outbound TCP.
                    internal_egress_rule_id = new_conn.create_security_group_rule(new_project.sec_group_id,
                        port_range_min=1,
                        port_range_max=65535,
                        protocol="tcp",
                        direction="egress",
                        remote_ip_prefix="0.0.0.0/0",
                        project_id=new_project.id
                    )
                    new_project.internal_ssh_rule_id = internal_ssh_rule_id
                    return new_project
                else:
                    return None
            return check_project_status(stack_name)
        except Exception as e:
            log.info("Exception: %s", e)
            return None
            # abort(
            #     code=HTTPStatus.UNPROCESSABLE_ENTITY,
            #     message="%s" % e
            # )
def create_volume(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.create_volume(kwargs['volume_size'],
wait=True,
timeout=None,
image=kwargs['image'],
bootable=kwargs['bootable'],
name=kwargs['name'])
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_volume(self, provider, volume_id, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.get_volume(volume_id, filters=None)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_volume(self, provider, volume_id, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.delete_volume(volume_id)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def list_volume_snapshots(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.list_volume_snapshots(detailed=kwargs['detailed'], search_opts=kwargs['search_opts'])
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_volume_snapshot(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.create_volume_snapshot(kwargs['provider_volume_id'], description=kwargs['description'], force=False, wait=True, timeout=None)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_volume_snapshot_by_id(self, provider, volume_id, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.get_volume_snapshot_by_id(volume_id)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_volume_snapshot(self, provider, volume_id, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.delete_volume_snapshot(volume_id, wait=False, timeout=3600)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def attach_volume(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.attach_volume(kwargs['server'], kwargs['volume'],
device=kwargs['device'],
wait=True,
timeout=3600)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def detach_volume(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.detach_volume(kwargs['server'], kwargs['volume'],
wait=True,
timeout=3600)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_limits(self, provider, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.compute.get_limits()
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
    def create_load_balancer(self, provider, **kwargs):
        """Stand up a load-balancer VM for a project via a Heat stack.

        Builds the ``create_project_lb.yaml`` stack inside the project and,
        on CREATE_COMPLETE, collects the LB security-group id, instance id
        and public IPv4 into a SimpleNamespace. Returns None on any failure.
        """
        try:
            conn = self.connect(provider)
            new_lb = types.SimpleNamespace()
            new_conn = conn.connect_as_project(kwargs['project_name'])
            lb_parameters = {
                "internal_network_id" : kwargs['project_name'] + "_int_net",
                "public_net_name" : kwargs['external_network'],
                "sec_group_name" : kwargs['lb_name'] + "_sec_group",
                "docker_registry" : provider.docker_registry,
                "lb_vm_name": kwargs['lb_name'] + "_vm"
            }
            lb_template_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates/openstack', "create_project_lb.yaml"))
            _lb_vm = new_conn.create_stack(kwargs['lb_name'] + "_lb", tags=None, template_file=lb_template_path, template_url=None, template_object=None, files=None, rollback=True, wait=True, timeout=3600, environment_files=None, **lb_parameters)
            log.info(_lb_vm)
            if 'stack_status' in _lb_vm and _lb_vm.stack_status == 'CREATE_COMPLETE':
                # Outputs may be attribute-style objects or plain dicts
                # depending on the SDK version, hence the try/except fallback.
                # NOTE(review): '<KEY>' looks like a redacted output key
                # (presumably the LB VM id) — confirm against the template.
                try:
                    for lb_resources in _lb_vm.outputs:
                        if lb_resources.output_key == 'internal_security_group_id':
                            new_lb.lb_sec_group_id = lb_resources.output_value
                        elif lb_resources.output_key == '<KEY>':
                            new_lb.lb_provider_instance_id = lb_resources.output_value
                except:
                    for lb_resources in _lb_vm['outputs']:
                        if lb_resources['output_key'] == 'internal_security_group_id':
                            new_lb.lb_sec_group_id = lb_resources['output_value']
                        elif lb_resources['output_key'] == '<KEY>':
                            new_lb.lb_provider_instance_id = lb_resources['output_value']
            # If the stack did not complete, lb_provider_instance_id is never
            # set and the AttributeError below lands in the outer handler,
            # which returns None.
            _lb_vm_details = new_conn.get_server_by_id(new_lb.lb_provider_instance_id)
            new_lb.lb_provider_instance_ip = _lb_vm_details.public_v4
            return new_lb
        except Exception as e:
            log.info("Exception: %s", e)
            return None
            # abort(
            #     code=HTTPStatus.UNPROCESSABLE_ENTITY,
            #     message="%s" % e
            # )
def create_bucket(self, provider, name, **kwargs):
try:
conn = self.connect(provider)
public = False
if 'public' in kwargs:
public = kwargs['public']
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.create_container(name, public=public)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def list_buckets(self, provider, **kwargs):
try:
conn = self.connect(provider)
full_listing = True
if 'full_listing' in kwargs:
full_listing = kwargs['full_listing']
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.list_containers(full_listing=full_listing)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_object(self, provider, bucket, name, **kwargs):
try:
conn = self.connect(provider)
filename = None
md5 = None
sha256 = None
segment_size = None
use_slo = True
metadata = None
if 'filename' in kwargs:
filename = kwargs['filename']
if 'md5' in kwargs:
md5 = kwargs['md5']
if 'sha256' in kwargs:
sha256 = kwargs['sha256']
if 'segment_size' in kwargs:
segment_size = kwargs['segment_size']
if 'use_slo' in kwargs:
use_slo = kwargs['use_slo']
if 'metadata' in kwargs:
metadata = kwargs['metadata']
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.create_object(bucket, name, filename=filename, md5=md5, sha256=sha256, segment_size=segment_size, use_slo=use_slo, metadata=metadata)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def list_objects(self, provider, bucket, **kwargs):
try:
conn = self.connect(provider)
full_listing = True
if 'full_listing' in kwargs:
full_listing = kwargs['full_listing']
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.list_objects(bucket, full_listing=full_listing)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_object(self, provider, bucket, name, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.get_object(bucket, name, query_string=None, resp_chunk_size=1024, outfile=None)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def update_object(self, provider, bucket, name, **kwargs):
try:
conn = self.connect(provider)
metadata = None
if 'metadata' in kwargs:
metadata = kwargs['metadata']
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.update_object(bucket, name, metadata=metadata, **headers)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_object(self, provider, bucket, name, **kwargs):
try:
conn = self.connect(provider)
if 'project_name' in kwargs:
conn = conn.connect_as_project(kwargs['project_name'])
return conn.delete_object(bucket, name)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
    def create_server_group(self, provider, **kwargs):
        """Create a server group (cluster + LB wiring) via a Heat stack.

        Launches ``serverGroup.yaml`` inside the project and, when the stack
        reaches CREATE_COMPLETE, collects security-group / LB-instance /
        server ids, IPs and names into a SimpleNamespace. On failure the
        returned namespace carries only ``ticket_id`` (the stack id, or None
        when stack creation itself failed) so the caller can follow up.
        """
        ticket_id = None
        new_server_group = types.SimpleNamespace()
        try:
            conn = self.connect(provider)
            new_conn = conn.connect_as_project(kwargs['project_name'])
            server_parameters = {
                "stackname": kwargs['stackname'],
                "image": kwargs['image'],
                "flavor": kwargs['flavor'],
                "private_network": kwargs['private_network'],
                "public_network": kwargs['public_network'],
                "docker_registry": provider.docker_registry,
                "cluster_size": kwargs['cluster_size'],
                "lb_sec_group_name": kwargs['lb_sec_group_name'],
                "lb_vm_name": kwargs['lb_vm_name'],
                "lb_private_network":kwargs['lb_private_network'],
                "boot_vol_size": kwargs['boot_vol_size']
                # "lb_flavor": kwargs['lb_flavor'],
                # "lb_image": kwargs['lb_image']
            }
            lb_template_path = os.path.abspath( os.path.join(os.path.dirname(__file__), 'templates/openstack', "serverGroup.yaml"))
            env_path = os.path.abspath( os.path.join(os.path.dirname(__file__), 'templates/openstack', "env.yaml"))
            _lb_vm = new_conn.create_stack(kwargs['stackname'], tags=None, template_file=lb_template_path, template_url=None, template_object=None, files=None, rollback=True, wait=True, timeout=3600, environment_files=[env_path], **server_parameters)
            ticket_id = _lb_vm.id
            try:
                if _lb_vm.stack_status == 'CREATE_COMPLETE':
                    # NOTE(review): '<KEY>' looks like a redacted output key
                    # (presumably the LB VM id) — confirm against the
                    # serverGroup.yaml template outputs.
                    for lb_resources in _lb_vm.outputs:
                        if lb_resources['output_key'] == 'internal_security_group_id':
                            new_server_group.lb_sec_group_id = lb_resources['output_value']
                        elif lb_resources['output_key'] == '<KEY>':
                            new_server_group.lb_provider_instance_id = lb_resources['output_value']
                        elif lb_resources['output_key'] == 'server_id':
                            new_server_group.server_id = lb_resources['output_value']
                        elif lb_resources['output_key'] == 'server_ip':
                            new_server_group.server_ip = lb_resources['output_value']
                        elif lb_resources['output_key'] == 'server_name':
                            new_server_group.server_name = lb_resources['output_value']
                    _lb_vm_details = new_conn.get_server_by_id(new_server_group.lb_provider_instance_id)
                    new_server_group.lb_provider_instance_ip = _lb_vm_details.public_v4
                else:
                    # Stack did not complete: hand the stack id back as a
                    # ticket for follow-up instead of the resource ids.
                    new_server_group.ticket_id = ticket_id
                    return new_server_group
            except Exception as e:
                log.info("Stack Exception: %s", e)
                new_server_group.ticket_id = ticket_id
                return new_server_group
            log.info(new_server_group)
            return new_server_group
        except Exception as e:
            log.info("Exception: %s", e)
            new_server_group.ticket_id = ticket_id
            return new_server_group
            # abort(
            #     code=HTTPStatus.UNPROCESSABLE_ENTITY,
            #     message="%s" % e
            # )
    def update_server_group(self, provider, **kwargs):
        """Update an existing server-group Heat stack (e.g. resize cluster).

        Re-applies ``serverGroup.yaml`` with the new parameters and returns a
        SimpleNamespace of the refreshed stack outputs (security group, LB
        instance id/IP, server ids/IPs/names). Aborts with HTTP 422 on
        failure.
        """
        try:
            conn = self.connect(provider)
            updated_server_group = types.SimpleNamespace()
            conn = conn.connect_as_project(kwargs['project_name'])
            #new_conn = conn.connect_as_project(kwargs['project_name'])
            # parameters = {
            #     "cluster_size": kwargs['cluster_size'],
            #     "flavor": kwargs['flavor']
            # }
            log.info(kwargs['private_network'])
            # Network parameters are intentionally left out of the update
            # (commented below) — only size/flavor/LB wiring are re-applied.
            parameters = {
                "stackname": kwargs['stackname'],
                "image": kwargs['image'],
                "flavor": kwargs['flavor'],
                # "private_network": kwargs['private_network'],
                # "public_network": kwargs['public_network'],
                "cluster_size": kwargs['cluster_size'],
                "lb_sec_group_name": kwargs['lb_sec_group_name'],
                "lb_vm_name": kwargs['lb_vm_name'],
                "lb_private_network": kwargs['lb_private_network'],
            }
            lb_template_path = os.path.abspath(
                os.path.join(os.path.dirname(__file__), 'templates/openstack', "serverGroup.yaml"))
            env_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates/openstack', "env.yaml"))
            _server_response = conn.update_stack(kwargs['stackname'], template_file=lb_template_path,
                                                 template_url=None, template_object=None, files=None, rollback=True,
                                                 wait=True, timeout=3600, environment_files=[env_path], **parameters)
            # lb_template_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates/openstack', "serverGroupAIO.yaml"))
            # _server_response = conn.update_stack(kwargs['stackname'], template_file=lb_template_path, template_url=None,
            #                 template_object=None, files=None, rollback=True, wait=True, timeout=3600, environment_files=None,
            #                 **parameters)
            log.info(_server_response)
            new_server_group = types.SimpleNamespace()
            for lb_resources in _server_response.outputs:
                if lb_resources['output_key'] == 'internal_security_group_id':
                    new_server_group.lb_sec_group_id = lb_resources['output_value']
                elif lb_resources['output_key'] == 'lb_vm_id':
                    new_server_group.lb_provider_instance_id = lb_resources['output_value']
                elif lb_resources['output_key'] == 'server_id':
                    new_server_group.server_id = lb_resources['output_value']
                elif lb_resources['output_key'] == 'server_ip':
                    new_server_group.server_ip = lb_resources['output_value']
                elif lb_resources['output_key'] == 'server_name':
                    new_server_group.server_name = lb_resources['output_value']
            _lb_vm_details = conn.get_server_by_id(new_server_group.lb_provider_instance_id)
            new_server_group.lb_provider_instance_ip = _lb_vm_details.public_v4
            return new_server_group
        except Exception as e:
            log.info("Exception: %s", e)
            abort(
                code=HTTPStatus.UNPROCESSABLE_ENTITY,
                message="%s" % e
            )
def delete_server_group(self, provider, name_or_id, **kwargs):
conn = self.connect(provider)
#updated_server_group = types.SimpleNamespace()
conn = conn.connect_as_project(kwargs['project_name'])
_server_response = conn.delete_stack(name_or_id, wait=True)
return _server_response
def set_server_metadata(self, provider, name_or_id, **kwargs):
conn = self.connect(provider)
#updated_server_group = types.SimpleNamespace()
conn = conn.connect_as_project(kwargs['project_name'])
_server_response = conn.set_server_metadata(name_or_id, metadata=kwargs['metadata'])
return _server_response
def delete_server_metadata(self, provider, name_or_id, **kwargs):
conn = self.connect(provider)
#updated_server_group = types.SimpleNamespace()
conn = conn.connect_as_project(kwargs['project_name'])
_server_response = conn.set_server_metadata(name_or_id, metadata_keys=kwargs['metadata_keys'])
return _server_response
def add_network_to_server(self, provider, **kwargs):
conn = self.connect(provider)
conn = conn.connect_as_project(kwargs['project_name'])
_response = conn.compute.create_server_interface(kwargs['server_id'], net_id=kwargs['network_id'])
return _response
def remove_network_to_server(self, provider, **kwargs):
conn = self.connect(provider)
conn = conn.connect_as_project(kwargs['project_name'])
_response = conn.compute.delete_server_interface(kwargs['interface_id'],kwargs['server_id'])
return _response
def create_network_port(self, provider, **kwargs):
conn = self.connect(provider)
conn = conn.connect_as_project(kwargs['project_name'])
_response = conn.network.create_port(project_id=kwargs['project_id'], network_id=kwargs['network_id'], security_groups=[kwargs['sec_group_id']])
return _response
def get_stack_details(self, provider, **kwargs):
conn = self.connect(provider)
new_conn = conn.connect_as_project(kwargs['project_name'])
stack_details = new_conn.get_server_group(kwargs['stack_id'])
log.info(stack_details)
return stack_details
def add_interface_to_router(self, provider, **kwargs):
conn = self.connect(provider)
new_conn = conn.connect_as_project(kwargs['project_name'])
router = new_conn.get_router(kwargs['router'], filters=None)
log.info(router)
response = new_conn.add_router_interface(router, subnet_id=kwargs['subnet_id'], port_id=None)
log.info(response)
return response
def get_router(self, provider, **kwargs):
conn = self.connect(provider)
new_conn = conn.connect_as_project(kwargs['project_name'])
response = new_conn.get_router(kwargs['router_id'], filters=None)
log.info(response)
return response
def attach_security_groups(self, provider, **kwargs):
conn = self.connect(provider)
new_conn = conn.connect_as_project(kwargs['project_name'])
log.info(kwargs['security_groups'])
response = new_conn.add_server_security_groups(kwargs['server'], kwargs['security_groups'])
return response
def detach_security_groups(self, provider, **kwargs):
conn = self.connect(provider)
new_conn = conn.connect_as_project(kwargs['project_name'])
response = new_conn.remove_server_security_groups(kwargs['server'], kwargs['security_groups'])
return response | 1.820313 | 2 |
yaps/api/async_methods.py | victorhook/vqtt | 0 | 12757974 | <reponame>victorhook/vqtt
import struct
import asyncio
from yaps.utils import Log
from yaps.api.packet import Packet
from yaps.api import protocol_utils
async def async_read_packet(reader: asyncio.StreamReader) -> Packet:
    """Read one protocol packet from *reader*.

    Reads the fixed-size header, unpacks (cmd, flags, length), then reads
    exactly *length* payload bytes. Returns None when a well-formed packet
    could not be read.

    Fix: StreamReader.read(n) may return fewer than n bytes on a stream;
    readexactly() guarantees the full header/payload or raises
    IncompleteReadError, which is handled here.
    """
    try:
        header = await reader.readexactly(protocol_utils.Sizes.HEADER)
        cmd, flags, length = protocol_utils.unpack_header(header)
        data = await reader.readexactly(length)
        return Packet(cmd, flags, length, data)
    except asyncio.IncompleteReadError as e:
        Log.debug(f'Stream closed before a full packet was read: {e}')
        return None
    except struct.error as e:
        Log.debug(f'Failed to read packet: {e}')
        return None
    except RuntimeError:
        Log.debug('Race condition reading packet.')
        return None
async def async_send_packet(writer: asyncio.StreamWriter,
                            cmd: int, flags: int = 0,
                            data: bytes = b'') -> None:
    """Serialize a packet and write it to *writer*, waiting for the drain."""
    outgoing = Packet(cmd, flags, len(data), data,
                      protocol_utils.Formats.HEADER)
    writer.write(outgoing.to_bytes())
    await writer.drain()
async def async_cmd_ok(packet: Packet, cmd: int,
                       writer: asyncio.StreamWriter = None) -> bool:
    """
    Returns true if command is okay, and logs if not.
    If the command is INCORRECT, a packet is sent to the
    client with BAD_CMD command.
    """
    # Guard clauses: bail out early on a missing or mismatched packet.
    if packet is None:
        Log.debug('Failed to read packet!')
        return False
    if packet.cmd != cmd:
        Log.err('Packet command incorrect! '
                f'Expected: "{protocol_utils.DEBUG_COMMANDS[cmd]}", '
                f'Got: "{protocol_utils.DEBUG_COMMANDS[packet.cmd]}"')
        if writer is not None:
            await async_send_packet(writer, protocol_utils.Commands.BAD_CMD)
        return False
    return True
| 2.375 | 2 |
contest/models.py | pwqbot/eoj3 | 66 | 12757975 | <filename>contest/models.py<gh_stars>10-100
import json
from datetime import datetime
import shortuuid
from django.core.validators import EmailValidator
from django.db import models
from django.db.models import Count
from django.utils import timezone
from account.models import User, School
from problem.models import Problem
from utils.language import LANG_CHOICE
def get_invitation_code():
  """Generate a random 12-character contest invitation code."""
  generator = shortuuid.ShortUUID()
  return generator.random(12)
def get_language_all_list():
  """Return every supported language code, sorted, as one comma-separated string."""
  codes = sorted(dict(LANG_CHOICE).keys())
  return ','.join(codes)
class ContestManager(models.Manager):
  """Manager with a helper for building filtered contest listings."""

  def get_status_list(self, show_all=False, filter_user=None, sorting_by_id=False, contest_type=None):
    """Return contests visible under the given filters, annotated with the
    distinct participant count.

    Note the operator order below matters: the manager clause is OR-ed in
    *after* the access-level clause, so a `filter_user` also sees hidden
    contests they manage even when `show_all` is False.
    """
    q = models.Q()
    if not show_all:
      # Hide contests whose access_level marks them admin-only (0).
      q &= models.Q(access_level__gt=0)
    if filter_user:
      q |= models.Q(managers=filter_user)
    if contest_type is not None:
      q &= models.Q(contest_type=contest_type)
    contest_list = self.get_queryset().prefetch_related('authors', 'managers'). \
      annotate(Count('participants', distinct=True)).filter(q)
    if sorting_by_id:
      contest_list = contest_list.order_by("-pk").distinct()
    else:
      contest_list = contest_list.order_by("-start_time").distinct()
    return contest_list
class Contest(models.Model):
  """A contest or homework: scheduling, rules, problems and participants."""

  # (value, label) choice tables; labels are user-facing.
  SCORING_METHOD_CHOICE = (
    ('acm', "ACM 赛制"),
    ('oi', "OI 赛制"),
    ('cf', "School of Data Analysis (SDA) 赛制 (Codeforces...)"),
    ('tcmtime', "TCM/TIME 赛制 (GCJ...)"),
  )
  TEST_DURING_CONTEST_CHOICE = (
    ('all', "所有测试点"),
    ('pretest', "只测试 Pretests"),
    ('sample', "只测试样例"),
    ('none', "不作测试")
  )
  CODE_SHARE_CHOICE = (
    (0, "不允许"),
    (1, "代码在赛后对 AC 用户公开(默认)"),
    (2, "代码在赛后完全公开"),
    (3, "代码在比赛过程中对 AC 用户公开"),
  )
  CASE_PUBLIC_CHOICE = (
    (0, '不允许'),
    (1, '评测报告有偿公开'),
    (2, '评测报告总是开放'),
  )
  ACCESS_LEVEL_OPTIONS = (
    (0, '仅比赛管理员可见'),
    (10, '仅受邀用户可见,赛后题目不公开'),
    (20, '仅受邀用户可见,赛后题目直接公开'),
    (30, '公开,需要比赛前注册'),
    (40, '公开')
  )
  COMMON_STATUS_ACCESS_LEVEL_OPTIONS = (
    (-10, '不可见'),
    (0, '默认'),
    (10, '总是可见')
  )
  title = models.CharField("标题", max_length=192)
  description = models.TextField("描述", blank=True)
  # NOTE(review): the default is the *result* of calling the function at import
  # time, not the callable itself; `default=get_language_all_list` is the usual
  # Django idiom — confirm before changing (it would create a migration).
  allowed_lang = models.CharField("允许语言", max_length=192, default=get_language_all_list())
  contest_type = models.IntegerField(default=0, choices=(
    (0, '常规比赛'),
    (1, '作业'),
  ))
  start_time = models.DateTimeField("开始时间", blank=True, null=True, default=timezone.now)
  end_time = models.DateTimeField("结束时间", blank=True, null=True, default=timezone.now)
  create_time = models.DateTimeField("创建时间", auto_now_add=True)
  standings_update_time = models.DateTimeField("榜单更新时间", blank=True, null=True)
  freeze = models.BooleanField("是否封榜", default=False)
  freeze_time = models.DateTimeField("封榜时间", blank=True, null=True)
  scoring_method = models.CharField("计分规则", default='acm', max_length=10, choices=SCORING_METHOD_CHOICE)
  run_tests_during_contest = models.CharField("比赛过程中对代码进行评测", max_length=10, choices=TEST_DURING_CONTEST_CHOICE,
                        default=TEST_DURING_CONTEST_CHOICE[0][0])
  allow_code_share = models.IntegerField("允许代码共享", default=1,
                      choices=CODE_SHARE_CHOICE)  # Can view others' codes after AC
  last_counts = models.BooleanField("认为最后一次提交有效(默认使用成绩最好的)", default=False)  # Treat last submission as valid submission
  penalty_counts = models.PositiveIntegerField('错误提交罚时(秒)', default=1200)
  standings_without_problem = models.BooleanField('排行榜上不显示具体题目的通过情况',
                          default=False)  # Have a standing without specific problems
  case_public = models.PositiveIntegerField(choices=CASE_PUBLIC_CHOICE, default=0)
  system_tested = models.BooleanField("系统测试准备就绪", default=False)
  # Passing system test or not, shall be available for run_tests_during_contest none, sample and pretest
  problems = models.ManyToManyField(Problem, through='ContestProblem')
  participants = models.ManyToManyField(User, through='ContestParticipant', related_name='contests')
  access_level = models.PositiveIntegerField("访问控制", default=0, choices=ACCESS_LEVEL_OPTIONS)
  common_status_access_level = models.IntegerField("所有提交和榜单的访问控制", default=0,
                           choices=COMMON_STATUS_ACCESS_LEVEL_OPTIONS)
  ip_sensitive = models.BooleanField("首次登录绑定 IP", default=False)
  analysis_blog_id = models.IntegerField("题解博客 ID", default=0)  # related to a blog id
  pdf_statement = models.FileField('PDF 题面', upload_to='contest_statements/%Y%m%d/', null=True, blank=True)
  objects = ContestManager()
  managers = models.ManyToManyField(User, related_name='managing_contests')
  authors = models.ManyToManyField(User, related_name='written_contests')
  volunteers = models.ManyToManyField(User, related_name='volunteering_contests')

  class Meta:
    ordering = ['-pk']

  @property
  def status(self):
    # -1 = not started yet, 0 = running, 1 = ended.
    now = timezone.now()
    if self.start_time is not None and now < self.start_time:
      return -1
    if self.end_time is not None and now > self.end_time:
      return 1
    return 0

  @property
  def finite(self):
    # A contest is "finite" only when both endpoints are set.
    return self.start_time is not None and self.end_time is not None

  @property
  def length(self):
    # Contest duration as a timedelta, or None for open-ended contests.
    if not self.finite:
      return None
    return self.end_time - self.start_time

  @property
  def is_frozen(self):
    # Standings are frozen between freeze_time and end_time.
    # NOTE(review): assumes freeze_time and end_time are set whenever
    # freeze is True — confirm, otherwise this comparison raises TypeError.
    if self.freeze and self.freeze_time <= timezone.now() <= self.end_time:
      return True
    return False

  @property
  def pending_system_tests(self):
    # True when the contest ended but the full system test has not run yet.
    return self.status > 0 and self.run_tests_during_contest != 'all' and not self.system_tested

  @property
  def supported_language_list(self):
    # Parse the comma-separated allowed_lang field, dropping empty entries.
    return list(filter(lambda x: x, map(lambda x: x.strip(), self.allowed_lang.split(','))))

  @property
  def verbose_supported_language_list(self):
    # Human-readable language list, e.g. "C, C++ and Java".
    def rreplace(s, old, new, count):
      # Replace the last `count` occurrences of `old` in `s`.
      return (s[::-1].replace(old[::-1], new[::-1], count))[::-1]

    lang_choices = dict(LANG_CHOICE)
    return rreplace(', '.join(list(map(lambda x: lang_choices[x], self.supported_language_list))), ', ', ' and ', 1)

  @property
  def contest_problem_list(self):
    # Per-instance cached list of ContestProblem rows, with the heavy text
    # fields of the related problems deferred.
    if not hasattr(self, '_contest_problem_list'):
      self._contest_problem_list = list(self.contestproblem_set.select_related('problem').
                                        defer('problem__description', 'problem__input', 'problem__output',
                                              'problem__hint', 'problem__cases').all())
    return self._contest_problem_list

  def get_contest_problem(self, problem_id):
    """Return the ContestProblem matching `problem_id`, or None if absent."""
    get_result = list(filter(lambda p: p.problem_id == problem_id, self.contest_problem_list))
    if len(get_result) > 0:
      return get_result[0]
    else:
      return None

  def add_contest_problem_to_submissions(self, submissions):
    """Annotate each submission in-place with its `contest_problem`."""
    find_contest_problem = {k.problem_id: k for k in self.contest_problem_list}
    for submission in submissions:
      submission.contest_problem = find_contest_problem.get(submission.problem_id)

  @property
  def participants_ids(self):
    # Per-instance cached flat list of participant user ids.
    if not hasattr(self, '_contest_user_ids'):
      self._contest_user_ids = list(self.contestparticipant_set.order_by().values_list("user_id", flat=True))
    return self._contest_user_ids

  def fetch_problem_entities_from_ids(self, problem_ids):
    """Map problem ids to ContestProblem entities, preserving input order.

    Raises KeyError if any id is not part of this contest.
    """
    pool = {t.problem_id: t for t in self.contest_problem_list}
    return [pool[problem_id] for problem_id in problem_ids]

  def __str__(self):
    return self.title
class ContestProblem(models.Model):
  """Through-model linking a Problem to a Contest, with cached statistics."""
  problem = models.ForeignKey(Problem, on_delete=models.CASCADE)
  contest = models.ForeignKey(Contest, on_delete=models.CASCADE)
  identifier = models.CharField(max_length=12)  # display label, e.g. "A"
  weight = models.IntegerField(default=100)
  # Aggregate counters; not computed in this model (maintained elsewhere).
  ac_user_count = models.PositiveIntegerField(default=0)
  total_user_count = models.PositiveIntegerField(default=0)
  ac_count = models.PositiveIntegerField(default=0)
  total_count = models.PositiveIntegerField(default=0)
  first_yes_time = models.DurationField(null=True, blank=True)
  first_yes_by = models.PositiveIntegerField(null=True, blank=True)
  max_score = models.FloatField(default=0)
  avg_score = models.FloatField(default=0)

  class Meta:
    unique_together = ('problem', 'contest')
    ordering = ['identifier']

  @property
  def user_ratio(self):
    # Fraction of users who solved the problem; 0.0 when nobody attempted it.
    return self.ac_user_count / self.total_user_count if self.total_user_count > 0 else 0.0

  @property
  def ratio(self):
    # Fraction of accepted submissions among all submissions.
    return self.ac_count / self.total_count if self.total_count > 0 else 0.0

  def __str__(self):
    return self.identifier + '. ' + self.problem.title
class ContestClarification(models.Model):
  """A clarification entry (question/announcement) posted during a contest."""
  contest = models.ForeignKey(Contest, on_delete=models.CASCADE)
  text = models.TextField(blank=True)
  time = models.DateTimeField(auto_now=True)  # last-modified timestamp
  important = models.BooleanField(default=False)
  author = models.ForeignKey(User, on_delete=models.CASCADE)
  answer = models.TextField(blank=True)

  class Meta:
    ordering = ["-time"]  # newest first

  def __str__(self):
    return self.text
class ContestParticipant(models.Model):
  """Per-user participation record: score, penalty, timing and state."""
  user = models.ForeignKey(User, on_delete=models.CASCADE)
  star = models.BooleanField(default=False)
  comment = models.TextField(blank=True)
  hidden_comment = models.TextField(blank=True)
  contest = models.ForeignKey(Contest, on_delete=models.CASCADE)
  score = models.IntegerField(default=0)
  penalty = models.BigIntegerField(default=0)
  detail_raw = models.TextField(blank=True)
  is_disabled = models.BooleanField(default=False)
  ip_address = models.GenericIPAddressField(blank=True, null=True)
  join_time = models.DateTimeField(blank=True, null=True)  # default: join when contest begins
  is_confirmed = models.BooleanField(default=False)

  def start_time(self, contest: Contest):
    """Effective start for this participant: contest start, or their
    join moment for late joiners."""
    # the contest should be a cached contest
    if self.join_time is None:
      return contest.start_time
    else:
      return self.join_time

  def end_time(self, contest: Contest):
    """Effective end: the participant's start shifted by the contest length."""
    st = self.start_time(contest)
    if st is None or contest.end_time is None:
      return contest.end_time
    return st + (contest.end_time - contest.start_time)

  def as_contest_time(self, contest: Contest, real_time):
    """Convert a wall-clock time into time elapsed inside the contest."""
    return real_time - self.start_time(contest)

  def status(self, contest: Contest):
    """Return -1 before this participant's start, 1 after their end,
    and 0 while running.

    Fix: use timezone.now() instead of naive datetime.now(); comparing a
    naive datetime against timezone-aware DateTimeFields raises TypeError
    under USE_TZ (the sibling Contest.status already uses timezone.now()).
    """
    start_time = self.start_time(contest)
    end_time = self.end_time(contest)
    now = timezone.now()
    if start_time is not None and now < start_time:
      return -1
    if end_time is not None and now > end_time:
      return 1
    return 0

  @property
  def detail(self):
    """Parsed JSON detail keyed by int problem id; {} on any parse error."""
    try:
      if hasattr(self, "_detail"):
        return self._detail  # pylint: disable=access-member-before-definition
      if not self.detail_raw:
        return {}
      self._detail = {int(k): v for k, v in json.loads(self.detail_raw).items()}
      return self._detail
    except Exception:  # narrowed from a bare except (no longer traps SystemExit)
      return {}

  @detail.setter
  def detail(self, d):
    self.detail_raw = json.dumps(d)

  class Meta:
    unique_together = ["user", "contest"]
    ordering = ("-is_confirmed", "-score", "penalty", "star")
class ContestInvitation(models.Model):
  """An invitation code granting access to a contest; unique per contest."""
  contest = models.ForeignKey(Contest, on_delete=models.CASCADE)
  star = models.BooleanField(default=False)  # invitee participates unofficially ("starred")
  code = models.CharField(max_length=24)
  comment = models.TextField(blank=True)

  class Meta:
    unique_together = ('contest', 'code')
    ordering = ['-pk']
class ContestUserRating(models.Model):
  """One user's rating snapshot produced by one rated contest."""
  rating = models.IntegerField(default=1500)  # 1500 is the starting rating
  user = models.ForeignKey(User, on_delete=models.CASCADE)
  contest = models.ForeignKey(Contest, on_delete=models.CASCADE)
  solved = models.IntegerField()
  rank = models.IntegerField()
  modified = models.DateTimeField()

  class Meta:
    unique_together = ('contest', 'user')
    ordering = ["-modified"]

  def __str__(self):
    return 'ContestUserRating: {user: %s, rating: %s}' % (self.user_id, self.rating)
class ContestProblemPlag(models.Model):
  """State record of one plagiarism-check run for a contest problem."""
  contest = models.ForeignKey(Contest, on_delete=models.CASCADE)
  fingerprint = models.CharField(max_length=100)
  status = models.IntegerField(choices=((-1, 'Pending'), (0, 'Ready'), (1, 'Failed')))
  identifier = models.CharField(max_length=20, blank=True)
  language = models.CharField(max_length=20, default="c/c++")
  # NOTE(review): presumably the number of top matches to keep — confirm
  # against the code that consumes this model.
  keep_match = models.PositiveIntegerField(default=20)
  create_time = models.DateTimeField(auto_now_add=True)
  update_time = models.DateTimeField(auto_now=True)
class Activity(models.Model):
  """An activity users can register for during a registration window."""
  title = models.CharField("标题", unique=True, max_length=192)
  description = models.TextField("内容", blank=True)
  author = models.ForeignKey(User, on_delete=models.CASCADE)
  create_time = models.DateTimeField(auto_now_add=True)
  # NOTE(review): auto_now_add means update_time is never refreshed on save;
  # auto_now=True was probably intended — confirm before changing.
  update_time = models.DateTimeField(auto_now_add=True)
  register_start_time = models.DateTimeField("开始注册时间", blank=True)
  register_end_time = models.DateTimeField("结束注册时间", blank=True)
  participants = models.ManyToManyField(User, through='ActivityParticipant', related_name="activities")
class ActivityParticipant(models.Model):
  """Registration details one user submitted for one Activity."""

  # (value, label) pairs for the participant's field of study.
  MAJOR_CHOICES = (
    ('art', '艺术'),
    ('accounting', '会计'),
    ('business', '商业'),
    ('business_admin', '工商管理'),
    ('chemistry', '化学'),
    ('communication', '通信'),
    ('ce', '计算机工程'),
    ('cs', '计算机科学'),
    ('economics', '经济'),
    ('education', '教育'),
    ('ee', '电子工程'),
    ('finance', '金融'),
    ('geology', '地理'),
    ('interaction', '人机交互'),
    ('it', '信息技术'),
    ('life', '生命科学'),
    ('mechanics', '机械'),
    ('linguistics', '语言学'),
    ('literature', '文学'),
    ('math', '数学'),
    ('se', '软件工程'),
    ('philosophy', '哲学'),
    ('physics', '物理'),
    ('politics', '政治学'),
    ('psycho', '心理学'),
    ('social', '社会学'),
    ('translation', '翻译'),
    ('others', '其他')
  )
  user = models.ForeignKey(User, on_delete=models.CASCADE)
  activity = models.ForeignKey(Activity, on_delete=models.CASCADE)
  real_name = models.CharField("真实姓名", max_length=30)
  student_id = models.CharField("学号", max_length=30)
  school = models.ForeignKey(School, verbose_name="学校", on_delete=models.CASCADE)
  email = models.CharField("电子邮箱", max_length=192, validators=[EmailValidator()])
  phone = models.CharField("电话", max_length=30, blank=True)
  major = models.CharField("专业", max_length=30, choices=MAJOR_CHOICES, blank=True)
  gender = models.CharField("性别", max_length=5, choices=(
    ('m', '男'),
    ('f', '女'),
    ('d', '拒绝回答')
  ), blank=True)
  graduate_year = models.IntegerField("毕业年份", blank=True, null=True)
  # Soft-delete / confirmation flags rather than physical row removal.
  is_deleted = models.BooleanField("已删除", default=False)
  is_confirmed = models.BooleanField("已确认", default=False)

  class Meta:
    unique_together = ('user', 'activity')
| 2.140625 | 2 |
10_python_qa_pageobject-master/page_objects/common/Footer.py | turovod/Otus | 1 | 12757976 | class Footer:
pass
| 0.972656 | 1 |
orbit/actions/new_best_metric.py | mcasanova1445/models | 1 | 12757977 | # Copyright 2022 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the `NewBestMetric` condition and associated helper classes."""
import json
import os
import sys
from typing import Any, Callable, Optional, Union
import uuid
from orbit import runner
from orbit import utils
import tensorflow as tf
MetricFn = Callable[[runner.Output], Union[float, tf.Tensor]]
class NewBestMetric:
  """Condition satisfied when a new best metric value is observed.

  Tracks the best metric value seen so far, optionally persisting it to a
  file so the value survives preemptions.

  Both `test` and `commit` satisfy the `Action` protocol. `test` only checks
  whether a given train/eval output improves on the best value, while
  `commit` also records the improvement. Keeping them separate lets one
  `NewBestMetric` instance be reused as a condition several times and adds
  preemption/failure safety — e.g. only commit the new best after a model
  export succeeds:

      new_best_metric = orbit.actions.NewBestMetric(
          'accuracy', filename='/model/dir/best_metric')
      action = orbit.actions.ConditionalAction(
          condition=new_best_metric.test,
          action=[
              orbit.actions.ExportSavedModel(...),
              new_best_metric.commit
          ])

  Calling the instance directly is equivalent to calling `commit`.

  Safe in multi-client settings provided every client computes the same
  metric; set `write_metric=False` on most clients to avoid redundant
  file writes.

  Attributes:
    metric: The metric passed to `__init__` (a string key or a callable
      applied to train/eval output).
    higher_is_better: Whether larger metric values are considered better.
  """

  def __init__(self,
               metric: Union[str, MetricFn],
               higher_is_better: bool = True,
               filename: Optional[str] = None,
               write_metric=True):
    """Initializes the instance.

    Args:
      metric: A string key into the train/eval output dictionary, or a
        callable mapping the output to a metric value.
      higher_is_better: If `True`, an output achieves a new best when its
        metric is strictly greater than the previous best; if `False`,
        strictly smaller.
      filename: Optional path used to persist the best value across
        preemptions. When `None` (default), nothing is persisted.
      write_metric: When `filename` is given, whether this instance writes
        new best values to it (the initial read happens either way).
        Setting this `False` on most clients in multi-client setups avoids
        unnecessary writes. Ignored when `filename` is `None`.
    """
    self.metric = metric
    self.higher_is_better = higher_is_better
    extreme = sys.float_info.max
    initial = -extreme if higher_is_better else extreme
    self._best_value = JSONPersistedValue(
        initial_value=initial, filename=filename, write_value=write_metric)

  def __call__(self, output: runner.Output) -> bool:
    """Alias for `commit`: tests `output` and records any new best value.

    Args:
      output: The train or eval output to test.

    Returns:
      `True` if `output` contains a new best metric value, `False` otherwise.
    """
    return self.commit(output)

  def metric_value(self, output: runner.Output) -> float:
    """Extracts the metric value from `output` and returns it as a float."""
    raw = self.metric(output) if callable(self.metric) else output[self.metric]
    return float(utils.get_value(raw))

  @property
  def best_value(self) -> float:
    """The best metric value seen so far."""
    return self._best_value.read()

  def test(self, output: runner.Output) -> bool:
    """Returns whether `output` beats the current best, without saving it.

    Repeated calls with the same improving `output` keep returning `True`.

    Args:
      output: The train or eval output to test.

    Returns:
      `True` if `output` contains a new best metric value, `False` otherwise.
    """
    candidate = self.metric_value(output)
    if self.higher_is_better:
      return candidate > self.best_value
    return candidate < self.best_value

  def commit(self, output: runner.Output) -> bool:
    """Tests `output` and, on improvement, records the new best value.

    Unlike `test`, a successful call updates the stored best value, so a
    subsequent call with the same `output` returns `False`.

    Args:
      output: The train or eval output to test.

    Returns:
      `True` if `output` contained a new best metric value, `False` otherwise.
    """
    if not self.test(output):
      return False
    self._best_value.write(self.metric_value(output))
    return True
class JSONPersistedValue:
  """Represents a value that is persisted via a file-based backing store.

  The value must be JSON-serializable. Each time the value is updated, it will
  be written to the backing file. It is only read from the file at
  initialization.
  """

  def __init__(self,
               initial_value: Any,
               filename: str,
               write_value: bool = True):
    """Initializes the instance.

    Args:
      initial_value: The initial value to use if no backing file exists or was
        given. This must be a JSON-serializable value (possibly nested
        combination of lists, dicts, and primitive values).
      filename: The path to use for persistent storage of the value. This may be
        `None`, in which case the value is not stable across preemptions.
      write_value: If `True`, new values will be written to `filename` on calls
        to `write()`. If `False`, `filename` is only read once to restore any
        persisted value, and new values will not be written to it. This can be
        useful in certain multi-client settings to avoid race conditions or
        excessive file writes. If `filename` is `None`, this parameter has no
        effect.
    """
    self._value = None
    self._filename = filename
    self._write_value = write_value
    if self._filename is not None:
      if tf.io.gfile.exists(self._filename):
        # Restore a previously persisted value; an empty file is treated
        # as "no value yet".
        if tf.io.gfile.stat(self._filename).length > 0:
          with tf.io.gfile.GFile(self._filename, 'r') as f:
            self._value = json.load(f)
      elif self._write_value:
        # Ensure the directory exists before the first write below.
        tf.io.gfile.makedirs(os.path.dirname(self._filename))
    if self._value is None:
      # Nothing restored from disk: fall back to the initial value
      # (persisting it when writes are enabled).
      self.write(initial_value)

  def read(self):
    """Returns the value."""
    return self._value

  def write(self, value):
    """Writes the value, updating the backing store if one was provided."""
    self._value = value
    if self._filename is not None and self._write_value:
      # To achieve atomic writes, we first write to a temporary file, and then
      # rename it to `self._filename`.
      tmp_filename = f'{self._filename}.tmp.{uuid.uuid4().hex}'
      with tf.io.gfile.GFile(tmp_filename, 'w') as f:
        json.dump(self._value, f)
      tf.io.gfile.rename(tmp_filename, self._filename, overwrite=True)
| 2.109375 | 2 |
python/aop/proxy.py | Soontao/snippets | 0 | 12757978 | # -*- coding: utf-8 -*-
import inspect
import datetime
# Default invocation handler: log the call, then delegate to the wrapped function.
def defaultHandler(func, *args, **kwargs):
    """Invoke *func* with the given arguments after printing its name."""
    print("function %s has been invoked " % func.__name__)
    return func(*args, **kwargs)
# Build a proxy callable that routes every invocation of *func* through *handler*.
def getProxy(func, handler=defaultHandler):
    """Return a wrapper around *func* that delegates each call to *handler*."""
    def proxy(*args, **kwargs):
        return handler(func, *args, **kwargs)
    return proxy
| 2.875 | 3 |
policies/rl/td3.py | twni2016/pomdp-baselines | 40 | 12757979 | import torch
from .base import RLAlgorithmBase
from policies.models.actor import DeterministicPolicy
from torchkit.networks import FlattenMlp
class TD3(RLAlgorithmBase):
    """Twin Delayed DDPG (TD3): a deterministic actor trained against twin
    critics, with exploration noise at act time and clipped smoothing noise
    on target-policy actions."""

    name = "td3"
    continuous_action = True
    use_target_actor = True

    def __init__(
        self, exploration_noise=0.1, target_noise=0.2, target_noise_clip=0.5, **kwargs
    ):
        # exploration_noise: std of Gaussian noise added when selecting actions.
        # target_noise / target_noise_clip: std and clip bound of the smoothing
        # noise injected into target-policy actions.
        # Extra **kwargs are accepted and ignored.
        self.exploration_noise = exploration_noise
        self.target_noise = target_noise
        self.target_noise_clip = target_noise_clip

    @staticmethod
    def build_actor(input_size, action_dim, hidden_sizes, **kwargs):
        """Construct the deterministic policy network."""
        return DeterministicPolicy(
            obs_dim=input_size,
            action_dim=action_dim,
            hidden_sizes=hidden_sizes,
            **kwargs,
        )

    @staticmethod
    def build_critic(hidden_sizes, input_size=None, obs_dim=None, action_dim=None):
        """Construct the twin Q-networks; input is obs+action concatenated
        when obs_dim/action_dim are given."""
        if obs_dim is not None and action_dim is not None:
            input_size = obs_dim + action_dim
        qf1 = FlattenMlp(
            input_size=input_size, output_size=1, hidden_sizes=hidden_sizes
        )
        qf2 = FlattenMlp(
            input_size=input_size, output_size=1, hidden_sizes=hidden_sizes
        )
        return qf1, qf2

    def select_action(self, actor, observ, deterministic: bool, **kwargs):
        """Return (action, mean, None, None); exploration noise is added to
        the actor mean unless deterministic."""
        mean = actor(observ)
        if deterministic:
            action_tuple = (mean, mean, None, None)
        else:
            action = (mean + torch.randn_like(mean) * self.exploration_noise).clamp(
                -1, 1
            )  # NOTE
            action_tuple = (action, mean, None, None)
        return action_tuple

    @staticmethod
    def forward_actor(actor, observ):
        """Run the actor; second element is None (no log-probs for a
        deterministic policy)."""
        new_actions = actor(observ)  # (*, B, dim)
        return new_actions, None

    def _inject_noise(self, actions):
        # Target-policy smoothing: clipped Gaussian noise, result re-clamped
        # to the [-1, 1] action range.
        action_noise = (torch.randn_like(actions) * self.target_noise).clamp(
            -self.target_noise_clip, self.target_noise_clip
        )
        new_actions = (actions + action_noise).clamp(-1, 1)  # NOTE
        return new_actions

    def critic_loss(
        self,
        markov_actor: bool,
        markov_critic: bool,
        actor,
        actor_target,
        critic,
        critic_target,
        observs,
        actions,
        rewards,
        dones,
        gamma,
        next_observs=None,  # used in markov_critic
    ):
        """Compute ((q1_pred, q2_pred), q_target) for the Bellman regression.

        Targets use the target actor with smoothing noise and the minimum of
        the twin target critics (clipped double-Q).
        """
        with torch.no_grad():
            # first next_actions from target policy,
            # (T+1, B, dim) including reaction to last obs
            if markov_actor:
                new_actions, _ = self.forward_actor(
                    actor_target, next_observs if markov_critic else observs
                )
            else:
                new_actions, _ = actor_target(
                    prev_actions=actions,
                    rewards=rewards,
                    observs=next_observs if markov_critic else observs,
                )

            new_actions = self._inject_noise(new_actions)

            if markov_critic:  # (B, 1)
                next_q1 = critic_target[0](next_observs, new_actions)
                next_q2 = critic_target[1](next_observs, new_actions)
            else:
                next_q1, next_q2 = critic_target(
                    prev_actions=actions,
                    rewards=rewards,
                    observs=observs,
                    current_actions=new_actions,
                )  # (T+1, B, 1)

            min_next_q_target = torch.min(next_q1, next_q2)

            # q_target: (T, B, 1)
            q_target = rewards + (1.0 - dones) * gamma * min_next_q_target  # next q
            if not markov_critic:
                q_target = q_target[1:]  # (T, B, 1)

        if markov_critic:
            q1_pred = critic[0](observs, actions)
            q2_pred = critic[1](observs, actions)
        else:
            # Q(h(t), a(t)) (T, B, 1)
            q1_pred, q2_pred = critic(
                prev_actions=actions,
                rewards=rewards,
                observs=observs,
                current_actions=actions[1:],
            )  # (T, B, 1)

        return (q1_pred, q2_pred), q_target

    def actor_loss(
        self,
        markov_actor: bool,
        markov_critic: bool,
        actor,
        actor_target,
        critic,
        critic_target,
        observs,
        actions=None,
        rewards=None,
    ):
        """Deterministic policy-gradient loss: maximize min(Q1, Q2) of the
        current critics at the actor's actions (returned negated)."""
        if markov_actor:
            new_actions, _ = self.forward_actor(actor, observs)
        else:
            new_actions, _ = actor(
                prev_actions=actions, rewards=rewards, observs=observs
            )  # (T+1, B, A)

        if markov_critic:
            q1 = critic[0](observs, new_actions)
            q2 = critic[1](observs, new_actions)
        else:
            q1, q2 = critic(
                prev_actions=actions,
                rewards=rewards,
                observs=observs,
                current_actions=new_actions,
            )  # (T+1, B, 1)

        min_q_new_actions = torch.min(q1, q2)  # (T+1,B,1)

        policy_loss = -min_q_new_actions
        if not markov_critic:
            policy_loss = policy_loss[:-1]  # (T,B,1) remove the last obs
        return policy_loss, None

    #### Below are used in shared RNN setting
    def forward_actor_in_target(self, actor, actor_target, next_observ):
        """Target-actor actions with smoothing noise (for shared-RNN training)."""
        new_next_actions, _ = self.forward_actor(actor_target, next_observ)
        return self._inject_noise(new_next_actions), None

    def entropy_bonus(self, log_probs):
        """Deterministic policy: no entropy bonus."""
        return 0.0
| 2.28125 | 2 |
build-x86_64-linux-gnu/quickfix-v.1.14.4/src/at.py | yerkobits/peatiobot | 8 | 12757980 | import sys
import time
import quickfix as fix
import at_application
# sys.argv[0] is always the script name, so the original `len(sys.argv) == 0`
# guard could never trigger, and the bare `exit` below it was a no-op name
# reference; require the settings-file argument and exit with a status code.
if len(sys.argv) < 2:
    print("usage: at.py -f FILE")
    sys.exit(1)
file = sys.argv[1]
settings = fix.SessionSettings( file )
application = at_application.Application()
factory = fix.FileStoreFactory( "store" )
acceptor = fix.SocketAcceptor( application, factory, settings )
acceptor.start()
# Keep the process alive while the acceptor threads service sessions.
while 1:
    time.sleep( 1 )
acceptor.stop()
| 2.1875 | 2 |
contest/biweekly/47/sum_power_of_3.py | phunc20/leetcode | 0 | 12757981 | #class Solution:
# Retained for backward compatibility with the original memoized implementation;
# the iterative digit check below no longer reads or mutates it.
qualified = {1, 3, 4, 9}


def checkPowersOfThree(n):
    """Return True iff n can be written as a sum of distinct powers of 3.

    n is such a sum exactly when its base-3 representation contains no
    digit 2 (each power of three is used at most once), so we just strip
    base-3 digits from the right and reject any 2.

    >>> checkPowersOfThree(12)   # 9 + 3
    True
    >>> checkPowersOfThree(21)   # 21 is 210 in base 3
    False
    """
    while n:
        n, digit = divmod(n, 3)
        if digit == 2:
            return False
    return True
if __name__ == "__main__":
    # Same three sample inputs as before: 12 = 9 + 3, 91 = 81 + 9 + 1,
    # and 21 (base-3 digit 2) is not representable.  The original wrapped
    # each result in a pointless f-string; printing the bool is identical.
    for n in (12, 91, 21):
        print(checkPowersOfThree(n))
| 3.96875 | 4 |
src/tab_am/col_preview.py | paltusplintus/arrows_dm | 0 | 12757982 | import json
import ipyvuetify as v
import ipywidgets as w
from src.neo_utils import delete_content, extract_jsons, get_file_content, get_files_list, get_files_stats, \
get_arrows_json, get_label_properties, save_merge_on, delete_merge_on, get_merge_on
import pprint
class ColPreview(v.Col):
    """Notebook panel (ipyvuetify Col) for browsing a Neo4j-backed file store.

    Provides three tabs: a file list with delete/edit actions and a summary
    pane, a label-based editor, and a "merge on" configuration editor.  The
    panel talks to the database through `self.neo` (set externally; render()
    is a no-op until it is assigned) and pushes JSON into
    `self.parent.col_json.text_area` for editing.
    """
    TEXTFIELD_PREFIX = "text_field_"
    def __init__(self, parent, *args, **kwargs):
        """Build all widgets, wire their event handlers and render once."""
        super().__init__(*args, **kwargs)
        self.pp = pprint.PrettyPrinter(indent=1)
        self.parent = parent
        # Neo4j interface object; must be assigned by the owner before use.
        self.neo = None
        self.label_properties = {}
        self.merge_on = {}
        # #files
        self.list_of_chkboxes = []
        self.list_of_selected_files = []
        self.col_list_files = v.Col(children=[])
        self.btn_delete = v.Btn(children=["Delete"], disabled=True)
        self.btn_delete.on_event('click', self.on_btn_delete_clicked)
        self.btn_edit = v.Btn(children=["Edit"], disabled=True)
        self.btn_edit.on_event('click', self.on_btn_edit_clicked)
        self.btn_edit_all = v.Btn(children=["Edit All"])
        self.btn_edit_all.on_event('click', self.on_btn_edit_all_clicked)
        # #summary
        self.chk_detailed_summary = v.Checkbox(label="Detailed Summary", v_model=False, disabled=True)
        self.chk_detailed_summary.on_event("change", self.on_chk_detailed_summary_changed)
        self.out_summary = w.Output()
        # #labels
        self.btn_edit_by_label = v.Btn(children=["Edit"])
        self.btn_edit_by_label.on_event('click', self.on_btn_edit_by_label_clicked)
        self.chk_include_neighbors = v.Checkbox(label="Include Neighbors", v_model=False)
        self.select_edit_selected = v.Autocomplete(items=[], select_first=True, hint='Edit')
        # #setup merge
        self.select_setup_merge_label = v.Autocomplete(items=[], select_first=True, label="Select Label")
        self.select_setup_merge_label.on_event('change', self.on_select_setup_merge_label_changed)
        self.text_setup_merge_label = v.TextField(v_model="", label="Enter other Label")
        self.text_setup_merge_label.on_event('input', self.on_text_setup_merge_label_changed)
        self.text_setup_merge_prop = v.TextField(v_model="", label="Comma-separated property list")
        self.text_setup_merge_prop.on_event('input', self.on_text_setup_merge_prop_changed)
        self.btn_save_merge = v.Btn(children=["Save"])
        self.btn_save_merge.on_event('click', self.on_btn_save_merge_clicked)
        self.btn_delete_merge = v.Btn(children=["Delete"])
        self.btn_delete_merge.on_event('click', self.on_btn_delete_merge_clicked)
        self.out_setup_merge = w.Output()
        # #all
        self.out = w.Output()
        self.children = [
            v.Tabs(children=[
                v.Tab(children=["Files"]),
                v.TabItem(children=[
                    v.Row(children=[
                        v.Col(children=[
                            self.col_list_files,
                            self.btn_delete,
                            self.btn_edit,
                            self.btn_edit_all,
                        ]),
                        v.Col(children=[
                            self.chk_detailed_summary,
                            self.out_summary,
                        ]),
                    ])
                ]),
                v.Tab(children=["Labels"]),
                v.TabItem(children=[
                    v.Row(children=[
                        self.select_edit_selected,
                        self.chk_include_neighbors
                    ]),
                    self.btn_edit_by_label,
                ]),
                v.Tab(children=["Merge_On Setup"]),
                v.TabItem(children=[
                    v.Row(children=[
                        v.Col(children=[
                            self.select_setup_merge_label,
                            self.text_setup_merge_label,
                            self.text_setup_merge_prop,
                            self.btn_save_merge,
                            self.btn_delete_merge,
                        ]),
                        v.Col(children=[
                            self.out_setup_merge,
                        ])
                    ])
                ]),
            ]),
            self.out
        ]
        self.render()
    def on_btn_edit_clicked(self, widget, event, data):
        """Load the single selected file's JSON content into the editor pane."""
        neores = get_file_content(self.neo, self.list_of_selected_files[0])
        if len(neores) == 1:
            json_content = neores[0]['content']
            if json_content:
                self.parent.col_json.text_area.v_model = json_content
            else:
                self.out.clear_output()
                with self.out:
                    print(
                        "ERROR: selected file does not have content in .json property. Please contact database administrator.")
        elif len(neores) > 1:
            self.out.clear_output()
            with self.out:
                print("ERROR: there is >1 file with specified name. Please contact database administrator.")
        else:
            self.out.clear_output()
            with self.out:
                print(
                    f"ERROR: no file with name {self.list_of_selected_files[0]} was found. Please contact database administrator.")
    def on_btn_edit_all_clicked(self, widget, event, data):
        """Load an arrows JSON of all non-metadata nodes into the editor pane."""
        res = get_arrows_json(neo=self.neo, where="NOT (x:_File_:_Metadata_) and NOT (x:_MergeOn_:_Metadata_)",
                              incl_neighbors=True)
        if res:
            self.parent.col_json.text_area.v_model = json.dumps(res[0])
        else:
            self.out.clear_output()
            with self.out:
                print(
                    f"No data found in the database")
    def on_btn_edit_by_label_clicked(self, widget, event, data):
        """Load nodes of the selected label (optionally with neighbors) into the editor."""
        if self.select_edit_selected.v_model:
            label = self.select_edit_selected.v_model
            res = get_arrows_json(neo=self.neo, where=f"x:`{label}`", incl_neighbors=self.chk_include_neighbors.v_model)
            if res:
                self.parent.col_json.text_area.v_model = json.dumps(res[0])
            else:
                self.out.clear_output()
                with self.out:
                    print(
                        f"No {label} data found in the database")
    def on_btn_delete_clicked(self, widget, event, data):
        """Delete the selected files, rebuild derived data and refresh the UI."""
        delete_content(self.neo, names=self.list_of_selected_files)
        # Wipe everything except the metadata labels, then re-extract all JSONs.
        self.neo.clean_slate(keep_labels=['_File_', '_MergeOn_', '_Metadata_'])
        extract_jsons(neo=self.neo, merge_on=self.parent.get_merge_on())
        self.render()
    def on_chk_detailed_summary_changed(self, widget, event, data):
        """Re-render the summary pane when the detail level is toggled."""
        self.refresh_selected_files_stats()
    def get_selected_files(self):
        """Return the labels (filenames) of all checked file checkboxes."""
        return [item.label for item in self.list_of_chkboxes if item.v_model]
    def on_chkbox_changed(self, widget, event, data):
        """Track the selection and enable/disable the dependent buttons."""
        self.list_of_selected_files = self.get_selected_files()
        self.refresh_selected_files_stats()
        # Edit needs exactly one file; delete and detailed summary need >= 1.
        if len(self.list_of_selected_files) != 1:
            self.btn_edit.disabled = True
        else:
            self.btn_edit.disabled = False
        if len(self.list_of_selected_files) > 0:
            self.chk_detailed_summary.disabled = False
            self.btn_delete.disabled = False
        else:
            self.chk_detailed_summary.disabled = True
            self.btn_delete.disabled = True
        # print(self.list_of_selected_files)
    def refresh_col_list_files(self):
        """Rebuild the checkbox list of files from the database."""
        res = get_files_list(self.neo)
        self.out.clear_output()
        if res:
            with self.out:
                assert len(res) == 1, """
                >1 chain of _File_ nodes exists in the database.
                Clear your database if you have a backup of you data, otherwise contact the database administrator.
                """
            files = res[0]['filenames']
            if files:
                self.col_list_files.children = []
                self.list_of_chkboxes = []
                for i, file in enumerate(files):
                    chkbox = v.Checkbox(label=file, v_model=False)
                    self.list_of_chkboxes.append(chkbox)
                    self.list_of_chkboxes[i].on_event("change", self.on_chkbox_changed)
                self.col_list_files.children = self.list_of_chkboxes
            else:
                self.col_list_files.children = []
                self.out.clear_output()
                with self.out:
                    print("No files data was found in the database")
        else:
            self.col_list_files.children = []
            with self.out:
                print("No files data was found in the database or >1000 files")
    def refresh_selected_files_stats(self):
        """Pretty-print statistics for the selected files into the summary pane."""
        res = get_files_stats(self.neo, filenames=self.get_selected_files(), detailed=self.chk_detailed_summary.v_model)
        self.out_summary.clear_output()
        if res:
            with self.out_summary:
                self.pp.pprint(res)
    def refresh_select_edit_selected(self):
        """Repopulate the label selector, excluding internal metadata labels."""
        self.select_edit_selected.items = [label for label in self.neo.get_labels() if
                                           label not in ['_File_', '_MergeOn_', '_Metadata_']]
        if self.select_edit_selected.items:
            self.select_edit_selected.v_model = self.select_edit_selected.items[0]
    def on_btn_save_merge_clicked(self, widget, event, data):
        """Persist the merge-on definition and create one index per property."""
        label = (self.select_setup_merge_label.v_model
                 if self.select_setup_merge_label.v_model != "Other" else
                 self.text_setup_merge_label.v_model)
        prop_list = self.text_setup_merge_prop.v_model.split(",")
        for prop in prop_list:
            self.neo.create_index(label, prop) #TODO: for Neo4j enterprise edition the can set index on pair of properties
        save_merge_on(
            neo=self.neo,
            label=label,
            properties=self.text_setup_merge_prop.v_model
        )
        self.refresh_out_setup_merge()
    def on_btn_delete_merge_clicked(self, widget, event, data):
        """Drop the per-property indexes and remove the merge-on definition."""
        label = (self.select_setup_merge_label.v_model
                 if self.select_setup_merge_label.v_model != "Other" else
                 self.text_setup_merge_label.v_model)
        prop_list = self.text_setup_merge_prop.v_model.split(",")
        for prop in prop_list:
            self.neo.drop_index(f"{label}.{prop}")
        delete_merge_on(
            neo=self.neo,
            label=label
        )
        self.refresh_out_setup_merge()
    def on_select_setup_merge_label_changed(self, widget, event, data):
        """Sync the property field and button states with the chosen label."""
        cur_selection = self.select_setup_merge_label.v_model
        if cur_selection and cur_selection != 'Other':
            self.text_setup_merge_prop.v_model = self.label_properties[cur_selection]
            self.text_setup_merge_label.disabled = True
            self.btn_save_merge.disabled = False
            self.btn_delete_merge.disabled = False
        else:
            self.text_setup_merge_prop.v_model = ''
            self.text_setup_merge_label.disabled = False
            self.btn_save_merge.disabled = True
            self.btn_delete_merge.disabled = True
    def on_text_setup_merge_label_changed(self, widget, event, data):
        """Enable saving once a custom label has been typed.

        NOTE(review): the second operand tests the TextField widget object
        (always truthy), not its `.v_model` value — confirm intent.
        """
        if self.text_setup_merge_label.v_model and self.text_setup_merge_prop:
            self.btn_save_merge.disabled = False
    def on_text_setup_merge_prop_changed(self, widget, event, data):
        """Enable saving once a label choice and properties are available.

        NOTE(review): as above, `self.text_setup_merge_prop` is the widget
        itself and is always truthy — confirm whether `.v_model` was meant.
        """
        if (self.text_setup_merge_label.v_model or self.select_setup_merge_label.v_model != 'Other') \
                and self.text_setup_merge_prop:
            self.btn_save_merge.disabled = False
    def refresh_select_setup_merge(self):
        """Rebuild label -> properties mapping and the merge-label selector."""
        res1 = get_label_properties(self.neo)
        res2 = get_merge_on(self.neo)
        if res1:
            self.label_properties = {k: ','.join(i) for k, i in res1[0]['map'].items() if
                                     not k in ['_File_', '_MergeOn_', '_Metadata_']}
            if res2:
                # Saved merge-on definitions override the discovered properties.
                self.label_properties.update(res2[0]['map'].items())
        else:
            self.label_properties = {}
        cur_selection = self.select_setup_merge_label.v_model
        self.select_setup_merge_label.items = ['Other'] + list(self.label_properties.keys())
        if cur_selection in self.select_setup_merge_label.items:
            pass
        else:
            self.select_setup_merge_label.v_model = 'Other'
            self.on_select_setup_merge_label_changed(None, None, None)
    def refresh_out_setup_merge(self):
        """Reload and pretty-print the stored merge-on mapping."""
        res = get_merge_on(self.neo)
        self.out_setup_merge.clear_output()
        if res:
            self.merge_on = res[0]['map']
        else:
            self.merge_on = {}
        with self.out_setup_merge:
            self.pp.pprint(self.merge_on)
    def render(self):
        """Refresh every sub-panel; no-op until `self.neo` has been assigned."""
        if self.neo:
            self.refresh_col_list_files()
            self.refresh_selected_files_stats()
            self.refresh_select_edit_selected()
            self.refresh_select_setup_merge()
            self.refresh_out_setup_merge()
| 2.34375 | 2 |
openpds/core/views.py | eschloss/FluFuture | 0 | 12757983 | from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponse
from openpds.core.models import Profile
from pymongo import Connection
from openpds import settings
import json
import random
def dump(request):
    """Render every profile's funf MongoDB collection via the CSV template.

    Opens a connection to a randomly chosen MongoDB host (read preference
    'nearest'), collects one cursor per profile keyed by the profile's UUID,
    and feeds the mapping to the ``dataDump.csv`` template.

    NOTE(review): the connection is closed before the template is rendered;
    if the template iterates the (lazy) pymongo cursors this may fail or
    return nothing — confirm against the template.
    """
    profiles = Profile.objects.all()
    data = {}
    connection = Connection(
        host=random.choice(getattr(settings, "MONGODB_HOST", None)),
        port=getattr(settings, "MONGODB_PORT", None),
        readPreference='nearest'
    )
    for profile in profiles:
        # Each user's data lives in its own database named "User_<id>".
        db = connection["User_" + str(profile.id)]
        funf = db["funf"]
        data[profile.uuid] = funf.find()
    connection.close()
    return render_to_response("dataDump.csv", data)
| 2.265625 | 2 |
deepblocks/optim/__init__.py | blurry-mood/Deep-Learning-Blocks | 3 | 12757984 | """ **Optim** implements weight optimization algorithms.
It contains:
- SAM algorithm (Sharpness-Aware Minimization).
"""
from .SAM import SAM | 1.476563 | 1 |
YouTube Bot Server/socketservervideogenerator.py | bluesammer/Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader | 0 | 12757985 | <reponame>bluesammer/Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader<gh_stars>0
import settings
import socket
from time import sleep
from threading import Thread
import database
import datetime
import pickle
# NOTE(review): this rebinding shadows the imported `socket` module with a
# single listening socket object; the module API is unreachable from here on
# (all later uses in this file go through this socket instance).
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# (connection, address) tuple of the single connected generator, or None.
connectedVideoGenerator = None
class VideoGeneratorClient():
    """Container for a connected video-generator client's session state."""

    def __init__(self, connection, address, authorized):
        """Remember the socket connection, peer address and authorization flag."""
        self.connection = connection
        self.address = address
        self.authorized = authorized
def startVideoGeneratorServer():
    """Bind and listen on the configured address, then start the accept and tick threads."""
    bind_addr = (settings.server_location, int(settings.server_port_vid_gen))
    print('Starting video generator server on %s port %s' % bind_addr)
    socket.bind(bind_addr)
    socket.listen(5)
    accept_thread = Thread(target=waitConnect)
    accept_thread.start()
    tick_thread = Thread(target=serverTick)
    tick_thread.start()
def waitConnect():
    """Accept thread: wait for (at most one) video-generator client.

    Only a single generator may be connected at a time; while one is
    connected the loop idles instead of accepting further connections.
    Each accepted client gets its own videoGeneratorTick handler thread.
    """
    print("Server Video Generator wait client thread started")
    global connectedVideoGenerator
    while True:
        if connectedVideoGenerator is None:
            connection, address = socket.accept()
            print("%s Video Generator connected on %s" % (datetime.datetime.now(), address))
            connectedVideoGenerator = (connection, address)
            clientthread = Thread(target=videoGeneratorTick)
            clientthread.start()
        else:
            # The original loop spun at 100% CPU while a client was already
            # connected; back off briefly instead.
            sleep(0.5)
def sendToClient(client_connection, payloadattachment):
    """Pickle the payload, prefix a fixed-width length header and send it.

    The header is the pickled payload's byte length, left-justified in a
    10-character UTF-8 field, matching what videoGeneratorTick expects.
    A reset connection is logged rather than raised.
    """
    HEADERSIZE = 10
    try:
        body = pickle.dumps(payloadattachment)
        header = bytes(f"{len(body):<{HEADERSIZE}}", 'utf-8')
        client_connection.sendall(header + body)
    except ConnectionResetError:
        print("Couldn't send message cuz client disconnected")
def videoGeneratorTick():
    """Receive loop for the connected video generator.

    Messages are framed as a 10-byte left-justified decimal length header
    followed by a pickled tuple whose first element is a command string:
    'video-generator-request-scripts', 'flag-scripts', 'fin-script' or
    'last-uploaded'.  Runs until the client disconnects, then clears the
    connectedVideoGenerator slot so waitConnect can accept a new client.

    NOTE(review): `msglen` is only assigned when a header was parsed; if the
    first recv raises before that, the later `download_size == msglen`
    comparison would hit an unbound local — confirm reachable paths.
    """
    global connectedVideoGenerator
    print("Server tick thread started for Video Generator")
    HEADERSIZE = 10
    disconnect = False
    while not disconnect:
        full_msg = b''
        new_msg = True
        while True:
            try:
                client_connection = connectedVideoGenerator[0]
                buf = client_connection.recv(2048)
                if new_msg:
                    # First chunk of a message carries the length header.
                    try:
                        msglen = int(buf[:HEADERSIZE])
                    except ValueError:
                        # happens when client disconnects
                        disconnect = True
                        break
                    new_msg = False
                full_msg += buf
            except ConnectionResetError:
                print("%s VID GEN SERVER connecton reset error" % (datetime.datetime.now()))
                disconnect = True
                break
            download_size = len(full_msg) - HEADERSIZE
            if download_size == msglen:
                if download_size > 100000:
                    print(
                        "%s VID GEN SERVER received large message (%s)" % (datetime.datetime.now(), str(download_size / 1000000) + "MB"))
                try:
                    incomingdata = pickle.loads(full_msg[HEADERSIZE:])
                except EOFError:
                    print("%s VID GEN SERVER disconnected" % (datetime.datetime.now()))
                    break
                # Reset framing state for the next message.
                new_msg = True
                full_msg = b""
                if "video-generator-request-scripts" == incomingdata[0]:
                    current_scripts_in_generator = incomingdata[1]
                    print("%s VID GEN SERVER request scripts: current scripts %s" % (datetime.datetime.now(), current_scripts_in_generator))
                    scripts = retrieveScripts(current_scripts_in_generator)
                    sendToClient(client_connection, ('script-send-to-generator', scripts))
                elif "flag-scripts" == incomingdata[0]:
                    scriptno = incomingdata[1]
                    flagtype = incomingdata[2]
                    database.updateScriptStatus(flagtype, None, scriptno)
                    print("%s VID GEN SERVER user %s flagging script %s as %s" % (
                        datetime.datetime.now(), None, scriptno, flagtype))
                elif "fin-script" == incomingdata[0]:
                    scriptno = incomingdata[1]
                    timeuploaded = incomingdata[2]
                    scedualedrelease = incomingdata[3]
                    database.completeUpload(scriptno, timeuploaded, scedualedrelease)
                    print("%s VID GEN SERVER completing script %s time uploaded %s scedualedrelease %s" % (
                        datetime.datetime.now(), scriptno, timeuploaded, scedualedrelease))
                elif "last-uploaded" == incomingdata[0]:
                    last_times = database.getLastUploadedScripts()
                    if last_times is None:
                        sendToClient(client_connection, ('last-uploaded', 0))
                    else:
                        sendToClient(client_connection, ('last-uploaded', last_times))
                    print("%s VID GEN SERVER sending last uploaded videos times" % (
                        datetime.datetime.now()))
    print("VID GEN CLIENT DISCONNECTED")
    connectedVideoGenerator = None
def retrieveScripts(scripts_in_generator):
    """Return up to 5 completed scripts the generator does not already hold.

    `scripts_in_generator` is the collection of script numbers already on
    the client; each completed-script record carries its number at index 0.
    """
    completed_scripts = database.getCompletedScripts(5)
    print("%s VID GEN SERVER downloaded %s video scripts from server" % (datetime.datetime.now(), len(completed_scripts)))
    scripts_to_send = [record for record in completed_scripts
                       if record[0] not in scripts_in_generator]
    scriptnostosend = [record[0] for record in scripts_to_send]
    print("%s VID GEN SERVER scripts to send %s (%s)" % (datetime.datetime.now(), len(scripts_to_send), scriptnostosend))
    return scripts_to_send
def serverTick():
    """Background heartbeat thread; currently idles (periodic work is commented out)."""
    global connectedVideoGenerator
    while True:
        sleep(0.1)
        #print(database.getCompletedScripts())
| 2.859375 | 3 |
app/controllers/cards_controller.py | AndreiBarbuOz/uibank-api-server | 0 | 12757986 | import connexion
import six
from app.models.bank_card import BankCard # noqa: E501
from app import util
def add_bank_card(body, account_id):  # noqa: E501
    """Add a new bank card for an account

    Add a new card for the specified account # noqa: E501

    :param body: Bank card details
    :type body: dict | bytes
    :param account_id: Id of account
    :type account_id: str

    :rtype: BankCard
    """
    # Deserialize the JSON request body into the generated model class.
    if connexion.request.is_json:
        body = BankCard.from_dict(connexion.request.get_json())  # noqa: E501
    # Connexion/Swagger-generated stub: persistence is not implemented yet.
    return 'do some magic!'
def get_card(card_id):  # noqa: E501
    """Return the bank card with the given id

    Return the card identified by card_id # noqa: E501

    :param card_id: Id of the card
    :type card_id: str

    :rtype: BankCard
    """
    # Connexion/Swagger-generated stub: lookup is not implemented yet.
    return 'do some magic!'
def list_bank_cards(account_id):  # noqa: E501
    """Return all bank cards for an account

    Return all cards for the specified account # noqa: E501

    :param account_id: Id of account
    :type account_id: str

    :rtype: List[BankCard]
    """
    # Connexion/Swagger-generated stub: listing is not implemented yet.
    return 'do some magic!'
| 2.78125 | 3 |
tests/test_sql.py | dtheodor/sqlalchemy-sqlschema | 6 | 12757987 | # -*- coding: utf-8 -*-
"""
Created by @dtheodor at 2015-05-31
Test SQL compilation of SQL schema set and get.
"""
from sqlalchemy.dialects.postgres import dialect as pg_dialect
from sqlalchemy.dialects.oracle import dialect as oracle_dialect
from sqlalchemy.dialects.mssql import dialect as mssql_dialect
from sqlalchemy_sqlschema.sql import set_schema, get_schema
class TestDefaultSqlCompilation(object):
    """The generic (default) dialect should emit plain SCHEMA statements."""

    def test_get_schema(self):
        """Generic form of the current-schema query."""
        rendered = str(get_schema())
        assert rendered == "SHOW SCHEMA"

    def test_set_schema(self):
        """Generic form of the schema-switch statement."""
        rendered = str(set_schema("new_schema"))
        assert rendered == "SET SCHEMA new_schema"
class TestPostgresSqlCompilation(object):
    """PostgreSQL renders schema operations through search_path."""

    def test_get_schema(self):
        """PostgreSQL reads the schema via SHOW search_path."""
        rendered = str(get_schema().compile(dialect=pg_dialect()))
        assert rendered == "SHOW search_path"

    def test_set_schema(self):
        """PostgreSQL sets the schema via SET search_path TO."""
        rendered = str(set_schema("new_schema").compile(dialect=pg_dialect()))
        assert rendered == "SET search_path TO new_schema"
class TestOracleCompilation(object):
    """Oracle renders schema operations through CURRENT_SCHEMA."""

    def test_get_schema(self):
        """Oracle reads the schema from the USERENV context."""
        rendered = str(get_schema().compile(dialect=oracle_dialect()))
        assert rendered == "SELECT sys_context('USERENV', 'CURRENT_SCHEMA') FROM dual"

    def test_set_schema(self):
        """Oracle switches schema with an ALTER SESSION statement."""
        rendered = str(set_schema("new_schema").compile(dialect=oracle_dialect()))
        assert rendered == "ALTER SESSION SET CURRENT_SCHEMA = new_schema"
class TestMssqlCompilation(object):
    """SQL Server renders the schema query via SCHEMA_NAME()."""

    def test_get_schema(self):
        """MSSQL reads the schema with SELECT SCHEMA_NAME()."""
        rendered = str(get_schema().compile(dialect=mssql_dialect()))
        assert rendered == "SELECT SCHEMA_NAME()"
| 2.234375 | 2 |
tests/test_Project_1.py | yaraki0912/Project_1 | 0 | 12757988 | <reponame>yaraki0912/Project_1<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `Project_1` package."""
import pytest
import matplotlib
matplotlib.use('Agg')
import matplotlib
import matplotlib.pyplot as plt
from click.testing import CliRunner
import Project_1
from Project_1 import Project_1
import io
import os
import numpy as np
import argparse
import unittest
import Project_1.Project_1 as project_1
from io import StringIO
class Test_Project_1(unittest.TestCase):
    """Unit tests for the Project_1 Langevin-dynamics package."""
    def setUp(self):
        # No shared fixtures are needed.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def test_inputs(self):
        '''
        Test that the argument parser accepts and stores all user inputs.
        '''
        self.parser = project_1.input()
        parsed = self.parser.parse_args(['--temperature', '300.0','--total_time', '1000.0','--time_step', '0.1','--initial_position', '0.0','--initial_velocity', '0.0','--damping_coefficient', '0.1'])
        self.assertEqual([parsed.temperature,parsed.total_time, parsed.time_step, parsed.initial_position, parsed.initial_velocity, parsed.damping_coefficient], [[300.0], [1000.0], [0.1], [0.0], [0.0], [0.1]] )
    def test_random_f(self):
        '''Test that the random force is zero when the noise scale is zero.'''
        self.assertEqual(project_1.random_f(1, 0,K_B=1,epsi=1), 0)
    def test_drag_f(self):
        '''Test that the drag force is zero at zero velocity.'''
        self.assertEqual(project_1.drag_f(0, 2), 0)
    def test_euler(self):
        '''Test that the Euler step returns (0, 0) for all-zero inputs.'''
        self.assertEqual(project_1.euler(0,0,0,0,0), (0,0))
    def test_hit_wall(self):
        '''Test the position/time trajectory returned by hit_wall.'''
        self.assertEqual(project_1.hit_wall(200,1,1,1,1,1,1),(200, [1, 201], [0, 200]))
    def test_write_output(self):
        '''Test that write_output writes the expected formatted rows to Output.txt.'''
        test = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
        test_value1 = '1 2.0000 3.0000 4.0000 \n'
        test_value2 = '5 6.0000 7.0000 8.0000 \n'
        project_1.write_output(test)
        out='Output.txt'
        f = open(out, 'r')
        test_data = list(f.readlines())
        f.close()
        # Row 0 is the header, so the data rows start at index 1.
        self.assertEqual(test_data[1], test_value1)
        self.assertEqual(test_data[2], test_value2)
    def test_plot(self):
        '''Test that plot() reports the expected histogram file path.'''
        trj_path, hist_path = project_1.plot()
        self.assertEqual(hist_path, 'histogram.png')
| 2.5625 | 3 |
example/myshop/modifiers.py | taime/django-shop | 39 | 12757989 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from shop.modifiers.pool import cart_modifiers_pool
from shop.serializers.cart import ExtraCartRow
from shop.modifiers.base import ShippingModifier
from shop.money import Money
from shop.shipping.defaults import DefaultShippingProvider
from shop_stripe import modifiers
class PostalShippingModifier(ShippingModifier):
    """Shipping modifier that charges a flat postal fee of Money('5')."""
    identifier = 'postal-shipping'
    shipping_provider = DefaultShippingProvider()
    def get_choice(self):
        """Return the (identifier, human-readable label) pair for the selector."""
        return (self.identifier, _("Postal shipping"))
    def add_extra_cart_row(self, cart, request):
        """Append a flat shipping-fee row to the cart and bump the cart total.

        Skipped when this modifier is not the active one and the shopper can
        still choose among multiple shipping modifiers.
        """
        if not self.is_active(cart) and len(cart_modifiers_pool.get_shipping_modifiers()) > 1:
            return
        # add a shipping flat fee
        amount = Money('5')
        instance = {'label': _("Shipping costs"), 'amount': amount}
        cart.extra_rows[self.identifier] = ExtraCartRow(instance)
        cart.total += amount
class CustomerPickupModifier(ShippingModifier):
    """Free shipping option where the customer collects the goods in person."""
    identifier = 'customer-pickup'

    def get_choice(self):
        """Return the (identifier, human-readable label) pair for the selector."""
        caption = _("Customer pickups the goods")
        return (self.identifier, caption)
class StripePaymentModifier(modifiers.StripePaymentModifier):
    """Stripe payment modifier for this shop with a 3% commission."""
    # NOTE(review): the attribute name 'commision' (sic) must match what the
    # shop_stripe base class reads — confirm before fixing the spelling.
    commision_percentage = 3
| 2.15625 | 2 |
examples/simulation/source_simulator.py | lokinou/mne-python | 0 | 12757990 | <reponame>lokinou/mne-python
# -*- coding: utf-8 -*-
"""
.. _ex-sim-source:
==============================
Generate simulated source data
==============================
This example illustrates how to use the :class:`mne.simulation.SourceSimulator`
class to generate source estimates and raw data. It is meant to be a brief
introduction and only highlights the simplest use case.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
# %%
import os.path as op
import numpy as np
import mne
from mne.datasets import sample
print(__doc__)
# For this example, we will be using the information of the sample subject.
# This will download the data if it is not already on your machine. We also
# set the subjects directory so we don't need to give it to functions.
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'sample'
# First, we get an info structure from the test subject.
evoked_fname = op.join(data_path, 'MEG', subject, 'sample_audvis-ave.fif')
info = mne.io.read_info(evoked_fname)
tstep = 1. / info['sfreq']
# To simulate sources, we also need a source space. It can be obtained from the
# forward solution of the sample subject.
fwd_fname = op.join(data_path, 'MEG', subject,
                    'sample_audvis-meg-eeg-oct-6-fwd.fif')
fwd = mne.read_forward_solution(fwd_fname)
src = fwd['src']
# To select a region to activate, we use the caudal middle frontal to grow
# a region of interest.
selected_label = mne.read_labels_from_annot(
    subject, regexp='caudalmiddlefrontal-lh', subjects_dir=subjects_dir)[0]
location = 'center'  # Use the center of the region as a seed.
extent = 10.  # Extent in mm of the region.
label = mne.label.select_sources(
    subject, selected_label, location=location, extent=extent,
    subjects_dir=subjects_dir)
# Define the time course of the activity for each source of the region to
# activate. Here we use a sine wave at 18 Hz with a peak amplitude
# of 10 nAm.
source_time_series = np.sin(2. * np.pi * 18. * np.arange(100) * tstep) * 10e-9
# Define when the activity occurs using events. The first column is the sample
# of the event, the second is not used, and the third is the event id. Here the
# events occur every 200 samples.
n_events = 50
events = np.zeros((n_events, 3), int)
events[:, 0] = 100 + 200 * np.arange(n_events)  # Events sample.
events[:, 2] = 1  # All events share the same event id (1).
# Create simulated source activity. Here we use a SourceSimulator whose
# add_data method is key. It specified where (label), what
# (source_time_series), and when (events) an event type will occur.
source_simulator = mne.simulation.SourceSimulator(src, tstep=tstep)
source_simulator.add_data(label, source_time_series, events)
# Project the source time series to sensor space and add some noise. The source
# simulator can be given directly to the simulate_raw function.
raw = mne.simulation.simulate_raw(info, source_simulator, forward=fwd)
cov = mne.make_ad_hoc_cov(raw.info)
mne.simulation.add_noise(raw, cov, iir_filter=[0.2, -0.2, 0.04])
raw.plot()
# Plot evoked data to get another view of the simulated raw data.
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, 1, tmin=-0.05, tmax=0.2)
evoked = epochs.average()
evoked.plot()
| 3.0625 | 3 |
simnode/dbtypes/criteria.py | timbleman/DriveBuild | 0 | 12757991 | from abc import ABC
from dataclasses import dataclass
from enum import Enum
from logging import getLogger
from typing import List, Tuple, Callable
from beamngpy import Scenario
_logger = getLogger("DriveBuild.SimNode.DBTypes.Criteria")
class KPValue(Enum):
    """
    Three-valued (Kleene-Priest) logic value used for criterion evaluation.
    """
    # The trailing commas make TRUE/FALSE 1-tuples; member values kept as-is.
    TRUE = True,
    FALSE = False,
    UNKNOWN = None

    # NOTE Do not underestimate the complexity of the implementation of these logical operators!
    def __and__(self, other):
        """Kleene conjunction: FALSE dominates, then UNKNOWN, else TRUE."""
        if KPValue.FALSE in (self, other):
            return KPValue.FALSE
        if KPValue.UNKNOWN in (self, other):
            return KPValue.UNKNOWN
        return KPValue.TRUE

    def __or__(self, other):
        """Kleene disjunction: TRUE dominates, then UNKNOWN, else FALSE."""
        if KPValue.TRUE in (self, other):
            return KPValue.TRUE
        if KPValue.UNKNOWN in (self, other):
            return KPValue.UNKNOWN
        return KPValue.FALSE

    def __neg__(self):
        """Kleene negation; UNKNOWN negates to itself."""
        flipped = {KPValue.TRUE: KPValue.FALSE, KPValue.FALSE: KPValue.TRUE}
        return flipped.get(self, KPValue.UNKNOWN)
class Evaluable(ABC):
    """Abstract interface for anything that can be evaluated to a KPValue."""
    from abc import abstractmethod
    @abstractmethod
    def eval(self) -> KPValue:
        """
        Evaluates to KPValue.TRUE only if the condition got triggered.
        """
        pass
class UnknownEvaluable(Evaluable):
    """
    A class that can be used for representing an "empty" evaluable e.g. representing an empty precondition criterion.
    """
    def eval(self) -> KPValue:
        """Always evaluate to UNKNOWN (no information available)."""
        return KPValue.UNKNOWN
class Criterion(Evaluable, ABC):
    """Base class for evaluable criteria bound to a BeamNG scenario."""
    def __init__(self, scenario: Scenario) -> None:
        # The scenario this criterion is evaluated against.
        self.scenario = scenario
# State conditions
# FIXME Recognize "any" participant
class StateCondition(Criterion, ABC):
    """
    NOTE: A StateCondition does never call Vehicle::update_vehicle() which has to be called before every evaluation.
    """
    from abc import abstractmethod
    from requests import AiRequest
    from beamngpy import Vehicle
    from typing import Any
    from drivebuildclient import static_vars
    def __init__(self, scenario: Scenario, participant: str) -> None:
        """Attach the condition's sensor requests to the participant's vehicle.

        If the simulation is already running, the sensors are polled once so
        that the vehicle's sensor cache is never empty on first evaluation.
        """
        super().__init__(scenario)
        # TODO Check existence of participant id
        self.participant = participant
        self.requests = self._create_requests()
        for request in self.requests:
            vehicle = self._get_vehicle()
            request.add_sensor_to(vehicle)
            # Make sure vehicle sensor_cache is not empty
            if self._is_simulation_running():
                scenario.bng.poll_sensors(vehicle)
    def _get_vehicle(self) -> Vehicle:
        """Return the vehicle object of this condition's participant."""
        return self.scenario.get_vehicle(self.participant)
    def _poll_request_data(self) -> List[Any]:
        """Read the cached sensor value of every attached request, in order."""
        request_data = []
        for request in self.requests:
            request_data.append(request.read_sensor_cache_of(self._get_vehicle(), self.scenario))
        return request_data
    @static_vars(prefix="criterion_", counter=0)
    def _generate_rid(self) -> str:
        """Generate a request id ("criterion_<n>") unused by the vehicle's sensors.

        The counter is shared across all StateCondition instances via the
        static_vars decorator attributes on this function.
        """
        while True: # Pseudo "do-while"-loop
            rid = StateCondition._generate_rid.prefix + str(StateCondition._generate_rid.counter)
            if rid in self._get_vehicle().sensors:
                StateCondition._generate_rid.counter += 1
            else:
                break
        return rid
    def _is_simulation_running(self) -> bool:
        """True once the scenario has a live BeamNG connection."""
        return self.scenario.bng is not None
    def eval(self) -> KPValue:
        """Evaluate the condition, or UNKNOWN while the simulation is not running."""
        if self._is_simulation_running():
            return self._eval_impl()
        else:
            return KPValue.UNKNOWN
    @abstractmethod
    def _eval_impl(self) -> KPValue:
        # Subclasses implement the actual check; only called while running.
        pass
    @abstractmethod
    def _create_requests(self) -> List[AiRequest]:
        # Subclasses declare which sensor requests they need.
        pass
class SCPosition(StateCondition):
    """Triggers when the participant is within `tolerance` of a target (x, y)."""
    from requests import AiRequest
    def __init__(self, scenario: Scenario, participant: str, x: float, y: float, tolerance: float):
        """:raises ValueError: if tolerance is negative."""
        super().__init__(scenario, participant)
        if tolerance < 0:
            raise ValueError("The tolerance must be non negative.")
        self.x = x
        self.y = y
        self.tolerance = tolerance
    def _create_requests(self) -> List[AiRequest]:
        """A single position sensor request is sufficient."""
        from requests import PositionRequest
        return [PositionRequest(self._generate_rid())]
    def _eval_impl(self) -> KPValue:
        """TRUE iff the Euclidean distance to the target is within tolerance."""
        from numpy import array
        from numpy.linalg import norm
        position = self._poll_request_data()[0]
        if position:
            x, y = position
            return KPValue.TRUE if norm(array((x, y)) - array((self.x, self.y))) <= self.tolerance else KPValue.FALSE
        else:
            return KPValue.UNKNOWN
class SCArea(StateCondition):
    """Triggers when the participant's position lies inside a polygonal area."""
    from requests import AiRequest
    def __init__(self, scenario: Scenario, participant: str, points: List[Tuple[float, float]]):
        """Build the area polygon from the given (x, y) corner points."""
        from shapely.geometry import Polygon
        super().__init__(scenario, participant)
        self.polygon = Polygon(points)
    def _create_requests(self) -> List[AiRequest]:
        """A single position sensor request is sufficient."""
        from requests import PositionRequest
        return [PositionRequest(self._generate_rid())]
    def _eval_impl(self) -> KPValue:
        """TRUE iff the current position point is contained in the polygon."""
        from shapely.geometry import Point
        position = self._poll_request_data()[0]
        if position:
            x, y = position
            return KPValue.TRUE if self.polygon.contains(Point(x, y)) else KPValue.FALSE
        else:
            return KPValue.UNKNOWN
class SCLane(StateCondition):
    """Triggers based on the participant's bounding box relative to road lanes.

    With lane == "offroad" it is TRUE only when the bounding box touches no
    road; otherwise it checks intersection with roads in the scenario.
    """
    from requests import AiRequest
    def __init__(self, scenario: Scenario, participant: str, lane: str):
        super().__init__(scenario, participant)
        # TODO Check existence of lane id
        self.lane = lane
    def _create_requests(self) -> List[AiRequest]:
        """The check is based on the vehicle's 2D bounding box."""
        from requests import BoundingBoxRequest
        return [BoundingBoxRequest(self._generate_rid())]
    def _eval_impl(self) -> KPValue:
        from typing import Dict
        from shapely.geometry import Polygon
        bbox = self._poll_request_data()[0]
        def _to_polygon(road_edges: List[Dict[str, float]]) -> Polygon:
            # Walk the left edge forward and the right edge backward so the
            # combined ring encloses the road surface.
            points = [p["left"][0:2] for p in road_edges]
            right_edge_points = [p["right"][0:2] for p in road_edges]
            right_edge_points.reverse()
            points.extend(right_edge_points)
            return Polygon(shell=points)
        if bbox:
            if self.lane == "offroad":
                is_offroad = KPValue.TRUE
                for road in self.scenario.roads:
                    if road.rid:
                        edges = self.scenario.bng.get_road_edges(road.rid)
                        polygon = _to_polygon(edges)
                        if polygon.intersects(bbox):
                            is_offroad = KPValue.FALSE
                            break
                    else:
                        _logger.warning("SCLane can not consider roads without ID.")
                return is_offroad
            else:
                # NOTE(review): this returns on the first road in the list and
                # never uses self.lane to pick a specific road — confirm intent.
                for road in self.scenario.roads:
                    edges = self.scenario.bng.get_road_edges(road.rid)
                    polygon = _to_polygon(edges)
                    return KPValue.TRUE if polygon.intersects(bbox) else KPValue.FALSE
        else:
            return KPValue.UNKNOWN
class SCSpeed(StateCondition):
    """State condition that holds while the participant exceeds a speed limit."""
    from requests import AiRequest

    def __init__(self, scenario: Scenario, participant: str, speed_limit: float):
        super().__init__(scenario, participant)
        if speed_limit < 0:
            raise ValueError("Speed limits must be non negative.")
        self.speed_limit = speed_limit

    def _create_requests(self) -> List[AiRequest]:
        from requests import SpeedRequest
        return [SpeedRequest(self._generate_rid())]

    def _eval_impl(self) -> KPValue:
        speed = self._poll_request_data()[0]
        # A measured speed of exactly 0.0 is valid data; only a missing value
        # (None) means the condition cannot be determined.  The previous
        # truthiness check wrongly mapped 0.0 to UNKNOWN.
        if speed is not None:
            return KPValue.TRUE if speed > self.speed_limit else KPValue.FALSE
        else:
            return KPValue.UNKNOWN
class SCDamage(StateCondition):
    """State condition that holds once the participant's vehicle reports damage."""
    from requests import AiRequest

    def __init__(self, scenario: Scenario, participant: str):
        super().__init__(scenario, participant)

    def _create_requests(self) -> List[AiRequest]:
        from requests import DamageRequest
        return [DamageRequest(self._generate_rid())]

    def _eval_impl(self) -> KPValue:
        damage = self._poll_request_data()[0]
        # Distinguish "no data" (None -> UNKNOWN) from "no damage" (falsy ->
        # FALSE).  The previous version collapsed both into UNKNOWN and its
        # inner conditional could never yield FALSE (dead branch).
        if damage is None:
            return KPValue.UNKNOWN
        return KPValue.TRUE if damage else KPValue.FALSE
class SCDistance(StateCondition):
    """State condition comparing the distance to another participant with a maximum."""
    from requests import AiRequest

    def __init__(self, scenario: Scenario, participant: str, other_participant: str, max_distance: float):
        super().__init__(scenario, participant)
        if max_distance < 0:
            raise ValueError("The maximum allowed distance has to be non negative.")
        # TODO Check whether other_participant id exists
        self.other_participant = other_participant
        self.max_distance = max_distance

    def _create_requests(self) -> List[AiRequest]:
        from requests import PositionRequest
        return [PositionRequest(self._generate_rid())]

    def _eval_impl(self) -> KPValue:
        from numpy import array
        from numpy.linalg import norm
        position1 = self._poll_request_data()[0]
        # FIXME This circumvents the request mechanism...
        other_vehicle = self.scenario.get_vehicle(self.other_participant)
        position2 = other_vehicle["pos"] if other_vehicle else None
        if position1 and position2:
            # NOTE(review): position1 unpacks to (x, y) while position2 is a
            # 3-component position whose z is discarded -- confirm both are in
            # the same coordinate frame.
            x1, y1 = position1
            x2, y2, _ = position2
            # Planar Euclidean distance compared against the allowed maximum.
            return KPValue.FALSE if norm(array((x1, y1)) - array((x2, y2))) > self.max_distance else KPValue.TRUE
        else:
            return KPValue.UNKNOWN
class SCLight(StateCondition):
    """State condition on one of the participant's car lights (stub)."""
    from dbtypes.scheme import CarLight
    from requests import AiRequest

    def __init__(self, scenario: Scenario, participant: str, light: CarLight):
        super().__init__(scenario, participant)
        self.light = light

    def _create_requests(self) -> List[AiRequest]:
        from requests import LightRequest
        return [LightRequest(self._generate_rid())]

    def _eval_impl(self) -> KPValue:
        # FIXME Implement light criterion
        # Currently only dumps the polled data and reports UNKNOWN.
        print(self._poll_request_data()[0])
        return KPValue.UNKNOWN
class SCWaypoint(StateCondition):
    """State condition on reaching a named waypoint (stub, always UNKNOWN)."""
    from requests import AiRequest

    def __init__(self, scenario: Scenario, participant: str, waypoint: str):
        super().__init__(scenario, participant)
        # TODO Check whether waypoint id exists
        self.waypoint = waypoint

    def _create_requests(self) -> List[AiRequest]:
        # No data is needed yet since the criterion is not implemented.
        return []

    def _eval_impl(self) -> KPValue:
        # FIXME Implement waypoint criterion
        return KPValue.UNKNOWN
# Validation constraints
class ValidationConstraint(Criterion, ABC):
    """Wraps an inner evaluable that is only considered while a guard condition holds."""
    from abc import abstractmethod

    def __init__(self, scenario: Scenario, inner: Evaluable) -> None:
        super().__init__(scenario)
        self.inner = inner

    def eval(self) -> KPValue:
        # FIXME How to distinguish VCs that got ignored from ones that could not be determined?
        # Delegate to the wrapped evaluable only while the guard is TRUE.
        return self.inner.eval() if self.eval_cond() == KPValue.TRUE else KPValue.UNKNOWN

    @abstractmethod
    def eval_cond(self) -> KPValue:
        # Subclasses decide whether the inner evaluable applies right now.
        pass
class ValidationConstraintSC(ValidationConstraint, ABC):
    """Validation constraint whose guard condition is a StateCondition."""

    def __init__(self, scenario: Scenario, inner: Evaluable, sc: StateCondition):
        super().__init__(scenario, inner)
        self.sc = sc

    def eval_cond(self) -> KPValue:
        # The state condition's own evaluation acts as the guard.
        return self.sc.eval()
# Thin wrappers that bind ValidationConstraintSC to one concrete StateCondition
# type each; they only narrow the type accepted for ``sc``.
class VCPosition(ValidationConstraintSC):
    def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCPosition):
        super().__init__(scenario, inner, sc)


class VCArea(ValidationConstraintSC):
    def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCArea):
        super().__init__(scenario, inner, sc)


class VCLane(ValidationConstraintSC):
    def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCLane):
        super().__init__(scenario, inner, sc)


class VCSpeed(ValidationConstraintSC):
    def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCSpeed):
        super().__init__(scenario, inner, sc)


class VCDamage(ValidationConstraintSC):
    def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCDamage):
        super().__init__(scenario, inner, sc)
class VCTime(ValidationConstraint):
    """Validation constraint that applies its inner evaluable only within a tick window."""

    def __init__(self, scenario: Scenario, inner: Evaluable, from_tick: int, to_tick: int):
        # FIXME from_step/to_step inclusive/exclusive?
        super().__init__(scenario, inner)
        self.from_tick = from_tick
        self.to_tick = to_tick

    def eval_cond(self) -> KPValue:
        from dbtypes.beamngpy import DBBeamNGpy
        from warnings import warn
        bng = self.scenario.bng
        if bng and type(bng) is DBBeamNGpy:
            # FIXME from_step/to_step inclusive/exclusive?
            return KPValue.TRUE if self.from_tick <= bng.current_tick <= self.to_tick else KPValue.FALSE
        else:
            warn("The underlying BeamNGpy instance does not provide time information.")
            # Previously this branch fell through and implicitly returned None,
            # which is not a valid KPValue; report UNKNOWN explicitly instead.
            return KPValue.UNKNOWN
class VCDistance(ValidationConstraintSC):
    """Validation constraint guarded by a distance state condition."""

    def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCDistance):
        super().__init__(scenario, inner, sc)
class VCTTC(ValidationConstraint):
    """Validation constraint based on time-to-collision (not implemented yet)."""
    from beamngpy import Scenario

    def __init__(self, scenario: Scenario, inner: Evaluable):
        super().__init__(scenario, inner)

    def eval_cond(self) -> KPValue:
        # TODO Determine collision to which participant/obstacle
        # FIXME Position is in center of car vs crash when colliding with its bounding box
        return KPValue.UNKNOWN
class VCLight(ValidationConstraintSC):
    """Validation constraint guarded by a car-light state condition."""

    def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCLight):
        super().__init__(scenario, inner, sc)


class VCWaypoint(ValidationConstraintSC):
    """Validation constraint guarded by a waypoint state condition."""
    from beamngpy import Scenario

    def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCWaypoint):
        super().__init__(scenario, inner, sc)
# Connectives
class Connective(Evaluable, ABC):
    """Marker base class for logical combinations of evaluables."""
    pass


class BinaryConnective(Connective, ABC):
    """Connective over an arbitrary number of sub-evaluables."""

    def __init__(self, evaluables: List[Evaluable]) -> None:
        self.evaluables = evaluables
class And(BinaryConnective):
    def eval(self) -> KPValue:
        # NOTE(review): any child evaluating to UNKNOWN makes the conjunction
        # FALSE rather than UNKNOWN -- confirm this collapse is intended.
        return KPValue.TRUE if all(map(lambda e: e.eval() is KPValue.TRUE, self.evaluables)) else KPValue.FALSE


class Or(BinaryConnective):
    def eval(self) -> KPValue:
        # As with And, UNKNOWN children are treated like FALSE here.
        return KPValue.TRUE if any(map(lambda e: e.eval() is KPValue.TRUE, self.evaluables)) else KPValue.FALSE


class Not(Connective):
    def __init__(self, evaluable: Evaluable) -> None:
        self.evaluable = evaluable

    def eval(self) -> KPValue:
        # Relies on KPValue.__neg__ implementing the three-valued negation.
        return self.evaluable.eval().__neg__()
# Factory building the evaluable tree for a concrete scenario instance.
CriteriaFunction = Callable[[Scenario], Evaluable]


# Test case type
@dataclass
class TestCase:
    """A complete test case: scenario plus precondition/success/failure criteria."""
    from generator import ScenarioBuilder
    name: str
    scenario: ScenarioBuilder
    precondition_fct: CriteriaFunction
    success_fct: CriteriaFunction
    failure_fct: CriteriaFunction
    stepsPerSecond: int
    aiFrequency: int
    authors: List[str]
| 2.515625 | 3 |
app/main/models.py | HenriqueLR/hangman-game | 0 | 12757992 | <gh_stars>0
#coding: utf-8
from django.db import models, transaction
from django.db.models.signals import post_save
from django.db.utils import IntegrityError
from django.dispatch import receiver
from main.utils import remove_special_word, parse_csv_file
from main.validators import ContentTypeRestrictedFileField
from conf.settings import MAX_UPLOAD_SIZE, CONTENT_TYPES, EXT
class Words(models.Model):
    """A single guessable word, normalised before being persisted."""

    id_words = models.AutoField(db_column='id_words', primary_key=True, verbose_name=u'words')
    word = models.CharField(db_column='word', max_length=46, unique=True, verbose_name=u'word')
    updated_at = models.DateTimeField(db_column='updated_at', auto_now=True, verbose_name=u'Atualizado em')
    created_at = models.DateTimeField(db_column='created_at', auto_now_add=True, verbose_name=u'Criado em')

    def __unicode__(self):
        return u'%s' % self.word

    def save(self, *args, **kwargs):
        # Normalise the word (strip special characters) before saving.
        self.word = remove_special_word(self.word)
        super(Words, self).save(*args, **kwargs)

    class Meta:
        db_table = 'words'
        ordering = ['-id_words']
        verbose_name = 'Word'
        verbose_name_plural = 'Words'
class Files(models.Model):
    """An uploaded, size/type-restricted file whose rows populate Words."""

    id_file = models.AutoField(db_column='id_file', primary_key=True, verbose_name=u'file')
    file = ContentTypeRestrictedFileField(
        upload_to='uploads/',
        content_types=CONTENT_TYPES,
        max_upload_size=MAX_UPLOAD_SIZE,
        extensions=EXT
    )
    created_at = models.DateTimeField(db_column='created_at', auto_now_add=True, verbose_name=u'Criado em')

    def __unicode__(self):
        return u'%s' % self.file

    class Meta:
        db_table = 'files'
        ordering = ['-id_file']
        verbose_name = 'File'
        verbose_name_plural = 'Files'
@receiver(post_save, sender=Files)
def populate_words(sender, instance, *args, **kwargs):
    """Parse the freshly uploaded file and insert each word, skipping bad rows.

    Runs after every Files save; each insert is wrapped in its own atomic
    block so one failure does not poison the surrounding transaction.
    """
    for row in parse_csv_file(instance.file):
        try:
            with transaction.atomic():
                Words(word=row['word']).save()
        except IntegrityError:
            # Duplicate word (unique constraint) -- expected, skip silently.
            pass
        except Exception:
            # Best-effort import: a malformed row must not abort the upload.
            # NOTE(review): consider logging here so bad rows are not lost silently.
            pass
| 1.984375 | 2 |
appengine/monorail/tracker/template_helpers.py | xswz8015/infra | 0 | 12757993 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Helper functions for issue template servlets"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import logging
from framework import authdata
from framework import exceptions
from framework import framework_bizobj
from framework import framework_helpers
from tracker import field_helpers
from tracker import tracker_bizobj
from tracker import tracker_constants
from tracker import tracker_helpers
from proto import tracker_pb2
# Maximum number of gates (phases) the template UI supports.
MAX_NUM_PHASES = 6
# Form input names for each of the possible phases.
PHASE_INPUTS = [
    'phase_0', 'phase_1', 'phase_2', 'phase_3', 'phase_4', 'phase_5']
# Sentinel form value meaning an approval is not attached to any phase.
_NO_PHASE_VALUE = 'no_phase'
# Flat value object holding everything parsed from the template form; the
# field order must match the positional construction in ParseTemplateRequest.
ParsedTemplate = collections.namedtuple(
    'ParsedTemplate', 'name, members_only, summary, summary_must_be_edited, '
    'content, status, owner_str, labels, field_val_strs, component_paths, '
    'component_required, owner_defaults_to_member, admin_str, add_approvals, '
    'phase_names, approvals_to_phase_idx, required_approval_ids')
def ParseTemplateRequest(post_data, config):
    """Parse an issue template from POSTed form data.

    Args:
        post_data: MultiDict-like form data (supports get/getall) -- presumably
            werkzeug-style; verify against the servlet framework.
        config: project issue configuration providing field and approval defs.

    Returns:
        A ParsedTemplate namedtuple with all raw, unvalidated values.
    """
    name = post_data.get('name', '')
    # Checkbox inputs arrive as the string 'on' when checked.
    members_only = (post_data.get('members_only') == 'on')
    summary = post_data.get('summary', '')
    summary_must_be_edited = (
        post_data.get('summary_must_be_edited') == 'on')
    content = post_data.get('content', '')
    content = framework_helpers.WordWrapSuperLongLines(content, max_cols=75)
    status = post_data.get('status', '')
    owner_str = post_data.get('owner', '')
    labels = post_data.getall('label')
    field_val_strs = collections.defaultdict(list)
    # Custom field values are posted under 'custom_<field_id>' keys.
    for fd in config.field_defs:
        field_value_key = 'custom_%d' % fd.field_id
        if post_data.get(field_value_key):
            field_val_strs[fd.field_id].append(post_data[field_value_key])

    # Components come as a single comma-separated string; deduplicate while
    # preserving order.
    component_paths = []
    if post_data.get('components'):
        for component_path in post_data.get('components').split(','):
            if component_path.strip() not in component_paths:
                component_paths.append(component_path.strip())
    component_required = post_data.get('component_required') == 'on'
    owner_defaults_to_member = post_data.get('owner_defaults_to_member') == 'on'

    admin_str = post_data.get('admin_names', '')
    add_approvals = post_data.get('add_approvals') == 'on'
    phase_names = [post_data.get(phase_input, '') for phase_input in PHASE_INPUTS]
    required_approval_ids = []
    approvals_to_phase_idx = {}

    for approval_def in config.approval_defs:
        # Each approval selects either a phase input name or the no-phase sentinel.
        phase_num = post_data.get('approval_%d' % approval_def.approval_id, '')
        if phase_num == _NO_PHASE_VALUE:
            approvals_to_phase_idx[approval_def.approval_id] = None
        else:
            try:
                idx = PHASE_INPUTS.index(phase_num)
                approvals_to_phase_idx[approval_def.approval_id] = idx
            except ValueError:
                logging.info('approval %d was omitted' % approval_def.approval_id)
        required_name = 'approval_%d_required' % approval_def.approval_id
        if (post_data.get(required_name) == 'on'):
            required_approval_ids.append(approval_def.approval_id)

    return ParsedTemplate(
        name, members_only, summary, summary_must_be_edited, content, status,
        owner_str, labels, field_val_strs, component_paths, component_required,
        owner_defaults_to_member, admin_str, add_approvals, phase_names,
        approvals_to_phase_idx, required_approval_ids)
def GetTemplateInfoFromParsed(mr, services, parsed, config):
    """Get Template field info and PBs from a ParsedTemplate.

    Args:
        mr: MonorailRequest carrying the connection, project and error sink.
        services: backend service connections (user lookup, etc.).
        parsed: ParsedTemplate produced by ParseTemplateRequest.
        config: project issue configuration.

    Returns:
        Tuple (admin_ids, owner_id, component_ids, field_values, phases,
        approvals); owner_id is 0 when no valid owner was given.  Validation
        problems are reported via mr.errors.
    """
    admin_ids, _ = tracker_helpers.ParsePostDataUsers(
        mr.cnxn, parsed.admin_str, services.user)
    owner_id = 0
    if parsed.owner_str:
        try:
            user_id = services.user.LookupUserID(mr.cnxn, parsed.owner_str)
            auth = authdata.AuthData.FromUserID(mr.cnxn, user_id, services)
            # Only project members may be set as the default owner.
            if framework_bizobj.UserIsInProject(mr.project, auth.effective_ids):
                owner_id = user_id
            else:
                mr.errors.owner = 'User is not a member of this project.'
        except exceptions.NoSuchUserException:
            mr.errors.owner = 'Owner not found.'

    component_ids = tracker_helpers.LookupComponentIDs(
        parsed.component_paths, config, mr.errors)

    # TODO(jojwang): monorail:4678 Process phase field values.
    phase_field_val_strs = {}
    field_values = field_helpers.ParseFieldValues(
        mr.cnxn, services.user, parsed.field_val_strs,
        phase_field_val_strs, config)
    for fv in field_values:
        logging.info('field_value is %r: %r',
                     fv.field_id, tracker_bizobj.GetFieldValue(fv, {}))

    phases = []
    approvals = []
    if parsed.add_approvals:
        phases, approvals = _GetPhasesAndApprovalsFromParsed(
            mr, parsed.phase_names, parsed.approvals_to_phase_idx,
            parsed.required_approval_ids)
    return admin_ids, owner_id, component_ids, field_values, phases, approvals
def _GetPhasesAndApprovalsFromParsed(
    mr, phase_names, approvals_to_phase_idx, required_approval_ids):
    """Get Phase PBs from a parsed phase_names and approvals_by_phase_idx.

    Validation failures are reported through mr.errors.phase_approvals and
    return the (possibly empty) partial lists built so far.
    """
    phases = []
    approvals = []
    valid_phase_names = []

    for name in phase_names:
        if name:
            if not tracker_constants.PHASE_NAME_RE.match(name):
                mr.errors.phase_approvals = 'Invalid gate name(s).'
                return phases, approvals
            valid_phase_names.append(name)
    # Gate names must be unique case-insensitively.
    if len(valid_phase_names) != len(
        set(name.lower() for name in valid_phase_names)):
        mr.errors.phase_approvals = 'Duplicate gate names.'
        return phases, approvals
    # Every defined gate must have at least one approval assigned to it.
    valid_phase_idxs = [idx for idx, name in enumerate(phase_names) if name]
    if set(valid_phase_idxs) != set([
        idx for idx in approvals_to_phase_idx.values() if idx is not None]):
        mr.errors.phase_approvals = 'Defined gates must have assigned approvals.'
        return phases, approvals

    # Distributing the ranks over a wider range is not necessary since
    # any edits to template phases will cause a complete rewrite.
    # phase_id is temporarily the idx for keeping track of which approvals
    # belong to which phases.
    for idx, phase_name in enumerate(phase_names):
        if phase_name:
            phase = tracker_pb2.Phase(name=phase_name, rank=idx, phase_id=idx)
            phases.append(phase)

    for approval_id, phase_idx in approvals_to_phase_idx.items():
        av = tracker_pb2.ApprovalValue(
            approval_id=approval_id, phase_id=phase_idx)
        if approval_id in required_approval_ids:
            av.status = tracker_pb2.ApprovalStatus.NEEDS_REVIEW
        approvals.append(av)

    return phases, approvals
def FilterApprovalsAndPhases(approval_values, phases, config):
    """Return lists without deleted approvals and empty phases."""
    # Approval field defs that have been soft-deleted in the config.
    deleted_approval_ids = {
        fd.field_id
        for fd in config.field_defs
        if fd.is_deleted and fd.field_type is tracker_pb2.FieldTypes.APPROVAL_TYPE}

    remaining_avs = []
    for av in approval_values:
        if av.approval_id not in deleted_approval_ids:
            remaining_avs.append(av)

    # Keep only the phases still referenced by a surviving approval.
    used_phase_ids = {av.phase_id for av in remaining_avs}
    remaining_phases = [phase for phase in phases if phase.phase_id in used_phase_ids]
    return remaining_avs, remaining_phases
def GatherApprovalsPageData(approval_values, tmpl_phases, config):
    """Create the page data necessary for filling in the launch-gates-table.

    Returns:
        Tuple (prechecked_approvals, required_approval_ids, filtered_phases)
        where filtered_phases is padded with empty Phase PBs up to
        MAX_NUM_PHASES so the table always has the same number of rows.
    """
    filtered_avs, filtered_phases = FilterApprovalsAndPhases(
        approval_values, tmpl_phases, config)
    filtered_phases.sort(key=lambda phase: phase.rank)
    required_approval_ids = []
    prechecked_approvals = []
    phase_idx_by_id = {
        phase.phase_id: idx for idx, phase in enumerate(filtered_phases)}
    for av in filtered_avs:
        # approval is part of a phase and that phase can be found.
        if phase_idx_by_id.get(av.phase_id) is not None:
            idx = phase_idx_by_id.get(av.phase_id)
            prechecked_approvals.append(
                '%d_phase_%d' % (av.approval_id, idx))
        else:
            # Phase-less approvals are identified by their id alone.
            prechecked_approvals.append('%d' % av.approval_id)
        if av.status is tracker_pb2.ApprovalStatus.NEEDS_REVIEW:
            required_approval_ids.append(av.approval_id)

    num_phases = len(filtered_phases)
    filtered_phases.extend([tracker_pb2.Phase()] * (
        MAX_NUM_PHASES - num_phases))
    return prechecked_approvals, required_approval_ids, filtered_phases
def GetCheckedApprovalsFromParsed(approvals_to_phase_idx):
    """Build checkbox values for approvals, tagged with their phase index if any."""

    def _format(approval_id, phase_idx):
        # Approvals attached to a phase embed the phase index in the value.
        if phase_idx is None:
            return '%d' % approval_id
        return '%d_phase_%d' % (approval_id, phase_idx)

    return [
        _format(approval_id, phase_idx)
        for approval_id, phase_idx in approvals_to_phase_idx.items()]
def GetIssueFromTemplate(template, project_id, reporter_id):
    # type: (proto.tracker_pb2.TemplateDef, int, int) ->
    #     proto.tracker_pb2.Issue
    """Build a templated issue from TemplateDef.

    Args:
      template: Template that issue creation is based on.
      project_id: ID of the Project the template belongs to.
      reporter_id: Requesting user's ID.

    Returns:
      protorpc Issue filled with data from given `template`.
    """
    owner_id = None
    if template.owner_id:
        owner_id = template.owner_id
    elif template.owner_defaults_to_member:
        # No explicit owner: fall back to the reporting member.
        owner_id = reporter_id

    issue = tracker_pb2.Issue(
        project_id=project_id,
        summary=template.summary,
        status=template.status,
        owner_id=owner_id,
        labels=template.labels,
        component_ids=template.component_ids,
        reporter_id=reporter_id,
        field_values=template.field_values,
        phases=template.phases,
        approval_values=template.approval_values)
    return issue
| 1.726563 | 2 |
knowit/core.py | labrys/knowit | 0 | 12757994 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from logging import NullHandler, getLogger
from six import text_type
# Module-level logger; NullHandler keeps the library silent unless the
# application configures logging itself.
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class Reportable(object):
    """Base class for objects whose unrecognised values can be reported."""

    def __init__(self, name, description=None, reportable=True):
        """Constructor."""
        self.name = name
        self._description = description
        self.reportable = reportable

    @property
    def description(self):
        """Rule description, falling back to the name when none was given."""
        if self._description:
            return self._description
        return self.name

    def report(self, value, context):
        """Report unknown value."""
        if not self.reportable or not value:
            return
        text = text_type(value)
        if 'report' in context:
            # Remember only the first path where each unknown value was seen.
            seen = context['report'].setdefault(self.description, {})
            if text not in seen:
                seen[text] = context['path']
        logger.info('Invalid %s: %r', self.description, text)
| 2.6875 | 3 |
migration/models.py | dixonary/uwcs-zarya | 7 | 12757995 | from django.contrib.auth.models import User
from django.db import models
from datetime import datetime, timedelta
# Event target-audience choices (db value, human-readable label).
TARGETS = (
    ('ACA', 'Academic'),
    ('GAM', 'Gaming'),
    ('SCL', 'Social'),
    ('SCT', 'Society'),
)

# Kinds of communication published on the site.
COMMS_TYPE = (
    ('NL', 'Newsletter'),
    ('M', 'Minute'),
    ('N', 'News Item'),
)

# Lifecycle states of a shell/database account.
STATUS = (
    ('RE', 'Requested'),
    ('PR', 'Present'),
    ('DD', 'Disabled'),
)
# The following models are copied from the previous compsoc website (Django Reinhardt)
class Communication(models.Model):
    """A published communication: newsletter, minutes or news item."""
    title = models.CharField(max_length=100)
    date = models.DateField()
    text = models.TextField()
    type = models.CharField(max_length=2, choices=COMMS_TYPE)

    class Meta:
        db_table = 'comms_communication'


class OldEventType(models.Model):
    """Legacy event category with its target audience."""
    name = models.CharField(max_length=20)
    info = models.TextField()
    target = models.CharField(max_length=3, choices=TARGETS)

    class Meta:
        db_table = 'events_eventtype'


class OldLocation(models.Model):
    """Legacy event venue."""
    name = models.CharField(max_length=60)
    description = models.TextField()
    image_url = models.CharField(max_length=255, default="/static/img/no_location.png")
    map_loc = models.CharField(max_length=30, blank=True)

    class Meta:
        db_table = 'events_location'
class OldEvent(models.Model):
    """
    Represents a single event
    """
    # I'm never using camel case for model fields again :p
    type = models.ForeignKey(OldEventType)
    shortDescription = models.CharField(max_length=255, verbose_name="Short Description",
                                        help_text="This text is displayed on the events index.")
    location = models.ForeignKey(OldLocation)
    longDescription = models.TextField(verbose_name="Long Description",
                                       help_text="This text is displayed on the details page for this event.")
    start = models.DateTimeField(default=datetime.now)
    # NOTE(review): a lambda default works at runtime but cannot be serialised
    # by Django's migration framework -- confirm this legacy model is only
    # used for data migration, not schema generation.
    finish = models.DateTimeField(default=lambda: datetime.now() + timedelta(hours=1))
    displayFrom = models.DateTimeField(default=datetime.now, verbose_name="Display From",
                                       help_text="This controls when the event will be visible in the index and feeds.")
    cancelled = models.BooleanField()

    class Meta:
        db_table = 'events_event'
class OldEventSignup(models.Model):
    """
    This represents the signup options for a particular event,
    e.g Signup limits and time constraints

    This might be renamed to EventSignupOptions
    """
    event = models.OneToOneField(OldEvent)
    signupsLimit = models.IntegerField(verbose_name="Signups Limit", help_text="0 here implies unlimited signups.")
    open = models.DateTimeField()
    close = models.DateTimeField()
    # Separate opening times allow freshers/guests to sign up earlier or later
    # than regular members.
    fresher_open = models.DateTimeField(
        help_text="This allows you to control whether freshers can sign up earlier or later than regular members.")
    guest_open = models.DateTimeField(
        help_text="This allows you to control whether guests can sign up earlier or later than regular members.")
    # this might be renamed to seating_plan for clarity

    class Meta:
        db_table = 'events_eventsignup'
class Signup(models.Model):
    """A member's signup to a legacy event."""
    event = models.ForeignKey(OldEvent)
    time = models.DateTimeField()
    user = models.ForeignKey(User)
    comment = models.TextField(blank=True)

    class Meta:
        db_table = 'events_signup'


class Member(models.Model):
    """
    Used to store auxiliary data to the default profile data for
    a django User.
    """
    user = models.OneToOneField(User)
    showDetails = models.BooleanField()
    guest = models.BooleanField()
# Optional info about one's website
class WebsiteDetails(models.Model):
    """A member's optional personal website link."""
    user = models.OneToOneField(User)
    websiteUrl = models.CharField(max_length=50)
    websiteTitle = models.CharField(max_length=50)

    class Meta:
        db_table = 'memberinfo_websitedetails'


class NicknameDetails(models.Model):
    """A member's optional display nickname."""
    user = models.OneToOneField(User)
    nickname = models.CharField(max_length=20)

    class Meta:
        db_table = 'memberinfo_nicknamedetails'
class OldShellAccount(models.Model):
    """A member's shell account and its lifecycle status."""
    user = models.OneToOneField(User)
    name = models.CharField(max_length=30)
    status = models.CharField(max_length=2, choices=STATUS)

    class Meta:
        db_table = 'memberinfo_shellaccount'


class OldDatabaseAccount(models.Model):
    """A member's database account and its lifecycle status."""
    user = models.OneToOneField(User)
    name = models.CharField(max_length=30)
    status = models.CharField(max_length=2, choices=STATUS)

    class Meta:
        db_table = 'memberinfo_databaseaccount'
class OldExecPosition(models.Model):
    """
    Represents an exec position
    """
    title = models.CharField(max_length=30)

    class Meta:
        db_table = 'memberinfo_execposition'


class OldExecPlacement(models.Model):
    """
    Represents a time period of working on the exec
    """
    position = models.ForeignKey(OldExecPosition)
    user = models.ForeignKey(User)
    start = models.DateField()
    end = models.DateField()

    class Meta:
        db_table = 'memberinfo_execplacement'
| 2.0625 | 2 |
# Read an integer and print its multiplication table (0 through 10).
# Fixed typo in the prompt: "númeoro" -> "número".
n = int(input('digite um número para saber a sua tabuada:'))
for c in range(0, 11):
    print('{} X {} = {}'.format(c, n, c * n))
test/test_extractor_auto.py | heartcored98/Trasnformer_Anatomy | 16 | 12757997 | <reponame>heartcored98/Trasnformer_Anatomy
import sys
sys.path.insert(0, '../')
import torch
import transformer_anatomy
from transformers import ElectraModel, ElectraTokenizer, BertModel, BertTokenizer, AutoConfig, AutoModel
from transformer_anatomy.extractor import AutoExtractor, ElectraExtractor
if __name__ == '__main__':
    # Smoke test: wrap an ELECTRA model with AutoExtractor and exercise the
    # different pooling modes (none / layer / single head / multiple heads),
    # then repeat the wrapping check with a BERT model.
    model_name = 'google/electra-small-discriminator'

    print("==== Testing AutoConfig")
    config = AutoConfig.from_pretrained(model_name)

    print(f"==== Testing model={model_name} ====")
    model = ElectraModel.from_pretrained(model_name, output_hidden_states=True, output_attentions=True)
    tokenizer = ElectraTokenizer.from_pretrained(model_name)
    model = AutoExtractor.from_model(model)
    print(type(model))

    print(f"==== Without Any Pooling ====")
    input_ids = torch.tensor([tokenizer.encode("Let's see all hidden-states and attentions on this text")])
    all_hidden_states, all_head_states = model(input_ids)
    # 13 hidden states and per-layer dimension 256 are pinned by the asserts.
    assert len(all_hidden_states) == 13, "number of layer does not match"
    assert all_hidden_states[0].shape == torch.Size([1, 16, 256]), "0-th layer output does not match shape"
    print(len(all_hidden_states), all_hidden_states[0].shape)
    print(len(all_head_states), all_head_states[0].shape)

    print(f"==== Pooling from Single Layer ====")
    model.set_location('layer')
    model.set_pooling_position([3])
    layer_embedding = model(input_ids)
    print(layer_embedding.shape)

    print(f"==== Pooling from Single Head ====")
    model.set_location('head')
    model.set_pooling_position([(3, 2)])
    head_embedding = model(input_ids)
    print(head_embedding.shape)

    print(f"==== Pooling from Multi Head ====")
    model.set_location('head')
    model.set_pooling_position([(3, 2), (4,2), (11,2)])
    multi_head_embedding = model(input_ids)
    print(multi_head_embedding.shape)

    model_name = 'bert-base-uncased'
    print(f"==== Testing model={model_name} ====")
    model = BertModel.from_pretrained(model_name, output_hidden_states=True, output_attentions=True)
    tokenizer = BertTokenizer.from_pretrained(model_name)
    model = AutoExtractor.from_model(model)
    print(type(model))
    # assert model==ElectraExtractor
    input_ids = torch.tensor([tokenizer.encode("Let's see all hidden-states and attentions on this text")])
    all_hidden_states, all_head_states = model(input_ids)
    print(len(all_hidden_states), all_hidden_states[0].shape)
    print(len(all_head_states), all_head_states[0].shape)
mozmill-env/python/Lib/site-packages/mozmill_automation/files.py | lucashmorais/x-Bench | 0 | 12757998 | <reponame>lucashmorais/x-Bench<gh_stars>0
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import errors
class JSONFile:
    """Class to handle reading and writing of JSON files."""

    def __init__(self, filename):
        # Store an absolute path so later cwd changes do not affect us.
        self.filename = os.path.abspath(filename)

    def read(self):
        """Return the parsed JSON content of the file.

        Raises:
            errors.NotFoundException: if the file does not exist.
        """
        if not os.path.isfile(self.filename):
            raise errors.NotFoundException('Specified file cannot be found.',
                                           self.filename)

        # A context manager guarantees the handle is closed even on parse
        # errors; the previous try/finally referenced an unbound local ``f``
        # if open() itself raised.
        with open(self.filename, 'r') as f:
            return json.loads(f.read())

    def write(self, data):
        """Serialize ``data`` as JSON to the file, creating parent folders."""
        folder = os.path.dirname(self.filename)
        if not os.path.exists(folder):
            os.makedirs(folder)

        # Same unbound-``f`` hazard fixed here: makedirs failures previously
        # crashed in the finally block instead of propagating cleanly.
        with open(self.filename, 'w') as f:
            f.write(json.dumps(data))
def get_unique_filename(filename, start_index):
    """Return *filename* with ``_<start_index>`` inserted before its extension."""
    root, extension = os.path.splitext(filename)
    return '%s_%i%s' % (root, start_index, extension)
| 2.1875 | 2 |
fixture/soap.py | softester-git/Mantis_training | 0 | 12757999 | from suds.client import Client
from suds import WebFault
class SoapHelper:
    """Test fixture talking to the Mantis SOAP API (mantisconnect WSDL)."""

    def __init__(self, app):
        self.app = app

    def can_login(self, username, password):
        """Return True when the given credentials are accepted by Mantis."""
        client = Client("http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl")
        try:
            client.service.mc_login(username, password)
            return True
        except WebFault:
            return False

    def get_projects(self, username, password, pname):
        """Assert that a project named *pname* exists (id > 0)."""
        client = Client("http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl")
        try:
            p = client.service.mc_project_get_id_from_name(username=username, password=password, project_name=str(pname))
            assert p>0
        except:
            # Any SOAP failure counts as "project missing" and fails the test.
            assert False

    def get_projects_del(self, username, password, pname):
        """Assert that the project *pname* is gone after deletion."""
        client = Client("http://localhost/mantisbt-2.25.2/api/soap/mantisconnect.php?wsdl")
        try:
            p = client.service.mc_project_get_id_from_name(username=username, password=password, project_name=str(pname))
            # NOTE(review): the service is queried twice and the second result
            # is compared with ``is False`` -- an int id is never ``is False``,
            # so this assert may not test what was intended; confirm.
            assert client.service.mc_project_get_id_from_name(username=username, password=password, project_name=str(pname)) is False
        except:
            assert False
06_iris_numpy_stats.py | tommirrington/52167-Programming-and-Scripting-Final-Project | 0 | 12758000 | <gh_stars>0
import numpy as np
import statistics as stats
#use numpy library to import csv file as an ndarray and split out each column
data = np.genfromtxt('data/iris.csv', delimiter=',')

# (title, column-vector) pairs in the order they are reported.
columns = [
    ('Sepal Length', data[:, 0]),
    ('Sepal Width', data[:, 1]),
    ('Petal Length', data[:, 2]),
    ('Petal Width', data[:, 3]),
]


def print_summary(title, values, leading, trailing):
    """Print descriptive statistics for one measurement column.

    ``leading``/``trailing`` reproduce the original blank-line layout around
    the first and last sections.
    """
    print('{}{}'.format(leading, title))
    print('Min: {0:.1f}'.format(np.min(values)))
    print('Max: {0:.1f}'.format(np.max(values)))
    print('Mean: {0:.2f}'.format(np.mean(values)))
    print('Mode: {0:.2f}'.format(stats.mode(values)))
    print('Median: {0:.2f}'.format(stats.median(values)))
    print('Std Dev: {0:.2f}'.format(stats.pstdev(values)))
    print('Variance: {0:.2f}{1}'.format(stats.pvariance(values), trailing))


# One loop replaces four duplicated print sequences; output is unchanged.
for index, (title, values) in enumerate(columns):
    leading = '\n\n' if index == 0 else '\n'
    trailing = '\n\n' if index == len(columns) - 1 else '\n'
    print_summary(title, values, leading, trailing)

#References
#https://docs.scipy.org/doc/numpy-1.12.0/reference/generated/numpy.genfromtxt.html#numpy.genfromtxt
#https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.math.html
#https://docs.python.org/3/library/statistics.html#module-statistics
gwr/__init__.py | yurytsoy/gwr | 0 | 12758001 | from .gwr import GWR
| 1.078125 | 1 |
tests/test_visualization_metrics.py | aniketmaurya/Chitra | 158 | 12758002 | from unittest.mock import MagicMock, Mock, patch
import numpy as np
import pytest
from chitra.visualization.metrics import (
cm_accuracy,
detect_multilabel,
plot_confusion_matrix,
)
def test_detect_multilabel():
    # NOTE(review): dict input apparently *raises* UserWarning as an exception
    # (pytest.raises, not pytest.warns) -- unusual API, confirm intent.
    with pytest.raises(UserWarning):
        detect_multilabel({"label1": "this will raise UserWarning"})

    # More than two distinct classes -> multilabel; binary 0/1 -> not.
    assert detect_multilabel([1, 2, 3, 4])
    assert not detect_multilabel([0, 1, 1, 0])
def test_cm_accuracy():
    # Confusion matrix with diagonal 1 + 2 = 3 correct out of 6 -> 0.5.
    x = np.asarray([[1, 2], [1, 2]])
    assert cm_accuracy(x) == 0.5
@patch("chitra.visualization.metrics.plt")
def test_plot_confusion_matrix(mock_plt: Mock):
    # Patch matplotlib so no window opens; the function must return None and
    # call plt.show exactly once.
    mock_plt.show = MagicMock()
    y_pred = [1, 1, 0, 1]
    y_true = [0, 1, 0, 1]

    assert plot_confusion_matrix(y_pred, y_true) is None
    mock_plt.show.assert_called_once()
| 2.578125 | 3 |
test/test_identifiers.py | periodo/periodo-server | 9 | 12758003 | <reponame>periodo/periodo-server
import re
import pytest
from jsonpatch import JsonPatch
from periodo import identifier
def substitute(s):
    """Return *s* with its third character replaced by the next XDIGIT (cyclic)."""
    digits = identifier.XDIGITS
    chars = list(s)
    next_pos = (digits.index(chars[2]) + 1) % len(digits)
    chars[2] = digits[next_pos]
    return "".join(chars)
def transpose(s):
    """Return *s* with each pair of distinct adjacent characters swapped.

    Scans positions from len(s) - 3 down to 0, swapping a character with its
    right neighbour whenever they differ; equal neighbours are left alone.
    """
    chars = list(s)
    for pos in range(len(s) - 3, -1, -1):
        if chars[pos] != chars[pos + 1]:
            chars[pos], chars[pos + 1] = chars[pos + 1], chars[pos]
    return "".join(chars)
def check_authority_id(authority_id, id_map):
    """Validate checksum, shape (p0 + 5 xdigits) and registration of an authority id."""
    identifier.check(authority_id)
    assert re.match(r"^p0[%s]{5}$" % identifier.XDIGITS, authority_id)
    assert authority_id in id_map.values()
def check_period_id(period_id, authority_id, id_map):
    """Validate checksum, prefix (authority id + 4 xdigits) and registration of a period id."""
    identifier.check(period_id)
    assert re.match(r"^%s[%s]{4}$" % (authority_id, identifier.XDIGITS), period_id)
    assert period_id in id_map.values()
def test_assert_valid_loose():
# old style checksum
identifier.assert_valid("3wskd4mmt", strict=False)
# new style checksum
identifier.assert_valid("jrrjb8spw", strict=False)
def test_assert_valid_strict():
with pytest.raises(identifier.IdentifierException):
# old style checksum
identifier.assert_valid("3wskd4mmt")
# new style checksum
identifier.assert_valid("jrrjb8spw")
@pytest.mark.parametrize("alter", [substitute, transpose])
def test_check_altered_identifier(alter):
aid = identifier.for_authority()
identifier.check(aid)
altered_aid = alter(aid)
with pytest.raises(identifier.IdentifierException):
identifier.check(altered_aid)
pid = identifier.for_period(aid)
identifier.check(pid)
altered_pid = alter(pid)
with pytest.raises(identifier.IdentifierException):
identifier.check(altered_pid)
def test_id_has_wrong_shape():
with pytest.raises(identifier.IdentifierException):
identifier.check("p06rw8") # authority id too short
with pytest.raises(identifier.IdentifierException):
identifier.check("p06rw87/669p") # period id has slash
def test_generate_period_id():
aid = identifier.for_authority()
pid = identifier.for_period(aid)
assert pid.startswith(aid)
assert len(pid) == 11
def test_replace_skolem_ids_when_adding_items(load_json):
data = load_json("test-data.json")
original_patch = JsonPatch(load_json("test-patch-adds-items.json"))
applied_patch, id_map = identifier.replace_skolem_ids(
original_patch, data, set(), {}
)
xd = identifier.XDIGITS
# check addition of new period
assert re.match(
r"^/authorities/p0trgkv/periods/p0trgkv[%s]{4}$" % xd,
applied_patch.patch[0]["path"],
)
check_period_id(applied_patch.patch[0]["value"]["id"], "p0trgkv", id_map)
# check addition of new authority
assert re.match(r"^/authorities/p0[%s]{5}$" % xd, applied_patch.patch[1]["path"])
authority_id = applied_patch.patch[1]["value"]["id"]
check_authority_id(authority_id, id_map)
# check each period in new authority
periods = applied_patch.patch[1]["value"]["periods"]
for period_id in periods.keys():
check_period_id(period_id, authority_id, id_map)
assert period_id == periods[period_id]["id"]
# check that skolem IDs in prop values get replaced
prop = "broader" if "broader" in periods[period_id] else "narrower"
check_period_id(periods[period_id][prop], authority_id, id_map)
# check that skolem IDs in prop value arrays get replaced
for period_id in periods[period_id].get("derivedFrom", []):
check_period_id(period_id, "p0trgkv", id_map)
def test_replace_skolem_ids_when_replacing_periods(load_json):
data = load_json("test-data.json")
original_patch = JsonPatch(load_json("test-patch-replaces-periods.json"))
applied_patch, id_map = identifier.replace_skolem_ids(
original_patch, data, set(), {}
)
assert applied_patch.patch[0]["path"] == original_patch.patch[0]["path"]
period_id, period = list(applied_patch.patch[0]["value"].items())[0]
assert period_id == period["id"]
check_period_id(period_id, "p0trgkv", id_map)
def test_replace_skolem_ids_when_replacing_authorities(load_json):
data = load_json("test-data.json")
original_patch = JsonPatch(load_json("test-patch-replaces-authorities.json"))
applied_patch, id_map = identifier.replace_skolem_ids(
original_patch, data, set(), {}
)
assert applied_patch.patch[0]["path"] == original_patch.patch[0]["path"]
authority_id, authority = list(applied_patch.patch[0]["value"].items())[0]
assert authority_id == authority["id"]
check_authority_id(authority_id, id_map)
period_id, period = list(
applied_patch.patch[0]["value"][authority_id]["periods"].items()
)[0]
assert period_id == period["id"]
check_period_id(period_id, authority_id, id_map)
| 2.515625 | 3 |
pyacyclicnet/core/types/responsehashtable.py | GabeCordo/acyclic-network-venezia | 0 | 12758004 | <reponame>GabeCordo/acyclic-network-venezia<filename>pyacyclicnet/core/types/responsehashtable.py
from pyacyclicnet.core.types.enums import RequestTableLifetime
from pyacyclicnet.core.bitstream.parser import ProtocolParser
from datetime import date, datetime
from pyacyclicnet.core.types.result import Result
RESPONSE_TIME_THRESHOLD = 600
TIME_DIFFERENCE = lambda last_time: (datetime.now() - last_time).total_seconds() >= RESPONSE_TIME_THRESHOLD
class ResponseHashTable:
def __init__(self) -> None:
"""
:initializes a new hash-table to store request structs
"""
self.__table = {}
def __validate_nonce(self, hash: str) -> bool:
"""
checks to see if a nonce hash already exists in the table
@returns true if the nonce does not exist, allowing the program to proceed
@exception returns false if the nonce exists in the table already
"""
if hash in self.__table:
return False
else:
return True
def insert_request(self, packet: ProtocolParser, lifetime: RequestTableLifetime) -> bool:
"""
"""
if packet is None:
return False
if self.__validate_nonce(packet.nonce):
self.__table[packet.nonce] = [
packet.next_ip, # destination
packet.request, # the request code given to the node
None, # this will be the response of the request
datetime.now(), # the timestamp when the request was put into the table
lifetime # the lifetime that this entry will sit in the table
]
else:
return False
return True
def get_return_value(self, nonce: str) -> Result(str, Exception):
"""
checks to see if a nonce hash exists in the table and if there was ever a response
@returns the str representation of the response to send to the origin
@exception returns an empty string if there is no nonce or value
"""
try:
return Result(self.__table[nonce][2], None) # index 3 is the response value
except Exception as e:
return Result(str, e)
def get_request_destination(self, nonce: str) -> Result(str, Exception):
"""
grab the origin and destination values of the nonce request
@returns a tuple of the origin and destination str identifiers respectively
@exception returns None if the nonce doesn't exist in the table
"""
try:
return Result(self.__table[nonce][0], None) # index 0 is the request destination
except Exception as e:
return Result(None, e)
def get_request_code(self, nonce: str) -> Result(bool, Exception):
"""
grab the request code associated with the nonce request
@returns returns an integer representing the request code of the nonce
@exception returns an arbitrate integer 999 if no nonce request exists
"""
try:
return Result(self.__table[nonce][1], None) # index 2 is the request code
except Exception as e:
return Result(None, e)
def get_timestamp(self, nonce: str) -> Result(datetime, Exception):
"""
grab the timestamp associated with when the request was added to the
node's hash table.
@returns returns the datetime value if the nonce exists in the table
@exception returns None if the nonce request doesn't exists
"""
try:
return Result(self.__table[nonce][3],
None) # index 4 is the timestamp for when the request was added to the table
except Exception as e:
return Result(None, e)
def delete_nonce(self, nonce: str) -> bool:
"""
:grab the origin and destination values of the nonce request
@returns returns True if the nonce existed and met the criteria to be deleted
@exception returns False if the nonce doesn't exist or doesn't meet the citeria to be deleted
"""
if not self.__validate_nonce(nonce):
# if the nonce doesn't exist we don't want the program crashing
try:
code = self.__table[nonce][4]
flag = False # flag representing if the nonce was deleted
if code == RequestTableLifetime.RESPONDED:
if self.get_return_value(nonce) != "":
flag = True
elif code == RequestTableLifetime.TIME:
if TIME_DIFFERENCE(self.__table[nonce][3]):
flag = True
elif code == RequestTableLifetime.RESPONDED_TIME:
if TIME_DIFFERENCE(self.__table[nonce][3]) and self.get_return_value(nonce) != "":
flag = True
if flag:
self.__table.pop(nonce)
return flag
except Exception:
return False
return False
| 2.71875 | 3 |
23/aoc23-2-cython.py | combs/AdventOfCode2021 | 0 | 12758005 | <filename>23/aoc23-2-cython.py
import pyximport
pyximport.install()
from aoc23 import do_it
with open("data2.txt", "r") as fh:
board = fh.readlines()
board = [i.rstrip() for i in board]
do_it(board)
| 1.914063 | 2 |
bin/tools/nanosim/get_besthit.py | HKU-BAL/MegaPath-Nano | 13 | 12758006 | <gh_stars>10-100
#!/usr/bin/env python
from __future__ import with_statement
def besthit_and_unaligned(infile, outmaf, outfile):
align_dict = {}
out1 = open(outfile + "_besthit.maf", 'w')
unaligned_dict = {}
with open(outmaf, 'r') as f:
for line in f:
query = next(f)
query_info = query.strip().split()
if query_info[1] not in align_dict:
align_dict[query_info[1]] = [int(query_info[3]), query, False]
else:
if align_dict[query_info[1]][0] < int(query_info[3]):
align_dict[query_info[1]] = [int(query_info[3]), query, False]
with open(outmaf, 'r') as f1:
for line in f1:
ref = line
query = next(f1)
query_info = query.split()
name = query_info[1]
length = int(query_info[3])
if align_dict[name][0] == length and not align_dict[name][2]:
out1.write(ref + query)
align_dict[name][2] = True
with open(infile, 'r') as f2:
for line in f2:
if line[0] == ">":
name = line.strip().split()[0][1:]
flag = False
if name not in align_dict:
last_name = name
flag = True
else:
if flag:
unaligned_dict[last_name] = len(line.strip())
out1.close()
return unaligned_dict.values()
| 2.5625 | 3 |
googleTrends.py | choandrew/magic | 2 | 12758007 | <reponame>choandrew/magic<filename>googleTrends.py
from pytrends.pyGTrends import pyGTrends
import time
from random import randint
import glob, os
def main():
googleUsername = "<EMAIL>"
googlePassword = "<PASSWORD>!"
#connect to Google
connector = pyGTrends(googleUsername, googlePassword)
with open("listFinal.txt", "r") as f:
#creates array each containing a card name as a string
cards = [line.rstrip('\n') for line in f]
for card in cards:
connector.request_report(card,geo="US", date="today 90-d")
connector.save_csv("./output/", card)
#so that google trends doesn't get suspicious of scripting...
time.sleep(randint(4,8))
path = "./output"
os.chdir(path)
#gets rid of all the junk in the csv file
for file in glob.glob("*.csv"):
f = open(file,"r")
lines = f.readlines()
f.close()
del(lines[0:5])
lines = lines[0:90]
f = open(file,"w")
for line in lines:
f.write(line)
if __name__ == "__main__":
main()
| 2.78125 | 3 |
src/tasks.py | darrida/NOAA-temp-s3-upload | 0 | 12758008 | <gh_stars>0
import glob
from collections import defaultdict
from pathlib import Path
from prefect import task, get_run_logger
from src.support import initialize_s3_client, aws_load_files_year, local_clean_confirm_files, s3_clean_confirmation_files
@task(retries=5, retry_delay_seconds=5)
def load_year_files(data: dict, region_name: str, bucket_name: str):
"""Loads Archive File to S3
Args:
data: <work this out>
region_name (str): target s3 region
bucket_name (str): target s3 bucket
"""
logger = get_run_logger()
s3_client = initialize_s3_client(region_name)
# If not exists - creates year folder in aws
s3_client.put_object(Bucket=bucket_name, Body="", Key=f"data/")
success, failed = aws_load_files_year(
s3_client=s3_client,
bucket=bucket_name,
filepaths_l=data,
)
year = str(Path(data[0]).name)[:4]
logger.info(f"{year} | success: {success}, failed: {failed}")
@task()
def flag_updates(bucket: str, local_dir: str, region_name: str, all: bool) -> dict:
"""Takes individual year and finds file difference between AWS and Local
Args:
bucket (str): target AWS bucket
local_dir (str): local directory with year folders
region_name (str): AWS region used for s3_client connection
all (bool): <I think this isn't used now>
Return
set: Diference between AWS and Local
"""
logger = get_run_logger()
if not all:
years.sort()
years = years[-1]
logger.info(f"ONLY Check for updates to {years} related data")
update_l = []
s3_client = initialize_s3_client(region_name)
# If not exists - creates year folder in aws
s3_client.put_object(Bucket=bucket, Body="", Key=f"data/")
# File difference between local and aws for indidivual folder/year
aws_file_set = set()
paginator = s3_client.get_paginator("list_objects_v2")
pages = paginator.paginate(Bucket=bucket, Prefix='data')
for page in pages:
list_all_keys = page["Contents"]
# item arrives in format of 'year/filename'; this removes 'year/'
file_l = [x["Key"].split("/")[1] for x in list_all_keys]
for f in file_l:
aws_file_set.add(f)
# prep AWS "___complete" files for compare
aws_version_set = set([x for x in aws_file_set if "___complete" in x])
# find local version file
local_files = glob.glob(f"{local_dir}/**/*___complete", recursive=True)
local_files = sorted(local_files)
local_file_set = set([Path(x).name for x in local_files])
update_l = local_file_set.difference(aws_version_set)
logger.info(f"Update/Changes to Upload: {len(update_l)}")
logger.info(update_l)
upload_l = []
logger.info('local_dir: ' + local_dir)
# parent_dir = Path(local_files[0]).parent.parent
for u in update_l:
upload_l.append(Path(local_dir) / u[:4] / u)
return upload_l
@task(retries=3, retry_delay_seconds=5)
def cleanup_confirm_files(bucket_name, region_name, local_dir):
"""Removes All But Most Recent Confirm File from S3
Args:
bucket_name (str): target S3 bucket
region_name (str): target S3 region (used to initialize client)
local_dir (str): local directory where year archives are stored
"""
# TODO: To cover all angles, this could query the S3 bucket and see if multiple ___complete files for each
# year exists there as well. In the current state, this will only clean up files based on what is
# stored locally.
logger = get_run_logger()
s3_client = initialize_s3_client(region_name)
local_count = local_clean_confirm_files(local_dir)
s3_count = s3_clean_confirmation_files(s3_client, bucket_name)
logger.info(f"Cleaned up {local_count} old LOCAL '___complete' files.")
logger.info(f"Cleaned up {s3_count} old AWS S3 '___complete' files.") | 2.265625 | 2 |
migrations/versions/63a89136a199_add_map_to_system_ids.py | RedHatInsights/system_baseline-backend | 1 | 12758009 | <filename>migrations/versions/63a89136a199_add_map_to_system_ids.py
"""add map to system ids
Revision ID: 63a89136a199
Revises: 16a84bebd064
Create Date: 2021-03-24 22:32:00.267482
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "16a84bebd064"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"system_baseline_mapped_systems",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("account", sa.String(length=10), nullable=False),
sa.Column("system_baseline_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("system_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.ForeignKeyConstraint(
["system_baseline_id"],
["system_baselines.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint(
"system_baseline_id", "system_id", name="_system_baseline_mapped_system_uc"
),
)
op.create_index(
op.f("ix_system_baseline_mapped_systems_system_id"),
"system_baseline_mapped_systems",
["system_id"],
unique=False,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(
op.f("ix_system_baseline_mapped_systems_system_id"),
table_name="system_baseline_mapped_systems",
)
op.drop_table("system_baseline_mapped_systems")
# ### end Alembic commands ###
| 1.757813 | 2 |
test/test_plugin_nextcloud.py | caronc/pnotify | 0 | 12758010 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import six
import mock
import requests
from apprise import plugins
from helpers import AppriseURLTester
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)
apprise_url_tests = (
##################################
# NotifyNextcloud
##################################
('ncloud://:@/', {
'instance': None,
}),
('ncloud://', {
'instance': None,
}),
('nclouds://', {
# No hostname
'instance': None,
}),
('ncloud://localhost', {
# No user specified
'instance': TypeError,
}),
('ncloud://user@localhost?to=user1,user2&version=invalid', {
# An invalid version was specified
'instance': TypeError,
}),
('ncloud://user@localhost?to=user1,user2&version=0', {
# An invalid version was specified
'instance': TypeError,
}),
('ncloud://user@localhost?to=user1,user2&version=-23', {
# An invalid version was specified
'instance': TypeError,
}),
('ncloud://localhost/admin', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user@localhost/admin', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user@localhost?to=user1,user2', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user@localhost?to=user1,user2&version=20', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user@localhost?to=user1,user2&version=21', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user:pass@localhost/user1/user2', {
'instance': plugins.NotifyNextcloud,
# Our expected url(privacy=True) startswith() response:
'privacy_url': 'ncloud://user:****@localhost/user1/user2',
}),
('ncloud://user:pass@localhost:8080/admin', {
'instance': plugins.NotifyNextcloud,
}),
('nclouds://user:pass@localhost/admin', {
'instance': plugins.NotifyNextcloud,
# Our expected url(privacy=True) startswith() response:
'privacy_url': 'nclouds://user:****@localhost/admin',
}),
('nclouds://user:pass@localhost:8080/admin/', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://localhost:8080/admin?+HeaderKey=HeaderValue', {
'instance': plugins.NotifyNextcloud,
}),
('ncloud://user:pass@localhost:8081/admin', {
'instance': plugins.NotifyNextcloud,
# force a failure
'response': False,
'requests_response_code': requests.codes.internal_server_error,
}),
('ncloud://user:pass@localhost:8082/admin', {
'instance': plugins.NotifyNextcloud,
# throw a bizzare code forcing us to fail to look it up
'response': False,
'requests_response_code': 999,
}),
('ncloud://user:pass@localhost:8083/user1/user2/user3', {
'instance': plugins.NotifyNextcloud,
# Throws a series of connection and transfer exceptions when this flag
# is set and tests that we gracfully handle them
'test_requests_exceptions': True,
}),
)
def test_plugin_nextcloud_urls():
"""
NotifyNextcloud() Apprise URLs
"""
# Run our general tests
AppriseURLTester(tests=apprise_url_tests).run_all()
@mock.patch('requests.post')
def test_plugin_nextcloud_edge_cases(mock_post):
"""
NotifyNextcloud() Edge Cases
"""
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
# A response
robj = mock.Mock()
robj.content = ''
robj.status_code = requests.codes.ok
# Prepare Mock
mock_post.return_value = robj
# Variation Initializations
obj = plugins.NotifyNextcloud(
host="localhost", user="admin", password="<PASSWORD>", targets="user")
assert isinstance(obj, plugins.NotifyNextcloud) is True
assert isinstance(obj.url(), six.string_types) is True
# An empty body
assert obj.send(body="") is True
assert 'data' in mock_post.call_args_list[0][1]
assert 'shortMessage' in mock_post.call_args_list[0][1]['data']
# The longMessage argument is not set
assert 'longMessage' not in mock_post.call_args_list[0][1]['data']
| 1.46875 | 1 |
aligner/Scratch_null.py | trane293/nlp-project | 0 | 12758011 | <reponame>trane293/nlp-project<gh_stars>0
# coding: utf-8
# # IBM Model 1 with Expectation Maximization
# ## Author: <NAME>
# **Open the files**
# In[1]:
from __future__ import print_function
import itertools
import sys
import optparse
import os
import numpy as np
# In[3]:
optparser = optparse.OptionParser()
optparser.add_option("-d", "--datadir", dest="datadir", default="data", help="data directory (default=data)")
optparser.add_option("-p", "--prefix", dest="fileprefix", default="europarl", help="prefix of parallel data files (default=europarl)")
optparser.add_option("-e", "--english", dest="english", default="en", help="suffix of Destination (target language) filename (default=en)")
optparser.add_option("-f", "--french", dest="french", default="de", help="suffix of Source (source language) filename (default=de)")
optparser.add_option("-l", "--logfile", dest="logfile", default=None, help="filename for logging output")
optparser.add_option("-t", "--threshold", dest="threshold", default=0.5, type="float", help="threshold for alignment (default=0.5)")
optparser.add_option("-i", "--epochs", dest="epochs", default=20, type="int", help="Number of epochs to train for (default=5)")
optparser.add_option("-n", "--num_sentences", dest="num_sents", default=100000, type="int", help="Number of sentences to use for training and alignment")
(opts, _) = optparser.parse_args()
f_data = "%s.%s" % (os.path.join(opts.datadir, opts.fileprefix), opts.french)
e_data = "%s.%s" % (os.path.join(opts.datadir, opts.fileprefix), opts.english)
print('Opening files..', file=sys.stderr)
src_set = open(f_data)
des_set = open(e_data)
# **Perform Preprocessing**
# 1. **Split the data into two different sets, and split each sentence into words**
# 2. **Add a NONE character inside every english sentence**
# In[4]:
src_sent = []
dest_sent = []
for line_des, line_src in zip(des_set, src_set):
# split each sentence into a list of words for easy processing
line_des = 'nOnE ' + line_des
# print(line_des)
src_sent.append(line_src.split())
# Added none character to each english sentence. Weird Casing used to prevent collisions
dest_sent.append(line_des.split())
# We can see the words contain many "\xc3\xa9es"... which are basically the unicode codes for special accented symbols in french. Nothing to worry.
#
# Also, the punctuation marks are left as it is as "words" which map directly to the punctuation in the destination language.
# In[5]:
print('Source sentences..', file=sys.stderr)
print(src_sent[0:2], file=sys.stderr)
# In[6]:
print('Destination sentences..', file=sys.stderr)
print(dest_sent[0:10], file=sys.stderr)
# ## **We need to find the probability $t_k(f_i|e_j)$ where $f_i$ = source word and $e_j$ = destination word**
# Find all the unique words in french data
# In[7]:
# convert the source list into a chain of iterables, and then convert it to a set to only retain unique elements.
# further convert to list for easy processing
src_vocab = list(set(itertools.chain.from_iterable(src_sent)))
des_vocab = list(set(itertools.chain.from_iterable(dest_sent)))
# In[8]:
print('Some unique source words..', file=sys.stderr)
print(src_vocab[0:5], file=sys.stderr)
# In[9]:
print('Some unique destination words..', file=sys.stderr)
print(des_vocab[0:5], file=sys.stderr)
# # Start the training process..
# **We cannot initialize the $t_k$ values to uniform due to memory constraints. A better way to do this is to first check if the key exists or not, and if it doesn't, then initialize it to uniform probability. This saves a huge memory and computational overhead of permuting through all $f_i$ and $e_j$ and setting them uniform, many of which will not even appear in the training text**
# In[ ]:
k = 0
t_k = {}
uni_prob = 1.0 / np.shape(src_vocab)[0]
epochs = opts.epochs
print('Starting training for {} epochs..'.format(epochs), file=sys.stderr)
for _i in range(epochs):
print('Currently on training epoch {}..'.format(_i + 1), file=sys.stderr)
count_comb = {}
count_e = {}
# iterate over all training examples
for src_sent_eg, dest_sent_eg in zip(src_sent, dest_sent):
for f_i in src_sent_eg:
Z = 0.0
for e_j in dest_sent_eg:
if (f_i, e_j) not in t_k:
# initialize counts on the fly
# print('({}, {}) not in t_k, initializing to uniform!'.format(f_i, e_j))
t_k[(f_i, e_j)] = uni_prob
Z += t_k[(f_i, e_j)]
for e_j in dest_sent_eg:
c = t_k[(f_i, e_j)] / Z
# initialize counts on the fly
if (f_i, e_j) not in count_comb:
# print('({}, {}) not in count_comb, initializing to 0!'.format(f_i, e_j))
count_comb[(f_i, e_j)] = 0.0
# initialize counts on the fly
if e_j not in count_e:
# print('({}) not in count_e, initializing to 0!'.format(e_j))
count_e[e_j] = 0.0
count_comb[(f_i, e_j)] += c
count_e[e_j] += c
# print("+++++++++++++DEBUG+++++++++++++" + str(count_comb))
# for item in count_comb:
# print("=============DEBUG=============" + str(count_comb[item[0], 'nOnE']))
print('Updating t_k counts...', file=sys.stderr)
for f_e_keys in count_comb:
# f_e_keys[0] = f_i, f_e_keys[1] = e_j
# NULL word can be added here with the expression lmda*t(f|NULL) + (1-lmda)*t(f|e)
# print("================DEBUG==================" + count_comb[f_e_keys])
lmda = 0.01
t_k[(f_e_keys[0], f_e_keys[1])] = \
count_comb[f_e_keys] / count_e[f_e_keys[1]]
# lmda * (count_comb[f_e_keys[0], 'nOnE'] / count_e['nOnE']) \
# + \
# (1 - lmda) * (count_comb[f_e_keys] / count_e[f_e_keys[1]])
# # Make predictions using this trained model..
# In[ ]:
print('Aligning...', file=sys.stderr)
print('Source | Destination', file=sys.stderr)
for src_sent_eg, dest_sent_eg in zip(src_sent, dest_sent):
for i, f_i in enumerate(src_sent_eg):
bestp = 0
bestj = 0
for j, e_j in enumerate(dest_sent_eg):
if t_k[(f_i, e_j)] > bestp:
bestp = t_k[(f_i, e_j)]
bestj = j
sys.stdout.write('{}-{} '.format(i, bestj))
sys.stdout.write('\n')
| 2.625 | 3 |
fuelweb_test/puppet_tests/puppet_module.py | ignatenkobrain/fuel-main | 0 | 12758012 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import sys
from puppet_test import PuppetTest
path = os.path.abspath(__file__)
path = os.path.dirname(path)
sys.path.insert(0, path)
class PuppetModule:
"""This class represents Puppet module."""
def __init__(self, local_module_path):
"""You should give this constructor the full path to the module."""
self.local_module_path = local_module_path
self.module_name = os.path.basename(self.local_module_path)
self.__tests = []
self.__dependencies = []
self.comment_regexp = re.compile(r'^\s*#')
self.dependency_regexp = \
re.compile(r'^\s*dependency\s*[\'\"]*([^\'\"]+)[\'\"]*')
self.find_tests()
self.find_dependencies()
def find_dependencies(self):
"""Get dependencies of this module from Modulefile if present."""
module_file = 'Modulefile'
dependencies = []
module_file_path = os.path.join(self.local_module_path, module_file)
if not os.path.isfile(module_file_path):
self.__dependencies = dependencies
return False
opened_file = open(module_file_path, 'r')
for line in opened_file.readlines():
if re.match(self.comment_regexp, line):
# skip commented line
continue
match = re.match(self.dependency_regexp, line)
if match:
# found dependency line
dependency_name = match.group(1).split('/')[-1]
dependencies.append(dependency_name)
self.__dependencies = dependencies
return True
def find_tests(self):
"""Find all tests.
Find all tests in this module and fill tests array
with PuppetTest objects.
"""
current_path = os.path.abspath(os.curdir)
try:
os.chdir(self.local_module_path)
except OSError as error:
logging.error("Cannot change directory to %s: %s" %
(self.local_module_path, error.message))
else:
for root, dirs, files in os.walk('tests'):
for test_file in files:
if not test_file[-3:] == '.pp':
continue
test_file_path = os.path.join(root, test_file)
puppet_test = PuppetTest(test_file_path)
self.__tests.append(puppet_test)
finally:
# try to restore original folder on exit
try:
os.chdir(current_path)
except OSError as error:
logging.error("Cannot change directory to %s: %s" %
(self.local_module_path, error.message), 1)
@property
def tests(self):
"""Property returns list of tests."""
return self.__tests
@property
def name(self):
"""Property returns module name."""
return self.module_name
@property
def path(self):
"""Property returns path to this module."""
return self.local_module_path
@property
def dependencies(self):
"""Property returns list of module dependencies."""
return self.__dependencies
def __repr__(self):
"""String representation of PuppetModule."""
tests_string = ''
if len(self.tests) > 0:
tests = [repr(test) for test in self.tests]
tests_string += ", ".join(tests)
tpl = "PuppetModule(name=%s, path=%s, tests=[%s]" \
% (self.name, self.path, tests_string)
return tpl
| 2.21875 | 2 |
examples/increase_version_code.py | smantinc/pyaml | 1 | 12758013 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import getopt
from libaml.aml import AML
from libaml.aml import ResTypes
"""
This example demonstrates the how to modify binary XML using libaml.
It parses AndroidManifest.xml and increases version code by one.
"""
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], 'i:o:')
params = dict([(i.lstrip('-'), j) for i, j in opts])
if 'i' not in params:
print('Usage:\n%s -i AndroidManifest.xml [-o outfile.xml]' % sys.argv[0])
sys.exit(0)
infile = params['i']
outfile = params['o'] if 'o' in params else infile
with open(infile, 'rb') as fp:
buf = fp.read()
aml = AML(buf)
while aml.hasnext():
header, body = aml.next()
if header.type == ResTypes.RES_XML_START_ELEMENT_TYPE and body.nodename == 'manifest':
for i in body.attributes:
if str(i) == 'android:versionCode':
i.typedValue.data += 1
with open(outfile, 'wb') as fp:
fp.write(aml.tobytes())
print('Done.')
| 2.375 | 2 |
app.py | carsonmcdonald/Python-Flask-neo4j-Heroku-Example | 13 | 12758014 | <filename>app.py
#!/usr/bin/env python
"""
The main Flask application.
This app needs neo4j to run correctly. It assumes that there is an
instance running on localhost if the NEO4J_REST_URL environment
variable is not set to point to one.
The PORT environment variable can be used to change the port that
the app listens for connections on.
"""
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "Copyright 2012 <NAME>"
__license__ = "See LICENSE"
import os
import util
import time
import json
import ConfigParser
import StringIO
from urlparse import urlparse
from flask import *
from py2neo import neo4j
from py2neo import cypher
"""
Start off by setting up Flask, the complex queries configuration, the
neo4j service and the neo4j index variables.
"""
# Flask application instance; routes elsewhere in the file attach to this.
app = Flask(__name__)

# Load the canned Cypher queries used by the "complex query" endpoints.
# NOTE(review): config.read() silently ignores a missing file — presumably
# 'complexqueries.cfg' ships alongside app.py; verify at deploy time.
config = ConfigParser.ConfigParser()
config.read('complexqueries.cfg')

if os.environ.get('NEO4J_REST_URL'):
    # Hosted setup (e.g. a Heroku add-on): the env var carries host, port,
    # path and credentials in one URL, so split it apart for py2neo.
    graph_db_url = urlparse(os.environ.get('NEO4J_REST_URL'))
    graph_db = neo4j.GraphDatabaseService('http://{host}:{port}{path}'.format(host=graph_db_url.hostname, port=graph_db_url.port, path=graph_db_url.path), user_name=graph_db_url.username, password=graph_db_url.password)
else:
    # Local development fallback: unauthenticated neo4j on the default port.
    graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data')

# Node indexes used for lookups throughout the app. With py2neo's
# get_node_index these are fetched (and, per py2neo 1.x behaviour,
# presumably created on demand — confirm against the py2neo version in use).
users_idx = graph_db.get_node_index("Users")
posts_idx = graph_db.get_node_index("Posts")
tags_idx = graph_db.get_node_index("Tags")
bookmarks_idx = graph_db.get_node_index("Bookmarks")
def transform_entry(entry):
    """
    Convert a query-result entry into a JSON-serializable dictionary.

    Raw neo4j Nodes do not serialize to JSON, so this maps each node's
    properties into a small dict keyed by its type ('user', 'answer',
    'question' or 'tag').

    @param entry: either a bare node mapping, or a dict with 'node' and
                  'relation' keys as produced by combine_relation_and_node.
    @return: a one-key dict describing the entry, or None for anything
             that is not a typed node (e.g. bookmark nodes).
    """
    # Entries may arrive as bare nodes (no 'relation' wrapper); give those,
    # and entries whose relation is None, a default weight of 1.
    if 'relation' not in entry:
        node, relation = entry, {'weight': 1}
    else:
        node = entry['node']
        relation = entry['relation'] if entry['relation'] is not None else {'weight': 1}

    # Skip anything that isn't a dict or lacks an ntype (bookmarks land here).
    if type(node) is not dict or 'ntype' not in node:
        return None

    ntype = node['ntype']
    weight = relation['weight']
    if ntype == 'user':
        return {'user': {'name': node['name'],
                         'user_id': node['userId'],
                         'weight': weight}}
    if ntype in ('a', 'q'):
        # Answers ('a') and questions ('q') share the same property layout.
        label = 'answer' if ntype == 'a' else 'question'
        return {label: {'post_id': node['postId'],
                        'favorite_count': node['favoriteCount'],
                        'score': node['score'],
                        'weight': weight}}
    if ntype == 'tag':
        return {'tag': {'name': node['tagName'], 'weight': weight}}
    return None
def combine_relation_and_node(relationship):
    """
    Bundle a neo4j Relationship's properties together with the properties of
    its end Node into a single dictionary.

    @param relationship: the neo4j Relationship to transform.
    """
    return {
        'relation': relationship.get_properties(),
        'node': relationship.get_end_node().get_properties(),
    }
def output_bookmarks(bookmark, start_ts):
    """
    Pulls together the bookmarks for a given user into a single response
    dictionary.

    @param bookmark: index-search result; bookmark[0] is the bookmark root node
    @param start_ts: the starting time of the current call (for exec_time)
    """
    # Each bookmark type is stored under its own outgoing relationship type.
    users_related = map(combine_relation_and_node, bookmark[0].get_relationships(neo4j.Direction.OUTGOING, "bookmarked_user"))
    posts_related = map(combine_relation_and_node, bookmark[0].get_relationships(neo4j.Direction.OUTGOING, "bookmarked_post"))
    tags_related = map(combine_relation_and_node, bookmark[0].get_relationships(neo4j.Direction.OUTGOING, "bookmarked_tag"))
    # Drop entries transform_entry could not recognise (it returns None).
    # NOTE(review): this file targets Python 2 (ConfigParser/StringIO imports),
    # where map/filter return lists that jsonify can serialize directly.
    users = filter(lambda r: r is not None, map(transform_entry, users_related))
    posts = filter(lambda r: r is not None, map(transform_entry, posts_related))
    tags = filter(lambda r: r is not None, map(transform_entry, tags_related))
    return jsonify(users=users, posts=posts, tags=tags, exec_time=(time.time()-start_ts))
@app.route("/", methods=["GET"])
def index():
"""
The root route will send the user check to see if a user has a bookmarkId
and send them to their unique page with those bookmarks. If they don't have
a bookmarkId already it will create a new one and then send them to that.
If you wanted to repurpose this app you probably wouldn't want to do this but
instead show a static page here.
"""
broot = request.cookies.get('broot') or util.next_code()
# if the given root doesn't exist yet then create it
bookmark = bookmarks_idx.search("bookmarkId", broot)
if len(bookmark) == 0:
node = graph_db.create_node({"bookmarkId": broot})
bookmarks_idx.add(node, "bookmarkId", broot)
return redirect("/h/" + broot, code=302)
@app.route("/h/<broot>", methods=["GET"])
def bookmark_history(broot):
"""
This route is where all users land after they are given a unique bookmark.
The only static page in the app is rendered here and can be found in the
templates directory.
If you wanted to repurpose this app you probably wouldn't need this route.
"""
bookmark = bookmarks_idx.search("bookmarkId", broot)
if len(bookmark) == 0:
return redirect("/", code=302)
else:
resp = make_response(render_template('index.html'))
resp.set_cookie('broot', broot, 60 * 60 * 24 * 365 * 10)
return resp
@app.route("/service/bookmarks/<broot>", methods=["GET"])
def get_bookmarks(broot):
"""
This route handles requests for bookmarks given a bookmarkId.
If no bookmarks are found for the given bookmarkId a 404 status is returned.
neo4j note: This is an example of using an index search.
@param broot: the bookmarkId to look up bookmarks for
"""
start_ts = time.time()
bookmark = bookmarks_idx.search("bookmarkId", broot)
if len(bookmark) > 0:
return output_bookmarks(bookmark, start_ts)
else:
abort(404)
@app.route("/service/bookmarks/<broot>", methods=["POST"])
def add_bookmark(broot):
"""
This route handles adding and removing bookmarks as well as setting the weight
of a bookmark.
The JSON sent should fit one of the following formats:
{'type':'user', 'userId':1234, 'action':'add'}
{'type':'post', 'postId':9000, 'action':'add'}
{'type':'tag', 'tagName':'school', 'action':'add'}
{'type':'tag', 'tagName':'school', 'action':'weight', 'weight':2}
Action can be add, delete or weight.
If the posted JSON is not found, the action is an unknown type, or the reference
node can note be found a 404 is returned.
neo4j note: This route contains an example of adding, deleting, modifying and
associating Nodes and Relationships.
@param broot: the bookmarkId to modify
"""
start_ts = time.time()
bookmark = bookmarks_idx.search("bookmarkId", broot)
if len(bookmark) > 0 and request.json is not None:
ref_node = []
ref_type = request.json['type']
if request.json['type'] == "user":
ref_node = users_idx.search("userId", request.json['uid'])
elif request.json['type'] == "post":
ref_node = posts_idx.search("postId", request.json['pid'])
elif request.json['type'] == "tag":
ref_node = tags_idx.search("tagName", request.json['name'])
if len(ref_node) > 0:
if request.json['action'] == "add":
relation = bookmark[0].create_relationship_to(ref_node[0], 'bookmarked_' + ref_type)
relation.set_properties({"weight":1})
elif request.json['action'] == "delete" or request.json['action'] == "weight":
# Find the related nodes of the correct type (user, post or tag)
related = bookmark[0].get_relationships(neo4j.Direction.OUTGOING, 'bookmarked_' + ref_type)
# Find the correct relation to delete based on the end node
for relation in related:
if relation.get_end_node()._uri == ref_node[0]._uri:
if request.json['action'] == "delete":
relation.delete()
elif request.json['action'] == "weight":
relation.set_properties({"weight":request.json['weight']})
return output_bookmarks(bookmark, start_ts)
else:
abort(404)
else:
abort(404)
@app.route("/service/simplequery/<qtype>/<qval>", methods=["GET"])
def get_node_simplequery(qtype, qval):
"""
Run a simple neo4j query for information.
The results of the neo4j query are transformed into a JSON response with
only the relivant information for the given query type.
neo4j note: This code contains an example of using directional lookups
on a Node.
@param qtype: the type of query being made (user, post or tag)
@param qval: the value to introduce into the query
"""
start_ts = time.time()
res = []
if qtype == 'user':
res = users_idx.search("userId", qval)
elif qtype == 'post':
res = posts_idx.search("postId", qval)
elif qtype == 'tag':
res = tags_idx.search("tagName", qval)
if len(res) > 0:
out_related = graph_db.get_properties(*res[0].get_related_nodes(neo4j.Direction.OUTGOING))
in_related = graph_db.get_properties(*res[0].get_related_nodes(neo4j.Direction.INCOMING))
# transform the nodes into what we want on the client side
out_related = filter(lambda r: r is not None, map(transform_entry, out_related))
in_related = filter(lambda r: r is not None, map(transform_entry, in_related))
return jsonify(node=transform_entry(res[0].get_properties()), outgoing=out_related, incoming=in_related, exec_time=(time.time()-start_ts))
else:
abort(404)
@app.route("/service/complexquery/<qtype>/<qval>", methods=["GET"])
def get_node_complexquery(qtype, qval):
"""
Run one of the pre-configured Cypher queries against the neo4j database.
See the complexqueries.cfg file for all the pre-configured queries. Most of
the pre-configured queries are limited to just a query but there is also an
option to add a post-query block of code that will further transform the
query results. Here is an example that takes the given post value and looks
up posts that are tagged the same then transforms the results so that they
are sorted by tag count and limited to the first 20 tag sets:
[similarly.tagged]
query: start startQuestion=node:Posts(postId="{val}")
match startQuestion -[:tagged]-> (tag) <-[:tagged]- (endQuestion)
return endQuestion.postId as questionId, collect(tag.tagName) as tags
order by questionId
exec:result = map(lambda r: [r[0], r[1].replace('[', '').replace(']', '').replace(', ', ',').split(',')], result[0])
result.sort(lambda x,y: cmp(len(x[1]), len(y[1])))
result.reverse()
del result[20:]
result = map(lambda r: {'questionId':r[0], 'tags':r[1]}, result)
result = json.dumps(result)
The result of an exec code block is expected to be in JSON format.
neo4j note: This code is an example of executing a Cypher query.
@param qtype: the type of query being made, must match an entry in
the complexqueries.cfg file
@param qval: the value to introduce into the query
"""
start_ts = time.time()
raw_query = config.get(qtype, "query")
if raw_query is not None:
query = raw_query.format(val=util.simple_sanitize(qval))
result = None
if config.has_option(qtype, "exec"):
result = cypher.execute(query, graph_db)
exec(config.get(qtype, "exec"))
else:
temp = StringIO.StringIO()
cypher.execute_and_output_as_json(query, graph_db, temp)
result = temp.getvalue()
temp.close()
resp = make_response('{{"value":"{value}", "result":{result}, "exec_time":{exec_time}}}'.format(value=qval, result=result, exec_time=(time.time()-start_ts)))
resp.headers['Content-type'] = 'application/json'
return resp
else:
abort(404)
if __name__ == '__main__':
    # Listen on all interfaces; honour the PORT environment variable
    # (set by the hosting platform), defaulting to 5000 locally.
    listen_port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=listen_port, debug=True)
| 3.046875 | 3 |
src/snowflake/connector/options.py | fhoehle/snowflake-connector-python | 0 | 12758015 | import warnings
import pkg_resources
from .errors import MissingDependencyError
# Flags to see whether optional dependencies were installed; the import
# probes at the bottom of this module flip them to True on success.
installed_pandas = False
installed_keyring = False
def warn_incompatible_dep(dep_name: str,
                          installed_ver: str,
                          expected_ver: 'pkg_resources.Requirement') -> None:
    """Warn that an installed optional dependency does not satisfy the
    version constraint this package was built against.

    :param dep_name: name of the offending dependency (e.g. 'pyarrow')
    :param installed_ver: the version that is actually installed
    :param expected_ver: the requirement the installed version should satisfy
    """
    # BUG FIX: the original referenced the module-level _expected_version
    # (only defined later, inside the pandas try-block) instead of the
    # expected_ver parameter, and silently ignored installed_ver.
    warnings.warn(
        "You have an incompatible version of '{}' installed ({}), please "
        "install a version that adheres to: '{}'".format(dep_name,
                                                         installed_ver,
                                                         expected_ver),
        stacklevel=2)
class MissingPandas(object):
    """Stand-in object bound to ``pandas``/``pyarrow`` when they are not
    installed; any attribute access raises MissingDependencyError so the
    failure happens at first use rather than at import time."""

    def __getattr__(self, item):
        raise MissingDependencyError('pandas')
try:
    import pandas
    # since we enable relative imports without dots this import gives us an issues when ran from test directory
    from pandas import DataFrame  # NOQA
    import pyarrow
    installed_pandas = True

    # Make sure we have the right pyarrow installed: look up the pyarrow
    # requirement declared under this package's "pandas" extra and compare
    # it to the installed distribution. pkg_resources.Requirement supports
    # "version in requirement" to test whether a version satisfies the spec.
    _pandas_extras = pkg_resources.working_set.by_key['snowflake-connector-python']._dep_map['pandas']
    _expected_version = [dep for dep in _pandas_extras if dep.name == 'pyarrow'][0]
    _installed_pyarrow = pkg_resources.working_set.by_key['pyarrow']
    if _installed_pyarrow and _installed_pyarrow.version not in _expected_version:
        warn_incompatible_dep('pyarrow', _installed_pyarrow.version, _expected_version)

except ImportError:
    # pandas/pyarrow are optional; substitute objects that raise a helpful
    # error on first use instead of failing now.
    pandas = MissingPandas()
    pyarrow = MissingPandas()

try:
    import keyring
    installed_keyring = True
except ImportError:
    # keyring is optional as well; callers must check installed_keyring.
    keyring = None
| 2.28125 | 2 |
gaussian/gauss.py | EnsekiTT/ml_review | 0 | 12758016 | # -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
POINTS = 1000
if __name__ == '__main__':
    # Two anisotropic Gaussian point clouds of POINTS samples each.
    gauss1 = (np.random.randn(POINTS), np.random.randn(POINTS) * 0.24)
    gauss2 = (np.random.randn(POINTS) * 0.28, np.random.randn(POINTS))

    # Linear trends to superimpose on each cloud (slopes -2 and +1).
    x1 = np.array(range(POINTS)) * 0.005
    y1 = x1 * -2
    x2 = x1
    y2 = x2 * 1

    offset_x1 = -4
    offset_y1 = 2

    # BUG FIX: the original used zip(gauss1)/zip(gauss2), which wraps each
    # coordinate array in a 1-tuple (and a zip object is not even
    # subscriptable on Python 3). list() gives mutable [x, y] pairs directly.
    cc = list(gauss1)
    dd = list(gauss2)

    cc[0] = cc[0] + x1 + offset_x1
    cc[1] = cc[1] + y1 + offset_y1
    dd[0] = dd[0] + x2
    dd[1] = dd[1] + y2

    plt.scatter(cc[0], cc[1], c=u'b')
    plt.scatter(dd[0], dd[1], c=u'r')
    plt.draw()
    plt.show()
| 2.96875 | 3 |
MethodSCRIPTExample_Python/MethodSCRIPTExample_Python/MSPlotCV.py | PalmSens/emstatpico | 7 | 12758017 | # -*- coding: utf-8 -*-
"""
/* ----------------------------------------------------------------------------
* PalmSens Method SCRIPT SDK
* ----------------------------------------------------------------------------
* Copyright (c) 2019-2020, PalmSens BV
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the disclaimer below.
*
* PalmSens's name may not be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* DISCLAIMER: THIS SOFTWARE IS PROVIDED BY PALMSENS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* DISCLAIMED. IN NO EVENT SHALL PALMSENS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ----------------------------------------------------------------------------
*/
"""
###############################################################################
# Description
###############################################################################
# This example showcases how to perform and plot a simple Cyclic Voltammetry (CV) measurement
###############################################################################
# Imports
###############################################################################
import serial
import os.path
import PSEsPicoLib
import matplotlib.pyplot as plt
import sys
###############################################################################
# Configuration
###############################################################################
#Folder where scripts are stored
MSfilepath = ".\\MethodSCRIPT files"
#Name of script file to run
MScriptFile = "MSExampleCV.mscr"
#COM port of the EmStat Pico
myport = "COM9"
#Set to False to disable printing of raw and parsed data
verbose_printing = True

###############################################################################
# Code
###############################################################################

#Set printing verbosity
PSEsPicoLib.SetPrintVerbose(verbose_printing)

#used to only parse data once we have succesfully executed the script
measurement_succes = False

#combine the path and filename
MScriptPathandFile = os.path.join(MSfilepath, MScriptFile)

#initialization and open the port
ser = serial.Serial()  #Create an instance of the serial object
if PSEsPicoLib.OpenComport(ser, myport, 1):  #open myport with 1 sec timeout
    print("Succesfuly opened: " + ser.port)
    try:
        PSEsPicoLib.Flush(ser)  #Flush the EmstatPico parse buffer
        if PSEsPicoLib.IsConnected(ser):  #Check if EmstatPico is connected
            print("Connected!")
            # Send the MethodSCRIPT file
            PSEsPicoLib.SendScriptFile(ser, MScriptPathandFile)
            #Get the results and store it in datafile
            datafile = PSEsPicoLib.GetResults(ser)  # fetch the results
            #Create "data" subfolder next to the script files
            (prefix, sep, suffix) = MScriptFile.rpartition('.')  #split the file-extension and the filename
            ResultFile = prefix + '.dat'  #change the extension to .dat
            ResultPath = MSfilepath + "\\data"  #use subfolder for the data
            # mkdir fails (OSError) if the folder already exists; that is
            # harmless here and only reported, not fatal.
            try:
                os.mkdir(ResultPath)
            except OSError:
                print("Creation of the directory %s failed" % ResultPath)
            else:
                print("Successfully created the directory %s " % ResultPath)
            ResultFile = os.path.join(ResultPath, ResultFile)  #combine the path and the filename
            ResultFile = PSEsPicoLib.CheckFileExistAndRename(ResultFile)  #Rename the file if it exists to a unique name by add the date+time
            #print(ResultFile)
            f = open(ResultFile, "w+")  #Open file for writing
            f.write(datafile)  #write data to file
            f.close()  #close file
            measurement_succes = True
        else:
            print("Unable to connected!")
    except Exception as e1:  #catch any communication exception and report it
        print("error communicating...: " + str(e1))  #print the exception
    finally:
        ser.close()  #close the comport in all cases
else:
    print("cannot open serial port ")

# Abort before plotting if the measurement never completed.
if (not measurement_succes):
    sys.exit()

value_matrix = PSEsPicoLib.ParseResultFile(ResultFile)  #Parse result file to Value matrix
applied_potential = PSEsPicoLib.GetColumnFromMatrix(value_matrix, 0)  #Get the applied potentials
measured_current = PSEsPicoLib.GetColumnFromMatrix(value_matrix, 1)  #Get the measured current

plt.figure(1)
plt.plot(applied_potential, measured_current)
plt.title("Voltammogram")
plt.xlabel("Applied Potential (V)")
plt.ylabel("Measured Current (A)")
# NOTE(review): plt.show() blocks; the grid/ticks tweaks below only take
# effect while the window is still open (and the "b=" keyword of plt.grid
# is deprecated in newer matplotlib) -- consider moving them before show().
plt.show()
plt.grid(b=True, which='major')
plt.grid(b=True, which='minor', color='b', linestyle='-', alpha=0.2)
plt.minorticks_on()
| 0.949219 | 1 |
imageprovider/abstractimageprovider.py | freekode/wow-bot-cv | 1 | 12758018 | <filename>imageprovider/abstractimageprovider.py
from abc import ABC, abstractmethod
class AbstractImageProvider(ABC):
    """Interface for objects that capture an image region as a numpy array."""

    @abstractmethod
    def get_image_numpy_array(self, coordinates):
        """Return the image at *coordinates* as a numpy array.

        :param coordinates: region to capture; the exact format is defined
            by the concrete implementation (not visible here -- confirm
            against callers).
        """
        pass
| 2.40625 | 2 |
day02/ex02/logger.py | bcarlier75/python_bootcamp_42ai | 1 | 12758019 | <reponame>bcarlier75/python_bootcamp_42ai
import time
from functools import wraps
from random import randint
def log(func):
    """Decorator that appends a timing entry for *func* to machine.log.

    Each log line records a fixed user tag, a human-readable action label
    derived from the wrapped function's name, and the wall-clock duration
    of the call.
    """
    # Map function names to the (tab-padded) labels used in the log file;
    # replaces the original if/elif name chain.
    labels = {
        'start_machine': 'Start Machine\t',
        'boil_water': 'Boil Water\t\t',
        'make_coffee': 'Make Coffee\t\t',
        'add_water': 'Add Water\t\t',
    }

    @wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        my_arg = labels.get(func.__name__, '')
        user_log = '(bcarlier)Running:'
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        with open('machine.log', 'a+') as f:
            f.write(f'{user_log} {my_arg} [ exec-time = {end_time - start_time:.3f} ms ]\n')
        return result
    return wrapper
class CoffeeMachine:
    """Toy coffee machine: each brew drains 20 units of water and every
    operation is timed through the @log decorator."""

    # Class-level water reservoir (units). NOTE(review): make_coffee's
    # "self.water_level -= 1" creates a per-instance attribute that shadows
    # this class attribute after the first brew.
    water_level = 100

    @log
    def start_machine(self):
        """Return True when there is enough water (> 20 units) to brew."""
        if self.water_level > 20:
            return True
        else:
            print("Please add water!")
            return False

    @log
    def boil_water(self):
        """Return the boiling status string."""
        return "boiling..."

    @log
    def make_coffee(self):
        """Brew one coffee, consuming 20 units of water over ~2 seconds."""
        if self.start_machine():
            for _ in range(20):
                time.sleep(0.1)
                self.water_level -= 1
            print(self.boil_water())
            print("Coffee is ready!")

    @log
    def add_water(self, water_level):
        """Refill the reservoir by *water_level* units (takes 1-5 seconds)."""
        time.sleep(randint(1, 5))
        self.water_level += water_level
        print("Blub blub blub...")
if __name__ == "__main__":
machine = CoffeeMachine()
for i in range(0, 5):
machine.make_coffee()
machine.make_coffee()
machine.add_water(70)
| 3.671875 | 4 |
tests/test_dqn.py | kaixinbaba/reinforch | 3 | 12758020 | from reinforch.agents import DQNAgent
from reinforch.core.memorys import SimpleMatrixMemory
from reinforch.environments import OpenAIGym
from reinforch.execution import Runner
def test_dqn():
    """Smoke-test DQNAgent training on CartPole-v0 for a few episodes."""
    gym_id = 'CartPole-v0'
    env = OpenAIGym(gym_id)
    env.seed(7)  # fixed seed for reproducibility
    n_s = env.n_s  # state-space size
    n_a = env.n_a  # action-space size
    # Replay buffer with 3000 rows; the per-row column widths are
    # state, action, reward, next state, done flag (presumably --
    # confirm against SimpleMatrixMemory's documented layout).
    memory = SimpleMatrixMemory(row_size=3000, every_class_size=[n_s, 1, 1, n_s, 1])
    agent = DQNAgent(n_s=n_s,
                     n_a=n_a,
                     memory=memory,
                     config='tests/configs/test_dqn.json')
    with Runner(agent=agent,
                environment=env,
                verbose=False) as runner:
        # Short run with no model persistence or rendering: this only checks
        # that training executes end-to-end without raising.
        runner.train(total_episode=10,
                     max_step_in_one_episode=200,
                     save_model=False,
                     save_final_model=False,
                     visualize=False)
| 2.296875 | 2 |
andersoncd/plot_utils.py | lesteve/andersoncd | 18 | 12758021 | <reponame>lesteve/andersoncd<filename>andersoncd/plot_utils.py<gh_stars>10-100
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import os
import matplotlib
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rc
# Colorblind-friendly and dark palettes shared by the plotting helpers.
C_LIST = sns.color_palette("colorblind", 8)
C_LIST_DARK = sns.color_palette("dark", 8)


def configure_plt(fontsize=10, poster=True):
    """Apply the project-wide matplotlib/seaborn styling.

    :param fontsize: base font size; tick labels use fontsize - 2.
    :param poster: if True, also apply seaborn's "poster" context.
    """
    rc('font', **{'family': 'sans-serif',
                  'sans-serif': ['Computer Modern Roman']})
    # Only enable LaTeX text rendering when a TeX toolchain is available.
    # NOTE(review): matplotlib.checkdep_usetex was deprecated and later
    # removed in recent matplotlib releases -- confirm the pinned version.
    usetex = matplotlib.checkdep_usetex(True)
    params = {'axes.labelsize': fontsize,
              'font.size': fontsize,
              'legend.fontsize': fontsize,
              'xtick.labelsize': fontsize - 2,
              'ytick.labelsize': fontsize - 2,
              'text.usetex': usetex,
              'figure.figsize': (8, 6)}
    plt.rcParams.update(params)
    sns.set_palette('colorblind')
    sns.set_style("ticks")
    if poster:
        sns.set_context("poster")
def _plot_legend_apart(ax, figname, ncol=None):
    """Do all your plots with fig, ax = plt.subplots(),
    don't call plt.legend() at the end but this instead.

    Saves the legend of *ax* as a standalone (cropped) figure at *figname*.

    :param ax: axes whose line labels make up the legend
    :param figname: output path for the legend figure (PDF expected)
    :param ncol: number of legend columns; defaults to one per line
    :return: the legend-only figure
    """
    if ncol is None:
        ncol = len(ax.lines)
    fig = plt.figure(figsize=(30, 4), constrained_layout=True)
    fig.legend(ax.lines, [line.get_label() for line in ax.lines], ncol=ncol,
               loc="upper center")
    fig.tight_layout()
    fig.savefig(figname, bbox_inches="tight")
    # NOTE(review): shells out to the external `pdfcrop` tool; figname is
    # interpolated into a shell command, so only call this with trusted,
    # space-free paths.
    os.system("pdfcrop %s %s" % (figname, figname))
    return fig
# Display names keyed by (solver id, use_acceleration) pairs, used for
# plot legends.
dict_algo_name = {}
dict_algo_name["pgd", False] = "GD"
dict_algo_name["cd", False] = "CD"
dict_algo_name["bcd", False] = "BCD"
dict_algo_name["pgd", True] = "GD - Anderson"
dict_algo_name["cd", True] = "CD - Anderson"
dict_algo_name["bcd", True] = "BCD - Anderson"
dict_algo_name["rcd", False] = "RCD"
dict_algo_name["rbcd", False] = "RBCD"
dict_algo_name["fista", False] = "GD - inertial"
dict_algo_name["apcg", False] = "CD - inertial"

# One colour per solver family so related variants share a hue
# (e.g. pgd/fista, cd/bcd/apcg, rcd/rbcd).
current_palette = sns.color_palette("colorblind")
dict_color = {}
dict_color["pgd"] = current_palette[0]
dict_color["fista"] = current_palette[0]
dict_color["cd"] = current_palette[1]
dict_color["bcd"] = current_palette[1]
dict_color["rcd"] = current_palette[4]
dict_color["rbcd"] = current_palette[4]
dict_color["apcg"] = current_palette[1]
| 2.578125 | 3 |
main_app/utils.py | raulrodriguez8/FLEXipes | 0 | 12758022 | <reponame>raulrodriguez8/FLEXipes
from datetime import datetime, timedelta
from calendar import HTMLCalendar
from .models import Meal
class Calendar(HTMLCalendar):
    """Render a month of Meal objects as an HTML table.

    Each day cell lists the meals scheduled on that day, colour-coded by
    meal type and linking out to the meal's recipe URL.
    """

    def __init__(self, year=None, month=None):
        self.year = year
        self.month = month
        super(Calendar, self).__init__()

    def formatday(self, day, meals):
        """Render one day as a <td>; day 0 is calendar padding."""
        todays_meals = meals.filter(date__day=day)
        items = ''
        # Background colour per meal code; anything else falls back to green.
        colours = {'B': '#ED6B86', 'R': '#FF495C', 'L': '#4F5C7D'}
        for meal in todays_meals:
            colour = colours.get(meal.meal, '#336036')
            items += f'<div><li style ="background-color: {colour};color: white; width:200px;margin: 0;"> {meal.meal}: <a href="{ meal.recipe_url }" target="_blank"> {meal.recipe_name} </li></div><br>'
        if day != 0:
            return f'<td style="vertical-align: top;text-align: left;"><span class="date">{day}</span><ul> {items} </ul></td>'
        return '<td style ="width:200px"></td>'

    def formatweek(self, theweek, meals):
        """Render a week (sequence of (day, weekday) pairs) as a <tr>."""
        cells = ''.join(self.formatday(day, meals) for day, _weekday in theweek)
        return f'<tr style="height:250px;"> {cells} </tr>'

    def formatmonth(self, withyear=True):
        """Render the configured month, with its Meal entries, as a table."""
        meals = Meal.objects.filter(date__year=self.year, date__month=self.month)
        parts = ['<table border="0" cellpadding="0" cellspacing="0" class="calendar">\n']
        parts.append(f'{self.formatmonthname(self.year, self.month, withyear=withyear)}\n')
        parts.append(f'{self.formatweekheader()}\n')
        for week in self.monthdays2calendar(self.year, self.month):
            parts.append(f'{self.formatweek(week, meals)}\n')
        return ''.join(parts)
July 29/mult.py | rithulkamesh/python-comp | 1 | 12758023 | <reponame>rithulkamesh/python-comp<filename>July 29/mult.py
# Print the 1-10 multiplication table for a user-supplied number.
n = int(input('Enter A Number:'))
# The original kept a separate `count` variable alongside an unused loop
# index; a single loop variable produces exactly the same output.
for i in range(1, 11):
    print(f'{n} * {i} = {n * i}')
tests/test__requests.py | LCBRU/lbrc_flask | 0 | 12758024 | <reponame>LCBRU/lbrc_flask
import datetime
from flask import url_for
from lbrc_flask.pytest.helpers import login
def test__get_value_from_all_arguments__get(client, faker):
    """The json_requests view reads a value passed as a GET query argument."""
    login(client, faker)

    field_name = 'hello'
    value = 'jim'

    resp = client.get(url_for('json_requests', field_name=field_name, hello=value))
    assert resp.get_json()['result'] == value
def test__get_value_from_all_arguments__post(client, faker):
    """The json_requests view reads a value passed as POSTed form data."""
    login(client, faker)

    field_name = 'toast'
    value = 'jam'

    resp = client.post(url_for('json_requests', field_name=field_name), data={field_name: value})
    assert resp.get_json()['result'] == value
def test__get_value_from_all_arguments__post_json(client, faker):
    """The json_requests view reads a value passed as a JSON request body."""
    login(client, faker)

    field_name = 'bread'
    value = 'cheese'

    resp = client.post_json(url_for('json_requests', field_name=field_name), data={field_name: value})
    assert resp.get_json()['result'] == value
| 2.390625 | 2 |
draw_card/build_image.py | NumberSir/nonebot_plugin_gamedraw | 1 | 12758025 | import base64
from io import BytesIO
from pathlib import Path
from PIL import Image, ImageDraw, UnidentifiedImageError
from nonebot.log import logger
from typing import Tuple, Union, Optional, Literal
from .util import load_font
import asyncio
class BuildImage:
    """Wrapper around a PIL image that keeps a drawing context, a default
    font, a paste cursor (current_w/current_h) and an asyncio loop so that
    text/paste operations can also be awaited."""

    def __init__(
        self,
        w: int,
        h: int,
        img_w: int = 0,
        img_h: int = 0,
        background: str = "",
        color: Union[float, Tuple[float, ...], str] = "white",
        image_type: Literal[
            "1",
            "CMYK",
            "F",
            "HSV",
            "I",
            "L",
            "LAB",
            "P",
            "RGB",
            "RGBA",
            "RGBX",
            "YCbCr",
        ] = "RGBA",
        divisor: float = 1,
        font_size: int = 10,
    ):
        """Create a w x h canvas, optionally loading *background* from disk.

        :param w: canvas width; with a background, pass w == h == 0 to use
            the image's own size (scaled by *divisor*)
        :param h: canvas height
        :param img_w: width of tiles placed with paste() (cursor step)
        :param img_h: height of tiles placed with paste() (cursor step)
        :param background: path of a background image to load (may be empty)
        :param color: fill colour for a newly created blank canvas
        :param image_type: PIL mode for a newly created blank canvas
        :param divisor: scale factor applied to the background's native size
        :param font_size: size of the default font
        """
        self.w = int(w)
        self.h = int(h)
        self.img_w = int(img_w)
        self.img_h = int(img_h)
        # Paste cursor: where the next tile goes when paste() gets no pos.
        self.current_w = 0
        self.current_h = 0
        if not background:
            # No background file: start from a solid-colour canvas.
            self.markImg = Image.new(image_type, (self.w, self.h), color)
        else:
            try:
                if w == 0 and h == 0:
                    # Adopt the background's own size, scaled by divisor.
                    self.markImg = Image.open(background)
                    w, h = self.markImg.size
                    if divisor:
                        self.w = int(divisor * w)
                        self.h = int(divisor * h)
                        # NOTE(review): Image.ANTIALIAS was removed in
                        # Pillow 10 (use Image.Resampling.LANCZOS) --
                        # confirm the pinned Pillow version.
                        self.markImg = self.markImg.resize(
                            (self.w, self.h), Image.ANTIALIAS
                        )
                    else:
                        self.w = w
                        self.h = h
                else:
                    self.markImg = Image.open(background).resize(
                        (self.w, self.h), Image.ANTIALIAS
                    )
            except UnidentifiedImageError as e:
                # Corrupt download: delete it so the next update re-fetches,
                # and fall back to a blank canvas.
                logger.warning(f"无法识别图片 已删除图片,下次更新重新下载... e:{e}")
                Path(background).unlink(missing_ok=True)
                self.markImg = Image.new(image_type, (self.w, self.h), color)
            except FileNotFoundError:
                logger.warning(f"{background} not exists")
                self.markImg = Image.new(image_type, (self.w, self.h), color)
        self.font = load_font(fontsize=font_size)
        self.draw = ImageDraw.Draw(self.markImg)
        self.size = self.w, self.h
        # Reuse the running event loop when there is one; otherwise create
        # and install a fresh loop for the async helper methods.
        try:
            self.loop = asyncio.get_event_loop()
        except RuntimeError:
            new_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(new_loop)
            self.loop = asyncio.get_event_loop()

    async def atext(
        self,
        pos: Tuple[int, int],
        text: str,
        fill: Union[str, Tuple[int, int, int]] = (0, 0, 0),
    ):
        """
        Asynchronously draw text on the image (runs text() in an executor).

        :param pos: text position
        :param text: text content
        :param fill: text colour
        """
        await self.loop.run_in_executor(None, self.text, pos, text, fill)

    def text(
        self,
        pos: Tuple[int, int],
        text: str,
        fill: Union[str, Tuple[int, int, int]] = (0, 0, 0),
    ):
        """
        Draw text on the image using the default font.

        :param pos: text position
        :param text: text content
        :param fill: text colour
        """
        self.draw.text(pos, text, fill=fill, font=self.font)

    async def apaste(
        self,
        img: "BuildImage" or Image,
        pos: Optional[Tuple[int, int]] = None,
        alpha: bool = False,
    ):
        """
        Asynchronously paste an image (runs paste() in an executor).

        :param img: an opened image, either a BuildImage or a PIL Image
        :param pos: paste position (top-left corner)
        :param alpha: whether the pasted image has a transparent background
        """
        await self.loop.run_in_executor(None, self.paste, img, pos, alpha)

    def paste(self, img, pos=None, alpha=False):
        """Paste *img* at *pos*, or at the internal tiling cursor when pos
        is None; the cursor advances by img_w/img_h and wraps to the next
        row at the right edge."""
        if isinstance(img, BuildImage):
            img = img.markImg
        if self.current_w == self.w:
            # Row full: wrap to the start of the next tile row.
            self.current_w = 0
            self.current_h += self.img_h
        if not pos:
            pos = (self.current_w, self.current_h)
        if alpha:
            try:
                # Use the image itself as the paste mask for transparency.
                self.markImg.paste(img, pos, img)
            except ValueError:
                img = img.convert("RGBA")
                self.markImg.paste(img, pos, img)
        else:
            self.markImg.paste(img, pos)
        self.current_w += self.img_w
        return self.markImg

    def circle_corner(self, r: int):
        """Round the four corners of the image with radius *r* by masking
        the alpha channel.

        NOTE(review): this rebinds self.markImg to a new image but leaves
        self.draw pointing at the old one -- later text() calls may not
        affect the rounded image; confirm intended behaviour.
        """
        img = self.markImg.convert("RGBA")
        w, h = img.size
        alpha = img.split()[-1]
        circle = Image.new("L", (r * 2, r * 2), 0)  # black square mask
        draw = ImageDraw.Draw(circle)
        draw.ellipse((0, 0, r * 2, r * 2), fill=255)  # white circle inscribed in it
        draw = ImageDraw.Draw(alpha)
        # Copy one quadrant of the circle into each corner of the alpha mask.
        alpha.paste(circle.crop((0, 0, r, r)), (0, 0))  # top-left
        alpha.paste(circle.crop((r, 0, r * 2, r)), (w - r, 0))  # top-right
        alpha.paste(circle.crop((r, r, r * 2, r * 2)), (w - r, h - r))  # bottom-right
        alpha.paste(circle.crop((0, r, r, r * 2)), (0, h - r))  # bottom-left
        img.putalpha(alpha)
        self.markImg = img

    def pic2bs4(self):
        """Encode the current image as a PNG base64 data string
        ("base64://...") suitable for the bot framework."""
        buf = BytesIO()
        self.markImg.save(buf, format="PNG")
        return f"base64://{base64.b64encode(buf.getvalue()).decode()}"
| 2.28125 | 2 |
src/E028.py | Jexan/ProjectEulerSolutions | 0 | 12758026 | <filename>src/E028.py
# Sum of the diagonals of a spiral square diagonal
# OPTIMAL (<0.1s)
#
# APPROACH:
# Generate the numbers in the spiral with a simple algorithm until
# the desdired side is obtained,
SQUARE_SIDE = 1001
DUMMY_SQUARE_SIDE = 5
DUMMY_RESULT = 101


def generate_numbers(limit):
    """Yield the diagonal values of a number spiral, ring by ring, until the
    ring whose side length exceeds *limit* is reached.

    Corners of each ring are `step` apart, where `step` grows by 2 every
    time a perfect-square corner (the ring's last value) is emitted.
    """
    value = 1
    side = 1  # side length of the ring currently being walked
    step = 0  # distance between consecutive corners on this ring
    while side <= limit:
        yield value
        if value == side * side:
            # Finished this ring: the next one is two wider, with corners
            # two further apart.
            side += 2
            step += 2
        value += step
def sum_diagonals(square_side):
    """Return the sum of both diagonals of a square spiral with the given side."""
    total = 0
    for value in generate_numbers(square_side):
        total += value
    return total


assert sum_diagonals(DUMMY_SQUARE_SIDE) == DUMMY_RESULT
result = sum_diagonals(SQUARE_SIDE)
ballet/validation/feature_api/checks.py | HDI-Project/fhub_core | 19 | 12758027 | <filename>ballet/validation/feature_api/checks.py
import io
from copy import deepcopy
import dill as pickle
import numpy as np
from funcy import all, all_fn, isa, iterable
from sklearn.model_selection import train_test_split
from ballet.feature import Feature
from ballet.util import RANDOM_STATE
from ballet.validation.base import BaseCheck
def _get_one_row(*args):
return tuple(
obj.iloc[0:1]
for obj in args
)
class FeatureApiCheck(BaseCheck):
    """Base class for implementing new Feature API checks

    Subclasses implement ``check(feature)``, which raises AssertionError on
    failure, and ``give_advice(feature)``, which returns a human-readable
    hint for fixing the failure.

    Args:
        X_df (array-like): X dataframe
        y_df (array-like): y dataframe
    """

    def __init__(self, X_df, y_df):
        self.X = X_df
        self.y = y_df
class IsFeatureCheck(FeatureApiCheck):
    """Validate that the submitted object is a ballet.Feature instance."""

    def check(self, feature):
        """Check that the object is an instance of ballet.Feature"""
        assert isinstance(feature, Feature)

    def give_advice(self, feature):
        return f'The object needs to be an instance of ballet.Feature, whereas it is actually of type {type(feature).__name__}'  # noqa
class HasCorrectInputTypeCheck(FeatureApiCheck):
    """Validate the type of the feature's `input` attribute."""

    def check(self, feature):
        """Check that the feature's `input` is a str or Iterable[str]"""
        input = feature.input
        is_str = isa(str)
        # "nested" here means an iterable whose elements are all strings.
        is_nested_str = all_fn(
            iterable, lambda x: all(is_str, x))
        assert is_str(input) or is_nested_str(input)

    def give_advice(self, feature):
        return f'The feature\'s input needs to be a string or list of strings, whereas it is actually of type {type(feature.input).__name__}'  # noqa
class HasTransformerInterfaceCheck(FeatureApiCheck):
    """Validate that the feature's transformer exposes the sklearn-style
    fit/transform/fit_transform interface."""

    def check(self, feature):
        """Check that the feature has a fit/transform/fit_tranform interface"""
        assert hasattr(feature.transformer, 'fit')
        assert hasattr(feature.transformer, 'transform')
        assert hasattr(feature.transformer, 'fit_transform')

    def give_advice(self, feature):
        # List exactly the methods that are absent so the contributor knows
        # what remains to be implemented.
        missing = ', '.join(
            attr
            for attr in ('fit', 'transform', 'fit_transform')
            if not hasattr(feature.transformer, attr)
        )
        return f'The feature\'s transformer must have the transformer interface, but these methods are missing and must be implemented: {missing}'  # noqa
class CanMakeMapperCheck(FeatureApiCheck):
    """Validate that the feature can be converted into a feature
    engineering pipeline (FEP)."""

    def check(self, feature):
        """Check that the feature can be converted to a FEP"""
        feature.as_feature_engineering_pipeline()

    def give_advice(self, feature):
        return 'The following method call fails and needs to be fixed: feature.as_feature_engineering_pipeline()'  # noqa
class CanFitCheck(FeatureApiCheck):
    """Validate that the feature's pipeline can be fit on the reference data."""

    def check(self, feature):
        """Check that fit can be called on reference data"""
        mapper = feature.as_feature_engineering_pipeline()
        mapper.fit(self.X, y=self.y)

    def give_advice(self, feature):
        return 'The feature fails when calling fit on sample data'
class CanFitOneRowCheck(FeatureApiCheck):
    """Validate that the feature's pipeline can be fit on a single row."""

    def check(self, feature):
        """Check that fit can be called on one row of reference data"""
        mapper = feature.as_feature_engineering_pipeline()
        x, y = _get_one_row(self.X, self.y)
        mapper.fit(x, y=y)

    def give_advice(self, feature):
        return 'The feature fails when calling fit on a single row of sample data (i.e. 1xn array)'  # noqa
class CanTransformCheck(FeatureApiCheck):

    def check(self, feature):
        """Check that transform can be called on reference data"""
        pipeline = feature.as_feature_engineering_pipeline()
        pipeline.fit(self.X, y=self.y)
        pipeline.transform(self.X)

    def give_advice(self, feature):
        return 'The feature fails when calling transform on sample data'
class CanTransformNewRowsCheck(FeatureApiCheck):

    def check(self, feature):
        """Check that transform can be called on new, unseen rows"""
        pipeline = feature.as_feature_engineering_pipeline()
        # Hold out 10% of the rows; fit on the rest, then transform the
        # held-out portion to simulate previously unseen data.
        X_train, X_new, y_train, _ = train_test_split(
            self.X, self.y, test_size=0.1, random_state=RANDOM_STATE,
            shuffle=True)
        pipeline.fit(X_train, y=y_train)
        pipeline.transform(X_new)

    def give_advice(self, feature):
        return 'The feature fails when calling transform on different data than it was trained on; make sure the transform method works on any number of new rows'  # noqa
class CanTransformOneRowCheck(FeatureApiCheck):

    def check(self, feature):
        """Check that transform can be called on one row of reference data"""
        pipeline = feature.as_feature_engineering_pipeline()
        pipeline.fit(self.X, y=self.y)
        # _get_one_row returns a 1-tuple here; unpack its single element.
        row, = _get_one_row(self.X)
        pipeline.transform(row)

    def give_advice(self, feature):
        return 'The feature fails when calling transform on a single row of sample data (i.e. 1xn array)'  # noqa
class CanFitTransformCheck(FeatureApiCheck):

    def check(self, feature):
        """Check that fit_transform can be called on reference data"""
        pipeline = feature.as_feature_engineering_pipeline()
        pipeline.fit_transform(self.X, y=self.y)

    def give_advice(self, feature):
        return 'The feature fails when calling fit_transform on sample data'
class HasCorrectOutputDimensionsCheck(FeatureApiCheck):

    def check(self, feature):
        """Check that the dimensions of the transformed data are correct
        For input X, an n x p array, a n x q array should be produced,
        where q is the number of feature values produced by the feature.
        """
        pipeline = feature.as_feature_engineering_pipeline()
        values = pipeline.fit_transform(self.X, y=self.y)
        # Row count must be preserved; column count is up to the feature.
        assert self.X.shape[0] == values.shape[0]

    def give_advice(self, feature):
        pipeline = feature.as_feature_engineering_pipeline()
        values = pipeline.fit_transform(self.X, y=self.y)
        n_in = self.X.shape[0]
        n_out = values.shape[0]
        return f'The feature does not produce the correct output dimensions, for example when it is fit and transformed on {n_in} rows of data, it produces {n_out} rows of feature values.'  # noqa
class CanDeepcopyCheck(FeatureApiCheck):

    def check(self, feature):
        """Check that the feature can be deepcopied
        This is needed for execution of the overall transformation pipeline
        """
        # Succeeds iff deepcopy does not raise.
        deepcopy(feature)

    def give_advice(self, feature):
        return 'Calling copy.deepcopy(feature) fails, make sure every component of the feature can be deepcopied'  # noqa
class CanPickleCheck(FeatureApiCheck):

    def check(self, feature):
        """Check that the feature can be pickled
        This is needed for saving the pipeline to disk
        """
        # Use a context manager so the buffer is always closed. The previous
        # try/finally could raise NameError from ``buf.close()`` when
        # ``io.BytesIO()`` itself failed, because ``buf`` was still unbound.
        with io.BytesIO() as buf:
            pickle.dump(feature, buf, protocol=pickle.HIGHEST_PROTOCOL)
            buf.seek(0)
            # Round-trip: the unpickled object must still be a Feature.
            new_feature = pickle.load(buf)
            assert new_feature is not None
            assert isinstance(new_feature, Feature)

    def give_advice(self, feature):
        return 'Calling pickle.dump(feature, buf) fails, make sure the feature can be pickled'  # noqa
class NoMissingValuesCheck(FeatureApiCheck):

    def check(self, feature):
        """Check that the output of the transformer has no missing values"""
        pipeline = feature.as_feature_engineering_pipeline()
        values = pipeline.fit_transform(self.X, y=self.y)
        assert not np.isnan(values).any()

    def give_advice(self, feature):
        return 'When transforming sample data, the feature produces NaN values. If you reasonably expect these missing values, make sure you clean missing values as an additional step in your transformer list. For example: NullFiller(replacement=replacement)'  # noqa
class NoInfiniteValuesCheck(FeatureApiCheck):

    def check(self, feature):
        """Check that the output of the transformer has no non-finite values"""
        pipeline = feature.as_feature_engineering_pipeline()
        values = pipeline.fit_transform(self.X, y=self.y)
        assert not np.isinf(values).any()

    def give_advice(self, feature):
        return 'When transforming sample data, the feature produces infinite values. You can detect these with np.isinf. If you reasonably expect these infinite values, make sure you clean infinite values as an additional step in your transformer list. For example: NullFiller(np.isinf, replacement) '  # noqa
| 2.6875 | 3 |
main.py | zhang0227/RVM-StreamlitApp | 0 | 12758028 | <filename>main.py
from re import I
import streamlit as st
import os
import argparse
import torch
import shutil
def add_model_args(parser):
    """Register all RVM model / inference options on *parser* and return it."""
    add = parser.add_argument
    # Model selection and weights.
    add('--variant', type=str, default="mobilenetv3",
        choices=['mobilenetv3', 'resnet50'])
    add('--checkpoint', type=str,
        default="rvm_mobilenetv3.pth")
    # Input / output paths and formats.
    add('--input-resize', type=int, default=None, nargs=2)
    add('--output-composition', type=str,
        default="composition.mp4")
    add('--output-alpha', type=str, default="alpha.mp4")
    add('--output-foreground', type=str,
        default="foreground.mp4")
    add('--output-type', type=str,
        default="video", choices=['video', 'png_sequence'])
    add('--output-video-mbps', type=int, default=4)
    # Runtime behaviour.
    add('--seq-chunk', type=int, default=1)
    add('--num-workers', type=int, default=0)
    add('--disable-progress', action='store_true')
    add('--device', type=str)
    add('--downsample-ratio', type=float)
    add('--input-source', type=str)
    return parser
# Streamlit entry point: build the UI, accept an uploaded video, run the
# Robust Video Matting conversion, and show/store the three output videos.
# (UI labels are Chinese: "选择输入" = choose input, "上传视频" = upload video,
# "资源加载中..." = loading resources, "开始转换" = start conversion.)
if __name__ == '__main__':
    st.title('RVM Streamlit App')
    parser = argparse.ArgumentParser()
    add_model_args(parser)
    opt = parser.parse_args()
    print(opt)
    # check device
    if torch.cuda.is_available():
        opt.device = "cuda"
    else:
        opt.device = "cpu"
    source = ("HD", "4K")
    source_index = st.sidebar.selectbox("选择输入", range(
        len(source)), format_func=lambda x: source[x])
    uploaded_file = st.sidebar.file_uploader("上传视频", type=['mp4'])
    if uploaded_file is not None:
        is_valid = True
        with st.spinner(text='资源加载中...'):
            st.sidebar.video(uploaded_file)
            # Persist the upload under data/videos/ so inference can read it.
            with open(os.path.join("data", "videos", uploaded_file.name), "wb") as f:
                f.write(uploaded_file.getbuffer())
            opt.input_source = f'data/videos/{uploaded_file.name}'
            print(uploaded_file)
        # Recommended downsample ratios: 0.25 for HD input, 0.125 for 4K.
        if source_index == 0:
            opt.downsample_ratio = 0.25
        else:
            opt.downsample_ratio = 0.125
    else:
        is_valid = False
    if is_valid:
        print('valid')
        output_path = os.path.join("result",uploaded_file.name.split(".")[0])
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        if st.button('开始转换'):
            from inference import convert
            convert(opt)
            st.text("composition video:")
            st.video(str("composition.mp4"))
            st.text("alpha video:")
            st.video(str("alpha.mp4"))
            st.text("foreground video:")
            st.video(str("foreground.mp4"))
            st.balloons()
            # Archive the generated videos into the per-upload result folder.
            shutil.move("composition.mp4", os.path.join(output_path))
            shutil.move("alpha.mp4", os.path.join(output_path))
            shutil.move("foreground.mp4", os.path.join(output_path))
| 2.421875 | 2 |
examples/phaseplots.py | Robomate/python-control | 0 | 12758029 | # phaseplots.py - examples of phase portraits
# RMM, 24 July 2011
#
# This file contains examples of phase portraits pulled from "Feedback
# Systems" by <NAME> Murray (Princeton University Press, 2008).
import numpy as np
import matplotlib.pyplot as mpl
from control.phaseplot import phase_plot
from numpy import pi
# Clear out any figures that are present
mpl.close('all')  # start from a clean slate so figure numbering is predictable
#
# Inverted pendulum
#
# Define the ODEs for a damped (inverted) pendulum
def invpend_ode(x, t, m=1., l=1., b=0.2, g=1):
    """Damped (inverted) pendulum dynamics; x = (angle, angular rate)."""
    theta, omega = x
    return (omega, -b/m*omega + (g*l/m) * np.sin(theta))
# Set up the figure the way we want it to look
# NOTE(review): "pendlum" typo is in the displayed title string.
# NOTE(review): mpl.hold() was removed in matplotlib >= 3.0 -- this script
# appears to target an older matplotlib; confirm before upgrading.
mpl.figure(); mpl.clf();
mpl.axis([-2*pi, 2*pi, -2.1, 2.1]);
mpl.title('Inverted pendlum')
# Outer trajectories
phase_plot(invpend_ode,
           X0 = [ [-2*pi, 1.6], [-2*pi, 0.5], [-1.8, 2.1],
                  [-1, 2.1], [4.2, 2.1], [5, 2.1],
                  [2*pi, -1.6], [2*pi, -0.5], [1.8, -2.1],
                  [1, -2.1], [-4.2, -2.1], [-5, -2.1] ],
           T = np.linspace(0, 40, 200),
           logtime = (3, 0.7) )
# Separatrices
mpl.hold(True);
phase_plot(invpend_ode, X0 = [[-2.3056, 2.1], [2.3056, -2.1]], T=6, lingrid=0)
mpl.show();
#
# Systems of ODEs: damped oscillator example (simulation + phase portrait)
#
def oscillator_ode(x, t, m=1., b=1, k=1):
    """Damped harmonic oscillator dynamics; x = (position, velocity)."""
    pos, vel = x
    return (vel, -k/m*pos - b/m*vel)
# Generate a vector plot for the damped oscillator
# NOTE(review): mpl.hold() was removed in matplotlib >= 3.0; see note above.
mpl.figure(); mpl.clf();
phase_plot(oscillator_ode, [-1, 1, 10], [-1, 1, 10], 0.15);
mpl.hold(True); mpl.plot([0], [0], '.');
# a=gca; set(a,'FontSize',20); set(a,'DataAspectRatio',[1,1,1]);
mpl.xlabel('x1'); mpl.ylabel('x2');
# Generate a phase plot for the damped oscillator
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1, 1, 1]);
phase_plot(oscillator_ode,
           X0 = [
               [-1, 1], [-0.3, 1], [0, 1], [0.25, 1], [0.5, 1], [0.75, 1], [1, 1],
               [1, -1], [0.3, -1], [0, -1], [-0.25, -1], [-0.5, -1], [-0.75, -1], [-1, -1]
           ], T = np.linspace(0, 8, 80), timepts = [0.25, 0.8, 2, 3])
mpl.hold(True); mpl.plot([0], [0], 'k.'); # 'MarkerSize', AM_data_markersize*3);
# set(gca,'DataAspectRatio',[1,1,1]);
mpl.xlabel('x1'); mpl.ylabel('x2');
mpl.show()
#
# Stability definitions
#
# This set of plots illustrates the various types of equilibrium points.
#
# Saddle point vector field
def saddle_ode(x, t):
    """Linear saddle-point vector field used for the stability figures."""
    x1, x2 = x
    return (x1 - 3*x2, -3*x1 + x2);
# Asy stable
# Asymptotically stable equilibrium: damped oscillator with default gains.
m = 1; b = 1; k = 1; # default values
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1 1 1]);
phase_plot(oscillator_ode,
    X0 = [
        [-1,1], [-0.3,1], [0,1], [0.25,1], [0.5,1], [0.7,1], [1,1], [1.3,1],
        [1,-1], [0.3,-1], [0,-1], [-0.25,-1], [-0.5,-1], [-0.7,-1], [-1,-1],
        [-1.3,-1]
    ], T = np.linspace(0, 10, 100),
    timepts = [0.3, 1, 2, 3], parms = (m, b, k));
mpl.hold(True); mpl.plot([0], [0], 'k.'); # 'MarkerSize', AM_data_markersize*3);
# set(gca,'FontSize', 16);
mpl.xlabel('{\itx}_1'); mpl.ylabel('{\itx}_2');
# Saddle
# Saddle equilibrium: trajectories approach then diverge along eigendirections.
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1 1 1]);
phase_plot(saddle_ode, scale = 2, timepts = [0.2, 0.5, 0.8], X0 =
    [ [-1, -1], [1, 1],
      [-1, -0.95], [-1, -0.9], [-1, -0.8], [-1, -0.6], [-1, -0.4], [-1, -0.2],
      [-0.95, -1], [-0.9, -1], [-0.8, -1], [-0.6, -1], [-0.4, -1], [-0.2, -1],
      [1, 0.95], [1, 0.9], [1, 0.8], [1, 0.6], [1, 0.4], [1, 0.2],
      [0.95, 1], [0.9, 1], [0.8, 1], [0.6, 1], [0.4, 1], [0.2, 1],
      [-0.5, -0.45], [-0.45, -0.5], [0.5, 0.45], [0.45, 0.5],
      [-0.04, 0.04], [0.04, -0.04] ], T = np.linspace(0, 2, 20));
mpl.hold(True); mpl.plot([0], [0], 'k.'); # 'MarkerSize', AM_data_markersize*3);
# set(gca,'FontSize', 16);
mpl.xlabel('{\itx}_1'); mpl.ylabel('{\itx}_2');
# Stable isL
# Stable (but not asymptotically stable) equilibrium: undamped oscillator.
m = 1; b = 0; k = 1; # zero damping
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1 1 1]);
phase_plot(oscillator_ode, timepts =
    [pi/6, pi/3, pi/2, 2*pi/3, 5*pi/6, pi, 7*pi/6, 4*pi/3, 9*pi/6, 5*pi/3, 11*pi/6, 2*pi],
    X0 = [ [0.2,0], [0.4,0], [0.6,0], [0.8,0], [1,0], [1.2,0], [1.4,0] ],
    T = np.linspace(0, 20, 200), parms = (m, b, k));
mpl.hold(True); mpl.plot([0], [0], 'k.') # 'MarkerSize', AM_data_markersize*3);
# set(gca,'FontSize', 16);
mpl.xlabel('{\itx}_1'); mpl.ylabel('{\itx}_2');
mpl.show()
| 3.140625 | 3 |
nets.py | AntonioRochaAZ/CNN-Project | 1 | 12758030 | <gh_stars>1-10
from main import *
import torch.nn as nn
class ConvNetBase(NetBase):
    """Base Class for the Convolutional Networks proposed in the HASYv2 article.
    This defines a general forward method that is the same for all of them.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Subclasses assign the actual convolutional / linear stacks.
        self.conv_net = None
        self.lin_net = None
        self.is_classifier = True

    def forward(self, inputs):
        # Run the convolutional stack, flatten per sample, then classify.
        features = self.conv_net(inputs)
        flat = features.view(inputs.shape[0], -1)
        return self.lin_net(flat)
class TwoLayer(ConvNetBase):
    """Two Layer Convolutional Neural Network"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.conv_net = nn.Sequential(
            nn.Conv2d(1, 32, (3, 3)),
            nn.MaxPool2d((2, 2), (2, 2)),
        )
        # 32 channels of 15x15 feature maps (consistent with 32x32 grayscale
        # inputs -- confirm against the dataset loader), mapped to 369 classes.
        self.lin_net = nn.Sequential(
            nn.Linear(32 * 15 * 15, 369),
            nn.Softmax(dim=1),
        )
class ThreeLayer(ConvNetBase):
    """Three Layer Convolutional Neural Network"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Two conv+pool stages: 1 -> 32 -> 64 channels.
        self.conv_net = nn.Sequential(
            nn.Conv2d(1, 32, (3, 3)),
            nn.MaxPool2d((2, 2), (2, 2)),
            nn.Conv2d(32, 64, (3, 3)),
            nn.MaxPool2d((2, 2), (2, 2)),
        )
        # 64 channels of 6x6 maps flattened into the 369-way classifier.
        self.lin_net = nn.Sequential(
            nn.Linear(64 * 6 * 6, 369),
            nn.Softmax(dim=1),
        )
class FourLayer(ConvNetBase):
    """Four Layer Convolutional Neural Network"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Three conv+pool stages: 1 -> 32 -> 64 -> 128 channels.
        self.conv_net = nn.Sequential(
            nn.Conv2d(1, 32, (3, 3)),
            nn.MaxPool2d((2, 2), (2, 2)),
            nn.Conv2d(32, 64, (3, 3)),
            nn.MaxPool2d((2, 2), (2, 2)),
            nn.Conv2d(64, 128, (3, 3)),
            nn.MaxPool2d((2, 2), (2, 2)),
        )
        # 128 channels of 2x2 maps flattened into the 369-way classifier.
        self.lin_net = nn.Sequential(
            nn.Linear(128 * 2 * 2, 369),
            nn.Softmax(dim=1),
        )
class TFCNN(ConvNetBase):
    """TF-CNN"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Same conv trunk as ThreeLayer: 1 -> 32 -> 64 channels.
        self.conv_net = nn.Sequential(
            nn.Conv2d(1, 32, (3, 3)),
            nn.MaxPool2d((2, 2), (2, 2)),
            nn.Conv2d(32, 64, (3, 3)),
            nn.MaxPool2d((2, 2), (2, 2)),
        )
        # Wider head: tanh hidden layer with dropout before the classifier.
        self.lin_net = nn.Sequential(
            nn.Linear(64 * 6 * 6, 1024),
            nn.Tanh(),
            nn.Dropout(0.5),
            nn.Linear(1024, 369),
            nn.Softmax(dim=1),
        )
| 3.203125 | 3 |
usajobs/__init__.py | chdean/usajobs | 2 | 12758031 | <filename>usajobs/__init__.py
import requests
import namedtupled
# api v3: http://search.digitalgov.gov/developer/jobs.html
def format_search(terms):
    """Build the v3 fuzzy-search URL for a whitespace-separated query string."""
    # Drop empty tokens produced by repeated/leading/trailing spaces.
    tokens = [token for token in terms.split(' ') if token]
    return 'https://api.usa.gov/jobs/search.json?' + 'query=' + '+'.join(tokens)
def search(terms, start=0, step=100, as_dict=False):
    """ Constructs v3 fuzzy searches with parameters outlined here:
    http://search.digitalgov.gov/developer/jobs.html """
    base_url = format_search(terms) + '&size=' + str(step)
    data = requests.get(base_url).json()
    # A full first page suggests more results; page until an empty page.
    if len(data) == step:
        page = data
        while page != []:
            start += step
            page = requests.get(base_url + '&from=' + str(start)).json()
            data += page
    if not as_dict:
        data = namedtupled.map(data)
    return data
# api v2: https://developer.usajobs.gov/Search-API/Overview
def connect(email, apikey):
    """ Forms the header parameters for authenticating with the v2 api. """
    return {
        'Host': 'data.usajobs.gov',
        'User-Agent': email,
        'Authorization-Key': apikey,
    }
def query(terms, auth=None, as_dict=True):
    """ Constructs v2 explicit queries with parameters outlined here:
    https://developer.usajobs.gov/Search-API/API-Query-Parameters

    :param terms: query-string fragment appended to the v2 search endpoint
    :param auth: header dict from connect(); when None, credentials are read
        from the 'email'/'apikey' environment variables
    :param as_dict: return the raw JSON dict instead of named tuples
    """
    base_url = 'https://data.usajobs.gov/api/search?'
    url = base_url + terms
    if not auth:
        # BUG FIX: previously a caller-supplied ``auth`` was silently ignored
        # and ``headers`` was left unbound, so any call with auth raised
        # NameError. Build headers from the environment only as a fallback.
        env = namedtupled.env(['email', 'apikey'])
        auth = connect(email=env.email, apikey=env.apikey)
    resp = requests.get(url, headers=auth)
    data = resp.json()
    if not as_dict:
        data = namedtupled.map(data)
    return data
| 2.828125 | 3 |
filesystem_tools.py | franciscoGPS/youtdl | 0 | 12758032 | import os.path as path
import csv
class Filesystem(object):
    """Small helper for reading/writing the song-id and lyric files."""

    def __init__(self, filename, open_mode):
        self.open_mode = open_mode
        self.file = filename

    def _default_path(self):
        # Resolve the target file next to this module, with a .txt suffix.
        module_dir = path.dirname(path.abspath(__file__))
        return path.join(module_dir, self.file + ".txt")

    def save_default(self, txt, EOL=""):
        # Append/write one line (txt + optional EOL marker) to the default file.
        with open(self._default_path(), self.open_mode) as handle:
            handle.write(txt + EOL + "\n")

    def print_path(self):
        print(self._default_path())

    def file_to_list(self):
        # Each element is the result of splitting one raw line on '\n'.
        with open(self._default_path(), self.open_mode) as handle:
            return [line.split('\n') for line in handle.readlines()]

    def make_ids_table(self, file_name, array):
        # NOTE: ``file_name`` is accepted for interface compatibility but,
        # as in the original implementation, the instance's own filename
        # (self.file + ".txt") is what actually gets written.
        with open(self._default_path(), self.open_mode) as handle:
            csv.writer(handle, delimiter=',').writerows(array)

    def get_songs_list(self):
        # Read self.file (no extension appended here) as CSV rows.
        songs = list()
        with open(self.file, self.open_mode) as my_csv:
            for row in csv.reader(my_csv):
                songs.append(row)
        return songs

    def read_csv_file_to_list(self, file_path, file_name, open_mode):
        # Column-oriented result; youtube_id is filled in later elsewhere.
        song_lyrics_list = {'mood': [], 'title': [], 'artist': [],
                            'lyric': [], 'youtube_id': []}
        full_name = path.join(file_path, file_name + ".csv")
        with open(full_name, open_mode) as my_csv:
            # Row layout: [0] unused id, [1] artist, [2] lyrics,
            # [3] mood, [4] title
            for row in csv.reader(my_csv):
                song_lyrics_list['mood'].append(str(row[3].strip()))
                song_lyrics_list['title'].append(str(row[4].strip()))
                song_lyrics_list['artist'].append(str(row[1].strip()))
                song_lyrics_list['lyric'].append(row[2].strip())
                song_lyrics_list['youtube_id'].append("")
        return song_lyrics_list
| 3.375 | 3 |
platform/play.py | JS00000/trading_simulation | 1 | 12758033 | <filename>platform/play.py<gh_stars>1-10
# coding=utf-8
import numpy as np
from scipy import stats
from user import User
from trading_system import Tradengine
from relationship import Relationship
from helper.tools import calc_mssk
from helper.args_parser import play_parser
from helper.data_ploter import plot_rank, plot_distribution, plot_imitate_market
def main(args):
    """Run the agent-based market simulation for ``args.steps`` rounds.

    Each round: generate a random news value, spread it through the social
    network, collect orders, run the call auction, settle trades, and record
    per-user assets. Plots are produced at the end.
    """
    codes = args.codes
    start = args.start
    end = args.end
    market = args.market
    steps = args.steps
    user_number = args.user_number
    init_fund = args.init_fund
    transfer_deep = args.transfer_deep
    T = Tradengine(codes, start, end, **{
        "market": market,
        "seq_length": 5
    })
    U = []
    for i in range(user_number):
        # same strategy for every user
        U.append(User(i, codes, init_fund, T.get_ori_data(), 0))
    R = Relationship(user_number, 10)
    degree = R.net.degree()
    his_close = []
    # NOTE(review): his_p_change is collected but never used after the loop.
    his_p_change = []
    # np.ndarray allocates uninitialized storage; every slot is overwritten
    # inside the step loop before being read.
    assets = np.ndarray(user_number)
    ass_deg = np.ndarray(user_number, dtype=[('assets', float), ('degree', int)])
    for step in range(steps):
        order_num = 0
        # generate random news
        info_value = np.random.normal(0, 1)
        # pass the news to user
        for i in range(user_number):
            if np.random.random() < U[i].get_receive_rate():
                U[i].update_info(step, info_value)
        # transfer the news along network for transfer_deep times
        # NOTE(review): the loop variable ``to`` is first a neighbor dict and
        # then rebound to each neighbor id by the inner loop -- confusing but
        # functional; consider renaming.
        for __ in range(transfer_deep):
            for i, to in R.net.adj.items():
                info_h = U[i].get_info()
                for to, _eattr in to.items():
                    if np.random.random() < U[i].get_transfer_rate():
                        for k in info_h:
                            U[to].update_info(k, info_h[k])
        # decide and add order (action_id == 2 means "hold"; zero volume is a no-op)
        for i in range(user_number):
            d = U[i].decide(T.get_new_data(), T.get_scare_para())
            if not (d['action_id'] == 2 or d['volume'] == 0):
                T.add_order(d['code'], d['price'], d['action_id'], d['volume'], i)
                order_num += 1
        # call auction
        T.auction()
        # conclude transactions: it = (user, price, volume, action_code, fee?)
        r = T.get_result()
        for it in r:
            U[it[0]].action_by_code(it[3])(it[1], it[2], it[4])
        last_data = T.get_last_data()
        # update everyone's assets
        for i in range(user_number):
            U[i].status_update(last_data)
        # results
        for i in range(user_number):
            assets[i] = U[i].assets
            ass_deg[i] = (U[i].assets, degree[i])
        his_close.append(last_data[0]['close'])
        his_p_change.append(last_data[0]['p_change'])
        # if (step+1) % 25 == 0:
        #     plot_distribution(assets)
        #     plot_imitate_market(his_close)
        print("step: ", step, last_data[0]['close'], last_data[0]['p_change'])
    plot_rank(ass_deg)
    plot_distribution(assets)
    plot_imitate_market(his_close)
# Script entry point.
if __name__ == '__main__':
    main(play_parser.parse_args())
| 2.46875 | 2 |
diventi/products/migrations/0014_auto_20190120_1049.py | flavoi/diven | 2 | 12758034 | # Generated by Django 2.0.8 on 2019-01-20 09:49
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.8: adds the translated ``abstract`` fields
    # (base + en/it via django-modeltranslation-style columns) to Product.
    # Avoid hand-editing generated migrations beyond comments.
    dependencies = [
        ('products', '0013_auto_20180902_1533'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='abstract',
            field=models.TextField(blank=True, max_length=200, verbose_name='abstract'),
        ),
        migrations.AddField(
            model_name='product',
            name='abstract_en',
            field=models.TextField(blank=True, max_length=200, null=True, verbose_name='abstract'),
        ),
        migrations.AddField(
            model_name='product',
            name='abstract_it',
            field=models.TextField(blank=True, max_length=200, null=True, verbose_name='abstract'),
        ),
    ]
| 1.570313 | 2 |
turing/settings/__init__.py | kwierman/turing | 0 | 12758035 | import logging
# Default; a machine-specific settings/local.py may override this below.
DEBUG=True
try:
    # Optional local overrides; absence is expected in default deployments.
    from .local import *
except ImportError:
    logging.warning("No local settings defined. Using Defaults")
| 1.359375 | 1 |
ket/__main__.py | quantum-ket/ket | 3 | 12758036 | # Copyright 2020, 2021 <NAME> <<EMAIL>>
# Copyright 2020, 2021 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .import_ket import _import_globals_ket
from . import *
from .ket import label, branch, jump, build_info
from .preprocessor import *
def __ket__():
    """CLI entry point: parse interpreter options, configure the Ket runtime,
    and execute the given .ket source as if it were __main__."""
    # Local imports keep the module import side-effect free.
    import argparse
    from os import path, getcwd
    parser = argparse.ArgumentParser(prog='ket', description='Ket interpreter')
    parser.add_argument('--version', action='version', version=f'Ket {build_info()}')
    parser.add_argument('-o', '--out', help='KQASM output file', type=str)
    parser.add_argument('-s', '--kbw',  help='quantum execution (KBW) IP address', type=str, default='127.0.0.1')
    parser.add_argument('-u', '--user', help='quantum execution (KBW) SSH user', type=str)
    parser.add_argument('-p', '--port', help='quantum execution (KBW) port', type=str, default='4242')
    parser.add_argument('-P', '--ssh-port', help='quantum execution (KBW) SSH port', type=str, default='22')
    parser.add_argument('--seed', help='set RNG seed for quantum execution', type=int)
    parser.add_argument('--api-args', help='additional parameters for quantum execution', type=str)
    parser.add_argument('--no-execute', help='does not execute KQASM, measurements return 0', action='store_false')
    parser.add_argument('--dump2fs',  help='use the filesystem to transfer dump data', action='store_true')
    parser.add_argument('input', metavar='.ket', help='source code', type=str)
    args = parser.parse_args()
    # Mandatory runtime configuration; note --no-execute stores False into
    # "execute" (store_false), so the flag disables execution.
    ket_args = {
        "server" : args.kbw,
        "port" : args.port,
        "execute" : args.no_execute,
        "dump2fs" : args.dump2fs,
        "ssh_port" : args.ssh_port,
    }
    # Optional settings are only forwarded when provided.
    if args.user:
        ket_args["user"] = args.user
    if args.out:
        ket_args["kqasm"] = args.out
    if args.seed:
        ket_args["seed"] = args.seed
    if args.api_args:
        ket_args["api-args"] = args.api_args
    ket_config(**ket_args)
    # Impersonate __main__ so the executed .ket script behaves like a program.
    globals()['__name__'] = '__main__'
    globals()['__in_ket__'] = True
    source = path.join(getcwd(), args.input)
    _import_globals_ket(source, globals())
# Script entry point.
if __name__ == '__main__':
    __ket__()
| 2.09375 | 2 |
scale/scheduler/cleanup/node.py | kaydoh/scale | 121 | 12758037 | <gh_stars>100-1000
"""Defines the class that handles a node's cleanup"""
from __future__ import unicode_literals
import logging
from job.execution.tasks.cleanup_task import CleanupTask
from scheduler.manager import scheduler_mgr
# Log a warning when a node accumulates more than this many pending cleanups.
JOB_EXES_WARNING_THRESHOLD = 100
# Upper bound on job executions handed to a single cleanup task.
MAX_JOB_EXES_PER_CLEANUP = 25
logger = logging.getLogger(__name__)
class NodeCleanup(object):
    """This class manages all of the cleanup for a node."""

    def __init__(self):
        """Constructor
        """
        # Maps job execution ID to its RunningJobExecution
        self._job_exes = {}

    def add_job_execution(self, job_exe):
        """Adds a job execution that needs to be cleaned up

        :param job_exe: The job execution to add
        :type job_exe: :class:`job.execution.job_exe.RunningJobExecution`
        """
        self._job_exes[job_exe.id] = job_exe

    def delete_job_executions(self, job_exes):
        """Deletes the given job executions since they have been cleaned up

        :param job_exes: The job executions to delete
        :type job_exes: [:class:`job.execution.job_exe.RunningJobExecution`]
        """
        for finished_exe in job_exes:
            # Ignore executions that were never registered (or already removed)
            self._job_exes.pop(finished_exe.id, None)

    def create_next_task(self, agent_id, hostname, is_initial_cleanup_completed):
        """Creates and returns the next cleanup task that needs to be run, possibly None

        :param agent_id: The node's agent ID
        :type agent_id: string
        :param hostname: The node's hostname
        :type hostname: string
        :param is_initial_cleanup_completed: Indicates if node's initial cleanup is completed
        :type is_initial_cleanup_completed: bool
        :returns: The next cleanup task, possibly None
        :rtype: :class:`job.tasks.base_task.Task`
        """
        pending = list(self._job_exes.values())
        if len(pending) > JOB_EXES_WARNING_THRESHOLD:
            logger.warning('Node %s has %d job executions waiting to be cleaned up', hostname, len(pending))

        if is_initial_cleanup_completed:
            if not pending:
                # No job executions to clean, so no task
                return None
            cleanup_job_exes = pending[:MAX_JOB_EXES_PER_CLEANUP]
        else:
            # Initial cleanup runs with an empty job execution list
            cleanup_job_exes = []
        return CleanupTask(scheduler_mgr.framework_id, agent_id, cleanup_job_exes)

    def get_num_job_exes(self):
        """Returns the number of job executions waiting to be cleaned up

        :returns: The number of job executions waiting to be cleaned up
        :rtype: int
        """
        return len(self._job_exes)
| 2.828125 | 3 |
myproject/prod_settings.py | stefanbschneider/django-hello-world | 3 | 12758038 | <filename>myproject/prod_settings.py
""" Production Settings """
# default: use settings from main settings.py if not overwritten
from .settings import *
import django_heroku
DEBUG = False
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', SECRET_KEY)
ALLOWED_HOSTS = ['django-hello-world-app.herokuapp.com']
# Activate Django-Heroku.
django_heroku.settings(locals())
| 1.695313 | 2 |
lib/python3.8/site-packages/ansible_collections/openstack/cloud/plugins/modules/os_keystone_role.py | cjsteel/python3-venv-ansible-2.10.5 | 0 | 12758039 | <gh_stars>0
#!/usr/bin/python
# Copyright (c) 2016 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = '''
---
module: identity_role
short_description: Manage OpenStack Identity Roles
author: OpenStack Ansible SIG
description:
- Manage OpenStack Identity Roles.
options:
name:
description:
- Role Name
required: true
type: str
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
requirements:
- "python >= 3.6"
- "openstacksdk"
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
# Create a role named "demo"
- openstack.cloud.identity_role:
cloud: mycloud
state: present
name: demo
# Delete the role named "demo"
- openstack.cloud.identity_role:
cloud: mycloud
state: absent
name: demo
'''
RETURN = '''
role:
description: Dictionary describing the role.
returned: On success when I(state) is 'present'.
type: complex
contains:
id:
description: Unique role ID.
type: str
sample: "677bfab34c844a01b88a217aa12ec4c2"
name:
description: Role name.
type: str
sample: "demo"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (openstack_full_argument_spec,
openstack_module_kwargs,
openstack_cloud_from_module)
def _system_state_change(state, role):
if state == 'present' and not role:
return True
if state == 'absent' and role:
return True
return False
def main():
    """Ansible module entry point: ensure the named Keystone role is
    present or absent, honoring check mode."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    name = module.params.get('name')
    state = module.params.get('state')

    sdk, cloud = openstack_cloud_from_module(module)
    try:
        role = cloud.get_role(name)

        # In check mode, only report whether a change would occur.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(state, role))

        if state == 'present':
            if role is None:
                role = cloud.create_role(name)
                changed = True
            else:
                changed = False
            module.exit_json(changed=changed, role=role)
        elif state == 'absent':
            if role is None:
                changed = False
            else:
                cloud.delete_role(name)
                changed = True
            module.exit_json(changed=changed)

    except sdk.exceptions.OpenStackCloudException as e:
        # Report any cloud-side failure through Ansible's failure channel.
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| 2.125 | 2 |
samples/snippets/simple_app.py | KoffieLabs/python-bigquery | 1 | 12758040 | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple application that performs a query with BigQuery."""
# [START bigquery_simple_app_all]
# [START bigquery_simple_app_deps]
from google.cloud import bigquery
# [END bigquery_simple_app_deps]
def query_stackoverflow() -> None:
    """Print the 10 most-viewed google-bigquery questions on Stack Overflow.

    The [START]/[END] region tags are consumed by the docs pipeline -- keep
    them intact.
    """
    # [START bigquery_simple_app_client]
    client = bigquery.Client()
    # [END bigquery_simple_app_client]
    # [START bigquery_simple_app_query]
    query_job = client.query(
        """
        SELECT
          CONCAT(
            'https://stackoverflow.com/questions/',
            CAST(id as STRING)) as url,
          view_count
        FROM `bigquery-public-data.stackoverflow.posts_questions`
        WHERE tags like '%google-bigquery%'
        ORDER BY view_count DESC
        LIMIT 10"""
    )

    results = query_job.result()  # Waits for job to complete.
    # [END bigquery_simple_app_query]

    # [START bigquery_simple_app_print]
    for row in results:
        print("{} : {} views".format(row.url, row.view_count))
    # [END bigquery_simple_app_print]


# Script entry point.
if __name__ == "__main__":
    query_stackoverflow()
# [END bigquery_simple_app_all]
| 2.515625 | 3 |
cohesity_management_sdk/models/restore_sql_app_object_params.py | nick6655/management-sdk-python | 18 | 12758041 | <filename>cohesity_management_sdk/models/restore_sql_app_object_params.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.sql_update_restore_task_options
import cohesity_management_sdk.models.files_to_directory_mapping
class RestoreSqlAppObjectParams(object):

    """Implementation of the 'RestoreSqlAppObjectParams' model.

    TODO: type model description here.

    Attributes:
        capture_tail_logs (bool): Set to true if tail logs are to be captured
            before the restore operation. This is only applicable if we are
            restoring the SQL database to its original source, and the
            database is not being renamed.
        continue_after_error (bool): Whether restore should continue after
            encountering a page checksum error.
        data_file_destination (string): Which directory to put the database
            data files. Missing directory will be automatically created.
            Cannot be empty if not restoring to the original SQL instance.
        db_restore_overwrite_policy (int): Policy to overwrite an existing DB
            during a restore operation.
        enable_checksum (bool): Whether restore checksums are enabled.
        instance_name (string): The name of the SQL instance that we restore
            database to. If target_host is not empty, this also cannot be
            empty.
        is_auto_sync_enabled (bool): The following field is set if auto_sync
            for multi-stage SQL restore task is enabled. This field is valid
            only if is_multi_state_restore is set to true.
        is_multi_stage_restore (bool): The following field is set if we are
            creating a multi-stage SQL restore task needed for features such
            as Hot-Standby.
        keep_cdc (bool): Set to true to keep cdc on restored database.
        log_file_destination (string): Which directory to put the database log
            files. Missing directory will be automatically created. Cannot be
            empty if not restoring to the original SQL instance.
        multi_stage_restore_options (SqlUpdateRestoreTaskOptions): TODO: type
            description here.
        new_database_name (string): The new name of the database, if it is
            going to be renamed. app_entity in RestoreAppObject has to be
            non-empty for the renaming, otherwise it does not make sense to
            rename all databases in the owner.
        restore_time_secs (long|int): The time to which the SQL database needs
            to be restored. This allows for granular recovery of SQL
            databases. If this is not set, the SQL database will be recovered
            to the full/incremental snapshot (specified in the owner's restore
            object in AppOwnerRestoreInfo).
        resume_restore (bool): Resume restore if sql instance/database exist
            in restore/recovering state. The database might be in
            restore/recovering state if previous restore failed or previous
            restore was attempted with norecovery option.
        secondary_data_file_destination (string): Which directory to put the
            secondary data files of the database. Secondary data files are
            optional and are user defined. The recommended file name extension
            for these is ".ndf". If this option is specified, the directory
            will be automatically created if its missing.
        secondary_data_file_destination_vec (list of FilesToDirectoryMapping):
            Specify the secondary data files and corresponding direcories of
            the DB. Secondary data files are optional and are user defined.
            The recommended file extension for secondary files is ".ndf". If
            this option is specified and the destination folders do not exist
            they will be automatically created.
        with_clause (string): 'with_clause' contains 'with clause' to be used
            in native sql restore command. This is only applicable for db
            restore of native sql backup. Here user can specify multiple
            restore options. Example: "WITH BUFFERCOUNT = 575, MAXTRANSFERSIZE
            = 2097152". If this is not specified, we use the value specified
            in magneto_sql_native_restore_with_clause gflag.
        with_no_recovery (bool): Set to true if we want to recover the
            database in "NO_RECOVERY" mode which does not bring it online
            after restore.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "capture_tail_logs":'captureTailLogs',
        "continue_after_error":'continueAfterError',
        "data_file_destination":'dataFileDestination',
        "db_restore_overwrite_policy":'dbRestoreOverwritePolicy',
        "enable_checksum":'enableChecksum',
        "instance_name":'instanceName',
        "is_auto_sync_enabled":'isAutoSyncEnabled',
        "is_multi_stage_restore":'isMultiStageRestore',
        "keep_cdc":'keepCdc',
        "log_file_destination":'logFileDestination',
        "multi_stage_restore_options":'multiStageRestoreOptions',
        "new_database_name":'newDatabaseName',
        "restore_time_secs":'restoreTimeSecs',
        "resume_restore":'resumeRestore',
        "secondary_data_file_destination":'secondaryDataFileDestination',
        "secondary_data_file_destination_vec":'secondaryDataFileDestinationVec',
        "with_clause":'withClause',
        "with_no_recovery":'withNoRecovery'
    }

    def __init__(self,
                 capture_tail_logs=None,
                 continue_after_error=None,
                 data_file_destination=None,
                 db_restore_overwrite_policy=None,
                 enable_checksum=None,
                 instance_name=None,
                 is_auto_sync_enabled=None,
                 is_multi_stage_restore=None,
                 keep_cdc=None,
                 log_file_destination=None,
                 multi_stage_restore_options=None,
                 new_database_name=None,
                 restore_time_secs=None,
                 resume_restore=None,
                 secondary_data_file_destination=None,
                 secondary_data_file_destination_vec=None,
                 with_clause=None,
                 with_no_recovery=None):
        """Constructor for the RestoreSqlAppObjectParams class"""

        # Initialize members of the class
        self.capture_tail_logs = capture_tail_logs
        self.continue_after_error = continue_after_error
        self.data_file_destination = data_file_destination
        self.db_restore_overwrite_policy = db_restore_overwrite_policy
        self.enable_checksum = enable_checksum
        self.instance_name = instance_name
        self.is_auto_sync_enabled = is_auto_sync_enabled
        self.is_multi_stage_restore = is_multi_stage_restore
        self.keep_cdc = keep_cdc
        self.log_file_destination = log_file_destination
        self.multi_stage_restore_options = multi_stage_restore_options
        self.new_database_name = new_database_name
        self.restore_time_secs = restore_time_secs
        self.resume_restore = resume_restore
        self.secondary_data_file_destination = secondary_data_file_destination
        self.secondary_data_file_destination_vec = secondary_data_file_destination_vec
        self.with_clause = with_clause
        self.with_no_recovery = with_no_recovery

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        capture_tail_logs = dictionary.get('captureTailLogs')
        continue_after_error = dictionary.get('continueAfterError')
        data_file_destination = dictionary.get('dataFileDestination')
        db_restore_overwrite_policy = dictionary.get('dbRestoreOverwritePolicy')
        enable_checksum = dictionary.get('enableChecksum')
        instance_name = dictionary.get('instanceName')
        is_auto_sync_enabled = dictionary.get('isAutoSyncEnabled')
        is_multi_stage_restore = dictionary.get('isMultiStageRestore')
        keep_cdc = dictionary.get('keepCdc')
        log_file_destination = dictionary.get('logFileDestination')
        multi_stage_restore_options = cohesity_management_sdk.models.sql_update_restore_task_options.SqlUpdateRestoreTaskOptions.from_dictionary(dictionary.get('multiStageRestoreOptions')) if dictionary.get('multiStageRestoreOptions') else None
        new_database_name = dictionary.get('newDatabaseName')
        restore_time_secs = dictionary.get('restoreTimeSecs')
        resume_restore = dictionary.get('resumeRestore')
        secondary_data_file_destination = dictionary.get('secondaryDataFileDestination')
        # Fixed: compare against None with `is not` (PEP 8) and build the
        # list with a comprehension instead of list() + append.
        secondary_data_file_destination_vec = None
        if dictionary.get('secondaryDataFileDestinationVec') is not None:
            secondary_data_file_destination_vec = [
                cohesity_management_sdk.models.files_to_directory_mapping.FilesToDirectoryMapping.from_dictionary(structure)
                for structure in dictionary.get('secondaryDataFileDestinationVec')]
        with_clause = dictionary.get('withClause')
        with_no_recovery = dictionary.get('withNoRecovery')

        # Return an object of this model
        return cls(capture_tail_logs,
                   continue_after_error,
                   data_file_destination,
                   db_restore_overwrite_policy,
                   enable_checksum,
                   instance_name,
                   is_auto_sync_enabled,
                   is_multi_stage_restore,
                   keep_cdc,
                   log_file_destination,
                   multi_stage_restore_options,
                   new_database_name,
                   restore_time_secs,
                   resume_restore,
                   secondary_data_file_destination,
                   secondary_data_file_destination_vec,
                   with_clause,
                   with_no_recovery)
| 2.109375 | 2 |
HTTP/proxy.py | MartrixG/computer-internet-project | 0 | 12758042 | import socket
import sys
import threading
import time
import traceback
import os
import re
import hashlib
# Maximum number of bytes read for a request/response header in one recv.
MAX_HEADER = 4096
# Chunk size used when streaming the response body.
RECV_SIZE = 512
ban_list = [
    b'jwes.hit.edu.cn'
]  # blocked-site filter list
change_list = {
    b'www.hit.edu.cn': b'studyathit.hit.edu.cn'
}  # site redirection map (requested host -> replacement host)
user_list = [
    '127.0.0.1'
]  # banned client IP list
# In-memory cache index: md5(url) hexdigest -> Last-Modified timestamp.
c = {}
def getHeader(string, name):  # extract a header line by field name
    """Return the full header line (bytes) whose field name matches `name`,
    with the trailing carriage return removed, or None if absent.

    The match is case-insensitive. `name` is escaped before being embedded
    in the regex; previously a name such as "HTTP/1.1" was interpolated
    verbatim, so the "." matched any character.
    """
    decoded = string.decode('UTF-8')
    header = re.compile(re.escape(name) + r'.*', re.IGNORECASE)
    match = header.search(decoded)
    if match is None:
        return None
    # "." does not match "\n", so the match stops at end-of-line but still
    # includes the "\r", which we strip here.
    return match.group().replace('\r', '').encode('UTF-8')
def transHost(raw_host):  # split the Host header into host and port
    """Split a space-stripped Host header (bytes, e.g. b'Host:example.com:8080')
    into a (host_bytes, port_int) pair; the port defaults to 80."""
    parts = raw_host.decode('UTF-8', 'ignore').split(":")
    host = parts[1].encode('UTF-8')
    port = int(parts[2]) if len(parts) > 2 else 80
    return host, port
def splitHeader(string):
    """Return the header portion of a raw HTTP message: everything before
    the first blank line (CRLF CRLF)."""
    head, _sep, _rest = string.partition(b'\r\n\r\n')
    return head
def recvBody(conn, base, size):  # receive the rest of the response body
    """Receive the remaining response data from `conn`, appending to `base`.

    size == -1 means chunked transfer encoding: read until the terminating
    zero-length chunk marker. Otherwise read until len(base) >= size.

    Fixed: if the peer closes the connection, recv() returns b'' forever;
    previously that spun in an infinite loop. We now stop and return what
    was received so far.
    """
    if size == -1:  # chunked: end detected by the final 0-chunk marker
        while base[-7:] != b'\r\n0\r\n\r\n':
            data = conn.recv(RECV_SIZE)
            if not data:  # peer closed the connection
                break
            base += data
    else:  # Content-Length known: read until the declared size is reached
        while len(base) < size:
            data = conn.recv(RECV_SIZE)
            if not data:  # peer closed the connection
                break
            base += data
    return base
def checkCache(cache, url):
    """Return True when this url (keyed by its md5 hexdigest) is cached."""
    digest = hashlib.md5(url).hexdigest()
    return digest in cache
def writeCache(cache, url, timestamp, body):  # persist a cache entry
    """Record a cache entry for `url`:

    - store its Last-Modified `timestamp` in the in-memory index `cache`
      under the url's md5 hexdigest,
    - append the "digest::timestamp" pair to the on-disk index file,
    - write the raw response `body` to the cache directory.

    Fixed: file handles were opened without ever being closed on error;
    `with` guarantees they are closed.
    """
    digest = hashlib.md5(url).hexdigest()
    cache[digest] = timestamp
    with open('计算机网络\HTTP\dict.txt', 'a') as index_file:
        index_file.write(digest + '::' + timestamp + '\n')
    with open('计算机网络\HTTP\cache\\' + digest, 'wb') as body_file:
        body_file.write(body)
def loadbody(cache, url):  # read a cached response body from disk
    """Return the cached response body for `url`, or None if not on disk.

    `cache` is accepted for signature symmetry with the other cache helpers
    but is not consulted here.

    Fixed: the file handle was opened without being closed; `with`
    guarantees closure.
    """
    digest = hashlib.md5(url).hexdigest()
    for entry in os.listdir('计算机网络\HTTP\cache'):
        if entry == digest:
            with open('计算机网络\HTTP\cache\\' + entry, 'rb') as cached:
                return cached.read()
def thread_proxy(client, addr, cache, banlist, changelist, userlist):  # proxy worker thread
    """Handle one proxied client connection.

    client     -- accepted client socket
    addr       -- client (ip, port) tuple
    cache      -- dict mapping md5(url) hexdigest -> Last-Modified timestamp
    banlist    -- hosts to refuse, or None to disable host filtering
    changelist -- host redirection map, or None to disable redirection
    userlist   -- client IPs to refuse, or None to disable client filtering
    """
    thread_name = threading.currentThread().name
    # Refuse connections from banned client IPs
    if userlist != None:
        if userlist.count(addr[0]) != 0:
            print("%sThis client is banned!" % (thread_name))
            client.close()
            return
    # Try to receive the request sent by the client
    try:
        request = client.recv(MAX_HEADER)
    # NOTE(review): bare except also swallows non-timeout errors; the
    # message assumes a timeout.
    except:  # on timeout, report and give up
        print("%sTime out!" % (thread_name))
        client.close()
        return
    # Extract the original Host header and request URL
    raw_host = getHeader(request, "Host").replace(b' ', b'')
    url = getHeader(request, 'get').split(b' ')[1]
    if not raw_host:  # no Host header could be extracted
        print("%sHost request error%s" % (thread_name, str(addr)))
        client.close()
        return
    host, port = transHost(raw_host)
    print("%sGET:%s:%s" % (thread_name, url, str(port)))
    # Redirection ("phishing" demo): silently swap the destination host
    if changelist != None:
        if changelist.__contains__(host):
            host = changelist[host]  # replace the host
            print("%sHost has change to %s" % (thread_name, host))
    # Refuse banned destination hosts
    if banlist != None:
        if banlist.count(host) != 0:
            print("%sThis host is banned" % (thread_name))
            client.close()
            return
    # Open the connection to the upstream server
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.settimeout(10)
    try:
        server.connect((host, port))
    except socket.timeout:  # on timeout, report and give up
        print("%sTime out!" % (thread_name))
        server.close()
        client.close()
        return
    # Cache validation: ask the server whether our copy is still fresh
    if checkCache(cache, url):
        # Inject an If-Modified-Since header into a copy of the request
        url_md5 = hashlib.md5()
        url_md5.update(url)
        url_md5 = url_md5.hexdigest()
        modify = '\r\nIf-Modified-Since:'+cache[url_md5]+'\r\n\r\n'
        newrequest = request
        newrequest = newrequest.replace(
            b'\r\n\r\n', modify.encode('UTF-8'))  # modified request
        server.sendall(newrequest)
        response = server.recv(MAX_HEADER)
        responseHeader = splitHeader(response)
        flag = getHeader(responseHeader, 'HTTP/1.1').split(b' ')[1]
        if flag == b'304':  # Not Modified: serve the cached body and finish
            print("%sCache hit!!" % (thread_name))
            response = loadbody(cache, url)
            client.sendall(response)
            server.close()
            client.close()
            return
    # Cache miss (or stale): forward the unmodified request
    server.sendall(request)
    response = server.recv(RECV_SIZE)
    responseHeader = splitHeader(response)
    # If the header is shorter than what was received minus the CRLFCRLF,
    # part of the body arrived too and more data may be pending.
    if len(responseHeader) < len(response) - 4:
        content_size = getHeader(responseHeader, 'content-length')
        if content_size:
            size = int(content_size.split(b':')[1]) + 4 + len(responseHeader)
        else:
            size = -1  # no Content-Length: assume chunked encoding
        response = recvBody(server, response, size)
    client.sendall(response)  # forward the data to the client
    # Store into the cache
    # NOTE(review): `time` shadows the imported time module within this
    # function; harmless here since time isn't used below this point.
    time = getHeader(responseHeader, 'Last-Modified')
    if time != None:
        # A Last-Modified header means the response is cacheable
        time = time.split(b': ')[1].decode('UTF-8')
        writeCache(cache, url, time, response)
    server.close()
    client.close()
def thread_server(myserver):
    """Accept loop: hand every incoming connection to a daemon proxy worker."""
    while True:
        conn, addr = myserver.accept()
        conn.settimeout(10)
        worker = threading.Thread(
            target=thread_proxy,
            args=(conn, addr, c, None, change_list, None))
        worker.setDaemon(True)
        worker.start()
def main(port=8000):
    """Start the proxy server on 127.0.0.1:<port> and block until Ctrl-C.

    The listening socket is created outside the try block; previously a
    failure in socket.socket() would raise NameError in the finally clause
    because `myserver` was never bound.
    """
    myserver = socket.socket()
    try:
        myserver.bind(('127.0.0.1', port))
        myserver.listen(1024)
        # Accepting runs in a daemon thread; the main thread just sleeps so
        # KeyboardInterrupt can be caught here.
        thread_s = threading.Thread(target=thread_server, args=(myserver,))
        thread_s.setDaemon(True)
        thread_s.start()
        while True:
            time.sleep(1000)
    except KeyboardInterrupt:
        print("sys exit")
    finally:
        myserver.close()
def loadCache():  # rebuild the in-memory index from the on-disk dict file
    """Populate the global cache index `c` from the index file.

    Each line has the form "<md5hexdigest>::<Last-Modified timestamp>".

    Fixed: the file handle was never closed (now a `with` block), and the
    old `line[1][:-1]` chopped the last character of the value whenever a
    line lacked a trailing newline; rstrip('\\n') only removes the newline.
    """
    with open('计算机网络\HTTP\dict.txt', 'r') as index_file:
        for line in index_file:
            key, sep, value = line.partition('::')
            if sep:  # skip malformed lines without '::'
                c[key] = value.rstrip('\n')
# Command-line entry point
if __name__ == '__main__':
    try:
        loadCache()
        print("Start proxy...")
        main()
    # NOTE(review): 'e' is unused; traceback.print_exc() reports the error.
    except Exception as e:
        print("error exit")
        traceback.print_exc()
    finally:
        print("end server")
        sys.exit(0)
| 2.640625 | 3 |
memory_benchmarking/benchmark_model.py | ravi9/topologies | 0 | 12758043 | #!/usr/bin/python
# ----------------------------------------------------------------------------
# Copyright 2018 Intel
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import numpy as np
import os
import argparse
import psutil
import time
# Command-line options controlling model topology, data size, CPU threading,
# and benchmark mode (training vs. inference, 2D vs. 3D, U-Net vs. classifier).
parser = argparse.ArgumentParser(
    description="Benchmark 3D and 2D Convolution Models",add_help=True)
parser.add_argument("--dim_lengthx",
                    type = int,
                    default=16,
                    help="Tensor cube length of side x")
parser.add_argument("--dim_lengthy",
                    type = int,
                    default=16,
                    help="Tensor cube length of side y")
parser.add_argument("--dim_lengthz",
                    type = int,
                    default=16,
                    help="Tensor cube length of side z")
parser.add_argument("--num_channels",
                    type = int,
                    default=1,
                    help="Number of channels")
parser.add_argument("--num_outputs",
                    type = int,
                    default=1,
                    help="Number of outputs")
parser.add_argument("--bz",
                    type = int,
                    default=1,
                    help="Batch size")
parser.add_argument("--lr",
                    type = float,
                    default=0.001,
                    help="Learning rate")
parser.add_argument("--num_datapoints",
                    type = int,
                    default=1024,
                    help="Number of datapoints")
parser.add_argument("--epochs",
                    type = int,
                    default=3,
                    help="Number of epochs")
# Default intra-op threads = number of physical cores (hyperthreads excluded).
parser.add_argument("--intraop_threads",
                    type = int,
                    default=psutil.cpu_count(logical=False),
                    help="Number of intraop threads")
parser.add_argument("--interop_threads",
                    type = int,
                    default=2,
                    help="Number of interop threads")
parser.add_argument("--blocktime",
                    type = int,
                    default=0,
                    help="Block time for CPU threads")
parser.add_argument("--print_model",
                    action="store_true",
                    default=False,
                    help="Print the summary of the model layers")
parser.add_argument("--use_upsampling",
                    action="store_true",
                    default=False,
                    help="Use upsampling instead of transposed convolution")
parser.add_argument("--D2",
                    action="store_true",
                    default=False,
                    help="Use 2D model and images instead of 3D.")
parser.add_argument("--single_class_output",
                    action="store_true",
                    default=False,
                    help="Use binary classifier instead of U-Net")
parser.add_argument("--mkl_verbose",
                    action="store_true",
                    default=False,
                    help="Print MKL debug statements.")
parser.add_argument("--trace",
                    action="store_true",
                    default=False,
                    help="Create trace of TensorFlow timeline")
parser.add_argument("--inference",
                    action="store_true",
                    default=False,
                    help="Test inference speed. Default=Test training speed")

args = parser.parse_args()
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # Get rid of the AVX, SSE warnings

if args.mkl_verbose:
    os.environ["MKL_VERBOSE"] = "1"  # Print out messages from MKL operations
    os.environ["MKLDNN_VERBOSE"] = "1"  # Print out messages from MKL-DNN operations

# OpenMP/KMP thread tuning must be exported BEFORE TensorFlow is imported,
# which is why the TensorFlow/Keras imports are deliberately placed below.
os.environ["OMP_NUM_THREADS"] = str(args.intraop_threads)
os.environ["KMP_BLOCKTIME"] = str(args.blocktime)
os.environ["KMP_AFFINITY"] = "granularity=thread,compact,1,0"

import tensorflow as tf
from model import *
from tqdm import tqdm

import datetime
print("Started script on {}".format(datetime.datetime.now()))

print("args = {}".format(args))
print("OS: {}".format(os.system("uname -a")))
print("TensorFlow version: {}".format(tf.__version__))
import keras as K
print("Keras API version: {}".format(K.__version__))
# Define the input (tensor_shape) and output (out_shape) tensor shapes.
# dims holds the spatial axes used by the dice metrics.
if args.D2:  # Define shape of the tensors (2D)
    dims = (1, 2)
    tensor_shape = [args.bz,
                    args.dim_lengthx,
                    args.dim_lengthy,
                    args.num_channels]
    out_shape = [args.bz,
                 args.dim_lengthx,
                 args.dim_lengthy,
                 args.num_outputs]
else:  # Define shape of the tensors (3D)
    dims = (1, 2, 3)
    tensor_shape = [args.bz,
                    args.dim_lengthx,
                    args.dim_lengthy,
                    args.dim_lengthz,
                    args.num_channels]
    # BUG FIX: this second assignment previously overwrote tensor_shape,
    # clobbering num_channels with num_outputs; it should define out_shape,
    # mirroring the 2D branch above.
    out_shape = [args.bz,
                 args.dim_lengthx,
                 args.dim_lengthy,
                 args.dim_lengthz,
                 args.num_outputs]
# Optimize CPU threads for TensorFlow
config = tf.ConfigProto(
    inter_op_parallelism_threads=args.interop_threads,
    intra_op_parallelism_threads=args.intraop_threads)

sess = tf.Session(config=config)

K.backend.set_session(sess)

global_step = tf.Variable(0, name="global_step", trainable=False)

# Define the shape of the input images
# For segmentation models, the label (mask) is the same shape.
img = tf.placeholder(tf.float32, shape=tensor_shape)  # Input tensor

if args.single_class_output:
    truth = tf.placeholder(tf.float32, shape=(args.bz,args.num_outputs))  # Label tensor
else:
    truth = tf.placeholder(tf.float32, shape=tensor_shape)  # Label tensor

# Define the model
# Predict the output mask

if not args.inference:
    # Set keras learning phase to train
    K.backend.set_learning_phase(True)

    # Don"t initialize variables on the fly
    K.backend.manual_variable_initialization(False)

# Model builders (conv2D/conv3D/unet2D/unet3D) come from `from model import *`.
if args.single_class_output:
    if args.D2:  # 2D convnet model
        predictions = conv2D(img,
                             print_summary=args.print_model, n_out=args.num_outputs)
    else:  # 3D convet model
        predictions = conv3D(img,
                             print_summary=args.print_model, n_out=args.num_outputs)
else:
    if args.D2:  # 2D U-Net model
        predictions = unet2D(img,
                             use_upsampling=args.use_upsampling,
                             print_summary=args.print_model, n_out=args.num_outputs)
    else:  # 3D U-Net model
        predictions = unet3D(img,
                             use_upsampling=args.use_upsampling,
                             print_summary=args.print_model, n_out=args.num_outputs)

#  Performance metrics for model
if args.single_class_output:
    loss = tf.losses.sigmoid_cross_entropy(truth, predictions)
    metric_score = tf.metrics.mean_squared_error(truth, predictions)
else:
    loss = dice_coef_loss(truth, predictions, dims)  # Loss is the dice between mask and prediction
    metric_score = dice_coef(truth, predictions, dims)

train_op = tf.train.AdamOptimizer(args.lr).minimize(loss, global_step=global_step)

# Just feed completely random data in for the benchmark testing
imgs = np.random.rand(*tensor_shape)

if args.single_class_output:
    truths = np.random.rand(args.bz, args.num_outputs)
else:
    truths = np.random.rand(*tensor_shape)

# Initialize all variables
init_op = tf.global_variables_initializer()
init_l = tf.local_variables_initializer()  # For TensorFlow metrics

sess.run(init_op)
sess.run(init_l)

saver = tf.train.Saver()
save_path = saver.save(sess, "./saved_model/model.ckpt")
print("Model saved in path: %s" % save_path)
# Freeze graph if inference
if args.inference:
    K.backend.set_learning_phase(False)

# Set up trace for operations
run_metadata = tf.RunMetadata()
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)

# Same number of sample to process regardless of batch size
# So if we have a larger batch size we can take fewer steps.
total_steps = args.num_datapoints//args.bz

print("Using random data.")
if args.inference:
    print("Testing inference speed.")
else:
    print("Testing training speed.")

# Timed benchmark loop: identical random batch is fed every step, since we
# only care about throughput, not convergence.
start_time = time.time()
for epoch in tqdm(range(args.epochs), desc="Epoch #"):
    for i in tqdm(range(total_steps), desc="Step #"):

        if args.inference:
            feed_dict = {img: imgs}
        else:
            feed_dict = {img: imgs, truth:truths}

        if args.inference:
            if args.trace:
                history = sess.run([predictions], feed_dict=feed_dict,
                                   options=run_options, run_metadata=run_metadata)
            else:
                history = sess.run([predictions], feed_dict=feed_dict)
        else:
            if args.trace:
                history, loss_v, metric_v, this_step = \
                    sess.run([train_op, loss, metric_score, global_step],
                             feed_dict=feed_dict,
                             options=run_options, run_metadata=run_metadata)
            else:
                history, loss_v, metric_v, this_step = \
                    sess.run([train_op, loss, metric_score, global_step],
                             feed_dict=feed_dict)

stop_time = time.time()

print("\n\nTotal time = {:,.3f} seconds".format(stop_time - start_time))
print("Total images = {:,}".format(args.epochs*args.num_datapoints))
print("Speed = {:,.3f} images per second".format( \
    (args.epochs*args.num_datapoints)/(stop_time - start_time)))

if args.trace:
    """
    Save the training timeline
    """
    from tensorflow.python.client import timeline

    timeline_filename = "./timeline_trace.json"
    fetched_timeline = timeline.Timeline(run_metadata.step_stats)
    chrome_trace = fetched_timeline.generate_chrome_trace_format()
    with open(timeline_filename, "w") as f:
        print("Saved Tensorflow trace to: {}".format(timeline_filename))
        print("To view the trace:\n(1) Open Chrome browser.\n"
              "(2) Go to this url -- chrome://tracing\n"
              "(3) Click the load button.\n"
              "(4) Load the file {}.".format(timeline_filename))
        f.write(chrome_trace)

print("Stopped script on {}".format(datetime.datetime.now()))
| 2.015625 | 2 |
lib/ops_capture_color.py | nepia11/blender-addon-template | 1 | 12758044 | import bpy
import bgl
from logging import getLogger
# Module-level logger and shortcut for Blender's UI-translation lookup.
logger = getLogger(__name__)
translation = bpy.app.translations.pgettext
def capture_under_cursor(buffer, mouse_x=0, mouse_y=0, type_flg="i") -> list:
    """Read the RGBA pixel under the cursor from the front buffer.

    Returns a flat 4-element list: ints 0-255 when type_flg == "i",
    floats 0.0-1.0 when type_flg == "f".

    Raises ValueError for an unknown type_flg; previously the function fell
    through and silently returned None.
    """
    # Reading with GL_FLOAT buffers is extremely slow, so GL_BYTE /
    # GL_UNSIGNED_BYTE is used instead.
    bgl.glReadBuffer(bgl.GL_FRONT)
    bgl.glReadPixels(
        mouse_x,
        mouse_y,
        1,
        1,
        bgl.GL_RGBA,
        bgl.GL_UNSIGNED_BYTE,
        buffer,
    )
    if type_flg == "i":
        return [value for value in buffer]
    elif type_flg == "f":
        return [value / 255 for value in buffer]
    raise ValueError("type_flg must be 'i' or 'f', got %r" % (type_flg,))
def bytes_to_color_code(color: list) -> str:
    """Convert an RGBA iterable of 0-255 ints into a '#rrggbbaa' color code.

    Each channel is zero-padded to two hex digits; the previous ':x'
    format dropped leading zeros for values < 16 and produced invalid
    codes such as '#ff010ff' instead of '#ff0010ff'.
    """
    c = color
    return f"#{c[0]:02x}{c[1]:02x}{c[2]:02x}{c[3]:02x}"
def create_buffer(src_width: int = 1, src_height: int = 1):
    """Allocate a bgl byte buffer sized for an RGBA region of the given dimensions."""
    return bgl.Buffer(bgl.GL_BYTE, src_width * src_height * 4)
class TEMPLATE_OT_CaptureColor(bpy.types.Operator):
    """Operator that picks up the color under the mouse cursor."""

    bl_idname = "template.capture_color"
    bl_label = translation("my operator")
    bl_description = "operator description"
    bl_options = {"REGISTER", "UNDO"}

    # Reusable 1x1 RGBA read-back buffer shared by all invocations.
    buffer = create_buffer()

    # invoke (instead of execute) is used when the operator needs the event
    # (here: the mouse coordinates).
    def invoke(self, context, event):
        # Sample the pixel under the cursor as normalized floats ("f" mode).
        color = capture_under_cursor(self.buffer, event.mouse_x, event.mouse_y, "f")
        # Apply the sampled RGB to the active grease-pencil brush.
        context.tool_settings.gpencil_paint.brush.color = color[:3]
        # brushes = [b for b in bpy.data.brushes]
        # for b in brushes:
        #     b.color = (color[:3])
        # logging
        logger.debug(color)
        # Report the sampled color in the Info area.
        self.report({"INFO"}, f"{color}")
        # Return the normal-completion status.
        return {"FINISHED"}
class TEMPLATE_PT_CursorColor(bpy.types.Panel):
    """Sidebar panel in the 3D View exposing the capture-color operator."""

    bl_label = "CursorColor"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"

    def draw(self, context):
        # Single button that runs the capture operator.
        layout = self.layout
        layout.operator(TEMPLATE_OT_CaptureColor.bl_idname)
# Classes and tools to (un)register with Blender on add-on load/unload.
# (Note: the "classses" spelling is used consistently throughout this module.)
classses = [TEMPLATE_OT_CaptureColor, TEMPLATE_PT_CursorColor]
tools = []  # no custom tools yet
def register():
    """Register every add-on class and tool with Blender."""
    for item in classses:
        bpy.utils.register_class(item)
    for item in tools:
        bpy.utils.register_tool(item)
def unregister():
    """Unregister every add-on class and tool from Blender."""
    for item in classses:
        bpy.utils.unregister_class(item)
    for item in tools:
        bpy.utils.unregister_tool(item)
| 2.578125 | 3 |
web/cloudstrype/main/migrations/0001_initial.py | btimby/cloudstrype | 5 | 12758045 | <filename>web/cloudstrype/main/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-04 19:44
from __future__ import unicode_literals
import cryptography.fernet
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import django.utils.timezone
import main.models
class Migration(migrations.Migration):
    # NOTE: auto-generated initial migration. Prefer editing the models and
    # re-running `makemigrations` over hand-editing the operations below.
    # NOTE(review): the JSONField defaults of `default={}` (Storage.auth /
    # Storage.attrs) share one mutable dict between instances — presumably the
    # model uses a dict callable; verify against the current models module.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
                ('full_name', models.CharField(max_length=64)),
                ('first_name', models.CharField(editable=False, max_length=64)),
                ('last_name', models.CharField(editable=False, max_length=64)),
                ('is_active', models.BooleanField(default=True)),
                ('is_admin', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
            bases=(main.models.UidModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Chunk',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('size', models.IntegerField()),
            ],
            bases=(main.models.UidModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='ChunkStorage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('attrs', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('chunk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='storages', to='main.Chunk')),
            ],
        ),
        migrations.CreateModel(
            name='File',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owned_files', to=settings.AUTH_USER_MODEL)),
            ],
            bases=(main.models.UidModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='FileStat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reads', models.IntegerField()),
                ('last', models.DateTimeField(auto_now=True)),
                ('file', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='stats', to='main.File')),
            ],
        ),
        migrations.CreateModel(
            name='FileTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='FileVersion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.File')),
            ],
        ),
        migrations.CreateModel(
            name='Key',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.CharField(default=cryptography.fernet.Fernet.generate_key, max_length=44)),
                ('uses', models.IntegerField(default=0)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='keys', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Option',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('raid_level', models.SmallIntegerField(default=1)),
                ('raid_replicas', models.SmallIntegerField(default=1)),
                ('attrs', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='options', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Storage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.SmallIntegerField(choices=[(1, 'Dropbox'), (2, 'Onedrive'), (3, 'Box'), (4, 'Google Drive'), (5, 'Array'), (6, 'Basic')])),
                ('size', models.BigIntegerField(default=0)),
                ('used', models.BigIntegerField(default=0)),
                ('auth', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={})),
                ('attrs', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={})),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='storages', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Storage',
                'verbose_name_plural': 'Storages',
            },
            bases=(main.models.UidModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name='UserDir',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('attrs', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='child_dirs', to='main.UserDir')),
                ('tags', models.ManyToManyField(to='main.Tag')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            bases=(main.models.UidModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='UserFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('attrs', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('deleted', models.DateTimeField(null=True)),
                ('file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_files', to='main.File')),
                ('parent', models.ForeignKey(null=True, on_delete=main.models.SET_FIELD('deleted', django.utils.timezone.now), related_name='child_files', to='main.UserDir')),
                ('tags', models.ManyToManyField(through='main.FileTag', to='main.Tag')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='files', to=settings.AUTH_USER_MODEL)),
            ],
            bases=(main.models.UidModelMixin, models.Model),
            managers=[
                ('all', django.db.models.manager.Manager()),
            ],
        ),
        migrations.CreateModel(
            name='Version',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('size', models.IntegerField(default=0)),
                ('md5', models.CharField(max_length=32)),
                ('sha1', models.CharField(max_length=40)),
                ('mime', models.CharField(max_length=64)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('file', models.ManyToManyField(related_name='versions', through='main.FileVersion', to='main.File')),
            ],
            options={
                'base_manager_name': 'objects',
            },
            bases=(main.models.UidModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='VersionChunk',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('serial', models.IntegerField(default=0)),
                ('chunk', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='filechunks', to='main.Chunk')),
                ('version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='filechunks', to='main.Version')),
            ],
        ),
        migrations.AddField(
            model_name='fileversion',
            name='version',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Version'),
        ),
        migrations.AddField(
            model_name='filetag',
            name='file',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.UserFile'),
        ),
        migrations.AddField(
            model_name='filetag',
            name='tag',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='main.Tag'),
        ),
        migrations.AddField(
            model_name='file',
            name='version',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='current_of', to='main.Version'),
        ),
        migrations.AddField(
            model_name='chunkstorage',
            name='storage',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='chunks', to='main.Storage'),
        ),
        migrations.AddField(
            model_name='chunk',
            name='key',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='chunks', to='main.Key'),
        ),
        migrations.AddField(
            model_name='chunk',
            name='version',
            field=models.ManyToManyField(related_name='chunks', through='main.VersionChunk', to='main.Version'),
        ),
        migrations.AlterUniqueTogether(
            name='versionchunk',
            unique_together=set([('version', 'serial')]),
        ),
        migrations.AlterUniqueTogether(
            name='userdir',
            unique_together=set([('user', 'name', 'parent')]),
        ),
        migrations.AlterUniqueTogether(
            name='fileversion',
            unique_together=set([('file', 'version')]),
        ),
        migrations.AlterUniqueTogether(
            name='filetag',
            unique_together=set([('file', 'tag')]),
        ),
        migrations.AlterUniqueTogether(
            name='chunkstorage',
            unique_together=set([('chunk', 'storage')]),
        ),
    ]
| 1.710938 | 2 |
templates/dynamic_import.py | nexssp/language_python | 2 | 12758046 | import importlib
# Name of the module to import at runtime.
name = 'test_module'

# Method 1 (preferred): importlib.import_module is the documented,
# supported way to import a module by its dotted name.
# Fixed: use the `name` variable instead of duplicating the literal.
module = importlib.import_module(name)

# Method 2: the builtin __import__. Passing a non-empty fromlist makes it
# return the named (sub)module itself instead of the top-level package.
module = __import__(name, fromlist=[''])

module.some_func()
jcd/app.py | nipil/jcdecaux_fetch_store | 1 | 12758047 | <gh_stars>1-10
#! /usr/bin/env python
# -*- coding: UTF-8 -*- vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# The MIT License (MIT)
#
# Copyright (c) 2015-2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import re
import sys
import json
import argparse
import requests
import jcd.common
import jcd.cmd
# access jcdecaux web api
class ApiAccess(object):
    """Thin wrapper around the JCDecaux VLS v1 web API.

    Adds the API key to every request, asks for JSON, and converts both
    transport errors and API-level error payloads into JcdException.
    """

    BaseUrl = "https://api.jcdecaux.com/vls/v1"

    def __init__(self, apikey):
        # apikey arrives as a one-element sequence (argparse-style value).
        self._apikey = apikey[0]

    @staticmethod
    def _parse_reply(reply_text):
        """Parse an API reply; raise JcdException on bad JSON or API errors.

        Returns the decoded JSON object on success.
        """
        try:
            reply_json = json.loads(reply_text)
        except (ValueError, OverflowError, TypeError) as error:
            print("%s: %s" % (type(error).__name__, error))
            raise jcd.common.JcdException(
                "Could not parse JSON reply :\n%s" % (reply_text, ))
        # "error" in the payload signals an API-level failure even with HTTP 200.
        if isinstance(reply_json, dict) and "error" in reply_json:
            raise jcd.common.JcdException(
                "JCDecaux API exception: %s" % reply_json["error"])
        return reply_json

    def _get(self, sub_url, payload=None):
        """GET BaseUrl/sub_url with the API key added; return parsed JSON."""
        if payload is None:
            payload = {}
        # add the api key to the call
        payload["apiKey"] = self._apikey
        url = "%s/%s" % (self.BaseUrl, sub_url)
        headers = {"Accept": "application/json"}
        try:
            request = requests.get(url, params=payload, headers=headers)
            if request.status_code != requests.codes.ok:
                raise jcd.common.JcdException("JCDecaux Requests exception: (%i) %s headers=%s content=%s" % (
                    request.status_code, url, repr(request.headers), repr(request.text)))
            # avoid ultra-slow character set auto-detection
            # see https://github.com/kennethreitz/requests/issues/2359
            request.encoding = "utf-8"
            # check for api error
            return self._parse_reply(request.text)
        except requests.exceptions.RequestException as exception:
            raise jcd.common.JcdException(
                "JCDecaux Requests exception: (%s) %s" % (
                    type(exception).__name__, exception))

    def get_all_stations(self):
        """Fetch every station across all contracts."""
        return self._get("stations")

    def get_contract_station(self, contract_name, station_id):
        """Fetch a single station of a contract by its numeric id."""
        return self._get("stations/%i" % station_id,
                         {"contract": contract_name})

    def get_contract_stations(self, contract_name):
        """Fetch all stations of one contract."""
        return self._get("stations",
                         {"contract": contract_name})

    def get_contracts(self):
        """Fetch the list of available contracts."""
        return self._get("contracts")
# main app
class App(object):
    """Command-line front-end: builds the argparse tree and dispatches to jcd.cmd.

    Class attributes DataPath, DbName and Verbose are populated once by
    run() and read globally by the command implementations.
    """

    DataPath = None
    DbName = None
    Verbose = None

    def __init__(self, default_data_path, default_app_dbname):
        """Build the top-level parser and every sub-command parser."""
        # top parser
        self._parser = argparse.ArgumentParser(
            description='Fetch and store JCDecaux API results')
        # top level argument for data destination
        self._parser.add_argument(
            '--datadir',
            help='choose data folder (default: %s)' % default_data_path,
            default=default_data_path
        )
        self._parser.add_argument(
            '--dbname',
            help='choose db filename (default: %s)' % default_app_dbname,
            default=default_app_dbname
        )
        self._parser.add_argument(
            '--verbose', '-v',
            action='store_true',
            help='display operationnal informations'
        )
        # top level commands
        top_command = self._parser.add_subparsers(dest='command')
        # init command
        init = top_command.add_parser(
            'init',
            help='create application files',
            description='Initialize application'
        )
        init.add_argument(
            '--force', '-f',
            action='store_true',
            help='overwrite existing files'
        )
        # config command: one option per declared config parameter
        config = top_command.add_parser(
            'config',
            help='config application parameters',
            description='Configure application'
        )
        for value in jcd.cmd.ConfigCmd.Parameters:
            config.add_argument(
                '--%s' % value[0],
                type=value[1],
                help=value[2],
            )
        # admin command: one boolean flag per declared admin action
        admin = top_command.add_parser(
            'admin',
            help='administrate application database',
            description='Manage database'
        )
        for value in jcd.cmd.AdminCmd.Parameters:
            admin.add_argument(
                '--%s' % value[0],
                action='store_true',
                help=value[1],
            )
        # fetch command
        fetch = top_command.add_parser(
            'fetch',
            help='get information from the API',
            description='Get from API'
        )
        fetch.add_argument(
            '--contracts', '-c',
            action='store_true',
            help='get contracts'
        )
        fetch.add_argument(
            '--state', '-s',
            action='store_true',
            help='get current state'
        )
        # store command
        top_command.add_parser(
            'store',
            help='store fetched state into database',
            description='Store state in database'
        )
        # cron command
        top_command.add_parser(
            'cron',
            help='do a full acquisition cycle',
            description='Fetch and store according to configuration'
        )
        # import v1 command
        import_v1 = top_command.add_parser(
            'import_v1',
            help='import data from version 1',
            description='Analize and import data from the version 1'
        )
        import_v1.add_argument(
            '--source',
            help='directory of version 1 data to import (default: %s)' % jcd.cmd.Import1Cmd.DefaultPath,
            default=jcd.cmd.Import1Cmd.DefaultPath
        )
        import_v1.add_argument(
            '--sync',
            help='sqlite synchronous pragma: 0/1/2/3 (default: 0)',
            type=int,
            choices=range(0, 4),
            default=0
        )
        # export_csv command
        export_csv = top_command.add_parser(
            'export_csv',
            help='export data in csv format',
            description='Dump and store data in csv format'
        )
        export_csv.add_argument(
            'source',
            type=self.export_param_type_check,
            help="'contracts', 'stations', or date (YYYY-MM-DD)"
        )

    def run(self):
        """Parse CLI arguments and execute the selected sub-command."""
        try:
            # parse arguments
            args = self._parser.parse_args()
            # consume data-path argument
            App.DataPath = args.datadir
            del args.datadir
            # consume db name argument
            App.DbName = args.dbname
            del args.dbname
            # consume verbose
            App.Verbose = args.verbose
            del args.verbose
            # consume command: sub-command names map to same-named methods
            command = getattr(self, args.command)
            del args.command
            # run requested command
            command(args)
        except jcd.common.JcdException as exception:
            # sys.stderr.write works identically on Python 2 and 3,
            # unlike the "print >>" statement it replaces.
            sys.stderr.write("JcdException: %s\n" % exception)
            sys.exit(1)

    @staticmethod
    def export_param_type_check(value):
        """argparse type-checker: accept 'contracts', 'stations' or YYYY-MM-DD.

        Raises argparse.ArgumentTypeError for anything else.
        """
        if value == "contracts" or value == "stations":
            return value
        try:
            # raw string so \d reaches the regex engine unmangled
            return re.match(r"^\d{4}-\d{2}-\d{2}$", value).group(0)
        except AttributeError:
            # re.match returned None: the string is not a YYYY-MM-DD date
            raise argparse.ArgumentTypeError(
                "String '%s' does not match required format" % value)

    @staticmethod
    def init(args):
        """Create application files."""
        init = jcd.cmd.InitCmd(args)
        init.run()

    @staticmethod
    def config(args):
        """Read or write configuration parameters."""
        config = jcd.cmd.ConfigCmd(args)
        config.run()

    @staticmethod
    def admin(args):
        """Run database administration actions."""
        admin = jcd.cmd.AdminCmd(args)
        admin.run()

    @staticmethod
    def fetch(args):
        """Fetch contracts and/or state from the API."""
        fetch = jcd.cmd.FetchCmd(args)
        fetch.run()

    @staticmethod
    def store(args):
        """Store previously fetched state into the database."""
        store = jcd.cmd.StoreCmd(args)
        store.run()

    @staticmethod
    def cron(args):
        """Do a full fetch-and-store cycle."""
        cron = jcd.cmd.CronCmd(args)
        cron.run()

    @staticmethod
    def import_v1(args):
        """Import data produced by version 1 of the application."""
        import1 = jcd.cmd.Import1Cmd(args)
        import1.run()

    @staticmethod
    def export_csv(args):
        """Export contracts, stations or one day of samples as CSV."""
        exportcsv = jcd.cmd.ExportCsvCmd(args)
        exportcsv.run()
| 1.804688 | 2 |
src/notion/tests/test_notion_block.py | Abdujabbar/education-backend | 0 | 12758048 | <reponame>Abdujabbar/education-backend
import pytest
from notion.block import NotionBlock
@pytest.mark.parametrize(('block', 'expected_type'), [
({}, None),
({'value': {'test': 'zero'}}, None),
({'value': {'type': 'testing'}}, 'testing'),
])
def test_block_type(block, expected_type):
assert NotionBlock(id='test', data=block).type == expected_type
@pytest.mark.parametrize(('block', 'expected_content'), [
    ({}, []),
    ({'value': {'test': 'zero'}}, []),
    ({'value': {'content': ['a', 'b']}}, ['a', 'b']),
])
def test_content(block, expected_content):
    """Content comes from data['value']['content'], defaulting to an empty list."""
    notion_block = NotionBlock(id='test', data=block)
    assert notion_block.content == expected_content
| 2.546875 | 3 |
project/apps/core/modules/camera.py | expert-m/crazy-bear | 0 | 12758049 | <reponame>expert-m/crazy-bear<gh_stars>0
import datetime
import typing
import schedule
from imutils.video import VideoStream
from ..base import BaseModule, Command
from ... import task_queue
from ...common.constants import OFF, ON
from ...common.storage import file_storage
from ...common.utils import camera_is_available, synchronized_method
from ...core import events
from ...core.constants import (
CAMERA_IS_AVAILABLE, CURRENT_FPS, VIDEO_RECORDING_IS_ENABLED, SECURITY_IS_ENABLED,
USE_CAMERA, VIDEO_SECURITY,
)
from ...guard.video_guard import VideoGuard
from ..constants import BotCommands
from .... import config
__all__ = (
'Camera',
)
class Camera(BaseModule):
    """Camera control module.

    Handles turning the camera on/off, taking and periodically saving
    photos, running the motion-detection video guard, and a (stubbed,
    not yet implemented) video-recording feature. All shared flags live
    in ``self.state`` under the constants imported from ``core.constants``.
    """

    initial_state = {
        VIDEO_SECURITY: None,  # active VideoGuard instance, or None
        USE_CAMERA: False,  # user-requested camera on/off flag
        CAMERA_IS_AVAILABLE: True,  # probed periodically via camera_is_available()
        SECURITY_IS_ENABLED: False,  # user-requested security on/off flag
        CURRENT_FPS: None,  # FPS reported by the running VideoGuard, else None
        VIDEO_RECORDING_IS_ENABLED: False,  # recording flag (feature stubbed)
    }
    # Lazily created stream; None whenever the camera is off.
    _video_stream: typing.Optional[VideoStream] = None
    # NOTE(review): availability is tracked in state[CAMERA_IS_AVAILABLE];
    # this class attribute appears unused in this class — confirm before removing.
    _camera_is_available: bool = True
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Probe the camera once at start-up so state reflects reality.
        self._update_camera_status()
    def init_schedule(self, scheduler: schedule.Scheduler) -> tuple:
        """Register the periodic jobs and return the created schedule entries."""
        return (
            scheduler.every(10).seconds.do(
                self.unique_task_queue.push,
                self._save_photo,
                priority=task_queue.TaskPriorities.MEDIUM,
            ),
            scheduler.every(30).seconds.do(
                self.unique_task_queue.push,
                self._check_video_stream,
                priority=task_queue.TaskPriorities.LOW,
            ),
            scheduler.every(10).minutes.do(
                self.unique_task_queue.push,
                self._update_camera_status,
                priority=task_queue.TaskPriorities.LOW,
            ),
            scheduler.every(10).seconds.do(
                self.unique_task_queue.push,
                self.check,
                priority=task_queue.TaskPriorities.MEDIUM,
            ),
        )
    def process_command(self, command: Command) -> typing.Any:
        """Handle CAMERA and SECURITY bot commands.

        Returns True when the command was recognized and handled,
        False otherwise.
        """
        if command.name == BotCommands.CAMERA:
            if command.first_arg == ON:
                self._enable_camera()
            elif command.first_arg == OFF:
                self._disable_camera()
            elif command.first_arg == 'photo':
                self._take_photo()
            elif command.first_arg == 'video':
                if command.second_arg == ON:
                    self._start_video_recording()
                elif command.second_arg == OFF:
                    self._stop_video_recording()
                else:
                    return False
            else:
                return False
            return True
        if command.name == BotCommands.SECURITY:
            if command.first_arg == ON:
                self._enable_security()
            elif command.first_arg == OFF:
                self._disable_security()
            else:
                return False
            return True
        return False
    @synchronized_method
    def check(self) -> None:
        """Reconcile the VideoGuard lifecycle with the current state flags.

        Stops a guard that should no longer run, starts one that should,
        and publishes the guard's FPS into CURRENT_FPS.
        """
        video_guard: typing.Optional[VideoGuard] = self.state[VIDEO_SECURITY]
        use_camera: bool = self.state[USE_CAMERA]
        security_is_enabled: bool = self.state[SECURITY_IS_ENABLED]
        if video_guard and (video_guard.is_stopped or not use_camera or not security_is_enabled):
            self._disable_security()
            video_guard = None
        if not video_guard and use_camera and security_is_enabled and self.state[CAMERA_IS_AVAILABLE]:
            self._enable_security()
            video_guard = self.state[VIDEO_SECURITY]
        if video_guard:
            self.state[CURRENT_FPS] = video_guard.motion_detector.fps_tracker.fps()
        else:
            self.state[CURRENT_FPS] = None
    @synchronized_method
    def disable(self) -> None:
        """Tear down security, recording and the camera before shutdown."""
        super().disable()
        if self.state[VIDEO_SECURITY]:
            self._disable_security()
        if self.state[VIDEO_RECORDING_IS_ENABLED]:
            self._stop_video_recording()
        if self.state[USE_CAMERA]:
            self._disable_camera()
    @synchronized_method
    def _enable_camera(self) -> None:
        """Start the video stream; re-enables security if it was requested."""
        self._update_camera_status()
        if not self.state[CAMERA_IS_AVAILABLE]:
            self.messenger.send_message('Camera is not available')
            return
        self.state[USE_CAMERA] = True
        if not self._video_stream:
            self._video_stream = VideoStream(src=config.VIDEO_SRC, resolution=config.IMAGE_RESOLUTION)
            self._video_stream.start()
        self.messenger.send_message('The camera is on')
        if self.state[VIDEO_SECURITY]:
            self._enable_security()
    @synchronized_method
    def _disable_camera(self) -> None:
        """Stop the stream, release the capture device, and disable security."""
        self.state[USE_CAMERA] = False
        if self.state[SECURITY_IS_ENABLED]:
            self._disable_security()
        if self._video_stream:
            self._video_stream.stop()
            # Release the underlying capture device explicitly.
            self._video_stream.stream.stream.release()
            self._video_stream = None
        self.messenger.send_message('The camera is off')
    @synchronized_method
    def _enable_security(self) -> None:
        """Create and start a VideoGuard on the running stream."""
        # TODO: Fix camera usage. Lack of power or overheating of the processor.
        if not self.state[USE_CAMERA]:
            return
        video_guard: VideoGuard = self.state[VIDEO_SECURITY]
        if video_guard:
            self.messenger.send_message('Video security is already enabled')
            return
        # if not self._video_stream:
        #     self._enable_camera()
        if self._video_stream:
            video_guard = VideoGuard(
                messenger=self.messenger,
                video_stream=self._video_stream,
                task_queue=self.task_queue,
                motion_detected_callback=events.motion_detected.send,
            )
            self.state[VIDEO_SECURITY] = video_guard
            video_guard.start()
        self.messenger.send_message('Video security is enabled')
    @synchronized_method
    def _disable_security(self) -> None:
        """Stop the VideoGuard, if any, and clear it from state."""
        video_guard: VideoGuard = self.state[VIDEO_SECURITY]
        if video_guard:
            video_guard.stop()
            self.state[VIDEO_SECURITY] = None
            self.messenger.send_message('Video security is stopped')
        elif self.state[USE_CAMERA]:
            self.messenger.send_message('Video security is already disabled')
    @synchronized_method
    def _take_photo(self) -> None:
        """Grab one frame, send it to the messenger, and upload it to storage."""
        if not self._can_use_camera():
            return
        now = datetime.datetime.now()
        frame = self._video_stream.read()
        if frame is not None:
            self.messenger.send_frame(frame, caption=f'Captured at {now.strftime("%d.%m.%Y, %H:%M:%S")}')
            # Upload asynchronously; retry on connection errors.
            self.task_queue.put(
                file_storage.upload_frame,
                kwargs={
                    'file_name': f'saved_photos/{now.strftime("%Y-%m-%d %H:%M:%S.png")}',
                    'frame': frame,
                },
                priority=task_queue.TaskPriorities.MEDIUM,
                retry_policy=task_queue.retry_policy_for_connection_error,
            )
    @synchronized_method
    def _start_video_recording(self) -> None:
        """Stub: set the recording flag; actual recording is not implemented."""
        if not self._can_use_camera():
            return
        # TODO: Implement
        self.state[VIDEO_RECORDING_IS_ENABLED] = True
        self.messenger.send_message('Not implemented')
    @synchronized_method
    def _stop_video_recording(self) -> None:
        """Stub: clear the recording flag; actual recording is not implemented."""
        if not self._can_use_camera():
            return
        # TODO: Implement
        self.state[VIDEO_RECORDING_IS_ENABLED] = False
        self.messenger.send_message('Not implemented')
    def _can_use_camera(self) -> bool:
        """Return True if the camera is enabled; otherwise notify and return False."""
        use_camera: bool = self.state[USE_CAMERA]
        if use_camera:
            return True
        self.messenger.send_message('Camera is not enabled')
        return False
    @synchronized_method
    def _save_photo(self) -> None:
        """Periodic job: upload the current frame to storage (no messenger post)."""
        if not self.state[USE_CAMERA] or not self._video_stream:
            return
        now = datetime.datetime.now()
        file_storage.upload_frame(
            file_name=f'photos/{now.strftime("%Y-%m-%d %H:%M:%S.png")}',
            frame=self._video_stream.read(),
        )
    @synchronized_method
    def _check_video_stream(self) -> None:
        """Periodic job: if the stream yields no frame, mark the camera dead and turn it off."""
        if not self._video_stream:
            return
        frame = self._video_stream.read()
        if frame is None:
            self.state[CAMERA_IS_AVAILABLE] = False
            self.messenger.send_message('Camera is not available')
            self._run_command(BotCommands.CAMERA, OFF)
    @synchronized_method
    def _update_camera_status(self) -> None:
        """Probe camera availability, but only while no stream is open."""
        if self._video_stream:
            return
        self.state[CAMERA_IS_AVAILABLE] = camera_is_available(config.VIDEO_SRC)
| 2.125 | 2 |
setup.py | mbroz/feel-the-streets | 5 | 12758050 | from setuptools import setup
# Minimal packaging stub: only the distribution name is declared here.
# NOTE(review): no version/packages metadata — presumably supplied elsewhere
# (e.g. setup.cfg) or intentionally minimal; confirm before relying on it.
setup(name="Feel the streets")