content stringlengths 5 1.05M |
|---|
import re
import logging
from typing import List, Dict
from functools import lru_cache
from datetime import datetime, timezone
import pygsheets
from sheet2linkml.model import ModelElement
from sheet2linkml.source.gsheetmodel.mappings import Mappings
from sheet2linkml.source.gsheetmodel.entity import Entity, EntityWorksheet, Attribute
from sheet2linkml.source.gsheetmodel.enum import EnumWorksheet, Enum
from sheet2linkml.source.gsheetmodel.datatype import Datatype, DatatypeWorksheet
from sheet2linkml.terminologies.service import TerminologyService
from linkml_runtime.linkml_model.meta import SchemaDefinition
class GSheetModel(ModelElement):
    """
    A GSheetModel represents a single, coherent model represented as a series of worksheets
    in Google Sheets. This representation is currently being developed by all the CCDH workstreams,
    and so it needs to be flexible enough to be modified extensively on the fly.
    If we ever extend sheet2linkml beyond Google Sheets, we should create a top-level Model
    class that generically represents a single, coherent model, and then make GSheetModel a
    subclass of that. But that's for another day.
    """

    """ The Google API scopes that are necessary to query this sheet. """
    SCOPES = [
        "https://www.googleapis.com/auth/spreadsheets.readonly",
        "https://www.googleapis.com/auth/drive.metadata.readonly",
    ]

    def __init__(self, google_sheet_client, google_sheet_id: str):
        """
        Create a new Google Sheet Model. This will create a model that uses the specified
        Google Sheet as an input.
        Note that creating a GSheetModel will kick off the Google Sheets authentication
        process, which will ask you to visit a website, log in with a Google account, and
        then to enter the provided code into this Python script. See
        https://pygsheets.readthedocs.io/en/stable/authorization.html for information on
        creating and downloading OAuth2 credentials from the Google Developers Console.
        Once you authenticate yourself, this script will create a file with your
        authentication token in your working directory, so you will not need to log in
        again from the same working directory.
        :param google_sheet_client: An authenticated pygsheets client used to open the spreadsheet.
        :param google_sheet_id: The Google Sheet ID containing the model.
        """
        self.client = google_sheet_client
        self.sheet = self.client.open_by_key(google_sheet_id)
        # TODO: at some point, we should read the version number from the Google Sheets document... somehow.
        self.release_version = None
        # Set a `development version`. This is initially None, but if set, we add this to the metadata we emit.
        self.development_version = None
        # Set the 'last_updated' time to now.
        # TODO: we should be able to get this from the Google Sheet, in RFC 3339 format, but this apparently
        # requires a different scope than what we currently use.
        # return self.sheet.updated
        self.last_updated = datetime.now(timezone.utc).isoformat()
        # Set the terminology_service to None, indicating that terminology services shouldn't be used.
        self.terminology_service = None

    @property
    def version(self):
        """
        Return the version of this GSheetModel.
        Prefers the release version; falls back to the development version
        (either or both may be None).
        """
        # TODO: We should read this from the Google Sheet.
        if self.release_version:
            return self.release_version
        else:
            return self.development_version

    @staticmethod
    def is_sheet_normative(worksheet: pygsheets.worksheet):
        """
        Return True if this sheet is normative (i.e. contains model content),
        False if it is an informative sheet that does not contain any model information.
        There are three types of such informative sheets, recognized by title prefix:
        - O_* tabs are useful tabs that have other non Entity content (exclude)
        - X_* tabs are tabs that might be deletable (exclude)
        - R_* tabs can be created to hold some sort of reference material that should not be changed. (exclude)
        """
        result = not (
            worksheet.title.startswith("O_")
            or worksheet.title.startswith("X_")
            or worksheet.title.startswith("R_")
        )
        return result

    def entity_worksheets(self) -> List[EntityWorksheet]:
        """
        A list of worksheets available in this model.
        We identify a worksheet containing entities based on its column header.
        :return: A list of entities available in this model.
        """
        # Identify entity worksheets among the list of all worksheets in this Google Sheets document.
        worksheets = self.sheet.worksheets()
        # Maps a skip reason (logged below) to the predicate that must return
        # True for a worksheet to be kept.
        tests_and_errors = {
            "excluded by sheet type": GSheetModel.is_sheet_normative,
            "not an entity worksheet": EntityWorksheet.is_sheet_entity,
        }
        # NOTE: this local list shadows the method name within this scope.
        entity_worksheets = list()
        for worksheet in worksheets:
            flag_skip = False
            # `error` is actually the keep-predicate; the dict key is the
            # human-readable reason used when the predicate fails.
            for test_name, error in tests_and_errors.items():
                if not error(worksheet):
                    logging.debug(f"Skipping worksheet {worksheet.title}: {test_name}")
                    flag_skip = True
                    break
            if not flag_skip:
                entity_worksheets.append(worksheet)
        return [
            EntityWorksheet(
                self, worksheet, terminology_service=self.terminology_service
            )
            for worksheet in entity_worksheets
        ]

    def entities(self) -> List[Entity]:
        """
        :return: The list of entities in this model, flattened across all entity worksheets.
        """
        result = []
        for worksheet in self.entity_worksheets():
            result.extend(worksheet.entities)
        return result

    def datatype_worksheets(self) -> List[DatatypeWorksheet]:
        """
        A list of datatype worksheets available in this model.
        We only have a single datatype worksheet: 'Primitives'. So we just return that.
        :return: A list of datatype worksheets available in this model.
        """
        return [DatatypeWorksheet(self, self.sheet.worksheet("title", "Primitives"))]

    def datatypes(self) -> List[Datatype]:
        """
        :return: The list of Datatypes in this model.
        """
        result = []
        for worksheet in self.datatype_worksheets():
            result.extend(worksheet.datatypes)
        return result

    def enum_worksheets(self) -> List[EnumWorksheet]:
        """
        A list of enum worksheets available in this model.
        We only have a single enum worksheet: 'O_CCDH Enums'. So we just return that.
        """
        return [EnumWorksheet(self, self.sheet.worksheet("title", "O_CCDH Enums"))]

    def enums_from_worksheets(self) -> List[Enum]:
        """
        A list of enums available from worksheets in this model.
        """
        result = []
        for worksheet in self.enum_worksheets():
            result.extend(worksheet.enums)
        return result

    @property
    def mappings(self) -> List[Mappings.Mapping]:
        """Return a list of all the mappings in this LinkML document.

        Aggregates mappings from datatypes, entities (including their
        attributes) and worksheet enums (including their values).
        """
        mappings = [
            mapping
            for datatype in self.datatypes()
            for mapping in datatype.mappings.mappings
        ]
        mappings.extend(
            mapping
            for entity in self.entities()
            for mapping in entity.mappings_including_attributes
        )
        mappings.extend(
            mapping
            for enum in self.enums_from_worksheets()
            for mapping in enum.mappings_including_values
        )
        return mappings

    def __str__(self) -> str:
        """
        :return: A string representation of this Google Sheet model.
        """
        return f'{self.__class__.__name__} with an underlying Google Sheet titled "{self.sheet.title}" containing {len(self.sheet.worksheets())} worksheets'

    @property
    def name(self) -> str:
        """
        :return: The name of this model (the Google Sheet title).
        """
        return self.sheet.title

    @property
    def full_name(self) -> str:
        """
        :return: The full name of this model (the Google Sheet URL).
        """
        return self.sheet.url

    def get_filename(self) -> str:
        """
        Return this Google Sheet model as a filename, which we calculate by making the Google Sheet title filesystem-safe.
        :return: A filename that could be used for this model.
        """
        # Taken from https://stackoverflow.com/a/46801075/27310
        filename = str(self.sheet.title).strip().replace(" ", "_")
        return re.sub(r"(?u)[^-\w.]", "", filename)

    def to_markdown(self) -> str:
        """
        :return: A Markdown representation of this Google Sheet model (a titled link).
        """
        return f"[{self.sheet.title}]({self.sheet.url})"

    def as_linkml(self, root_uri) -> SchemaDefinition:
        """
        Return this Google Sheet model as a LinkML SchemaDefinition.
        :param root_uri: The base URI to use for terms defined in this model.
        :return: A LinkML SchemaDefinition for the model described by this Google Sheet.
        """
        logging.info(f"Generating LinkML for {self}")
        # Set up general metadata.
        schema: SchemaDefinition = SchemaDefinition(name="CRDC-H", id=f"{root_uri}")
        schema.prefixes = {
            "linkml": "https://w3id.org/linkml/",
            "crdch": f"{root_uri}/",
            "NCIT": "http://purl.obolibrary.org/obo/NCIT_",
            "GDC": "http://example.org/gdc/",
            "PDC": "http://example.org/pdc/",
            "ICDC": "http://example.org/icdc/",
            "HTAN": "http://example.org/htan/",
        }
        # TODO: See if we can get by without.
        # schema.imports = ['datatypes', 'prefixes']
        schema.imports = ["linkml:types"]
        schema.default_prefix = "crdch"
        schema.license = "https://creativecommons.org/publicdomain/zero/1.0/"
        schema.notes.append(f"Derived from {self.to_markdown()}")
        schema.generation_date = self.last_updated
        schema.version = self.version
        # Generate all the datatypes.
        schema_types = {
            datatype.name: datatype.as_linkml(root_uri) for datatype in self.datatypes()
        }
        # Generate all the entities.
        schema_classes = {
            entity.name: entity.as_linkml(root_uri) for entity in self.entities()
        }
        # Load enums from the attributes themselves -- this will look things up in the terminology service.
        # NOTE: as_linkml_enum() is called twice per attribute here (once to
        # filter, once for the value); assumed cheap/idempotent -- verify.
        schema_enums = {
            Enum.fix_enum_name(attribute.full_name): attribute.as_linkml_enum()
            for entity in self.entities()
            for attribute in entity.attributes
            if attribute.as_linkml_enum() is not None
        }
        # Add enums from the enum worksheets in this Google Doc.
        # These overwrite any attribute-derived enum with the same fixed name.
        enum_worksheets = self.enum_worksheets()
        for enum_worksheet in enum_worksheets:
            for enum in enum_worksheet.enums:
                schema_enums[enum.fixed_name] = enum.as_linkml(root_uri)
        # At this point, classes might refer to types that haven't been defined
        # yet. So, for fields that refer to other classes in this model, we need to
        # go through and:
        # - Warn the user about the missing type
        # - Replace the type with 'Entity' for now.
        valid_types = (
            set(schema_types.keys())
            .union(set(schema_classes.keys()))
            .union(set(schema_enums.keys()))
        )

        def fix_type_name(entity, dct, prop):
            """Replace dct[prop] with 'Entity' (with a warning) if it names an unknown type."""
            logging.debug(f"fix_type_name({entity}, {dct}, {prop})")
            value = dct[prop]
            if value is not None and value not in valid_types:
                logging.warning(
                    f"Entity {entity}'s {prop} refers to type {value}, which is not defined."
                )
                dct[prop] = "Entity"

        for entity in schema_classes.values():
            fix_type_name(entity.name, entity, "is_a")
            for attrName in entity.attributes:
                # Ignore attributes that start with `_`.
                if not attrName.startswith("_"):
                    attr = entity.attributes[attrName]
                    fix_type_name(f"{entity.name}.{attrName}", attr, "range")
        # Write the lists to the schema
        schema.types = schema_types
        schema.classes = schema_classes
        schema.enums = schema_enums
        return schema

    def use_terminology_service(self, terminology_service: TerminologyService):
        """
        Activate terminology lookups using the specified base_url (e.g. https://terminology.ccdh.io/enumerations/CRDC-H.Specimen.analyte_type?value_only=false)
        """
        self.terminology_service = terminology_service
|
# Script: renders a 3D figure illustrating a 2D convolution -- a 16x16 image
# plane, a 5x5 filter plane, and a result cell -- and saves it to conv2d.svg.
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, PathPatch
from matplotlib.text import TextPath
from matplotlib.transforms import Affine2D
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import
import mpl_toolkits.mplot3d.art3d as art3d
import numpy as np

# 16x16 grid coordinates for the image plane.
xx, yy = np.meshgrid(np.linspace(0, 15, 16), np.linspace(0, 15, 16))
import cv2

# Load the source image and shrink it to 16x16; channel 1 (green) is used as
# a grayscale intensity map. Assumes 'mitosisSmall2.png' exists in the CWD.
img = cv2.imread('mitosisSmall2.png')
img = cv2.resize(img, dsize=(16, 16))
img = img[:, :, 1]
X = xx
Y = yy
# Image plane sits at z=0.
Z = 0 * np.ones(X.shape)
data = img
fig = plt.figure(figsize=(15, 5))
ax = fig.add_subplot(111, projection='3d')
# Draw the image as a flat surface colored by inverted grayscale intensity.
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, color='black', linewidth=1, facecolors=plt.cm.Greys(255 - data), shade=False)
ax.grid(True)
# Label each image cell with its normalized (0-1) pixel value.
# NOTE(review): ranges stop at 15, so the last row/column of the 16x16 grid
# is not labeled -- possibly intentional (only 15x15 cells are drawn).
for x in np.arange(0, 15):
    for y in np.arange(0, 15):
        print(x, y)
        ax.text(x + 0.5, y + 0.5, 0.1, '%.1f' % (data[int(x), int(y)] / 255), fontweight='bold', fontsize=4, color=[1.0, 0.3, 1.0], zorder=5, horizontalalignment='center', verticalalignment='center')
#ax = fig.add_subplot(111, projection='3d')
# Second surface draw over the same data (without rstride/cstride overrides).
ax.plot_surface(X, Y, Z, facecolors=plt.cm.Greys(255 - data), shade=False)
# Nearly top-down camera.
ax.view_init(elev=90., azim=84)
ax.set_zlim(0, 5)
# draw grid
#for y in
#ax.imshow(img)
xx, yy = np.meshgrid(np.arange(0, 5), np.arange(0, 5))
# 5x5 lower-triangular filter (values 0 or 255).
filtr = np.array([[255, 255, 255, 255, 255], [0, 255, 255, 255, 255], [0, 0, 255, 255, 255], [0, 0, 0, 255, 255], [0, 0, 0, 0, 255]])
print(xx.shape, yy.shape, filtr.shape)
ax.set_axis_off()
#ax.plot_surface(xx-8, yy+18, 2*np.ones(xx.shape), linewidth=1, zorder=10, facecolors=plt.cm.Greys(filtr), shade=False)
# Offset of the filter panel relative to the image plane.
xo_act = -8
yo_act = 12
# Label each filter cell with its normalized value.
for x in np.arange(0, 5):
    for y in np.arange(0, 5):
        print(x, y)
        ax.text(x + xo_act + 0.5, y + yo_act + 0.5, 2.1, '%.1f' % (filtr[int(x), int(y)] / 255), fontweight='bold', fontsize=4, color='blue', horizontalalignment='center', verticalalignment='center')
#plt.savefig('conv2d.svg')
# draw grid
# Thin black gridlines over the image plane. plt.plot forwards xs/ys/zs to
# the current 3D axes' plot method.
for k in range(16):
    plt.plot(xs=[0, 15], ys=[k, k], zs=[0, 0], color='k', linewidth=0.2)
    plt.plot(xs=[k, k], ys=[0, 15], zs=[0, 0], color='k', linewidth=0.2)
# Magenta box highlighting the 5x5 image patch under the filter.
for k in [0, 5]:
    startx = 7
    starty = 5
    plt.plot(xs=[startx, startx + 5], ys=[starty + k, starty + k], zs=[0.01, 0.01], color=[1.0, 0.3, 1.0], linewidth=1.)
    plt.plot(xs=[startx + k, startx + k], ys=[starty + 0, starty + 5], zs=[0.01, 0.01], color=[1.0, 0.3, 1.0], linewidth=1.)
# Gridlines over the filter panel.
for k in range(6):
    plt.plot(xs=[xo_act + 0, xo_act + 5], ys=[yo_act + k, yo_act + k], zs=[0, 0], color='k', linewidth=0.2)
    plt.plot(xs=[xo_act + k, xo_act + k], ys=[yo_act + 0, yo_act + 5], zs=[0, 0], color='k', linewidth=0.2)
# Dashed magenta connectors from the image patch to the filter panel.
plt.plot(xs=[startx + 5, xo_act + 5], ys=[starty, yo_act], color=[1.0, 0.3, 1.0], linewidth=0.4, linestyle='--')
plt.plot(xs=[startx, xo_act], ys=[starty, yo_act], color=[1.0, 0.3, 1.0], linewidth=0.4, linestyle='--')
plt.plot(xs=[startx + 5, xo_act + 5], ys=[starty + 5, yo_act + 5], color=[1.0, 0.3, 1.0], linewidth=0.4, linestyle='--')
# Mean of the element-wise product of the patch and the (normalized) filter.
# NOTE(review): indexed [starty:, startx:] i.e. rows=y, cols=x -- verify this
# matches the transposed indexing data[int(x), int(y)] used for the labels.
convresult = np.mean(img[starty:starty + 5, startx:startx + 5] * filtr / 255 / 255)
print('Conv result:', convresult)
# Offset of the result panel.
xo_act2 = -25
yo_act2 = 12
ax.text(startx + xo_act2 + 0.5, starty + yo_act2 + 0.5, z=0.0, s='%.1f' % convresult, fontweight='bold', fontsize=4, color='blue', horizontalalignment='center', verticalalignment='center')
#plt.plot(xs=[xo_act2+1+startx,xo_act+5], ys=[starty+yo_act2, yo_act],color=[1.0,0.3,1.0], linewidth=0.4, linestyle='--')
# Dashed blue connectors from the filter panel to the result cell.
plt.plot(xs=[xo_act2 + startx, xo_act], ys=[starty + yo_act2, yo_act], color='b', linewidth=0.4, linestyle='--')
plt.plot(xs=[xo_act2 + startx + 1, xo_act + 5], ys=[starty + yo_act2 + 1, yo_act + 5], color='b', linewidth=0.4, linestyle='--')
# Blue box around the filter panel.
for k in [0, 5]:
    plt.plot(xs=[xo_act, xo_act + 5], ys=[yo_act + k, yo_act + k], zs=[0.01, 0.01], color='b', linewidth=1.)
    plt.plot(xs=[xo_act + k, xo_act + k], ys=[yo_act + 0, yo_act + 5], zs=[0.01, 0.01], color='b', linewidth=1.)
# 1x1 blue box around the single result cell.
xo_act3 = xo_act2 + startx
yo_act3 = yo_act2 + starty
for k in [0, 1]:
    plt.plot(xs=[xo_act3, xo_act3 + 1], ys=[yo_act3 + k, yo_act3 + k], zs=[0.01, 0.01], color='b', linewidth=1.)
    plt.plot(xs=[xo_act3 + k, xo_act3 + k], ys=[yo_act3 + 0, yo_act3 + 1], zs=[0.01, 0.01], color='b', linewidth=1.)
# Gridlines over the (11x11) output panel.
for k in range(12):
    plt.plot(xs=[xo_act2 + 0, xo_act2 + 11], ys=[yo_act2 + k, yo_act2 + k], zs=[0, 0], color='k', linewidth=0.2)
    plt.plot(xs=[xo_act2 + k, xo_act2 + k], ys=[yo_act2 + 0, yo_act2 + 11], zs=[0, 0], color='k', linewidth=0.2)
plt.tight_layout()
plt.savefig('conv2d.svg')
|
import pygame
from pygame import sprite
from settings import PROJECT_PATH, ENEMY_DEFAULT_SIZE, BRUTALISK_POINTS
class Brutalisk(sprite.Sprite):
    """An enemy sprite placed at a fixed position, carrying a point value."""

    def __init__(self, x_pos, y_pos):
        sprite.Sprite.__init__(self)
        # Load the enemy artwork and scale it to the shared default size.
        source_image = pygame.image.load(
            PROJECT_PATH + "/brutalisk/images/enemy1_1.png"
        )
        self.image = pygame.transform.scale(source_image, ENEMY_DEFAULT_SIZE)
        self.rect = self.image.get_rect(topleft=(x_pos, y_pos))
        # Score awarded for this enemy (constant from settings).
        self.points = BRUTALISK_POINTS

    def update(self):
        """No per-frame behaviour of its own."""
        pass
|
import utils.import_envs # noqa: F401 pylint: disable=unused-import
from contrastive_highlights.Interfaces.frogger_interface import FroggerInterface
from contrastive_highlights.Interfaces.gym_interface import GymInterface
def get_agent(args):
    """Build the environment/agent pair selected by ``args.agent_type``.

    ``"gym"`` selects the Gym interface; any other value falls through to the
    Frogger interface (matching the original dispatch). The environment is
    seeded with 0 for reproducible runs.
    """
    chosen_interface = (
        GymInterface(args.config, args.output_dir)
        if args.agent_type == "gym"
        # elif args.agent_type == "frogger":
        else FroggerInterface(args.agent_config, args.output_dir, args.n_traces)
    )
    env, agent = chosen_interface.initiate()
    agent.interface = chosen_interface
    env.seed(0)
    return env, agent
|
import numpy
import logging
from blocks.bricks import Brick
from blocks.select import Selector
logger = logging.getLogger(__name__)
def save_params(bricks, path):
    """Save bricks parameters.

    Saves parameters with their paths into an .npz file. Slashes in parameter
    paths are replaced by dashes because ``numpy.savez`` cannot handle them.

    Parameters
    ----------
    bricks : Brick or Selector
        The bricks whose parameters should be saved.
    path : str or file
        Destination for saving.
    """
    if isinstance(bricks, Brick):
        bricks = Selector([bricks])
    assert isinstance(bricks, Selector)
    # numpy.savez is vulnerable to slashes in names, so sanitize them first.
    sanitized = {}
    for name, param in bricks.get_params().items():
        sanitized[name.replace("/", "-")] = param.get_value()
    numpy.savez(path, **sanitized)
def load_params(bricks, path):
    """Load brick parameters.

    Loads parameters from an .npz file where they are saved with their paths
    (dashes in the archive names are converted back to slashes). Shapes must
    match the current parameter values exactly. Unknown parameters and
    parameters with no stored value are logged as errors.

    Parameters
    ----------
    bricks : Brick or Selector
        The bricks to populate.
    path : str or file
        Source for loading.
    """
    if isinstance(bricks, Brick):
        bricks = Selector([bricks])
    assert isinstance(bricks, Selector)
    # Undo the slash -> dash substitution applied when saving.
    stored = {}
    for archive_name, value in numpy.load(path).items():
        stored[archive_name.replace("-", "/")] = value
    for name, value in stored.items():
        matches = bricks.select(name)
        if not matches:
            logger.error("Unknown parameter {}".format(name))
        assert len(matches) == 1
        target = matches[0]
        current_shape = target.get_value(
            borrow=True, return_internal_type=True).shape
        assert current_shape == value.shape
        target.set_value(value)
    # Warn about model parameters the archive did not provide.
    for name in bricks.get_params().keys():
        if name not in stored:
            logger.error("No value is provided for the parameter {}"
                         .format(name))
|
"""
56.19%
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def isSubtree(self, s, t):
        """Return True if tree t occurs as a subtree of tree s.

        :type s: TreeNode
        :type t: TreeNode
        :rtype: bool
        """
        # A match rooted here, or (if s is non-empty) in either child.
        if self.isMatch(s, t):
            return True
        return bool(s) and (
            self.isSubtree(s.left, t) or self.isSubtree(s.right, t)
        )

    def isMatch(self, s, t):
        """Return True if the trees rooted at s and t are structurally identical."""
        if s is None or t is None:
            # Identical only when both are empty.
            return s is t
        if s.val != t.val:
            return False
        return self.isMatch(s.left, t.left) and self.isMatch(s.right, t.right)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_utils import timeutils
from zun.common import context
from zun import objects
from zun.scheduler.filters import compute_filter
from zun.tests import base
from zun.tests.unit.scheduler import fakes
@mock.patch('zun.api.servicegroup.ServiceGroup.service_is_up')
class TestComputeFilter(base.TestCase):
    """Tests for the compute scheduler filter's host_passes decisions."""

    def setUp(self):
        super(TestComputeFilter, self).setUp()
        self.context = context.RequestContext('fake_user', 'fake_project')

    def test_compute_filter_manual_disable(self, service_up_mock):
        """A manually disabled service fails without a liveness check."""
        filt_cls = compute_filter.ComputeFilter()
        container = objects.Container(self.context)
        extra_spec = {}
        service = objects.ZunService(self.context)
        service.disabled = True
        service.disabled_reason = 'This is a reason!'
        host = fakes.FakeHostState('host1',
                                   {'service': service})
        self.assertFalse(filt_cls.host_passes(host, container,
                                              extra_spec))
        # Disabled hosts are rejected outright; liveness is never consulted.
        self.assertFalse(service_up_mock.called)

    def test_compute_filter_sgapi_passes(self, service_up_mock):
        """An enabled, live service passes the filter."""
        filt_cls = compute_filter.ComputeFilter()
        container = objects.Container(self.context)
        service = objects.ZunService(self.context)
        service.disabled = False
        extra_spec = {}
        host = fakes.FakeHostState('host2',
                                   {'service': service})
        service_up_mock.return_value = True
        self.assertTrue(filt_cls.host_passes(host, container,
                                             extra_spec))
        service_up_mock.assert_called_once_with(service)

    def test_compute_filter_sgapi_fails(self, service_up_mock):
        """An enabled but non-live service fails the filter."""
        # Renamed local from `filts_cls` to `filt_cls` for consistency with
        # the sibling tests above.
        filt_cls = compute_filter.ComputeFilter()
        container = objects.Container(self.context)
        service = objects.ZunService(self.context)
        service.disabled = False
        service.updated_at = timeutils.utcnow()
        extra_spec = {}
        host = fakes.FakeHostState('host3',
                                   {'service': service})
        service_up_mock.return_value = False
        self.assertFalse(filt_cls.host_passes(host, container,
                                              extra_spec))
        service_up_mock.assert_called_once_with(service)
|
from __future__ import division
from typing import Optional, Union
import numpy as np # type: ignore
import cupy as cp # type: ignore
from gepapy.operations import Operations
class Single_Machine(Operations):
    """Single_Machine.

    Genetic-algorithm scheduler for a single-machine problem, built on the
    setters and operators inherited from ``Operations``.
    """

    def __init__(
        self,
        processing_time: Optional[Union[list, np.ndarray, cp.core.core.ndarray]],
        due_date: Optional[Union[list, np.ndarray, cp.core.core.ndarray]],
        weights: Optional[Union[list, np.ndarray, cp.core.core.ndarray]],
        n_samples: int,
        n_jobs: int,
        percent_cross: float = 0.5,
        percent_intra_cross: float = 0.5,
        percent_mutation: float = 0.5,
        percent_intra_mutation: float = 0.1,
        percent_migration: float = 0.5,
        percent_selection: float = 0.1,
        fitness_type: str = "E_Lw",
    ):
        """__init__.

        :param processing_time: per-job processing times (length n_jobs -- TODO confirm)
        :type processing_time: Optional[Union[list, np.ndarray, cp.core.core.ndarray]]
        :param due_date: per-job due dates
        :type due_date: Optional[Union[list, np.ndarray, cp.core.core.ndarray]]
        :param weights: per-job weights used by the fitness function
        :type weights: Optional[Union[list, np.ndarray, cp.core.core.ndarray]]
        :param n_samples: population size
        :type n_samples: int
        :param n_jobs: number of jobs to schedule
        :type n_jobs: int
        :param percent_cross: fraction of the population subject to crossover
        :type percent_cross: float
        :param percent_intra_cross: crossover rate within a chromosome
        :type percent_intra_cross: float
        :param percent_mutation: fraction of the population subject to mutation
        :type percent_mutation: float
        :param percent_intra_mutation: mutation rate within a chromosome
        :type percent_intra_mutation: float
        :param percent_migration: fraction replaced by migration
        :type percent_migration: float
        :param percent_selection: fraction kept by selection
        :type percent_selection: float
        :param fitness_type: fitness function identifier (default "E_Lw")
        :type fitness_type: str
        """
        # NOTE(review): the setters below come from Operations and mix public
        # (set_*) and private (_set_*) names; the call order appears
        # significant (e.g. population is built last) -- do not reorder.
        self._initialized = False
        self._n_samples = self.set_n_samples(n_samples)
        self._n_jobs = self._set_n_jobs(n_jobs)
        # Single-machine problem: exactly one machine and one operation.
        self._n_machines = 1
        self._n_operations = 1
        self._fitness_type = self.set_fitness_type(fitness_type)
        # Processing times get a trailing machine axis of size 1.
        self._processing_time = cp.expand_dims(
            self._set_processing_time(processing_time), axis=1
        )
        # All jobs run on machine 0.
        self._machine_sequence = cp.expand_dims(
            cp.zeros(n_jobs, dtype=cp.float32), axis=1
        )
        self._due_date = self._set_due_date(due_date)
        self._weights = self._set_weights(weights)
        self._percent_cross = self._set_percent_cross(percent_cross)
        self._percent_intra_cross = self.set_percent_intra_cross(percent_intra_cross)
        self._percent_mutation = self._set_percent_mutation(percent_mutation)
        self._percent_intra_mutation = self.set_percent_intra_mutation(
            percent_intra_mutation
        )
        self._percent_migration = self._set_percent_migration(percent_migration)
        self._percent_selection = self._set_percent_selection(percent_selection)
        # Empty GPU arrays; set_population() fills the initial population.
        self._fitness = cp.array([], dtype=cp.float32)
        self._population = cp.array([], dtype=cp.float32)
        self._population = self.set_population()
        self._initialized = True
|
import glypy
from glypy.structure import constants, substituent, glycan
from glypy.structure import link, named_structures, structure_composition
from glypy.io import glycoct, linear_code
from glypy.utils import StringIO, identity as ident_op, multimap, pickle, ET, enum
# Registry of named GlycoCT structure strings; populated by the assignments below.
structures = {}
# Convenience alias for glypy's monosaccharide registry.
monosaccharides = glypy.monosaccharides
def emit(arg):
    """Print *arg* to stdout and return it unchanged (pass-through for debugging)."""
    print(arg)
    return arg
def load(name):
    """Parse and return the named GlycoCT structure from ``structures``.

    Composition warnings are suppressed during parsing and re-enabled
    afterwards.

    :param name: Key into the module-level ``structures`` dictionary.
    :return: The structure parsed by ``glycoct.loads``.
    """
    structure_composition.do_warn = False
    try:
        return glycoct.loads(structures[name])
    finally:
        # Re-enable warnings even if parsing raises; the original version
        # left them disabled on error.
        structure_composition.do_warn = True
structures["common_glycan"] = '''
RES
1b:b-dglc-HEX-1:5
2b:b-dgal-HEX-1:5
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:a-lgal-HEX-1:5|6:d
6b:b-dgal-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:a-lgal-HEX-1:5|6:d
10b:b-dgal-HEX-1:5
LIN
1:1o(4+1)2d
2:2o(3+1)3d
3:3d(2+1)4n
4:3o(3+1)5d
5:3o(4+1)6d
6:6o(3+1)7d
7:7d(2+1)8n
8:7o(3+1)9d
9:7o(4+1)10d'''
structures["branchy_glycan"] = '''
RES
1b:x-dglc-HEX-x:x
2s:n-acetyl
3b:b-dman-HEX-1:5
4b:a-dman-HEX-1:5
5b:b-dglc-HEX-1:5
6s:n-acetyl
7b:b-dgal-HEX-1:5
8b:b-dglc-HEX-1:5
9s:n-acetyl
10b:b-dgal-HEX-1:5
11b:a-dman-HEX-1:5
12b:b-dglc-HEX-1:5
13s:n-acetyl
14b:b-dgal-HEX-1:5
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3o(3+1)11d
4:3o(6+1)4d
5:4o(2+1)8d
6:4o(6+1)5d
7:5d(2+1)6n
8:5o(4+1)7d
9:8d(2+1)9n
10:8o(4+1)10d
11:11o(2+1)12d
12:12d(2+1)13n
13:12o(4+1)14d'''
structures["broad_n_glycan"] = '''
RES
1b:b-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:b-dgal-HEX-1:5
10b:a-lgal-HEX-1:5|6:d
11b:b-dglc-HEX-1:5
12s:n-acetyl
13b:b-dgal-HEX-1:5
14b:a-dman-HEX-1:5
15b:b-dglc-HEX-1:5
16s:n-acetyl
17b:b-dgal-HEX-1:5
18b:b-dglc-HEX-1:5
19s:n-acetyl
20b:b-dgal-HEX-1:5
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(3+1)14d
6:5o(6+1)6d
7:6o(2+1)11d
8:6o(6+1)7d
9:7d(2+1)8n
10:7o(3+1)10d
11:7o(4+1)9d
12:11d(2+1)12n
13:11o(4+1)13d
14:14o(2+1)18d
15:14o(4+1)15d
16:15d(2+1)16n
17:15o(4+1)17d
18:18d(2+1)19n
19:18o(4+1)20d'''
structures["sulfated_glycan"] = '''
RES
1b:o-dgal-HEX-0:0|1:aldi
2b:b-dglc-HEX-1:5
3s:n-acetyl
4b:b-dgal-HEX-1:5
5b:b-dglc-HEX-1:5
6s:n-acetyl
7b:b-dgal-HEX-1:5
8b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
9s:n-acetyl
10s:sulfate
11s:sulfate
12s:sulfate
LIN
1:1o(3+1)2d
2:2d(2+1)3n
3:2o(4+1)4d
4:4o(3+1)5d
5:5d(2+1)6n
6:5o(4+1)7d
7:7o(6+2)8d
8:8d(5+1)9n
9:5o(6+1)10n
10:4o(6+1)11n
11:2o(6+1)12n
'''
structures["complex_glycan"] = '''
RES
1b:x-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:a-lgal-HEX-1:5|6:d
10b:b-dgal-HEX-1:5
11b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
12s:n-glycolyl
13b:b-dglc-HEX-1:5
14s:n-acetyl
15b:b-dgal-HEX-1:5
16s:n-acetyl
17b:b-dglc-HEX-1:5
18s:n-acetyl
19b:a-dman-HEX-1:5
20b:b-dglc-HEX-1:5
21s:n-acetyl
22b:a-lgal-HEX-1:5|6:d
23b:b-dgal-HEX-1:5
24b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
25s:n-glycolyl
26b:b-dglc-HEX-1:5
27s:n-acetyl
28b:a-lgal-HEX-1:5|6:d
29b:b-dgal-HEX-1:5
30b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
31s:n-acetyl
32b:a-lgal-HEX-1:5|6:d
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(3+1)6d
6:6o(2+1)7d
7:7d(2+1)8n
8:7o(3+1)9d
9:7o(4+1)10d
10:10o(3+2)11d
11:11d(5+1)12n
12:6o(4+1)13d
13:13d(2+1)14n
14:13o(4+1)15d
15:15d(2+1)16n
16:5o(4+1)17d
17:17d(2+1)18n
18:5o(6+1)19d
19:19o(2+1)20d
20:20d(2+1)21n
21:20o(3+1)22d
22:20o(4+1)23d
23:23o(3+2)24d
24:24d(5+1)25n
25:19o(6+1)26d
26:26d(2+1)27n
27:26o(3+1)28d
28:26o(4+1)29d
29:29o(3+2)30d
30:30d(5+1)31n
31:1o(6+1)32d
'''
structures["complex_glycan_with_ambiguous_link"] = '''
RES
1b:x-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:a-lgal-HEX-1:5|6:d
10b:b-dgal-HEX-1:5
11b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
12s:n-glycolyl
13b:b-dglc-HEX-1:5
14s:n-acetyl
15b:b-dgal-HEX-1:5
16s:n-acetyl
17b:b-dglc-HEX-1:5
18s:n-acetyl
19b:a-dman-HEX-1:5
20b:b-dglc-HEX-1:5
21s:n-acetyl
22b:a-lgal-HEX-1:5|6:d
23b:b-dgal-HEX-1:5
24b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
25s:n-glycolyl
26b:b-dglc-HEX-1:5
27s:n-acetyl
28b:a-lgal-HEX-1:5|6:d
29b:b-dgal-HEX-1:5
30b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
31s:n-acetyl
32b:a-lgal-HEX-1:5|6:d
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(3+1)6d
6:6o(2+1)7d
7:7d(2+1)8n
8:7o(3+1)9d
9:7o(4+1)10d
10:10o(3+2)11d
11:11d(5+1)12n
12:6o(4+1)13d
13:13d(2+1)14n
14:13o(4+1)15d
15:15d(2+1)16n
16:5o(4+1)17d
17:17d(2+1)18n
18:5o(6+1)19d
19:19o(2+1)20d
20:20d(2+1)21n
21:20o(3+1)22d
22:20o(4+1)23d
23:23o(3+2)24d
24:24d(5+1)25n
25:19o(6+1)26d
26:26d(2+1)27n
27:26o(3+1)28d
28:26o(4+1)29d
29:29o(3+2|6)30d
30:30d(5+1)31n
31:1o(6+1)32d
'''
structures["cyclical_glycan"] = '''
RES
1b:a-dglc-HEX-1:5
2b:a-dglc-HEX-1:5
3b:a-dglc-HEX-1:5
4b:a-dglc-HEX-1:5
LIN
1:1o(3+1)2d
2:2o(6+1)3d
3:3o(3+1)4d
4:4o(6+1)1d
'''
structures["repeating_glycan"] = '''
RES
1b:a-dglc-HEX-1:5
2b:a-dglc-HEX-1:5
3r:r1
4b:b-dara-HEX-2:5|2:keto
LIN
1:1o(1+1)2d
2:2o(6+2)3n
3:3n(1+2)4d
REP
REP1:5o(1+2)5d=-1--1
RES
5b:b-dara-HEX-2:5|2:keto
'''
structures["big_repeating_glycan"] = '''
RES
1b:b-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:b-dgal-HEX-1:5
10b:b-dglc-HEX-1:5
11s:n-acetyl
12b:b-dgal-HEX-1:5
13r:r1
14b:b-dglc-HEX-1:5
15s:n-acetyl
16b:b-dgal-HEX-1:5
17b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
18s:n-acetyl
19b:a-dman-HEX-1:5
20b:b-dglc-HEX-1:5
21s:n-acetyl
22b:b-dgal-HEX-1:5
23r:r2
24b:b-dglc-HEX-1:5
25s:n-acetyl
26b:b-dgal-HEX-1:5
27b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
28s:n-acetyl
29b:a-lgal-HEX-1:5|6:d
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(3+1)6d
6:6o(2+1)7d
7:7d(2+1)8n
8:7o(4+1)9d
9:9o(3+1)10d
10:10d(2+1)11n
11:10o(4+1)12d
12:6o(4+1)13n
13:13n(3+1)14d
14:14d(2+1)15n
15:14o(4+1)16d
16:16o(3|6+2)17d
17:17d(5+1)18n
18:5o(6+1)19d
19:19o(2+1)20d
20:20d(2+1)21n
21:20o(4+1)22d
22:19o(6+1)23n
23:23n(3+1)24d
24:24d(2+1)25n
25:24o(4+1)26d
26:26o(3|6+2)27d
27:27d(5+1)28n
28:1o(6+1)29d
REP
REP1:32o(3+1)30d=-1--1
RES
30b:b-dglc-HEX-1:5
31s:n-acetyl
32b:b-dgal-HEX-1:5
33b:b-dglc-HEX-1:5
34s:n-acetyl
35b:a-dgal-HEX-1:5
36b:a-dgal-HEX-1:5
LIN
29:30d(2+1)31n
30:30o(4+1)32d
31:32o(6+1)33d
32:33d(2+1)34n
33:33o(4+1)35d
34:35o(3+1)36d
REP2:39o(3+1)37d=-1--1
RES
37b:b-dglc-HEX-1:5
38s:n-acetyl
39b:b-dgal-HEX-1:5
40b:b-dglc-HEX-1:5
41s:n-acetyl
42b:a-dgal-HEX-1:5
43b:a-dgal-HEX-1:5
LIN
35:37d(2+1)38n
36:37o(4+1)39d
37:39o(6+1)40d
38:40d(2+1)41n
39:40o(4+1)42d
40:42o(3+1)43d
'''
structures['G58143RL'] = '''
RES
1b:x-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:b-dgal-HEX-1:5
10b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
11s:n-acetyl
12b:a-dman-HEX-1:5
13b:b-dglc-HEX-1:5
14s:n-acetyl
15b:b-dgal-HEX-1:5
16b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
17s:n-acetyl
18s:sulfate
19b:a-lgal-HEX-1:5|6:d
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(3+1)6d
6:6o(2+1)7d
7:7d(2+1)8n
8:7o(4+1)9d
9:9o(6+2)10d
10:10d(5+1)11n
11:5o(6+1)12d
12:12o(2+1)13d
13:13d(2+1)14n
14:13o(4+1)15d
15:15o(6+2)16d
16:16d(5+1)17n
17:13o(6+1)18n
18:1o(6+1)19d'''
structures['G82388RB'] = '''RES
1b:b-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:b-dgal-HEX-1:5
10b:a-dgal-HEX-1:5
11b:a-dman-HEX-1:5
12b:b-dglc-HEX-1:5
13s:n-acetyl
14b:b-dgal-HEX-1:5
15b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
16s:n-acetyl
17b:x-lgal-HEX-1:5|6:d
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(-1+1)6d
6:6o(2+1)7d
7:7d(2+1)8n
8:7o(4+1)9d
9:9o(3+1)10d
10:5o(-1+1)11d
11:11o(2+1)12d
12:12d(2+1)13n
13:12o(4+1)14d
14:14o(3+2)15d
15:15d(5+1)16n
16:1o(6+1)17d'''
structures['G28839WC'] = '''RES
1b:b-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:a-dman-HEX-1:5
10b:b-dglc-HEX-1:5
11b:b-dgal-HEX-1:5
12b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
13s:n-acetyl
14s:n-acetyl
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(-1+1)6d
6:6o(-1+1)7d
7:7d(2+1)8n
8:5o(-1+1)9d
9:9o(-1+1)10d
10:10o(-1+1)11d
11:11o(-1+2)12d
12:12d(5+1)13n
13:10d(2+1)14n'''
structures['G37369XO'] = '''RES
1b:b-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:a-dman-HEX-1:5
8b:a-dman-HEX-1:5
9b:a-dman-HEX-1:5
10b:b-dglc-HEX-1:5
11s:n-acetyl
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(-1+1)6d
6:6o(-1+1)7d
7:6o(-1+1)8d
8:5o(-1+1)9d
9:9o(-1+1)10d
10:10d(2+1)11n
'''
structures['G27293OK'] = '''RES
1b:b-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:a-dman-HEX-1:5
10b:b-dglc-HEX-1:5
11s:n-acetyl
12b:a-lgal-HEX-1:5|6:d
13b:b-dgal-HEX-1:5
14b:a-lgal-HEX-1:5|6:d
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(3+1)6d
6:5o(4+1)7d
7:7d(2+1)8n
8:5o(6+1)9d
9:9o(2+1)10d
10:10d(2+1)11n
11:10o(3+1)12d
12:10o(4+1)13d
13:1o(6+1)14d'''
structures['G36221RT'] = '''
RES
1b:b-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:a-lgal-HEX-1:5|6:d
10b:b-dgal-HEX-1:5
11b:a-dman-HEX-1:5
12b:b-dglc-HEX-1:5
13s:n-acetyl
14b:a-lgal-HEX-1:5|6:d
15b:b-dgal-HEX-1:5
16s:n-acetyl
17b:a-lgal-HEX-1:5|6:d
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(-1+1)6d
6:6o(2+1)7d
7:7d(2+1)8n
8:7o(3+1)9d
9:7o(4+1)10d
10:5o(-1+1)11d
11:11o(2+1)12d
12:12d(2+1)13n
13:12o(3+1)14d
14:12o(4+1)15d
15:15d(2+1)16n
16:1o(6+1)17d
'''
structures['G62831KM'] = '''RES
1b:b-dglc-HEX-1:5
2b:x-lgal-HEX-1:5|6:d
3s:n-acetyl
4b:b-dglc-HEX-1:5
5s:n-acetyl
6b:b-dman-HEX-1:5
7b:a-dman-HEX-1:5
8b:x-dglc-HEX-1:5
9b:x-dgal-HEX-1:5
10s:n-acetyl
11b:a-dman-HEX-1:5
12b:x-dglc-HEX-1:5
13b:x-dgal-HEX-1:5
14b:x-dgro-dgal-NON-2:6|1:a|2:keto|3:d
15s:n-acetyl
16s:n-acetyl
LIN
1:1o(-1+1)2d
2:1d(2+1)3n
3:1o(4+1)4d
4:4d(2+1)5n
5:4o(4+1)6d
6:6o(-1+1)7d
7:7o(-1+1)8d
8:8o(-1+1)9d
9:8d(2+1)10n
10:6o(-1+1)11d
11:11o(-1+1)12d
12:12o(-1+1)13d
13:13o(-1+2)14d
14:14d(5+1)15n
15:12d(2+1)16n'''
structures['G65832JS'] = '''RES
1b:b-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:b-dgal-HEX-1:5
10b:a-dman-HEX-1:5
11b:b-dglc-HEX-1:5
12s:n-acetyl
13b:b-dgal-HEX-1:5
14b:b-dglc-HEX-1:5
15s:n-acetyl
16b:b-dgal-HEX-1:5
17b:b-dglc-HEX-1:5
18s:n-acetyl
19b:b-dgal-HEX-1:5
20b:a-lgal-HEX-1:5|6:d
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(3+1)6d
6:6o(2+1)7d
7:7d(2+1)8n
8:7o(4+1)9d
9:5o(6+1)10d
10:10o(2+1)11d
11:11d(2+1)12n
12:11o(4+1)13d
13:13o(3+1)14d
14:14d(2+1)15n
15:14o(4+1)16d
16:10o(6+1)17d
17:17d(2+1)18n
18:17o(4+1)19d
19:1o(6+1)20d'''
structures['G07337US'] = '''RES
1b:b-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:b-dgal-HEX-1:5
10b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
11s:n-acetyl
12b:a-dman-HEX-1:5
13b:b-dglc-HEX-1:5
14s:n-acetyl
15b:a-lgal-HEX-1:5|6:d
16b:b-dgal-HEX-1:5
17b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d
18s:n-acetyl
19b:a-lgal-HEX-1:5|6:d
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(3+1)6d
6:6o(2+1)7d
7:7d(2+1)8n
8:7o(4+1)9d
9:9o(3+2)10d
10:10d(5+1)11n
11:5o(6+1)12d
12:12o(2+1)13d
13:13d(2+1)14n
14:13o(3+1)15d
15:13o(4+1)16d
16:16o(3+2)17d
17:17d(5+1)18n
18:1o(6+1)19d'''
structures['G01120GS'] = '''RES
1b:b-dglc-HEX-1:5
2s:n-acetyl
3b:b-dglc-HEX-1:5
4s:n-acetyl
5b:b-dman-HEX-1:5
6b:a-dman-HEX-1:5
7b:b-dglc-HEX-1:5
8s:n-acetyl
9b:b-dgal-HEX-1:5
10b:a-dman-HEX-1:5
11b:b-dglc-HEX-1:5
12s:n-acetyl
13b:b-dgal-HEX-1:5
14b:b-dglc-HEX-1:5
15s:n-acetyl
16b:b-dgal-HEX-1:5
17b:a-lgal-HEX-1:5|6:d
18b:a-dgal-HEX-1:5
19s:n-acetyl
LIN
1:1d(2+1)2n
2:1o(4+1)3d
3:3d(2+1)4n
4:3o(4+1)5d
5:5o(3+1)6d
6:6o(2+1)7d
7:7d(2+1)8n
8:7o(4+1)9d
9:5o(6+1)10d
10:10o(-1+1)11d
11:11d(2+1)12n
12:11o(4+1)13d
13:10o(-1+1)14d
14:14d(2+1)15n
15:14o(4+1)16d
16:16o(2+1)17d
17:16o(3+1)18d
18:18d(2+1)19n'''
|
import setuptools
# Package metadata / build configuration for the "m-spacedog" distribution.
# The importable code lives under the ./python directory.
setuptools.setup(
    name="m-spacedog",
    packages=setuptools.find_packages(where="python"),
    package_dir={
        "": "python",
    },
    # NOTE: classifiers must be a *list* — recent setuptools releases reject
    # a tuple here during metadata validation (the original passed a tuple).
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        "numpy",
        "scipy",
        "openfermion",
        "cirq",
        "openfermioncirq",
    ],
)
|
# Generic/Built-in
import json
import logging
import os
# Owned
from cloudmesh.common.console import Console
from cloudmesh.configuration.Config import Config
from cloudmesh.storagelifecycle.StorageABC import StorageABC
# Other
import boto3
from botocore.exceptions import ClientError
class Provider(StorageABC):
    '''
    Encapsulates methods used to cost effectively manage the lifecycle
    of objects stored in a cloud service provider (CSP) throughout the
    objects' lifecycle.
    '''

    def __init__(self, service=None, config="~/.cloudmesh/cloudmesh.yaml"):
        '''Initializes the provider. The default parameters are read
        from the configuration file that is defined in yaml format.

        :param service: The name of the provider as defined in the yaml file
        :param config: The location of the yaml configuration file
        '''
        super().__init__(service=service, config=config)

        # Load config values from cloudmesh.yaml
        self.config = Config()
        self.credentials = self.config["cloudmesh"]["storage"][service]["credentials"]
        # Backward-compatible alias: earlier versions exposed this attribute
        # under the misspelled name "credentails".
        self.credentails = self.credentials

        # Create client connection
        self.s3_client = boto3.client(
            's3',
            aws_access_key_id=self.credentials["access_key_id"],
            aws_secret_access_key=self.credentials["secret_access_key"],
            region_name=self.credentials["region"]
        )

        # Basic lifecycle config template for setting expiry in days;
        # "Days" is filled in by load_config_files() when --expiry_in_days
        # is used.
        self.lifecycle_rule = {
            "Rules": [
                {
                    "ID": "Object Store Expiration Rule",
                    "Filter": {
                        "Prefix": ""
                    },
                    "Status": "Enabled",
                    "Expiration": {
                        "Days": ""
                    }
                }
            ]}

    def put(self, storage_provider, storage_bucket_name, args):
        '''Sets lifecycle configuration rules for your bucket. If a
        lifecycle configuration exists, it replaces it.

        :param storage_provider: Name of the cloud service provider
        :param storage_bucket_name: Name of the storage bucket
        :param args: CLI arguments carrying either expiry_in_days or
            lifecycle_config (see load_config_files)
        :returns: AWS response dict, or {} if the service call failed
        '''
        result = {}
        try:
            # Build the lifecycle configuration from the CLI arguments.
            config_file = self.load_config_files(args)
            # Invoke service
            result = self.s3_client.put_bucket_lifecycle_configuration(
                Bucket=storage_bucket_name,
                LifecycleConfiguration=config_file)
            # Debug
            Console.ok(json.dumps(result, indent=4, sort_keys=True))
        except ClientError as error:
            Console.error(error, prefix=True, traceflag=True)
        return result

    def delete(self, storage_provider, storage_bucket_name):
        '''Deletes the lifecycle configuration defined for a bucket.

        :param storage_provider: Name of the cloud service provider
        :param storage_bucket_name: Name of the storage bucket
        :returns: AWS response dict, or False if the service call failed
        '''
        try:
            # Invoke service
            result = self.s3_client.delete_bucket_lifecycle(Bucket=storage_bucket_name)
            # Debug
            Console.ok(json.dumps(result, indent=4, sort_keys=True))
        except ClientError as error:
            Console.error(error, prefix=True, traceflag=True)
            return False
        return result

    def get(self, storage_provider, storage_bucket_name):
        '''Loads the lifecycle configuration defined for a bucket.

        :param storage_provider: Name of the cloud service provider
        :param storage_bucket_name: Name of the storage bucket
        :returns: list of lifecycle rules; [] if the bucket has no lifecycle
            configuration; None on any other service error
        '''
        try:
            # Invoke service
            result = self.s3_client.get_bucket_lifecycle_configuration(Bucket=storage_bucket_name)
            # Debug
            Console.ok(json.dumps(result, indent=4, sort_keys=True))
        except ClientError as error:
            if error.response['Error']['Code'] == 'NoSuchLifecycleConfiguration':
                # A bucket without a lifecycle config is not an error.
                Console.warning(error.response['Error']['Code'])
                return []
            else:
                # e.response['Error']['Code'] == 'NoSuchBucket', etc.
                Console.error(error, prefix=True, traceflag=True)
                return None
        return result['Rules']

    def load_config_files(self, config_file_args):
        '''Builds the lifecycle configuration dict based on the user option
        (--expiry_in_days=NUM_DAYS | --lifecycle_config FILE).

        :param config_file_args: object with attributes ``expiry_in_days``
            and ``lifecycle_config``
        :returns: lifecycle configuration dict ({} if loading failed)
        '''
        # Initialize dict
        config_file = {}
        try:
            if config_file_args.expiry_in_days:
                # Fill the expiry into the lifecycle config template.
                self.lifecycle_rule['Rules'][0]['Expiration']['Days'] = int(config_file_args.expiry_in_days)
                config_file = self.lifecycle_rule
            else:
                # Get full path to config file
                config_file_uri = os.path.expanduser(config_file_args.lifecycle_config)
                # Open file, read contents, convert string to dict, implicitly close file
                with open(config_file_uri, 'r') as json_file:
                    config_file = json.load(json_file)
        except Exception as error:
            Console.error(error, prefix=True, traceflag=True)
        return config_file
import requests
import json
import base64
import random
from Sakurajima.models import (
Anime,
RecommendationEntry,
Relation,
AniWatchEpisode,
Episode,
ChronicleEntry,
UserAnimeListEntry,
UserMedia,
UserOverview,
AniwatchStats,
Notification,
WatchListEntry,
Media,
)
from Sakurajima.utils.episode_list import EpisodeList
class Sakurajima:
    """Client for the aniwatch.me private AJAX API.

    Every request is a POST to one endpoint; each method builds a ``data``
    payload with a ``controller``/``action`` pair plus parameters and wraps
    the JSON response in the matching model class from Sakurajima.models.
    """

    def __init__(
        self,
        username=None,
        userId=None,
        authToken=None,
        endpoint="https://aniwatch.me/api/ajax/APIHandle",
    ):
        """Set up headers and cookies for API calls.

        If ``username``, ``userId`` and ``authToken`` are all supplied, an
        authenticated SESSION cookie (a JSON blob of account settings,
        built below) is attached as well; otherwise only the XSRF pair is
        sent and the client is effectively anonymous.
        """
        # The same random token is sent as both a header and a cookie
        # (XSRF double-submit pair).
        xsrf_token = self.__generate_xsrf_token()
        self.userId = userId
        self.headers = {
            "x-xsrf-token": xsrf_token,
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36",
        }
        self.cookies = {"xsrf-token": xsrf_token}
        self.API_URL = endpoint
        if username is not None and userId is not None and authToken is not None:
            self.headers["x-auth"] = authToken
            # Hand-assembled JSON string mimicking the site's SESSION cookie;
            # the hard-coded fields are presumably default account settings —
            # TODO confirm against a live session.
            session_token = (
                '{"userid":'
                + str(userId)
                + ',"username":"'
                + str(username)
                + '","usergroup":4,"player_lang":1,"player_quality":0,"player_time_left_side":2,"player_time_right_side":3,"screen_orientation":1,"nsfw":1,"chrLogging":1,"mask_episode_info":0,"blur_thumbnails":0,"autoplay":1,"preview_thumbnails":1,"update_watchlist":1,"playheads":1,"seek_time":5,"cover":null,"title":"Member","premium":1,"lang":"en-US","auth":"'
                + str(authToken)
                + '","remember_login":true}'
            )
            self.cookies["SESSION"] = session_token
            self.headers[
                "COOKIE"
            ] = f"SESSION={session_token}; XSRF-TOKEN={xsrf_token};"

    def __generate_xsrf_token(self):
        """Return a 32-character pseudo-random token of hex-like characters.

        NOTE: the letters a-f appear twice in the pool, so they are drawn
        with twice the weight of the digits; this matches the original
        implementation and is preserved as-is.
        """
        characters = [
            "a",
            "b",
            "c",
            "d",
            "e",
            "f",
            "0",
            "1",
            "2",
            "3",
            "4",
            "5",
            "6",
            "7",
            "8",
            "9",
            "a",
            "b",
            "c",
            "d",
            "e",
            "f",
        ]
        return "".join(random.choice(characters) for i in range(32))

    def __post(self, data):
        """POST ``data`` as JSON to the API endpoint and return the decoded
        JSON response as Python objects."""
        with requests.post(
            self.API_URL, headers=self.headers, json=data, cookies=self.cookies
        ) as url:
            return json.loads(url.text)

    def get_episode(self, episode_id, lang="en-US"):
        """Fetch a single episode's watch data as an AniWatchEpisode."""
        data = {
            "controller": "Anime",
            "action": "watchAnime",
            "lang": lang,
            "ep_id": episode_id,
            "hoster": "",
        }
        return AniWatchEpisode(self.__post(data), episode_id)

    def get_episodes(self, anime_id):
        """Fetch all episodes of an anime, wrapped in an EpisodeList."""
        data = {
            "controller": "Anime",
            "action": "getEpisodes",
            "detail_id": str(anime_id),
        }
        return EpisodeList(
            [
                Episode(data_dict, self.headers, self.cookies, self.API_URL, anime_id)
                for data_dict in self.__post(data)["episodes"]
            ]
        )

    def get_anime(self, anime_id):
        """Fetch a single anime's details as an Anime model."""
        data = {"controller": "Anime", "action": "getAnime", "detail_id": str(anime_id)}
        return Anime(
            self.__post(data)["anime"],
            headers=self.headers,
            cookies=self.cookies,
            api_url=self.API_URL,
        )

    def get_recommendations(self, anime_id):
        """Fetch recommendations for an anime as RecommendationEntry objects."""
        data = {
            "controller": "Anime",
            "action": "getRecommendations",
            "detail_id": str(anime_id),
        }
        return [
            RecommendationEntry(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def get_relation(self, relation_id):
        """Fetch a relation (franchise grouping) by its id."""
        data = {
            "controller": "Relation",
            "action": "getRelation",
            "relation_id": relation_id,
        }
        return Relation(self.__post(data)["relation"])

    def get_seasonal_anime(self, index="null", year="null"):
        """Fetch seasonal anime; index/year default to the API's "null"
        strings, which presumably select the current season — TODO confirm."""
        data = {
            "controller": "Anime",
            "action": "getSeasonalAnime",
            "current_index": index,
            "current_year": year,
        }
        return [
            Anime(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def get_latest_releases(self):
        data = {"controller": "Anime", "action": "getLatestReleases"}
        return [
            Anime(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def get_latest_uploads(self):
        data = {"controller": "Anime", "action": "getLatestUploads"}
        return [
            Anime(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def get_latest_anime(self):
        data = {"controller": "Anime", "action": "getLatestAnime"}
        return [
            Anime(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def get_random_anime(self):
        """Fetch one random anime (first entry of the response)."""
        data = {"controller": "Anime", "action": "getRandomAnime"}
        return Anime(
            self.__post(data)["entries"][0], self.headers, self.cookies, self.API_URL
        )

    def get_airing_anime(self, randomize=False):
        """Fetch the airing schedule as a dict mapping day -> list of Anime."""
        data = {
            "controller": "Anime",
            "action": "getAiringAnime",
            "randomize": randomize,
        }
        airing_anime_response = self.__post(data)["entries"]
        airing_anime = {}
        for day, animes in airing_anime_response.items():
            airing_anime[day] = [
                Anime(anime_dict, self.headers, self.cookies, self.API_URL)
                for anime_dict in animes
            ]
        return airing_anime

    def get_popular_anime(self, page=1):
        data = {"controller": "Anime", "action": "getPopularAnime", "page": page}
        return [
            Anime(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def get_popular_seasonal_anime(self, page=1):
        data = {"controller": "Anime", "action": "getPopularSeasonals", "page": page}
        return [
            Anime(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def get_popular_upcoming_anime(self, page=1):
        data = {"controller": "Anime", "action": "getPopularUpcomings", "page": page}
        return [
            Anime(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def get_hot_anime(self, page=1):
        data = {"controller": "Anime", "action": "getHotAnime", "page": page}
        return [
            Anime(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def get_best_rated_anime(self, page=1):
        data = {"controller": "Anime", "action": "getBestRatedAnime", "page": page}
        return [
            Anime(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def add_recommendation(self, anime_id, recommended_anime_id):
        """Submit a recommendation linking two anime; returns raw response."""
        data = {
            "controller": "Anime",
            "action": "addRecommendation",
            "detail_id": str(anime_id),
            "recommendation": str(recommended_anime_id),
        }
        return self.__post(data)

    def get_stats(self):
        """Fetch site-wide statistics as an AniwatchStats model."""
        data = {"controller": "XML", "action": "getStatsData"}
        return AniwatchStats(self.__post(data))

    def get_user_overview(self):
        """Fetch the profile overview for the client's own userId."""
        data = {
            "controller": "Profile",
            "action": "getOverview",
            "profile_id": str(self.userId),
        }
        return UserOverview(self.__post(data)["overview"])

    def get_user_chronicle(self, page=1):
        """Fetch a page of the client user's watch history (chronicle)."""
        data = {
            "controller": "Profile",
            "action": "getChronicle",
            "profile_id": str(self.userId),
            "page": page,
        }
        return [
            ChronicleEntry(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["chronicle"]
        ]

    def get_user_anime_list(self):
        data = {
            "controller": "Profile",
            "action": "getAnimelist",
            "profile_id": str(self.userId),
        }
        return [
            UserAnimeListEntry(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["animelist"]
        ]

    def get_user_media(self, page=1):
        data = {
            "controller": "Profile",
            "action": "getMedia",
            "profile_id": str(self.userId),
            "page": page,
        }
        return [
            UserMedia(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def send_image_to_discord(self, episode_id, base64_image, episode_time):
        """Upload a base64-encoded screenshot of an episode to Discord via
        the site's integration; ``episode_time`` is the playback position."""
        data = {
            "controller": "Profile",
            "action": "sendToDiscord",
            "file": base64_image,
            "episode_id": int(episode_id),
            "time": episode_time,
            "lang": "en-US",
        }
        return self.__post(data)

    def get_user_friends(self, page=1):
        data = {"controller": "Profile", "action": "getFriends", "page": page}
        return self.__post(data)

    def add_friend(self, friend_user_id):
        data = {
            "controller": "Profile",
            "action": "addFriend",
            "profile_id": friend_user_id,
        }
        return self.__post(data)

    def remove_friend(self, friend_id):
        data = {
            "controller": "Profile",
            "action": "removeFriend",
            "friend_id": friend_id,
        }
        return self.__post(data)

    def withdraw_friend_request(self, friend_id):
        data = {
            "controller": "Profile",
            "action": "withdrawRequest",
            "friend_id": friend_id,
        }
        return self.__post(data)

    def accept_friend_request(self, friend_id):
        data = {
            "controller": "Profile",
            "action": "acceptRequest",
            "friend_id": friend_id,
        }
        return self.__post(data)

    def reject_friend_request(self, friend_id):
        data = {
            "controller": "Profile",
            "action": "rejectRequest",
            "friend_id": friend_id,
        }
        return self.__post(data)

    def get_user_settings(self):
        data = {"controller": "Profile", "action": "getSettings"}
        return self.__post(data)

    def get_notifications(self):
        data = {"controller": "Profile", "action": "getNotifications"}
        return [
            Notification(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["notifications"]
        ]

    def mark_all_notifications_as_read(self):
        data = {
            "controller": "Profile",
            "action": "markAllNotificationsAsRead",
            "view": 0,
        }
        return self.__post(data)

    def delete_all_notifications(self):
        data = {"controller": "Profile", "action": "deleteAllNotifications", "view": 0}
        return self.__post(data)

    def toggle_notification_seen(self, notification_id):
        data = {
            "controller": "Profile",
            "action": "toggleNotificationSeen",
            "id": notification_id,
        }
        return self.__post(data)

    def delete_notification(self, notification_id):
        data = {
            "controller": "Profile",
            "action": "deleteNotification",
            "id": notification_id,
        }
        return self.__post(data)

    def get_anime_chronicle(self, anime_id, page=1):
        """Fetch a page of the watch history restricted to one anime."""
        data = {
            "controller": "Profile",
            "action": "getChronicle",
            "detail_id": str(anime_id),
            "page": page,
        }
        return [
            ChronicleEntry(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["chronicle"]
        ]

    def remove_chronicle_entry(self, chronicle_id):
        data = {
            "controller": "Profile",
            "action": "removeChronicleEntry",
            "chronicle_id": chronicle_id,
        }
        return self.__post(data)

    def get_discord_hash(self):
        data = {"controller": "Profile", "action": "getDiscordHash"}
        return self.__post(data)

    def renew_discord_hash(self):
        data = {"controller": "Profile", "action": "renewDiscordHash"}
        return self.__post(data)

    def remove_discord_verification(self):
        data = {"controller": "Profile", "action": "removeDiscordVerification"}
        return self.__post(data)

    def get_unread_notifications(self):
        data = {"controller": "Profile", "action": "getUnreadNotifications"}
        return [
            Notification(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["notifications"]
        ]

    def toggle_mark_as_watched(self, anime_id, episode_id):
        """Toggle the watched flag for one episode of an anime."""
        data = {
            "controller": "Profile",
            "action": "markAsWatched",
            "detail_id": str(anime_id),
            "episode_id": episode_id,
        }
        return self.__post(data)

    def mark_as_completed(self, anime_id):
        data = {
            "controller": "Profile",
            "action": "markAsCompleted",
            "detail_id": str(anime_id),
        }
        return self.__post(data)

    def mark_as_plan_to_watch(self, anime_id):
        data = {
            "controller": "Profile",
            "action": "markAsPlannedToWatch",
            "detail_id": str(anime_id),
        }
        return self.__post(data)

    def mark_as_on_hold(self, anime_id):
        data = {
            "controller": "Profile",
            "action": "markAsOnHold",
            "detail_id": str(anime_id),
        }
        return self.__post(data)

    def mark_as_dropped(self, anime_id):
        data = {
            "controller": "Profile",
            "action": "markAsDropped",
            "detail_id": str(anime_id),
        }
        return self.__post(data)

    def mark_as_watching(self, anime_id):
        data = {
            "controller": "Profile",
            "action": "markAsWatching",
            "detail_id": str(anime_id),
        }
        return self.__post(data)

    def remove_from_list(self, anime_id):
        data = {
            "controller": "Profile",
            "action": "removeAnime",
            "detail_id": str(anime_id),
        }
        return self.__post(data)

    def favorite_media(self, media_id):
        data = {"controller": "Media", "action": "favMedia", "media_id": str(media_id)}
        return self.__post(data)

    def rateAnime(self, anime_id, rating):
        # NOTE(review): camelCase name is inconsistent with the rest of the
        # class but is part of the public interface, so it is kept.
        # Rate 0 to remove rating
        data = {
            "controller": "Profile",
            "action": "rateAnime",
            "detail_id": str(anime_id),
            "rating": rating,
        }
        return self.__post(data)

    def get_reports(self):
        data = {"controller": "Profile", "action": "getReports"}
        return self.__post(data)

    def report_missing_anime(self, anime_name):
        data = {
            "controller": "Anime",
            "action": "reportMissingAnime",
            "anime_name": str(anime_name),
        }
        return self.__post(data)

    def report_missing_streams(self, anime_id):
        data = {
            "controller": "Anime",
            "action": "reportMissingStreams",
            "detail_id": str(anime_id),
        }
        return self.__post(data)

    def get_watchlist(self, page=1):
        data = {
            "controller": "Anime",
            "action": "getWatchlist",
            "detail_id": 0,
            "page": page,
        }
        return [
            WatchListEntry(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)["entries"]
        ]

    def login(self, username, password):
        """Authenticate with username/password; the password is base64
        encoded for transport only (this is not encryption)."""
        data = {
            "username": username,
            "password": base64.b64encode(bytes(password, "utf8")).decode("utf8"),
            "code": "",
            "controller": "Authentication",
            "action": "doLogin",
        }
        return self.__post(data)

    def forgot_password(self, email):
        data = {
            "controller": "Authentication",
            "action": "doForgotPW",
            "email": base64.b64encode(bytes(email, "utf8")).decode("utf8"),
        }
        return self.__post(data)

    def search(self, query):
        """Search anime by title.

        NOTE(review): all filters (genres, types, status, and notably the
        hard-coded yearRange [1965, 2022]) are fixed; results outside that
        year range will be silently excluded.
        """
        data = {
            "controller": "Search",
            "action": "search",
            "rOrder": False,
            "order": "title",
            "typed": str(query),
            "genre": "[]",
            "staff": "[]",
            "tags": [],
            "langs": [],
            "anyGenre": False,
            "anyStaff": False,
            "anyTag": False,
            "animelist": [2],
            "types": [0],
            "status": [0],
            "yearRange": [1965, 2022],
            "maxEpisodes": 0,
            "hasRelation": False,
        }
        return [
            Anime(data_dict, self.headers, self.cookies, self.API_URL)
            for data_dict in self.__post(data)
        ]

    def get_media(self, anime_id):
        """Fetch the media (trailers, images, etc.) attached to an anime."""
        data = {"controller": "Media", "action": "getMedia", "detail_id": str(anime_id)}
        return Media(
            self.__post(data), self.headers, self.cookies, self.API_URL, anime_id
        )
|
#
# PySNMP MIB module CISCOSB-BRIDGE-SECURITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCOSB-BRIDGE-SECURITY
# Produced by pysmi-0.3.4 at Wed May 1 12:21:55 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# pysmi-generated MIB bindings for CISCOSB-BRIDGE-SECURITY.
# NOTE: "mibBuilder" is injected by the pysnmp MIB loader when this module
# is loaded; the file is not importable standalone.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
switch001, = mibBuilder.importSymbols("CISCOSB-MIB", "switch001")
InterfaceIndex, ifIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
VlanId, = mibBuilder.importSymbols("Q-BRIDGE-MIB", "VlanId")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, Unsigned32, ModuleIdentity, IpAddress, Integer32, NotificationType, MibIdentifier, Gauge32, Counter32, Counter64, ObjectIdentity, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Unsigned32", "ModuleIdentity", "IpAddress", "Integer32", "NotificationType", "MibIdentifier", "Gauge32", "Counter32", "Counter64", "ObjectIdentity", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks")
RowStatus, DisplayString, MacAddress, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "MacAddress", "TextualConvention", "TruthValue")
# Module root: 1.3.6.1.4.1.9.6.1.101.112 (Cisco Small Business private tree).
rlBridgeSecurity = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112))
if mibBuilder.loadTexts: rlBridgeSecurity.setLastUpdated('200604020000Z')
if mibBuilder.loadTexts: rlBridgeSecurity.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: rlBridgeSecurity.setContactInfo('Postal: 170 West Tasman Drive San Jose , CA 95134-1706 USA Website: Cisco Small Business Support Community <http://www.cisco.com/go/smallbizsupport>')
if mibBuilder.loadTexts: rlBridgeSecurity.setDescription('The private MIB module definition for DHCP Snoop, ARP Inspection and Ip source Guard features.')
# Feature sub-trees under the module root.
rlIpDhcpSnoop = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1))
rlIpSourceGuard = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2))
rlIpArpInspect = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3))
rlProtocolFiltering = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 4))
# DHCP-snooping scalars.
rlIpDhcpSnoopMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpDhcpSnoopMibVersion.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopMibVersion.setDescription("MIB's version, the current version is 1.")
rlIpDhcpSnoopEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopEnable.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopEnable.setDescription('Specifies a system DHCP Snoop enable state.')
rlIpDhcpSnoopFileEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopFileEnable.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopFileEnable.setDescription('Specifies a system DHCP Snoop file enable state.')
rlIpDhcpSnoopClearAction = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("noAction", 1), ("clearNow", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopClearAction.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopClearAction.setDescription('Used to clear DHCP Snoop Table.')
rlIpDhcpSnoopFileUpdateTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(600, 86400))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopFileUpdateTime.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopFileUpdateTime.setDescription('Configures in seconds the period of time between file updates. The valid range is 600 - 86400.')
rlIpDhcpSnoopVerifyMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopVerifyMacAddress.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopVerifyMacAddress.setDescription('Configures on an un-trusted port whether the source MAC address in a DHCP packet matches the client hardware address.')
rlIpDhcpSnoopCurrentEntiresNumber = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpDhcpSnoopCurrentEntiresNumber.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopCurrentEntiresNumber.setDescription('Contain the current number of DHCP snooping entries for all types.')
rlIpDhcpOpt82InsertionEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpOpt82InsertionEnable.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpOpt82InsertionEnable.setDescription('Specifies a DHCP option 82 insertion enable state.')
rlIpDhcpOpt82RxOnUntrustedEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpOpt82RxOnUntrustedEnable.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpOpt82RxOnUntrustedEnable.setDescription('Specifies a DHCP option 82 receive on untrusted port enable state.')
# Static DHCP-snooping entries table, indexed by (VLAN tag, MAC address).
rlIpDhcpSnoopStaticTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 10), )
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticTable.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticTable.setDescription('The table specifies all DHCP Snoop Static (configured by user) entries. The entry contains a local IP address of the DHCP client, a Port interface to which a DHCP client is connected to the switch.')
rlIpDhcpSnoopStaticEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 10, 1), ).setIndexNames((0, "CISCOSB-BRIDGE-SECURITY", "rlIpDhcpSnoopStaticVLANTag"), (0, "CISCOSB-BRIDGE-SECURITY", "rlIpDhcpSnoopStaticMACAddress"))
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticEntry.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticEntry.setDescription('The row definition for this table.')
rlIpDhcpSnoopStaticVLANTag = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 10, 1, 1), VlanId())
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticVLANTag.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticVLANTag.setDescription('A DHCP Snoop Static entry vlan tag.')
rlIpDhcpSnoopStaticMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 10, 1, 2), MacAddress())
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticMACAddress.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticMACAddress.setDescription('A DHCP Snoop Static entry mac address')
rlIpDhcpSnoopStaticIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 10, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticIPAddress.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticIPAddress.setDescription('A DHCP Snoop Static entry IP address.')
rlIpDhcpSnoopStaticPortInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 10, 1, 4), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticPortInterface.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticPortInterface.setDescription('A DHCP Snoop Static entry Port interface.')
rlIpDhcpSnoopStaticRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 10, 1, 5), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticRowStatus.setDescription('A status can be destroy, active or createAndGo')
# Textual convention: origin of a DHCP-snooping entry (protocol-learned,
# timed-out, or statically configured).
class RlIpDhcpSnoopType(TextualConvention, Integer32):
    description = 'Ip Dhcp Snoop entry type.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("learnedByProtocol", 1), ("deletedByTimeout", 2), ("static", 3))
# Combined DHCP-snooping table (dynamic and static entries), indexed by
# (VLAN tag, MAC address).
rlIpDhcpSnoopTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 11), )
if mibBuilder.loadTexts: rlIpDhcpSnoopTable.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopTable.setDescription('DHCP Snoop entry. Use to add/delete a dynamic entries and to view all entries (dynamic and static)')
rlIpDhcpSnoopEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 11, 1), ).setIndexNames((0, "CISCOSB-BRIDGE-SECURITY", "rlIpDhcpSnoopVLANTag"), (0, "CISCOSB-BRIDGE-SECURITY", "rlIpDhcpSnoopMACAddress"))
if mibBuilder.loadTexts: rlIpDhcpSnoopEntry.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopEntry.setDescription('The row definition for this table.')
rlIpDhcpSnoopVLANTag = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 11, 1, 1), VlanId())
if mibBuilder.loadTexts: rlIpDhcpSnoopVLANTag.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopVLANTag.setDescription('A DHCP Snoop entry vlan tag.')
rlIpDhcpSnoopMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 11, 1, 2), MacAddress())
if mibBuilder.loadTexts: rlIpDhcpSnoopMACAddress.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopMACAddress.setDescription('A DHCP Snoop entry mac address')
rlIpDhcpSnoopType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 11, 1, 3), RlIpDhcpSnoopType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopType.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopType.setDescription('A DHCP Snoop entry type: static or dynamic.')
rlIpDhcpSnoopLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 11, 1, 4), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopLeaseTime.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopLeaseTime.setDescription('A DHCP Snoop lease time. For static entry the lease time is 0xFFFFFFFF')
rlIpDhcpSnoopIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 11, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopIPAddress.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopIPAddress.setDescription('The IP address of the DHCP client referred to in this table entry.')
rlIpDhcpSnoopPortInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 11, 1, 6), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopPortInterface.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopPortInterface.setDescription('Identifies the port Interface ifindex, which connected to DHCP client identified with the entry.')
rlIpDhcpSnoopRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 11, 1, 7), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopRowStatus.setDescription('Entry status. A valid status is CreateandGo or Delete.')
# Per-VLAN enable table for DHCP snooping.
rlIpDhcpSnoopEnableVlanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 12), )
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanTable.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanTable.setDescription('An Ip Dhcp Snooping enabled VLAN table.')
rlIpDhcpSnoopEnableVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 12, 1), ).setIndexNames((0, "CISCOSB-BRIDGE-SECURITY", "rlIpDhcpSnoopEnableVlanTag"))
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanEntry.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanEntry.setDescription('An Ip Dhcp Snooping enabled VLAN entry.')
rlIpDhcpSnoopEnableVlanTag = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 12, 1, 1), VlanId())
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanTag.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanTag.setDescription('A DHCP Snoop entry vlan tag.')
rlIpDhcpSnoopEnableVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 12, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanRowStatus.setDescription('Entry status. A valid status is CreateandGo and Delete.')
# Trusted-port table for DHCP snooping, indexed by ifIndex.
rlIpDhcpSnoopTrustedPortTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 13), )
if mibBuilder.loadTexts: rlIpDhcpSnoopTrustedPortTable.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopTrustedPortTable.setDescription('DHCP Snoop Trusted ports entry. The entry created when port is configured as trusted.')
rlIpDhcpSnoopTrustedPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 13, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlIpDhcpSnoopTrustedPortEntry.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopTrustedPortEntry.setDescription('The row definition for this table.')
rlIpDhcpSnoopTrustedPortRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 1, 13, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopTrustedPortRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlIpDhcpSnoopTrustedPortRowStatus.setDescription('Entry status. A valid status is CreateandGo or Delete.')
# IP Source Guard scalars.
rlIpSourceGuardMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardMibVersion.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardMibVersion.setDescription("MIB's version, the current version is 1.")
rlIpSourceGuardEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpSourceGuardEnable.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardEnable.setDescription('FALSE - There is no Ip Source Guard in the system. TRUE - Ip Source Guard is enabled on system.')
rlIpSourceGuardRetryToInsert = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("noAction", 0), ("retryToInsertNow", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpSourceGuardRetryToInsert.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardRetryToInsert.setDescription('When setted to retryToInsertNow all IP Source Guard inactive entries due to resource problem reinserted in the Policy. On get always return noAction.')
rlIpSourceGuardRetryTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpSourceGuardRetryTime.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardRetryTime.setDescription('Configures in seconds the period of time the application retries to insert inactive by resource problem rules. The actual range is 10-600. 0 used to sign that the timer is not active.')
# Per-port enable table for IP Source Guard, indexed by ifIndex.
rlIpSourceGuardPortTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 5), )
if mibBuilder.loadTexts: rlIpSourceGuardPortTable.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardPortTable.setDescription('IP Source Guard ports entry. The entry created when IP Source Guard enabled on port.')
rlIpSourceGuardPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 5, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlIpSourceGuardPortEntry.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardPortEntry.setDescription('The row definition for this table.')
rlIpSourceGuardPortRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 5, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpSourceGuardPortRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardPortRowStatus.setDescription('Entry status. A valid status is CreateAndGo or Delete.')
# Generated SMI textual convention: whether an entry was learned (dynamic)
# or configured (static).
class RlIpSourceGuardType(TextualConvention, Integer32):
    description = 'Ip IP Source Guard entry type.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("dynamic", 1), ("static", 2))
# Generated SMI textual convention: whether an entry is currently enforced.
class RlIpSourceGuardStatus(TextualConvention, Integer32):
    description = 'Ip IP Source Guard entry status.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("active", 1), ("inactive", 2))
# Generated SMI textual convention: why an entry is inactive (see
# rlIpSourceGuardFailReason column).
class RlIpSourceGuardFailReason(TextualConvention, Integer32):
    description = 'Ip IP Source Guard entry reason.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
    namedValues = NamedValues(("noProblem", 1), ("noResource", 2), ("noSnoopVlan", 3), ("trustPort", 4))
# IP Source Guard entry table (dynamic + static), indexed by
# (ifIndex, IP address, VLAN tag). Generated code — see note above.
rlIpSourceGuardTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 6), )
if mibBuilder.loadTexts: rlIpSourceGuardTable.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardTable.setDescription('IP Source Guard entry. Use to view all entries (dynamic and static)')
rlIpSourceGuardEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 6, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCOSB-BRIDGE-SECURITY", "rlIpSourceGuardIPAddress"), (0, "CISCOSB-BRIDGE-SECURITY", "rlIpSourceGuardVLANTag"))
if mibBuilder.loadTexts: rlIpSourceGuardEntry.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardEntry.setDescription('The row definition for this table.')
# Index columns (no setMaxAccess, i.e. not-accessible index objects).
rlIpSourceGuardIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 6, 1, 1), IpAddress())
if mibBuilder.loadTexts: rlIpSourceGuardIPAddress.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardIPAddress.setDescription('The IP address of the Ip Source Guard entry.')
rlIpSourceGuardVLANTag = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 6, 1, 2), VlanId())
if mibBuilder.loadTexts: rlIpSourceGuardVLANTag.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardVLANTag.setDescription('A Ip Source Guard entry vlan tag.')
# Read-only data columns.
rlIpSourceGuardMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 6, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardMACAddress.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardMACAddress.setDescription('A Ip Source Guard entry mac address')
rlIpSourceGuardType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 6, 1, 4), RlIpSourceGuardType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardType.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardType.setDescription('A Ip Source Guard entry type: static or dynamic.')
rlIpSourceGuardStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 6, 1, 5), RlIpSourceGuardStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardStatus.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardStatus.setDescription('Identifies the status of Ip Source Guard entry.')
rlIpSourceGuardFailReason = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 6, 1, 6), RlIpSourceGuardFailReason()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardFailReason.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardFailReason.setDescription('Identifies the reason for in-activity of Ip Source Guard entry.')
# Per-VLAN counters of permitted rules (static vs. DHCP-snooping derived).
rlIpSourceGuardPermittedRuleCounterTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 7), )
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterTable.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterTable.setDescription('The table includes, per vlan, the IP Source Guard permitted rules counters.')
rlIpSourceGuardPermittedRuleCounterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 7, 1), ).setIndexNames((0, "CISCOSB-BRIDGE-SECURITY", "rlIpSourceGuardPermittedRuleCounterVLANTag"))
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterEntry.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterEntry.setDescription('The row definition for this table.')
rlIpSourceGuardPermittedRuleCounterVLANTag = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 7, 1, 1), VlanId())
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterVLANTag.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterVLANTag.setDescription('Ip Source Guard permitted rules counters entry Vlan tag.')
rlIpSourceGuardPermittedRuleCounterNumOfStaticRules = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 7, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterNumOfStaticRules.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterNumOfStaticRules.setDescription('Number of static rules added by IP Source Guard for the permitted Hosts')
rlIpSourceGuardPermittedRuleCounterNumOfDhcpRules = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 2, 7, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterNumOfDhcpRules.setStatus('current')
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterNumOfDhcpRules.setDescription('Number of rules added by IP Source Guard for the permitted Hosts, as a result of DHCP Snooping dynamic information.')
# Generated SMI textual convention: ARP inspection access-list name,
# 1-32 displayable characters.
class RlIpArpInspectListNameType(DisplayString):
    description = 'Ip arp inspection list name type.'
    status = 'current'
    subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(1, 32)
# ---------------------------------------------------------------------------
# ARP Inspection branch (…101.112.3): scalars, access lists, per-VLAN
# enable/counter table, and trusted ports. Generated code — see note above.
# ---------------------------------------------------------------------------
rlIpArpInspectMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpArpInspectMibVersion.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectMibVersion.setDescription("MIB's version, the current version is 1.")
rlIpArpInspectEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectEnable.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectEnable.setDescription('Specifies a system ARP Inspection enable state.')
# Minimum seconds between successive ARP SYSLOG messages; 0 = immediate,
# 0xFFFFFFFF = suppressed.
rlIpArpInspectLogInterval = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectLogInterval.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectLogInterval.setDescription('Specify the minimal interval between successive ARP SYSLOG messages. 0 - message is immediately generated. 0xFFFFFFFF - messages would not be generated. A legal range is 0-86400.')
rlIpArpInspectValidation = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectValidation.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectValidation.setDescription('Defined a specific check on incoming ARP packets: Source MAC: Compare the source MAC address in the Ethernet header against the sender MAC address in the ARP body. This check is performed on both ARP requests and responses. Destination MAC: Compare the destination MAC address in the Ethernet header against the target MAC address in ARP body. This check is performed for ARP responses. IP addresses: Compare the ARP body for invalid and unexpected IP addresses. Addresses include 0.0.0.0, 255.255.255.255, and all IP multicast addresses.')
# Access lists mapping (list name, IP) -> MAC, with RowStatus lifecycle.
rlIpArpInspectListTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 5), )
if mibBuilder.loadTexts: rlIpArpInspectListTable.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectListTable.setDescription('The table specifies all ARP Inspection List entries. The entry contains a list name, list IP address, a list Mac address.')
rlIpArpInspectListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 5, 1), ).setIndexNames((0, "CISCOSB-BRIDGE-SECURITY", "rlIpArpInspectListName"), (0, "CISCOSB-BRIDGE-SECURITY", "rlIpArpInspectListIPAddress"))
if mibBuilder.loadTexts: rlIpArpInspectListEntry.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectListEntry.setDescription('The row definition for this table.')
rlIpArpInspectListName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 5, 1, 1), RlIpArpInspectListNameType())
if mibBuilder.loadTexts: rlIpArpInspectListName.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectListName.setDescription('The Name of the Access List.')
rlIpArpInspectListIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 5, 1, 2), IpAddress())
if mibBuilder.loadTexts: rlIpArpInspectListIPAddress.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectListIPAddress.setDescription('ARP Inspection List IP address.')
rlIpArpInspectListMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 5, 1, 3), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectListMACAddress.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectListMACAddress.setDescription('ARP Inspection List mac address')
rlIpArpInspectListRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 5, 1, 4), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectListRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectListRowStatus.setDescription('A status can be destroy, active or createAndGo')
# Per-VLAN enable table with ACL assignment and forwarded/dropped/mismatch
# counters plus a clear-counters action column.
rlIpArpInspectEnableVlanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 6), )
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanTable.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanTable.setDescription('An Ip ARP Inspection enabled VLAN table.')
rlIpArpInspectEnableVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 6, 1), ).setIndexNames((0, "CISCOSB-BRIDGE-SECURITY", "rlIpArpInspectEnableVlanTag"))
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanEntry.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanEntry.setDescription('An Ip ARP Inspection enabled VLAN entry.')
rlIpArpInspectEnableVlanTag = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 6, 1, 1), VlanId())
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanTag.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanTag.setDescription('An Ip ARP Inspection entry vlan tag.')
rlIpArpInspectAssignedListName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 6, 1, 2), RlIpArpInspectListNameType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectAssignedListName.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectAssignedListName.setDescription('An Ip ARP Inspection assigned ACL name.')
rlIpArpInspectEnableVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 6, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanRowStatus.setDescription('Entry status. A valid status is CreateandGo and Delete.')
rlIpArpInspectVlanNumOfArpForwarded = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 6, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpArpInspectVlanNumOfArpForwarded.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectVlanNumOfArpForwarded.setDescription('Total number of forwarded ARP packets, packets which were validated by ARP inspection ')
rlIpArpInspectVlanNumOfArpDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 6, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpArpInspectVlanNumOfArpDropped.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectVlanNumOfArpDropped.setDescription('Number of dropped ARP packets, which were validated by ARP inspection (mismatch , not-found and dropped for any reason)')
rlIpArpInspectVlanNumOfArpMismatched = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 6, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpArpInspectVlanNumOfArpMismatched.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectVlanNumOfArpMismatched.setDescription('Number of dropped ARP packets, which were validated by ARP inspection and inconsistency was found for IP and MAC (mismatch)')
rlIpArpInspectVlanClearCountersAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 6, 1, 7), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectVlanClearCountersAction.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectVlanClearCountersAction.setDescription('If true, clear (set to zero) all Arp Inspection counters: rlIpArpInspectVlanNumOfArpForwarded , rlIpArpInspectVlanNumOfArpDropped and rlIpArpInspectVlanNumOfArpMismatched')
# Trusted-port table (row exists while the port is trusted) and a global
# clear-counters action scalar.
rlIpArpInspectTrustedPortTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 7), )
if mibBuilder.loadTexts: rlIpArpInspectTrustedPortTable.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectTrustedPortTable.setDescription('ARP Inspection Trusted ports entry. The entry created when port is configured as trusted.')
rlIpArpInspectTrustedPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 7, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlIpArpInspectTrustedPortEntry.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectTrustedPortEntry.setDescription('The row definition for this table.')
rlIpArpInspectTrustedPortRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 7, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectTrustedPortRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectTrustedPortRowStatus.setDescription('Entry status. A valid status is CreateandGo or Delete.')
rlIpArpInspectClearCountersAction = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 3, 8), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectClearCountersAction.setStatus('current')
if mibBuilder.loadTexts: rlIpArpInspectClearCountersAction.setDescription('If true, clear (set to zero) on all vlans: all Arp Inspection counters: rlIpArpInspectVlanNumOfArpForwarded , rlIpArpInspectVlanNumOfArpDropped and rlIpArpInspectVlanNumOfArpMismatched')
# Generated SMI textual convention: BITS bitmap of Cisco control protocols
# (CDP/VTP/DTP/UDLD/PAgP/SSTP) selected for filtering.
class ProtocolFilteringMap(TextualConvention, Bits):
    description = "This TC describes the list of protocol to be filtered. The bit 'all(0)' indicates all Cisco protocols in range 0100.0ccc.ccc0 - 0100.0ccc.cccf The bit 'cdp(1)' indicates Cisco CDP protocol. Identified by destination mac address: 0100.0ccc.cccc and protocol type:0x2000. The bit 'vtp(2)' indicates Cisco VTP protocol. Identified by destination mac address: 0100.0ccc.cccc and protocol type:0x2003. The bit 'dtp(3)' indicates Cisco DTP protocol. Identified by destination mac address: 0100.0ccc.cccc and protocol type:0x2004. The bit 'udld (4)' indicates Cisco UDLD protocol. Identified by destination mac address: 0100.0ccc.cccc and protocol type:0x0111. The bit 'pagp(5)' indicates Cisco PAGP protocol. Identified by destination mac address: 0100.0ccc.cccc and protocol type: 0x0104. The bit 'sstp(6)' indicates Cisco SSTP protocol. Identified by destination mac address: 0100.0ccc.cccd. "
    status = 'current'
    namedValues = NamedValues(("all", 0), ("cdp", 1), ("vtp", 2), ("dtp", 3), ("udld", 4), ("pagp", 5), ("sstp", 6))
# Per-port protocol filtering table, indexed by ifIndex. Generated code.
rlProtocolFilteringTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 4, 1), )
if mibBuilder.loadTexts: rlProtocolFilteringTable.setStatus('current')
if mibBuilder.loadTexts: rlProtocolFilteringTable.setDescription('Protocol filter configuration entry')
rlProtocolFilteringEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlProtocolFilteringEntry.setStatus('current')
if mibBuilder.loadTexts: rlProtocolFilteringEntry.setDescription('The row definition for this table.')
rlProtocolFilteringList = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 4, 1, 1, 1), ProtocolFilteringMap()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlProtocolFilteringList.setStatus('current')
if mibBuilder.loadTexts: rlProtocolFilteringList.setDescription('The list of protocol to be filtered.')
rlProtocolFilteringRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 112, 4, 1, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlProtocolFilteringRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlProtocolFilteringRowStatus.setDescription('A status can be destroy, active or createAndGo')
# Export every generated symbol so other MIB modules can import them by name.
mibBuilder.exportSymbols("CISCOSB-BRIDGE-SECURITY", rlIpDhcpSnoopFileUpdateTime=rlIpDhcpSnoopFileUpdateTime, rlIpArpInspectVlanNumOfArpForwarded=rlIpArpInspectVlanNumOfArpForwarded, rlIpDhcpSnoop=rlIpDhcpSnoop, rlIpSourceGuardPermittedRuleCounterNumOfDhcpRules=rlIpSourceGuardPermittedRuleCounterNumOfDhcpRules, rlIpDhcpOpt82RxOnUntrustedEnable=rlIpDhcpOpt82RxOnUntrustedEnable, rlIpSourceGuardType=rlIpSourceGuardType, rlIpDhcpSnoopRowStatus=rlIpDhcpSnoopRowStatus, RlIpSourceGuardType=RlIpSourceGuardType, rlIpArpInspectAssignedListName=rlIpArpInspectAssignedListName, rlIpSourceGuardPortRowStatus=rlIpSourceGuardPortRowStatus, rlBridgeSecurity=rlBridgeSecurity, rlIpSourceGuardPortEntry=rlIpSourceGuardPortEntry, rlProtocolFilteringEntry=rlProtocolFilteringEntry, rlIpArpInspectTrustedPortEntry=rlIpArpInspectTrustedPortEntry, RlIpSourceGuardStatus=RlIpSourceGuardStatus, rlIpArpInspectTrustedPortRowStatus=rlIpArpInspectTrustedPortRowStatus, rlIpArpInspectListRowStatus=rlIpArpInspectListRowStatus, rlIpSourceGuardEntry=rlIpSourceGuardEntry, rlIpSourceGuardPermittedRuleCounterVLANTag=rlIpSourceGuardPermittedRuleCounterVLANTag, rlIpArpInspectTrustedPortTable=rlIpArpInspectTrustedPortTable, rlIpArpInspectEnableVlanEntry=rlIpArpInspectEnableVlanEntry, rlIpSourceGuardRetryTime=rlIpSourceGuardRetryTime, rlIpArpInspectListIPAddress=rlIpArpInspectListIPAddress, rlIpArpInspectValidation=rlIpArpInspectValidation, rlIpDhcpSnoopTrustedPortTable=rlIpDhcpSnoopTrustedPortTable, rlIpSourceGuardEnable=rlIpSourceGuardEnable, rlIpDhcpSnoopEnableVlanTag=rlIpDhcpSnoopEnableVlanTag, rlIpDhcpSnoopStaticVLANTag=rlIpDhcpSnoopStaticVLANTag, rlIpDhcpSnoopStaticMACAddress=rlIpDhcpSnoopStaticMACAddress, rlIpSourceGuardMibVersion=rlIpSourceGuardMibVersion, rlIpDhcpSnoopMibVersion=rlIpDhcpSnoopMibVersion, rlIpDhcpOpt82InsertionEnable=rlIpDhcpOpt82InsertionEnable, rlIpArpInspectListName=rlIpArpInspectListName, rlIpDhcpSnoopPortInterface=rlIpDhcpSnoopPortInterface, 
rlIpSourceGuardRetryToInsert=rlIpSourceGuardRetryToInsert, rlIpDhcpSnoopType=rlIpDhcpSnoopType, rlIpArpInspectVlanNumOfArpMismatched=rlIpArpInspectVlanNumOfArpMismatched, rlIpArpInspectVlanNumOfArpDropped=rlIpArpInspectVlanNumOfArpDropped, rlIpSourceGuardPortTable=rlIpSourceGuardPortTable, rlIpSourceGuardVLANTag=rlIpSourceGuardVLANTag, RlIpArpInspectListNameType=RlIpArpInspectListNameType, rlIpDhcpSnoopIPAddress=rlIpDhcpSnoopIPAddress, rlIpArpInspectEnable=rlIpArpInspectEnable, rlIpDhcpSnoopStaticIPAddress=rlIpDhcpSnoopStaticIPAddress, rlIpDhcpSnoopVLANTag=rlIpDhcpSnoopVLANTag, rlIpDhcpSnoopFileEnable=rlIpDhcpSnoopFileEnable, rlIpSourceGuardIPAddress=rlIpSourceGuardIPAddress, rlIpArpInspectVlanClearCountersAction=rlIpArpInspectVlanClearCountersAction, rlIpDhcpSnoopEnableVlanRowStatus=rlIpDhcpSnoopEnableVlanRowStatus, rlProtocolFiltering=rlProtocolFiltering, rlIpDhcpSnoopEnableVlanTable=rlIpDhcpSnoopEnableVlanTable, rlIpArpInspectListMACAddress=rlIpArpInspectListMACAddress, rlIpDhcpSnoopVerifyMacAddress=rlIpDhcpSnoopVerifyMacAddress, rlIpDhcpSnoopTrustedPortRowStatus=rlIpDhcpSnoopTrustedPortRowStatus, rlIpSourceGuardFailReason=rlIpSourceGuardFailReason, rlIpSourceGuardPermittedRuleCounterTable=rlIpSourceGuardPermittedRuleCounterTable, rlIpSourceGuardStatus=rlIpSourceGuardStatus, rlProtocolFilteringTable=rlProtocolFilteringTable, rlIpSourceGuardTable=rlIpSourceGuardTable, rlIpDhcpSnoopClearAction=rlIpDhcpSnoopClearAction, RlIpDhcpSnoopType=RlIpDhcpSnoopType, rlIpDhcpSnoopEntry=rlIpDhcpSnoopEntry, rlIpArpInspectListEntry=rlIpArpInspectListEntry, rlIpSourceGuardPermittedRuleCounterEntry=rlIpSourceGuardPermittedRuleCounterEntry, RlIpSourceGuardFailReason=RlIpSourceGuardFailReason, rlIpArpInspectEnableVlanTable=rlIpArpInspectEnableVlanTable, rlIpDhcpSnoopCurrentEntiresNumber=rlIpDhcpSnoopCurrentEntiresNumber, rlIpArpInspectListTable=rlIpArpInspectListTable, rlIpArpInspectMibVersion=rlIpArpInspectMibVersion, rlIpDhcpSnoopEnableVlanEntry=rlIpDhcpSnoopEnableVlanEntry, 
rlIpArpInspect=rlIpArpInspect, PYSNMP_MODULE_ID=rlBridgeSecurity, rlIpDhcpSnoopTrustedPortEntry=rlIpDhcpSnoopTrustedPortEntry, rlIpArpInspectEnableVlanTag=rlIpArpInspectEnableVlanTag, rlIpDhcpSnoopStaticRowStatus=rlIpDhcpSnoopStaticRowStatus, rlIpSourceGuardMACAddress=rlIpSourceGuardMACAddress, rlProtocolFilteringList=rlProtocolFilteringList, rlIpDhcpSnoopMACAddress=rlIpDhcpSnoopMACAddress, rlIpDhcpSnoopTable=rlIpDhcpSnoopTable, ProtocolFilteringMap=ProtocolFilteringMap, rlIpSourceGuard=rlIpSourceGuard, rlIpDhcpSnoopStaticTable=rlIpDhcpSnoopStaticTable, rlIpDhcpSnoopStaticEntry=rlIpDhcpSnoopStaticEntry, rlIpDhcpSnoopLeaseTime=rlIpDhcpSnoopLeaseTime, rlProtocolFilteringRowStatus=rlProtocolFilteringRowStatus, rlIpArpInspectEnableVlanRowStatus=rlIpArpInspectEnableVlanRowStatus, rlIpDhcpSnoopStaticPortInterface=rlIpDhcpSnoopStaticPortInterface, rlIpArpInspectClearCountersAction=rlIpArpInspectClearCountersAction, rlIpArpInspectLogInterval=rlIpArpInspectLogInterval, rlIpDhcpSnoopEnable=rlIpDhcpSnoopEnable, rlIpSourceGuardPermittedRuleCounterNumOfStaticRules=rlIpSourceGuardPermittedRuleCounterNumOfStaticRules)
|
import json
import logging
from enum import Enum
from http import HTTPStatus
import pytz
import requests
from django.conf import settings
from django.http import JsonResponse
from django.utils import timezone
from errors.views import ErrorCodes, getError
from Users.utils import getUserByAccessToken
# Get an instance of a logger
logger = logging.getLogger(__name__)
def badRequestResponse(data):
    """Return a 400 Bad Request JSON error response wrapping *data*."""
    return errorResponse(HTTPStatus.BAD_REQUEST, data)
def unAuthorizedResponse(data):
    """Return a 403 Forbidden JSON error response wrapping *data*.

    NOTE(review): the name suggests 401, but HTTPStatus.FORBIDDEN is 403;
    unAuthenticatedResponse is the 401 variant.
    """
    return errorResponse(HTTPStatus.FORBIDDEN, data)
def unAuthenticatedResponse(data):
    """Return a 401 Unauthorized JSON error response wrapping *data*."""
    return errorResponse(HTTPStatus.UNAUTHORIZED, data)
def resourceConflictResponse(data):
    """Return a 409 Conflict JSON error response wrapping *data*."""
    return errorResponse(HTTPStatus.CONFLICT, data)
def resourceNotFoundResponse(data):
    """Return a 404 Not Found JSON error response wrapping *data*."""
    return errorResponse(HTTPStatus.NOT_FOUND, data)
def internalServerErrorResponse(data):
    """Return a 500 Internal Server Error JSON response wrapping *data*."""
    return errorResponse(HTTPStatus.INTERNAL_SERVER_ERROR, data)
def errorResponse(httpStatusCode, data):
    """Serialize *data* as JSON with the given HTTP status.

    safe=False allows non-dict payloads (lists, strings) to be serialized.
    """
    return JsonResponse(data, status=httpStatusCode, safe=False)
# success responses
def createdResponse(message="", body=None):
    """Return a 201 Created success response.

    Args:
        message: Human-readable status message.
        body: JSON-serializable payload; defaults to an empty dict.
    """
    # A fresh dict per call avoids the shared-mutable-default pitfall the
    # original `body={}` signature had.
    return successResponse(HTTPStatus.CREATED, message=message, body={} if body is None else body)
def paginatedResponse(httpStatusCode=HTTPStatus.OK, message="", body=None, pagination=None, **kwargs):
    """Return a success response with an attached pagination section.

    Args:
        httpStatusCode: HTTP status for the response (default 200).
        message: Human-readable status message.
        body: JSON-serializable payload; defaults to an empty dict.
        pagination: Pagination metadata dict; defaults to an empty dict
            (which successResponse treats as "no pagination section").
        **kwargs: Extra metadata forwarded as the response's metaData.
    """
    # None sentinels replace the original mutable `{}` defaults.
    return successResponse(
        httpStatusCode,
        message,
        {} if body is None else body,
        {} if pagination is None else pagination,
        kwargs,
    )
def successResponse(httpStatusCode=HTTPStatus.OK, message="", body=None, pagination=None, kwargs=None):
    """Build the project's standard success JSON envelope.

    The envelope always contains 'data', 'metaData' and 'message' keys; a
    'pagination' key is added only when *pagination* is truthy.

    Args:
        httpStatusCode: HTTP status for the response (default 200).
        message: Human-readable status message.
        body: Payload placed under 'data'; defaults to an empty dict
            (None sentinel replaces the original mutable `{}` default).
        pagination: Optional pagination metadata.
        kwargs: Arbitrary metadata placed under 'metaData'.
    """
    responseData = {
        'data': {} if body is None else body,
        'metaData': kwargs,
        'message': message,
    }
    if pagination:
        responseData['pagination'] = pagination
    return JsonResponse(responseData, status=httpStatusCode, safe=False)
# helper functions
def getUserIpAddress(request):
    """Best-effort client IP: first truthy proxy-forwarding header, else REMOTE_ADDR."""
    meta = request.META
    for header in ('HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED_HOST', 'HTTP_X_FORWARDED_SERVER'):
        value = meta.get(header)
        if value:
            return value
    return meta.get('REMOTE_ADDR')
class MultipleProxyMiddleware:
    """Normalize multi-proxy forwarding headers to a single client address."""

    FORWARDED_FOR_FIELDS = [
        'HTTP_X_FORWARDED_FOR',
        'HTTP_X_FORWARDED_HOST',
        'HTTP_X_FORWARDED_SERVER',
    ]

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        """
        Rewrites the proxy headers so that only the client IP address is used.
        """
        meta = request.META
        for header in self.FORWARDED_FOR_FIELDS:
            value = meta.get(header)
            # Only comma-separated proxy chains are rewritten; single values
            # pass through untouched.
            if value is not None and ',' in value:
                meta[header] = value.split(',', 1)[0].strip()
        return self.get_response(request)
class TimezoneMiddleware(object):
    """Middleware that activates each authenticated user's preferred time zone.

    Anonymous requests (no ``Token`` header) proceed with the project's
    default time zone; requests carrying an unknown/expired token are
    rejected with a 401 response.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        token = request.headers.get('Token')
        if token is None:
            # No token: fall back to the default time zone and let the
            # request through. (Previously this branch fell through, so
            # every anonymous request was rejected with 401 — contradicting
            # the original comment's stated intent.)
            timezone.deactivate()
            return self.get_response(request)
        # Resolve the user that owns this access token.
        user = getUserByAccessToken(token)
        if user is None:
            return unAuthenticatedResponse(getError(
                ErrorCodes.UNAUTHENTICATED_REQUEST,
                "Your session has expired. Please login."))
        # The timezone name is stored on the user's profile.
        timezone.activate(pytz.timezone(user.timezone))
        return self.get_response(request)
class ExtendedEnum(Enum):
    """Enum base class with a helper for extracting member values."""

    @classmethod
    def list(cls, *args):
        """Return the values of the given members, or of every member when none are given."""
        members = args if args else tuple(cls)
        return [member.value for member in members]
def request_response(error_type, code, msg):
    """error code response for all endpoints

    Builds the error payload via getError and wraps it with *error_type*
    (one of the *Response helpers above); a falsy *msg* becomes "".
    """
    return error_type(getError(code, msg or ""))
def sms_notifier(recipient, message, action="send"):
    """Send an SMS to a single number or a list of numbers via the bulk-SMS API.

    Args:
        recipient: A phone-number string, or a list of phone-number strings.
        message: Text body of the SMS.
        action: Endpoint suffix appended to settings.BULK_SMS_BASE_URL.

    Returns:
        The decoded JSON API response on success, or False on any failure.
    """
    # Normalize to international integer form. Handling a bare string first
    # fixes two bugs in the original: a plain string hit `len(recipient) > 1`
    # and was iterated character-by-character, and a one-element list was
    # passed whole to clean_phone_numbers (raising TypeError -> False).
    if isinstance(recipient, str):
        recipient = clean_phone_numbers(recipient)
    else:
        cleaned = [clean_phone_numbers(item) for item in recipient]
        recipient = cleaned[0] if len(cleaned) == 1 else cleaned
    payload = {
        "sender_name": settings.BULK_SMS_SENDER,
        "message": message,
        "to": recipient,
    }
    try:
        response = requests.request("POST", url=settings.BULK_SMS_BASE_URL + action, data=json.dumps(payload),
                                    headers=settings.BULK_SMS_HEADER)
        if response.status_code != 200:
            raise ValueError('SMS sending failed')
        logger.info(f'status code: {response.status_code}')
        logger.info(response.json()['data']['status'])
        return response.json()
    except Exception as e:
        # Best-effort notifier: swallow and report failure via return value.
        logger.error('sms_notifier@Error')
        logger.error(e)
        return False
def clean_phone_numbers(phone_no):
    """Normalize a phone-number string to an international integer.

    A leading '+' is stripped; a leading '0' is replaced with the '234'
    prefix (presumably Nigeria's country code — confirm with callers).
    Using startswith avoids the IndexError the original raised on an
    empty string (int('') now raises ValueError instead).
    """
    if phone_no.startswith('+'):
        phone_no = phone_no[1:]
    elif phone_no.startswith('0'):
        phone_no = '234' + phone_no[1:]
    return int(phone_no)
|
"""Read and process data for Instagram from Quintly."""
import datetime as dt
import json
from time import sleep
from typing import Dict, Optional
from django.db.utils import IntegrityError
from django.db.models import Q, Sum
from loguru import logger
from sentry_sdk import capture_exception
from ...models.insta import (
Insta,
InstaInsight,
InstaPost,
InstaStory,
InstaIGTV,
InstaIGTVData,
InstaComment,
InstaDemographics,
InstaHourlyFollowers,
)
from . import quintly
from ..common.utils import BERLIN, local_today
def scrape_full(insta: Insta):
    """Initiate scraping for daily, weekly, and monthly Instagram data from Quintly.

    Args:
        insta (Insta): Instagram object to collect data for.
    """
    logger.info('Starting full scrape for Instagram account "{}"', insta.name)
    # Restrict every sub-scraper to this one account.
    insta_filter = Q(id=insta.id)
    start_date = dt.date(2019, 1, 1)
    # Brief pause — presumably rate-limiting against the Quintly API; confirm.
    sleep(1)
    # NOTE(review): insights/stories/posts/IGTV scraping is disabled below;
    # only comments, demographics, and hourly followers are collected.
    # scrape_insights(start_date=start_date, insta_filter=insta_filter)
    # scrape_stories(start_date=start_date, insta_filter=insta_filter)
    # scrape_posts(start_date=start_date, insta_filter=insta_filter)
    # scrape_igtv(start_date=start_date, insta_filter=insta_filter)
    scrape_comments(start_date=start_date, insta_filter=insta_filter)
    scrape_demographics(start_date=start_date, insta_filter=insta_filter)
    scrape_hourly_followers(start_date=start_date, insta_filter=insta_filter)
    logger.success('Finished full scrape for Instagram account "{}"', insta.name)
def scrape_insights(
    *,
    start_date: Optional[dt.date] = None,
    insta_filter: Optional[Q] = None,
):
    """Retrieve Instagram insights data from Quintly.

    Results are saved in :class:`~okr.models.insta.InstaInsight`.

    Args:
        start_date (Optional[dt.date], optional): Earliest date to request data for.
            Defaults to None.
        insta_filter (Optional[Q], optional): Filter to apply to
            :class:`~okr.models.insta.Insta` object. Defaults to None.
    """
    accounts = Insta.objects.all()
    if insta_filter:
        accounts = accounts.filter(insta_filter)
    for account in accounts:
        logger.info(f"Scraping Instagram insights for {account.name}")
        df = quintly.get_insta_insights(account.quintly_profile_id, start_date=start_date)
        for _, row in df.iterrows():
            # Rows without an import timestamp carry no usable data.
            if row.importTime is None:
                continue
            defaults = {
                "quintly_last_updated": BERLIN.localize(
                    dt.datetime.fromisoformat(row.importTime)
                ),
                "reach": row.reachDay,
                "reach_7_days": row.reachWeek,
                "reach_28_days": row.reachDays28,
                "impressions": row.impressionsDay,
                "followers": row.followers,
                "text_message_clicks_day": row.textMessageClicksDay,
                "email_contacts_day": row.emailContactsDay,
                "profile_views": row.profileViewsDay,
            }
            try:
                # Upsert keyed on (account, date).
                InstaInsight.objects.update_or_create(
                    insta=account,
                    date=dt.date.fromisoformat(row.time),
                    defaults=defaults,
                )
            except IntegrityError as e:
                capture_exception(e)
                logger.exception(
                    "Data for insights on {} failed integrity check:\n{}",
                    row.time,
                    defaults,
                )
def scrape_stories(
    *, start_date: Optional[dt.date] = None, insta_filter: Optional[Q] = None
):
    """Retrieve data for Instagram stories from Quintly.

    Results are saved in :class:`~okr.models.insta.InstaStory`.

    Args:
        start_date (Optional[dt.date], optional): Earliest date to request data for.
            Defaults to None.
        insta_filter (Optional[Q], optional): Filter to apply to
            :class:`~okr.models.insta.Insta` object. Defaults to None.
    """
    accounts = Insta.objects.all()
    if insta_filter:
        accounts = accounts.filter(insta_filter)
    for account in accounts:
        logger.info(f"Scraping Instagram stories for {account.name}")
        df = quintly.get_insta_stories(account.quintly_profile_id, start_date=start_date)
        for _, row in df.iterrows():
            defaults = {
                "created_at": BERLIN.localize(dt.datetime.fromisoformat(row.time)),
                "quintly_last_updated": BERLIN.localize(
                    dt.datetime.fromisoformat(row.importTime)
                ),
                "caption": row.caption,
                "reach": row.reach,
                "impressions": row.impressions,
                "replies": row.replies,
                "taps_forward": row.tapsForward,
                "taps_back": row.tapsBack,
                "story_type": row.type,
                "link": row.link,
                "exits": row.exits,
            }
            try:
                # Upsert keyed on the story's external ID.
                InstaStory.objects.update_or_create(
                    insta=account, external_id=row.externalId, defaults=defaults
                )
            except IntegrityError as e:
                capture_exception(e)
                logger.exception(
                    "Data for story with ID {} failed integrity check:\n{}",
                    row.externalId,
                    defaults,
                )
def scrape_posts(
    *, start_date: Optional[dt.date] = None, insta_filter: Optional[Q] = None
):
    """Retrieve data for Instagram posts from Quintly.

    Results are saved in :class:`~okr.models.insta.InstaPost`.

    Args:
        start_date (Optional[dt.date], optional): Earliest date to request data for.
            Defaults to None.
        insta_filter (Optional[Q], optional): Filter to apply to
            :class:`~okr.models.insta.Insta` object. Defaults to None.
    """
    accounts = Insta.objects.all()
    if insta_filter:
        accounts = accounts.filter(insta_filter)
    for account in accounts:
        logger.info(f"Scraping Instagram posts for {account.name}")
        df = quintly.get_insta_posts(account.quintly_profile_id, start_date=start_date)
        for _, row in df.iterrows():
            defaults = {
                "created_at": BERLIN.localize(dt.datetime.fromisoformat(row.time)),
                "quintly_last_updated": BERLIN.localize(
                    dt.datetime.fromisoformat(row.importTime)
                ),
                # A missing caption is stored as an empty string.
                "message": row.message or "",
                "comments": row.comments,
                "reach": row.reach,
                "impressions": row.impressions,
                "likes": row.likes,
                "saved": row.saved,
                "video_views": row.videoViews,
                "post_type": row.type,
                "link": row.link,
            }
            try:
                # Upsert keyed on the post's external ID.
                InstaPost.objects.update_or_create(
                    insta=account, external_id=row.externalId, defaults=defaults
                )
            except IntegrityError as e:
                capture_exception(e)
                logger.exception(
                    "Data for post with ID {} failed integrity check:\n{}",
                    row.externalId,
                    defaults,
                )
def scrape_igtv(
    *, start_date: Optional[dt.date] = None, insta_filter: Optional[Q] = None
):
    """Retrieve data for Instagram IGTV videos from Quintly.

    Results are saved in :class:`~okr.models.insta.InstaIGTV`.

    Args:
        start_date (Optional[dt.date], optional): Earliest date to request data for.
            Defaults to None.
        insta_filter (Optional[Q], optional): Filter to apply to
            :class:`~okr.models.insta.Insta` object. Defaults to None.
    """
    instas = Insta.objects.all()
    if insta_filter:
        instas = instas.filter(insta_filter)
    for insta in instas:
        logger.info(f"Scraping IGTV for {insta.name}")
        df = quintly.get_insta_igtv(insta.quintly_profile_id, start_date=start_date)
        for index, row in df.iterrows():
            # Quintly timestamps are naive ISO strings; localize to Berlin time.
            defaults = {
                "created_at": BERLIN.localize(dt.datetime.fromisoformat(row.time)),
                "quintly_last_updated": BERLIN.localize(
                    dt.datetime.fromisoformat(row.importTime)
                ),
                "message": row.message or "",
                "video_title": row.videoTitle,
                "likes": row.likes,
                "comments": row.comments,
                "reach": row.reach,
                "impressions": row.impressions,
                "saved": row.saved,
                "video_views": row.videoViews,
                "link": row.link,
            }
            try:
                obj, created = InstaIGTV.objects.update_or_create(
                    insta=insta, external_id=row.externalId, defaults=defaults
                )
                # Also derive today's per-day deltas from the cumulative totals.
                _scrape_igtv_daily(insta, obj, defaults)
            except IntegrityError as e:
                capture_exception(e)
                # NOTE(review): '{}' placeholders suggest a loguru-style logger;
                # with stdlib logging these args would not be interpolated — confirm.
                logger.exception(
                    "Data for post with ID {} failed integrity check:\n{}",
                    row.externalId,
                    defaults,
                )
def scrape_comments(
    *, start_date: Optional[dt.date] = None, insta_filter: Optional[Q] = None
):
    """Retrieve data for Instagram comments from Quintly.

    Results are saved in :class:`~okr.models.insta.InstaComment`.

    Args:
        start_date (Optional[dt.date], optional): Earliest date to request data for.
            Defaults to None.
        insta_filter (Optional[Q], optional): Filter to apply to
            :class:`~okr.models.insta.Insta` object. Defaults to None.
    """
    instas = Insta.objects.all()
    if insta_filter:
        instas = instas.filter(insta_filter)
    for insta in instas:
        logger.info(f"Scraping Instagram comments for {insta.name}")
        df = quintly.get_insta_comments(insta.quintly_profile_id, start_date=start_date)
        # Per-account cache of post lookups; many comments share a post.
        # Misses are cached as None too, so each unknown post is queried once.
        post_cache: Dict[str, InstaPost] = {}
        for index, row in df.iterrows():
            defaults = {
                "created_at": BERLIN.localize(dt.datetime.fromisoformat(row.time)),
                "quintly_last_updated": BERLIN.localize(
                    dt.datetime.fromisoformat(row.importTime)
                ),
                "is_account_answer": bool(row.isAccountAnswer),
                "username": row.username,
                # Only the message length is stored, not the text itself.
                "message_length": len(row.message or ""),
                "likes": row.likes,
                "is_reply": bool(row.isReply),
                "parent_comment_id": row.parentCommentId,
                "is_hidden": bool(row.isHidden),
            }
            if row.externalPostId in post_cache:
                post = post_cache[row.externalPostId]
            else:
                post = InstaPost.objects.filter(
                    insta=insta,
                    external_id=row.externalPostId,
                ).first()
                post_cache[row.externalPostId] = post
            if not post:
                # Comment references a post we never scraped; skip it.
                logger.debug(
                    "Comment with ID {} and post ID {} has no corresponding post",
                    row.externalId,
                    row.externalPostId,
                )
                continue
            defaults["post"] = post
            try:
                # NOTE(review): keyed on external_id only (no insta filter) —
                # assumes comment IDs are globally unique; confirm.
                obj, created = InstaComment.objects.update_or_create(
                    external_id=row.externalId,
                    defaults=defaults,
                )
            except IntegrityError as e:
                capture_exception(e)
                logger.exception(
                    "Data for comment with ID {} failed integrity check:\n{}",
                    row.externalId,
                    defaults,
                )
def _scrape_igtv_daily(insta: Insta, igtv: InstaIGTV, defaults: dict):
    """Derive today's per-day IGTV stats from the cumulative totals.

    Quintly reports lifetime totals per video; this subtracts the sum of all
    previously stored daily rows to obtain today's delta, then upserts the
    :class:`~okr.models.insta.InstaIGTVData` row for today.

    Args:
        insta (Insta): Account the video belongs to (not used in the body).
        igtv (InstaIGTV): Video the daily row belongs to.
        defaults (dict): Cumulative field values as built by ``scrape_igtv``.
    """
    # Copy defaults to avoid modifying the original dict
    defaults = defaults.copy()
    # Delete fields that are not part of the daily stats
    del defaults["created_at"]
    del defaults["message"]
    del defaults["video_title"]
    del defaults["link"]
    today = local_today()
    diff_fields = ["likes", "comments", "reach", "impressions", "saved", "video_views"]
    # Sum everything attributed to earlier days in one aggregate query.
    aggregations = [Sum(field) for field in diff_fields]
    last_data = InstaIGTVData.objects.filter(
        igtv=igtv,
        date__lt=today,
    ).aggregate(*aggregations)
    # Today's value = cumulative total minus what earlier days already hold
    # (the aggregate is None when no prior rows exist, hence the `or 0`).
    for field in diff_fields:
        defaults[field] -= last_data[f"{field}__sum"] or 0
    obj, created = InstaIGTVData.objects.update_or_create(
        igtv=igtv,
        date=today,
        defaults=defaults,
    )
def scrape_demographics(
    *,
    start_date: Optional[dt.date] = None,
    insta_filter: Optional[Q] = None,
):
    """Retrieve Instagram demographics data from Quintly.

    Results are saved in :class:`~okr.models.insta.InstaDemographics`.

    Args:
        start_date (Optional[dt.date], optional): Earliest date to request data for.
            Defaults to None.
        insta_filter (Optional[Q], optional): Filter to apply to
            :class:`~okr.models.insta.Insta` object. Defaults to None.
    """
    instas = Insta.objects.all()
    if insta_filter:
        instas = instas.filter(insta_filter)

    for insta in instas:
        logger.info(f"Scraping Instagram demographics for {insta.name}")
        frame = quintly.get_insta_demographics(
            insta.quintly_profile_id, start_date=start_date
        )
        for _, row in frame.iterrows():
            # Rows without demographic data carry nothing to store.
            if not row.audienceGenderAndAge:
                continue
            # Each entry id presumably encodes "<gender>-<age_range>";
            # split on the first dash only.
            for bucket in json.loads(row.audienceGenderAndAge):
                gender, _, age_range = bucket["id"].partition("-")
                payload = {
                    "quintly_last_updated": BERLIN.localize(
                        dt.datetime.fromisoformat(row.importTime)
                    ),
                    "followers": bucket["followers"],
                }
                try:
                    InstaDemographics.objects.update_or_create(
                        insta=insta,
                        date=dt.date.fromisoformat(row.time[:10]),
                        age_range=age_range,
                        gender=gender,
                        defaults=payload,
                    )
                except IntegrityError as e:
                    capture_exception(e)
                    logger.exception(
                        "Data for demographics on {} failed integrity check:\n{}",
                        row.time,
                        insta,
                    )
def scrape_hourly_followers(
    *,
    start_date: Optional[dt.date] = None,
    insta_filter: Optional[Q] = None,
):
    """Retrieve Instagram hourly followers data from Quintly.

    Results are saved in :class:`~okr.models.insta.InstaHourlyFollowers`.

    Args:
        start_date (Optional[dt.date], optional): Earliest date to request data for.
            Defaults to None.
        insta_filter (Optional[Q], optional): Filter to apply to
            :class:`~okr.models.insta.Insta` object. Defaults to None.
    """
    instas = Insta.objects.all()
    if insta_filter:
        instas = instas.filter(insta_filter)
    for insta in instas:
        logger.info(f"Scraping Instagram hourly followers for {insta.name}")
        df = quintly.get_insta_hourly_followers(
            insta.quintly_profile_id, start_date=start_date
        )
        for index, row in df.iterrows():
            if not row.onlineFollowers:
                continue
            # Row time is an ISO timestamp; keep only the date portion.
            date = dt.date.fromisoformat(row.time[:10])
            # One JSON entry per hour of the day.
            for entry in json.loads(row.onlineFollowers):
                hour = entry["id"]
                followers = entry["followers"]
                # Build a timezone-aware datetime for this hour slot.
                date_time = BERLIN.localize(
                    dt.datetime(date.year, date.month, date.day, hour)
                )
                defaults = {
                    "quintly_last_updated": BERLIN.localize(
                        dt.datetime.fromisoformat(row.importTime)
                    ),
                    "followers": followers,
                }
                try:
                    obj, created = InstaHourlyFollowers.objects.update_or_create(
                        insta=insta,
                        date_time=date_time,
                        defaults=defaults,
                    )
                except IntegrityError as e:
                    capture_exception(e)
                    logger.exception(
                        "Data for hourly followers on {} failed integrity check:\n{}",
                        date_time,
                        insta,
                    )
|
sum_square = 0
sum = 0
for i in range(1,101):
sum_square += i**2
sum +=i
square_sum = sum**2
diff = square_sum - sum_square
print(diff)
|
import glob
import multiprocessing as mp
import os
import shutil

import numpy as np

from ensemble import Ensemble
from error_matrix import ErrorMatrix
from model import Model
class AutoLearner:
    """Automated learner: selects promising algorithms via an error matrix,
    assembles them into a stacked ensemble, and exposes fit/refit/predict."""

    def __init__(self, selected_algorithms='all', selected_hyperparameters='default', ensemble_size=3,
                 ensemble_method='Logit', error_matrix_values='default', n_cores=None, verbose=True):
        """Instantiate an AutoLearner object.

        Args:
            selected_algorithms: Algorithms to consider ('all' or a subset).
            selected_hyperparameters: Hyperparameter settings ('default' or custom).
            ensemble_size: Number of base learners in the ensemble.
            ensemble_method: Second-layer stacking method (e.g. 'Logit').
            error_matrix_values: Precomputed error-matrix values or 'default'.
            n_cores: Number of CPU cores to use (None = library default).
            verbose: Whether to print progress messages.
        """
        # Error matrix defined for a specific dataset.
        self.error_matrix = ErrorMatrix(selected_algorithms, selected_hyperparameters, ensemble_size,
                                        error_matrix_values, verbose, n_cores)
        # Empty ensemble; populated with base learners during fit().
        self.ensemble = Ensemble(ensemble_size, ensemble_method, verbose, n_cores)

    @staticmethod
    def _remove_smac_output():
        """Delete SMAC3 scratch output left behind by optimization.

        Replaces the former ``os.system('rm -rf smac3-output*')``: pure-Python
        removal is portable (works on Windows) and never invokes a shell.
        """
        for path in glob.glob('smac3-output*'):
            if os.path.isdir(path):
                shutil.rmtree(path, ignore_errors=True)
            else:
                try:
                    os.remove(path)
                except OSError:
                    pass

    def fit(self, train_features, train_labels):
        """Fit the model to a training set (no categorical features for now)."""
        # no preprocessing for now
        self.error_matrix.add_dataset(train_features, train_labels)
        # Seed the ensemble with the algorithms the error matrix ranks best.
        for model in self.error_matrix.best_algorithms(train_features, train_labels):
            self.ensemble.add_learner(model)
        self.ensemble.bayesian_optimize()
        self.ensemble.fit_base_learners(train_features, train_labels)
        self.ensemble.fit_stacked_learner(train_features, train_labels)
        self._remove_smac_output()

    def refit(self, train_features, train_labels):
        """Refit the existing ensemble to a newly provided training set."""
        # no preprocessing for now
        self.ensemble.fit_base_learners(train_features, train_labels)
        self.ensemble.fit_stacked_learner(train_features, train_labels)
        self._remove_smac_output()

    def predict(self, test_features):
        """Return predictions of the stacked ensemble on a new test set.

        Base-learner predictions run in parallel worker processes, are stacked
        into the second-layer feature matrix, and fed to the stacked model.
        """
        with mp.Pool() as pool:
            async_results = [
                pool.apply_async(Model.predict, args=(model, test_features))
                for model in self.ensemble.base_learners
            ]
            # Collect in submission order (list beats repeated tuple +=).
            predictions = [res.get() for res in async_results]
        test_second_layer_matrix = np.matrix.transpose(np.stack(predictions))
        return self.ensemble.model.predict(test_second_layer_matrix)
|
from PIL import Image
import glob, os, sys, face_recognition, itertools, subprocess, concurrent.futures, numpy, face_util
# NOTE(review): 'global' statements at module scope are no-ops — names assigned
# at the top level are already module globals. Kept only as documentation of
# which names the worker function reads.
global is_verbose
global known_faces
global known_encodings
def print_help():
    """Print usage information for sort.py and terminate the process."""
    print(" ")
    print("Usage: ")
    print(" ")
    print("python3 sort.py [ -f faces-dir ] [ -t 0.5] [ -v ] [ -h ]")
    print(" -f faces-dir")
    print(" default = 'faces' if omitted")
    print(" Directory where to find all known and unknown faces.")
    print(" The script tries to recognize unknown faces and move")
    print(" them into the directory of the known person.")
    print(" Example: 'python sort.py faces'")
    print(" will use")
    print(" - './faces/unknown/' to find unknown faces")
    print(" - './faces/known/' to find known faces in subdirectories,")
    print(" e.g. './faces/known/Barack Obama/bigparty.jpg'")
    print(" -t 0.5 (optional)")
    print(" tolerance. Default = 0.5.")
    print(" Less than 0.6 is more strict/similar, for example 0.5")
    print(" Please refer to the documentation of face_recognition:")
    print(" face_recognition.api.compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6)")
    print(" -v (optional)")
    print(" verbose. Prints more messages.")
    print(" -h or --help")
    print(" Print this help message.")
    print(" ")
    print("Run 'collect.py' first to find (unknown) faces in image files.")
    print("'collect.py' creates a subdirectory './faces/unknown/'.")
    print("'sort.py' will read all unknown faces from there and will move/sort.")
    print("them. Example './faces/known/Barack Obama/obamas_face.jpg'")
    # sys.exit() is the documented way to terminate; quit() is provided by the
    # site module for interactive use and may be absent under `python -S`.
    sys.exit()
def compare_face(file):
    """Match one unknown face image against the known faces and, on a match,
    move the file into the matching person's 'suggested' subdirectory.

    Returns the file name in all cases so callers can report progress.
    """
    source_path = os.path.join(dir_faces_unknown, file)
    encodings = face_recognition.face_encodings(
        face_recognition.load_image_file(source_path)
    )
    face_count = len(encodings)
    if face_count != 1:
        # Zero or multiple faces make the match ambiguous; skip this image.
        if is_verbose == "-v":
            print(f"WARNING: Found '{face_count}' faces in file. Expected exactly '1'. File {source_path}")
        return file
    nearest_face, _ = face_util.compare_face(
        encodings[0], known_faces, known_encodings, face_tolerance, is_verbose
    )
    if nearest_face != "":
        suggestion_dir = os.path.join(dir_faces_suggested, nearest_face)
        if not os.path.exists(suggestion_dir):
            os.makedirs(suggestion_dir, exist_ok=True)
        target_path = os.path.join(dir_faces_suggested, nearest_face, file)
        os.rename(source_path, target_path)
        os.utime(target_path)
        print(nearest_face + " < " + file)
    return file
# --- Command-line parsing -------------------------------------------------
# Minimal two-state parser: '-f' and '-t' set next_arg so the following
# token is consumed as that option's value.
is_verbose = ""
dir_faces = "faces"
dir_faces_unknown = ""
dir_faces_suggested = ""
face_tolerance = 0.5
next_arg = ""
for arg in sys.argv[1:]:
    if arg == '-v':
        is_verbose = "-v"
    elif arg == '-f':
        next_arg = "-f"
    elif next_arg == '-f':
        dir_faces = arg
        next_arg = ""
    elif arg == '-t':
        next_arg = "-t"
    elif next_arg == '-t':
        face_tolerance = float(arg)
        next_arg = ""
    elif arg == '-h' or arg == '--help':
        print_help()
# Resolve the faces directory relative to this script when given as relative.
dir_script = os.path.dirname(__file__)
if not os.path.isabs(dir_faces):
    dir_faces = os.path.join(dir_script, dir_faces)
if not os.path.exists(dir_faces):
    print("Directory for faces does not exist " + dir_faces)
    print_help()
dir_faces_unknown = os.path.join(dir_faces, "unknown")
if not os.path.exists(dir_faces_unknown):
    os.mkdir(dir_faces_unknown)
    print("Directory for unknown faces does not exist. Creating dir = " + dir_faces_unknown)
    print("There is nothing to do. The script expects pictures of unknown persons there.")
    print_help()
dir_faces_known = os.path.join(dir_faces, "known")
if not os.path.exists(dir_faces_known):
    os.mkdir(dir_faces_known)
    print("Directory for known faces does not exist. Dir = " + dir_faces_known)
    print("Create directories for persons, e.g. './faces/known/Obama' and move faces into it.")
    print(" './faces/known/Barack'")
    print(" './faces/known/Michelle'")
    print("... and move their faces into it.")
    print("There is nothing to do. Creating dir " + dir_faces_known + "...")
    print_help()
dir_faces_suggested = os.path.join(dir_faces, "suggested")
if not os.path.exists(dir_faces_suggested):
    os.mkdir(dir_faces_suggested)
    print("Directory for suggested faces does not exist. Creating dir = " + dir_faces_suggested)
# Remove empty subdirs in suggested so stale suggestions don't accumulate
for subdir in os.listdir(dir_faces_suggested):
    if os.path.isdir(os.path.join(dir_faces_suggested, subdir)):
        if not os.listdir(os.path.join(dir_faces_suggested, subdir)):
            if is_verbose: print("Remove empty dir for suggested face dir " + subdir)
            os.rmdir(os.path.join(dir_faces_suggested, subdir))
# Read known faces recursively
known_faces = []
known_encodings = []
known_faces, known_encodings = face_util.read_known_faces(dir_faces_known, is_verbose)
# Read unknown faces
files = os.listdir(dir_faces_unknown)
with concurrent.futures.ProcessPoolExecutor() as executor:
    # Process the list of files, but split the work across the process pool to use all CPUs!
    for file, processed_file in zip(files, executor.map(compare_face, files)):
        if is_verbose == "-v" : print("Finished reading known face from file " + file)
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import tkinter
import math

# Simulation constants (canvas pixels).
scale = 4
robotSpeed = 6
robotSize = 22 * scale
robotX = 600
robotY = 600
robotCatchZone = 30
robotForwarding = 4
ballSize = 7.4 * scale
ballX = 600
ballY = 600
# Degree of the distance correction measured from the inside. With >= 2 the
# acceleration becomes smooth; with 1 no useless stable point is created.
touchDistancingDeg = 1
# Distance at which robot and ball touch (sum of the two radii).
touchDistance = (robotSize+ballSize)/2
root = tkinter.Tk()
root.geometry("1200x1250")
canvas = tkinter.Canvas(root, bg="#fff", width=1200, height=1200)
canvas.pack()
# Draw the initial robot and ball ovals.
robot = canvas.create_oval(robotX-robotSize/2,robotY-robotSize/2,robotX+robotSize/2,robotY+robotSize/2, fill="#f0f0f0", tags="robot")
canvas.create_oval(ballX-ballSize/2,ballY-ballSize/2,ballX+ballSize/2,ballY+ballSize/2, fill="#f0f0f0",tags="ball")
def motion(r,the):
    """Compute a move command (magnitude, angle) from polar input (r, the).

    Presumably (r, the) is the ball's position in the robot's frame and the
    returned angle steers the robot around the ball — TODO confirm with the
    caller (`draw`). The magnitude component is always 1.
    """
    # Normalize `the` into [-pi, pi).
    if(the < -math.pi):
        while(the < -math.pi):
            the += 2*math.pi
    elif(the >= math.pi):
        while(the >= math.pi):
            the -= 2*math.pi
    x,y = getXY(r,the)
    print(r,360*the/2/math.pi,x,y)  # debug trace: r, angle in degrees, x, y
    # The go-around (circle behind the target) algorithm follows.
    if(the == 0):
        theSign = 1
    else:
        theSign = (abs(the)/the)  # sign of the angle: +1 or -1
    if(r <= touchDistance):
        # Inside touching distance: steer away, scaled by the correction degree.
        m = (1,-the + theSign*((r/touchDistance)**touchDistancingDeg)*3*math.pi/2 )
    elif(r > touchDistance and x >= -touchDistance):
        m = (1,-theSign*(math.pi - abs(the) + math.asin(touchDistance/r)))
    elif(r > touchDistance and x < -touchDistance):
        # NOTE(review): divides by abs(y) — raises ZeroDivisionError when
        # y == 0 in this branch; confirm callers cannot hit that case.
        m = (1,-theSign*(math.pi - abs(the) + math.atan((-x/abs(y))) - math.atan(((-x-touchDistance)/abs(y)))))
    return m
def getRobotRD(ballR,ballThe):
    """Ball-frame polar -> robot-frame polar: same radius, negated angle."""
    r = ballR
    the = -ballThe
    return r, the
def getBallRD(robotR,robotThe):
    """Robot-frame polar -> ball-frame polar: same radius, negated angle."""
    r = robotR
    the = -robotThe
    return r, the
def getXY(r, the):
    """Convert polar coordinates (r, the) to Cartesian (x, y)."""
    return r * math.cos(the), r * math.sin(the)
def getRD(x,y):
    """Convert Cartesian coordinates (x, y) to polar (r, the)."""
    return math.sqrt(x**2 + y**2), math.atan2(y, x)
def mouse(event):
    """Mouse-move handler: the ball tracks the pointer position."""
    global ballX, ballY
    ballX, ballY = event.x, event.y
def draw():
    """Advance the robot one simulation step toward the ball and redraw."""
    global robotX
    global robotY
    global robotSize
    global robotSpeed
    global robotCatchZone
    #dX,dY = getXY(getBallRD(motion(getRobotRD(getRD(robotX,robotY)))))
    # Convert the robot->ball offset to polar, run the motion policy in the
    # robot frame, then convert the command back to screen coordinates.
    rRD,theRD = getRD(robotX-ballX,robotY-ballY)
    rRobotRD,theRobotRD=getRobotRD(rRD,theRD)
    rMotion,theMotion=motion(rRobotRD,theRobotRD)
    rBallRD,theBallRD=getBallRD(rMotion,theMotion)
    dX,dY=getXY(rBallRD,theBallRD)
    robotX += robotSpeed * dX
    robotY += robotSpeed * dY
    # Full redraw: wipe the canvas and re-create both ovals.
    canvas.delete("all")
    canvas.create_oval(ballX-ballSize/2,ballY-ballSize/2,ballX+ballSize/2,ballY+ballSize/2, fill="#f0f0f0",tags="ball")
    canvas.create_oval(robotX-robotSize/2,robotY-robotSize/2,robotX+robotSize/2,robotY+robotSize/2, fill="#f0f0f0", tags="robot")
    #canvas.move(robot, 5, 0)
    # Schedule the next animation tick in 10 ms.
    canvas.after(10, clk)
def clk():
    """Animation tick: (re)bind mouse tracking, then draw one frame.

    NOTE(review): this re-binds <Motion> on every tick, and every extra press
    of the Start button starts an additional after()-loop — confirm intended.
    """
    canvas.bind("<Motion>", mouse)
    draw()
# The Start button kicks off the animation loop; mainloop blocks until the
# window is closed.
Start = tkinter.Button(root, text="Start", command=clk)
Start.pack()
root.mainloop()
from .node import Node
class BreakNode(Node):
    """Node for the BREAK keyword; a pure marker carrying no extra fields."""
    __slots__ = ()
class ReturnNode(Node):
    """Node for the RETURN keyword; a pure marker carrying no extra fields."""
    __slots__ = ()
|
# from https://linked.data.gov.au/dataset/bdr/conservation-status-taxa-wa
# in the sop_recipe_abis_model datagraphs
CONSERVATION_STATUS_TAXA = [
"https://test-idafd.biodiversity.org.au/name/afd/70162908",
"https://test-idafd.biodiversity.org.au/name/afd/70162916",
"https://test-idafd.biodiversity.org.au/name/afd/70164586",
"https://test-idafd.biodiversity.org.au/name/afd/70165201",
"https://test-idafd.biodiversity.org.au/name/afd/70165203",
"https://test-idafd.biodiversity.org.au/name/afd/70167551",
"https://test-idafd.biodiversity.org.au/name/afd/70167566",
"https://test-idafd.biodiversity.org.au/name/afd/70167571",
"https://test-idafd.biodiversity.org.au/name/afd/70168226",
"https://test-idafd.biodiversity.org.au/name/afd/70171645",
"https://test-idafd.biodiversity.org.au/name/afd/70174584",
"https://test-idafd.biodiversity.org.au/name/afd/70174588",
"https://test-idafd.biodiversity.org.au/name/afd/70174589",
"https://test-idafd.biodiversity.org.au/name/afd/70174590",
"https://test-idafd.biodiversity.org.au/name/afd/70174591",
"https://test-idafd.biodiversity.org.au/name/afd/70174593",
"https://test-idafd.biodiversity.org.au/name/afd/70174596",
"https://test-idafd.biodiversity.org.au/name/afd/70174597",
"https://test-idafd.biodiversity.org.au/name/afd/70174598",
"https://test-idafd.biodiversity.org.au/name/afd/70174599",
"https://test-idafd.biodiversity.org.au/name/afd/70174601",
"https://test-idafd.biodiversity.org.au/name/afd/70174604",
"https://test-idafd.biodiversity.org.au/name/afd/70174605",
"https://test-idafd.biodiversity.org.au/name/afd/70174608",
"https://test-idafd.biodiversity.org.au/name/afd/70174611",
"https://test-idafd.biodiversity.org.au/name/afd/70174612",
"https://test-idafd.biodiversity.org.au/name/afd/70175999",
"https://test-idafd.biodiversity.org.au/name/afd/70176189",
"https://test-idafd.biodiversity.org.au/name/afd/70176351",
"https://test-idafd.biodiversity.org.au/name/afd/70176993",
"https://test-idafd.biodiversity.org.au/name/afd/70177704",
"https://test-idafd.biodiversity.org.au/name/afd/70177902",
"https://test-idafd.biodiversity.org.au/name/afd/70177903",
"https://test-idafd.biodiversity.org.au/name/afd/70179310",
"https://test-idafd.biodiversity.org.au/name/afd/70179311",
"https://test-idafd.biodiversity.org.au/name/afd/70179312",
"https://test-idafd.biodiversity.org.au/name/afd/70179313",
"https://test-idafd.biodiversity.org.au/name/afd/70179314",
"https://test-idafd.biodiversity.org.au/name/afd/70179315",
"https://test-idafd.biodiversity.org.au/name/afd/70179345",
"https://test-idafd.biodiversity.org.au/name/afd/70180670",
"https://test-idafd.biodiversity.org.au/name/afd/70180673",
"https://test-idafd.biodiversity.org.au/name/afd/70180674",
"https://test-idafd.biodiversity.org.au/name/afd/70180678",
"https://test-idafd.biodiversity.org.au/name/afd/70180682",
"https://test-idafd.biodiversity.org.au/name/afd/70181348",
"https://test-idafd.biodiversity.org.au/name/afd/70181350",
"https://test-idafd.biodiversity.org.au/name/afd/70181355",
"https://test-idafd.biodiversity.org.au/name/afd/70181379",
"https://test-idafd.biodiversity.org.au/name/afd/70181382",
"https://test-idafd.biodiversity.org.au/name/afd/70182008",
"https://test-idafd.biodiversity.org.au/name/afd/70182161",
"https://test-idafd.biodiversity.org.au/name/afd/70182790",
"https://test-idafd.biodiversity.org.au/name/afd/70182791",
"https://test-idafd.biodiversity.org.au/name/afd/70182814",
"https://test-idafd.biodiversity.org.au/name/afd/70183770",
"https://test-idafd.biodiversity.org.au/name/afd/70184890",
"https://test-idafd.biodiversity.org.au/name/afd/70185407",
"https://test-idafd.biodiversity.org.au/name/afd/70188407",
"https://test-idafd.biodiversity.org.au/name/afd/70188409",
"https://test-idafd.biodiversity.org.au/name/afd/70188412",
"https://test-idafd.biodiversity.org.au/name/afd/70188414",
"https://test-idafd.biodiversity.org.au/name/afd/70188418",
"https://test-idafd.biodiversity.org.au/name/afd/70195524",
"https://test-idafd.biodiversity.org.au/name/afd/70195525",
"https://test-idafd.biodiversity.org.au/name/afd/70195863",
"https://test-idafd.biodiversity.org.au/name/afd/70195981",
"https://test-idafd.biodiversity.org.au/name/afd/70195982",
"https://test-idafd.biodiversity.org.au/name/afd/70195984",
"https://test-idafd.biodiversity.org.au/name/afd/70195985",
"https://test-idafd.biodiversity.org.au/name/afd/70195987",
"https://test-idafd.biodiversity.org.au/name/afd/70195988",
"https://test-idafd.biodiversity.org.au/name/afd/70195989",
"https://test-idafd.biodiversity.org.au/name/afd/70195990",
"https://test-idafd.biodiversity.org.au/name/afd/70195991",
"https://test-idafd.biodiversity.org.au/name/afd/70195992",
"https://test-idafd.biodiversity.org.au/name/afd/70197086",
"https://test-idafd.biodiversity.org.au/name/afd/70197169",
"https://test-idafd.biodiversity.org.au/name/afd/70197172",
"https://test-idafd.biodiversity.org.au/name/afd/70197262",
"https://test-idafd.biodiversity.org.au/name/afd/70198528",
"https://test-idafd.biodiversity.org.au/name/afd/70199327",
"https://test-idafd.biodiversity.org.au/name/afd/70199426",
"https://test-idafd.biodiversity.org.au/name/afd/70199430",
"https://test-idafd.biodiversity.org.au/name/afd/70199663",
"https://test-idafd.biodiversity.org.au/name/afd/70199746",
"https://test-idafd.biodiversity.org.au/name/afd/70202632",
"https://test-idafd.biodiversity.org.au/name/afd/70202636",
"https://test-idafd.biodiversity.org.au/name/afd/70202637",
"https://test-idafd.biodiversity.org.au/name/afd/70202993",
"https://test-idafd.biodiversity.org.au/name/afd/70202998",
"https://test-idafd.biodiversity.org.au/name/afd/70203007",
"https://test-idafd.biodiversity.org.au/name/afd/70203256",
"https://test-idafd.biodiversity.org.au/name/afd/70204649",
"https://test-idafd.biodiversity.org.au/name/afd/70204650",
"https://test-idafd.biodiversity.org.au/name/afd/70204655",
"https://test-idafd.biodiversity.org.au/name/afd/70204658",
"https://test-idafd.biodiversity.org.au/name/afd/70207034",
"https://test-idafd.biodiversity.org.au/name/afd/70208223",
"https://test-idafd.biodiversity.org.au/name/afd/70208224",
"https://test-idafd.biodiversity.org.au/name/afd/70208227",
"https://test-idafd.biodiversity.org.au/name/afd/70214460",
"https://test-idafd.biodiversity.org.au/name/afd/70214469",
"https://test-idafd.biodiversity.org.au/name/afd/70214498",
"https://test-idafd.biodiversity.org.au/name/afd/70214833",
"https://test-idafd.biodiversity.org.au/name/afd/70215602",
"https://test-idafd.biodiversity.org.au/name/afd/70216090",
"https://test-idafd.biodiversity.org.au/name/afd/70216176",
"https://test-idafd.biodiversity.org.au/name/afd/70216838",
"https://test-idafd.biodiversity.org.au/name/afd/70216839",
"https://test-idafd.biodiversity.org.au/name/afd/70216888",
"https://test-idafd.biodiversity.org.au/name/afd/70217012",
"https://test-idafd.biodiversity.org.au/name/afd/70217319",
"https://test-idafd.biodiversity.org.au/name/afd/70217838",
"https://test-idafd.biodiversity.org.au/name/afd/70220134",
"https://test-idafd.biodiversity.org.au/name/afd/70220995",
"https://test-idafd.biodiversity.org.au/name/afd/70222918",
"https://test-idafd.biodiversity.org.au/name/afd/70222925",
"https://test-idafd.biodiversity.org.au/name/afd/70223068",
"https://test-idafd.biodiversity.org.au/name/afd/70223300",
"https://test-idafd.biodiversity.org.au/name/afd/70224616",
"https://test-idafd.biodiversity.org.au/name/afd/70226370",
"https://test-idafd.biodiversity.org.au/name/afd/70227478",
"https://test-idafd.biodiversity.org.au/name/afd/70227609",
"https://test-idafd.biodiversity.org.au/name/afd/70227610",
"https://test-idafd.biodiversity.org.au/name/afd/70227612",
"https://test-idafd.biodiversity.org.au/name/afd/70227613",
"https://test-idafd.biodiversity.org.au/name/afd/70227615",
"https://test-idafd.biodiversity.org.au/name/afd/70227616",
"https://test-idafd.biodiversity.org.au/name/afd/70227617",
"https://test-idafd.biodiversity.org.au/name/afd/70227618",
"https://test-idafd.biodiversity.org.au/name/afd/70227620",
"https://test-idafd.biodiversity.org.au/name/afd/70227621",
"https://test-idafd.biodiversity.org.au/name/afd/70227622",
"https://test-idafd.biodiversity.org.au/name/afd/70227623",
"https://test-idafd.biodiversity.org.au/name/afd/70227624",
"https://test-idafd.biodiversity.org.au/name/afd/70227625",
"https://test-idafd.biodiversity.org.au/name/afd/70227905",
"https://test-idafd.biodiversity.org.au/name/afd/70228973",
"https://test-idafd.biodiversity.org.au/name/afd/70229034",
"https://test-idafd.biodiversity.org.au/name/afd/70229898",
"https://test-idafd.biodiversity.org.au/name/afd/70229899",
"https://test-idafd.biodiversity.org.au/name/afd/70229924",
"https://test-idafd.biodiversity.org.au/name/afd/70229928",
"https://test-idafd.biodiversity.org.au/name/afd/70230322",
"https://test-idafd.biodiversity.org.au/name/afd/70230324",
"https://test-idafd.biodiversity.org.au/name/afd/70231054",
"https://test-idafd.biodiversity.org.au/name/afd/70231058",
"https://test-idafd.biodiversity.org.au/name/afd/70231088",
"https://test-idafd.biodiversity.org.au/name/afd/70232375",
"https://test-idafd.biodiversity.org.au/name/afd/70232451",
"https://test-idafd.biodiversity.org.au/name/afd/70232583",
"https://test-idafd.biodiversity.org.au/name/afd/70233031",
"https://test-idafd.biodiversity.org.au/name/afd/70234546",
"https://test-idafd.biodiversity.org.au/name/afd/70235188",
"https://test-idafd.biodiversity.org.au/name/afd/70239683",
"https://test-idafd.biodiversity.org.au/name/afd/70241451",
"https://test-idafd.biodiversity.org.au/name/afd/70244527",
"https://test-idafd.biodiversity.org.au/name/afd/70244528",
"https://test-idafd.biodiversity.org.au/name/afd/70246906",
"https://test-idafd.biodiversity.org.au/name/afd/70246921",
"https://test-idafd.biodiversity.org.au/name/afd/70248191",
"https://test-idafd.biodiversity.org.au/name/afd/70248815",
"https://test-idafd.biodiversity.org.au/name/afd/70248846",
"https://test-idafd.biodiversity.org.au/name/afd/70250437",
"https://test-idafd.biodiversity.org.au/name/afd/70251591",
"https://test-idafd.biodiversity.org.au/name/afd/70251592",
"https://test-idafd.biodiversity.org.au/name/afd/70251593",
"https://test-idafd.biodiversity.org.au/name/afd/70251594",
"https://test-idafd.biodiversity.org.au/name/afd/70251595",
"https://test-idafd.biodiversity.org.au/name/afd/70251596",
"https://test-idafd.biodiversity.org.au/name/afd/70251813",
"https://test-idafd.biodiversity.org.au/name/afd/70252582",
"https://test-idafd.biodiversity.org.au/name/afd/70252585",
"https://test-idafd.biodiversity.org.au/name/afd/70252798",
"https://test-idafd.biodiversity.org.au/name/afd/70253122",
"https://test-idafd.biodiversity.org.au/name/afd/70253249",
"https://test-idafd.biodiversity.org.au/name/afd/70254508",
"https://test-idafd.biodiversity.org.au/name/afd/70256724",
"https://test-idafd.biodiversity.org.au/name/afd/70256790",
"https://test-idafd.biodiversity.org.au/name/afd/70256793",
"https://test-idafd.biodiversity.org.au/name/afd/70258140",
"https://test-idafd.biodiversity.org.au/name/afd/70258503",
"https://test-idafd.biodiversity.org.au/name/afd/70260063",
"https://test-idafd.biodiversity.org.au/name/afd/70260064",
"https://test-idafd.biodiversity.org.au/name/afd/70260071",
"https://test-idafd.biodiversity.org.au/name/afd/70261170",
"https://test-idafd.biodiversity.org.au/name/afd/70261287",
"https://test-idafd.biodiversity.org.au/name/afd/70263603",
"https://test-idafd.biodiversity.org.au/name/afd/70263610",
"https://test-idafd.biodiversity.org.au/name/afd/70264561",
"https://test-idafd.biodiversity.org.au/name/afd/70264569",
"https://test-idafd.biodiversity.org.au/name/afd/70264662",
"https://test-idafd.biodiversity.org.au/name/afd/70265111",
"https://test-idafd.biodiversity.org.au/name/afd/70265123",
"https://test-idafd.biodiversity.org.au/name/afd/70265124",
"https://test-idafd.biodiversity.org.au/name/afd/70265184",
"https://test-idafd.biodiversity.org.au/name/afd/70266881",
"https://test-idafd.biodiversity.org.au/name/afd/70268851",
"https://test-idafd.biodiversity.org.au/name/afd/70270743",
"https://test-idafd.biodiversity.org.au/name/afd/70271780",
"https://test-idafd.biodiversity.org.au/name/afd/70271857",
"https://test-idafd.biodiversity.org.au/name/afd/70271859",
"https://test-idafd.biodiversity.org.au/name/afd/70272009",
"https://test-idafd.biodiversity.org.au/name/afd/70273308",
"https://test-idafd.biodiversity.org.au/name/afd/70273330",
"https://test-idafd.biodiversity.org.au/name/afd/70273907",
"https://test-idafd.biodiversity.org.au/name/afd/70274243",
"https://test-idafd.biodiversity.org.au/name/afd/70274706",
"https://test-idafd.biodiversity.org.au/name/afd/70274719",
"https://test-idafd.biodiversity.org.au/name/afd/70277849",
"https://test-idafd.biodiversity.org.au/name/afd/70277851",
"https://test-idafd.biodiversity.org.au/name/afd/70285187",
"https://test-idafd.biodiversity.org.au/name/afd/70285194",
"https://test-idafd.biodiversity.org.au/name/afd/70285198",
"https://test-idafd.biodiversity.org.au/name/afd/70285584",
"https://test-idafd.biodiversity.org.au/name/afd/70285746",
"https://test-idafd.biodiversity.org.au/name/afd/70285750",
"https://test-idafd.biodiversity.org.au/name/afd/70286324",
"https://test-idafd.biodiversity.org.au/name/afd/70287749",
"https://test-idafd.biodiversity.org.au/name/afd/70287751",
"https://test-idafd.biodiversity.org.au/name/afd/70287816",
"https://test-idafd.biodiversity.org.au/name/afd/70287833",
"https://test-idafd.biodiversity.org.au/name/afd/70288732",
"https://test-idafd.biodiversity.org.au/name/afd/70289475",
"https://test-idafd.biodiversity.org.au/name/afd/70289476",
"https://test-idafd.biodiversity.org.au/name/afd/70289479",
"https://test-idafd.biodiversity.org.au/name/afd/70289480",
"https://test-idafd.biodiversity.org.au/name/afd/70289481",
"https://test-idafd.biodiversity.org.au/name/afd/70289486",
"https://test-idafd.biodiversity.org.au/name/afd/70290449",
"https://test-idafd.biodiversity.org.au/name/afd/70290501",
"https://test-idafd.biodiversity.org.au/name/afd/70292085",
"https://test-idafd.biodiversity.org.au/name/afd/70292086",
"https://test-idafd.biodiversity.org.au/name/afd/70292087",
"https://test-idafd.biodiversity.org.au/name/afd/70292088",
"https://test-idafd.biodiversity.org.au/name/afd/70295160",
"https://test-idafd.biodiversity.org.au/name/afd/70295161",
"https://test-idafd.biodiversity.org.au/name/afd/70296349",
"https://test-idafd.biodiversity.org.au/name/afd/70297596",
"https://test-idafd.biodiversity.org.au/name/afd/70297628",
"https://test-idafd.biodiversity.org.au/name/afd/70298163",
"https://test-idafd.biodiversity.org.au/name/afd/70298233",
"https://test-idafd.biodiversity.org.au/name/afd/70298740",
"https://test-idafd.biodiversity.org.au/name/afd/70299515",
"https://test-idafd.biodiversity.org.au/name/afd/70299516",
"https://test-idafd.biodiversity.org.au/name/afd/70299517",
"https://test-idafd.biodiversity.org.au/name/afd/70299518",
"https://test-idafd.biodiversity.org.au/name/afd/70299519",
"https://test-idafd.biodiversity.org.au/name/afd/70299520",
"https://test-idafd.biodiversity.org.au/name/afd/70299521",
"https://test-idafd.biodiversity.org.au/name/afd/70300122",
"https://test-idafd.biodiversity.org.au/name/afd/70300340",
"https://test-idafd.biodiversity.org.au/name/afd/70300451",
"https://test-idafd.biodiversity.org.au/name/afd/70300457",
"https://test-idafd.biodiversity.org.au/name/afd/70300965",
"https://test-idafd.biodiversity.org.au/name/afd/70302014",
"https://test-idafd.biodiversity.org.au/name/afd/70302016",
"https://test-idafd.biodiversity.org.au/name/afd/70302020",
"https://test-idafd.biodiversity.org.au/name/afd/70302022",
"https://test-idafd.biodiversity.org.au/name/afd/70302024",
"https://test-idafd.biodiversity.org.au/name/afd/70302102",
"https://test-idafd.biodiversity.org.au/name/afd/70302288",
"https://test-idafd.biodiversity.org.au/name/afd/70303806",
"https://test-idafd.biodiversity.org.au/name/afd/70303807",
"https://test-idafd.biodiversity.org.au/name/afd/70304409",
"https://test-idafd.biodiversity.org.au/name/afd/70305109",
"https://test-idafd.biodiversity.org.au/name/afd/70305487",
"https://test-idafd.biodiversity.org.au/name/afd/70306547",
"https://test-idafd.biodiversity.org.au/name/afd/70311479",
"https://test-idafd.biodiversity.org.au/name/afd/70311489",
"https://test-idafd.biodiversity.org.au/name/afd/70312855",
"https://test-idafd.biodiversity.org.au/name/afd/70312859",
"https://test-idafd.biodiversity.org.au/name/afd/70312865",
"https://test-idafd.biodiversity.org.au/name/afd/70312866",
"https://test-idafd.biodiversity.org.au/name/afd/70312867",
"https://test-idafd.biodiversity.org.au/name/afd/70312876",
"https://test-idafd.biodiversity.org.au/name/afd/70312878",
"https://test-idafd.biodiversity.org.au/name/afd/70312882",
"https://test-idafd.biodiversity.org.au/name/afd/70313621",
"https://test-idafd.biodiversity.org.au/name/afd/70313626",
"https://test-idafd.biodiversity.org.au/name/afd/70314474",
"https://test-idafd.biodiversity.org.au/name/afd/70315958",
"https://test-idafd.biodiversity.org.au/name/afd/70316947",
"https://test-idafd.biodiversity.org.au/name/afd/70316953",
"https://test-idafd.biodiversity.org.au/name/afd/70317053",
"https://test-idafd.biodiversity.org.au/name/afd/70317056",
"https://test-idafd.biodiversity.org.au/name/afd/70317058",
"https://test-idafd.biodiversity.org.au/name/afd/70317059",
"https://test-idafd.biodiversity.org.au/name/afd/70317254",
"https://test-idafd.biodiversity.org.au/name/afd/70322776",
"https://test-idafd.biodiversity.org.au/name/afd/70322779",
"https://test-idafd.biodiversity.org.au/name/afd/70323779",
"https://test-idafd.biodiversity.org.au/name/afd/70324406",
"https://test-idafd.biodiversity.org.au/name/afd/70324452",
"https://test-idafd.biodiversity.org.au/name/afd/70324453",
"https://test-idafd.biodiversity.org.au/name/afd/70324458",
"https://test-idafd.biodiversity.org.au/name/afd/70325324",
"https://test-idafd.biodiversity.org.au/name/afd/70325733",
"https://test-idafd.biodiversity.org.au/name/afd/70327447",
"https://test-idafd.biodiversity.org.au/name/afd/70329302",
"https://test-idafd.biodiversity.org.au/name/afd/70330861",
"https://test-idafd.biodiversity.org.au/name/afd/70331275",
"https://test-idafd.biodiversity.org.au/name/afd/70333752",
"https://test-idafd.biodiversity.org.au/name/afd/70334439",
"https://test-idafd.biodiversity.org.au/name/afd/70335813",
"https://test-idafd.biodiversity.org.au/name/afd/70335929",
"https://test-idafd.biodiversity.org.au/name/afd/70342130",
"https://test-idafd.biodiversity.org.au/name/afd/70345235",
"https://test-idafd.biodiversity.org.au/name/afd/70345238",
"https://test-idafd.biodiversity.org.au/name/afd/70345433",
"https://test-idafd.biodiversity.org.au/name/afd/70346174",
"https://test-idafd.biodiversity.org.au/name/afd/70346208",
"https://test-idafd.biodiversity.org.au/name/afd/70346234",
"https://test-idafd.biodiversity.org.au/name/afd/70346268",
"https://test-idafd.biodiversity.org.au/name/afd/70346281",
"https://test-idafd.biodiversity.org.au/name/afd/70346329",
"https://test-idafd.biodiversity.org.au/name/afd/70346335",
"https://test-idafd.biodiversity.org.au/name/afd/70347205",
"https://test-idafd.biodiversity.org.au/name/afd/70347698",
"https://test-idafd.biodiversity.org.au/name/afd/70349617",
"https://test-idafd.biodiversity.org.au/name/afd/70349619",
"https://test-idafd.biodiversity.org.au/name/afd/70349808",
"https://test-idafd.biodiversity.org.au/name/afd/70350130",
"https://test-idafd.biodiversity.org.au/name/afd/70350988",
"https://test-idafd.biodiversity.org.au/name/afd/70352881",
"https://test-idafd.biodiversity.org.au/name/afd/70352885",
"https://test-idafd.biodiversity.org.au/name/afd/70352894",
"https://test-idafd.biodiversity.org.au/name/afd/70355354",
"https://test-idafd.biodiversity.org.au/name/afd/70355444",
"https://test-idafd.biodiversity.org.au/name/afd/70356809",
"https://test-idafd.biodiversity.org.au/name/afd/70356810",
"https://test-idafd.biodiversity.org.au/name/afd/70357156",
"https://test-idafd.biodiversity.org.au/name/afd/70360348",
"https://test-idafd.biodiversity.org.au/name/afd/70360885",
"https://test-idafd.biodiversity.org.au/name/afd/70360892",
"https://test-idafd.biodiversity.org.au/name/afd/70361265",
"https://test-idafd.biodiversity.org.au/name/afd/70362757",
"https://test-idafd.biodiversity.org.au/name/afd/70365077",
"https://test-idafd.biodiversity.org.au/name/afd/70366346",
"https://test-idafd.biodiversity.org.au/name/afd/70367067",
"https://test-idafd.biodiversity.org.au/name/afd/70368015",
"https://test-idafd.biodiversity.org.au/name/afd/70368163",
"https://test-idafd.biodiversity.org.au/name/afd/70368935",
"https://test-idafd.biodiversity.org.au/name/afd/70373790",
"https://test-idafd.biodiversity.org.au/name/afd/70376631",
"https://test-idafd.biodiversity.org.au/name/afd/70377818",
"https://test-idafd.biodiversity.org.au/name/afd/70378180",
"https://test-idafd.biodiversity.org.au/name/afd/70378181",
"https://test-idafd.biodiversity.org.au/name/afd/70378420",
"https://test-idafd.biodiversity.org.au/name/afd/70379814",
"https://test-idafd.biodiversity.org.au/name/afd/70380413",
"https://test-idafd.biodiversity.org.au/name/afd/70381780",
"https://test-idafd.biodiversity.org.au/name/afd/70381781",
"https://test-idafd.biodiversity.org.au/name/afd/70381782",
"https://test-idafd.biodiversity.org.au/name/afd/70382107",
"https://test-idafd.biodiversity.org.au/name/afd/70382989",
"https://test-idafd.biodiversity.org.au/name/afd/70383316",
"https://test-idafd.biodiversity.org.au/name/afd/70384107",
"https://test-idafd.biodiversity.org.au/name/afd/70384108",
"https://test-idafd.biodiversity.org.au/name/afd/70384110",
"https://test-idafd.biodiversity.org.au/name/afd/70384115",
"https://test-idafd.biodiversity.org.au/name/afd/70385776",
"https://test-idafd.biodiversity.org.au/name/afd/70386439",
"https://test-idafd.biodiversity.org.au/name/afd/70386757",
"https://test-idafd.biodiversity.org.au/name/afd/70386758",
"https://test-idafd.biodiversity.org.au/name/afd/70387748",
"https://test-idafd.biodiversity.org.au/name/afd/70387753",
"https://test-idafd.biodiversity.org.au/name/afd/70387758",
"https://test-idafd.biodiversity.org.au/name/afd/70387777",
"https://test-idafd.biodiversity.org.au/name/afd/70387785",
"https://test-idafd.biodiversity.org.au/name/afd/70387786",
"https://test-idafd.biodiversity.org.au/name/afd/70387790",
"https://test-idafd.biodiversity.org.au/name/afd/70387796",
"https://test-idafd.biodiversity.org.au/name/afd/70387802",
"https://test-idafd.biodiversity.org.au/name/afd/70387814",
"https://test-idafd.biodiversity.org.au/name/afd/70387825",
"https://test-idafd.biodiversity.org.au/name/afd/70387827",
"https://test-idafd.biodiversity.org.au/name/afd/70387830",
"https://test-idafd.biodiversity.org.au/name/afd/70387831",
"https://test-idafd.biodiversity.org.au/name/afd/70387832",
"https://test-idafd.biodiversity.org.au/name/afd/70387834",
"https://test-idafd.biodiversity.org.au/name/afd/70387849",
"https://test-idafd.biodiversity.org.au/name/afd/70387854",
"https://test-idafd.biodiversity.org.au/name/afd/70388323",
"https://test-idafd.biodiversity.org.au/name/afd/70388364",
"https://test-idafd.biodiversity.org.au/name/afd/70388365",
"https://test-idafd.biodiversity.org.au/name/afd/70389001",
"https://test-idafd.biodiversity.org.au/name/afd/70389257",
"https://test-idafd.biodiversity.org.au/name/afd/70389258",
"https://test-idafd.biodiversity.org.au/name/afd/70389259",
"https://test-idafd.biodiversity.org.au/name/afd/70389524",
"https://test-idafd.biodiversity.org.au/name/afd/70389533",
"https://test-idafd.biodiversity.org.au/name/afd/70393015",
"https://test-idafd.biodiversity.org.au/name/afd/70393185",
"https://test-idafd.biodiversity.org.au/name/afd/70393188",
"https://test-idafd.biodiversity.org.au/name/afd/70393533",
"https://test-idafd.biodiversity.org.au/name/afd/70393537",
"https://test-idafd.biodiversity.org.au/name/afd/70395745",
"https://test-idafd.biodiversity.org.au/name/afd/70397406",
"https://test-idafd.biodiversity.org.au/name/afd/70397409",
"https://test-idafd.biodiversity.org.au/name/afd/70401805",
"https://test-idafd.biodiversity.org.au/name/afd/70404092",
"https://test-idafd.biodiversity.org.au/name/afd/70404580",
"https://test-idafd.biodiversity.org.au/name/afd/70404975",
"https://test-idafd.biodiversity.org.au/name/afd/70404977",
"https://test-idafd.biodiversity.org.au/name/afd/70404979",
"https://test-idafd.biodiversity.org.au/name/afd/70404980",
"https://test-idafd.biodiversity.org.au/name/afd/70404981",
"https://test-idafd.biodiversity.org.au/name/afd/70404982",
"https://test-idafd.biodiversity.org.au/name/afd/70404984",
"https://test-idafd.biodiversity.org.au/name/afd/70404985",
"https://test-idafd.biodiversity.org.au/name/afd/70406086",
"https://test-idafd.biodiversity.org.au/name/afd/70407584",
"https://test-idafd.biodiversity.org.au/name/afd/70408085",
"https://test-idafd.biodiversity.org.au/name/afd/70408100",
"https://test-idafd.biodiversity.org.au/name/afd/70408131",
"https://test-idafd.biodiversity.org.au/name/afd/70408134",
"https://test-idafd.biodiversity.org.au/name/afd/70408595",
"https://test-idafd.biodiversity.org.au/name/afd/70408935",
"https://test-idafd.biodiversity.org.au/name/afd/70408956",
"https://test-idafd.biodiversity.org.au/name/afd/70409735",
"https://test-idafd.biodiversity.org.au/name/afd/70409767",
"https://test-idafd.biodiversity.org.au/name/afd/70411788",
"https://test-idafd.biodiversity.org.au/name/afd/70411799",
"https://test-idafd.biodiversity.org.au/name/afd/70412646",
"https://test-idafd.biodiversity.org.au/name/afd/70413908",
"https://test-idafd.biodiversity.org.au/name/afd/70417610",
"https://test-idafd.biodiversity.org.au/name/afd/70418247",
"https://test-idafd.biodiversity.org.au/name/afd/70418975",
"https://test-idafd.biodiversity.org.au/name/afd/70421266",
"https://test-idafd.biodiversity.org.au/name/afd/70421274",
"https://test-idafd.biodiversity.org.au/name/afd/70422141",
"https://test-idafd.biodiversity.org.au/name/afd/70422144",
"https://test-idafd.biodiversity.org.au/name/afd/70422212",
"https://test-idafd.biodiversity.org.au/name/afd/70422292",
"https://test-idafd.biodiversity.org.au/name/afd/70422369",
"https://test-idafd.biodiversity.org.au/name/afd/70422374",
"https://test-idafd.biodiversity.org.au/name/afd/70422561",
"https://test-idafd.biodiversity.org.au/name/afd/70423636",
"https://test-idafd.biodiversity.org.au/name/afd/70424033",
"https://test-idafd.biodiversity.org.au/name/afd/70424035",
"https://test-idafd.biodiversity.org.au/name/afd/70424571",
"https://test-idafd.biodiversity.org.au/name/afd/70425463",
"https://test-idafd.biodiversity.org.au/name/afd/70427304",
"https://test-idafd.biodiversity.org.au/name/afd/70427979",
"https://test-idafd.biodiversity.org.au/name/afd/70427980",
"https://test-idafd.biodiversity.org.au/name/afd/70428447",
"https://test-idafd.biodiversity.org.au/name/afd/70430090",
"https://test-idafd.biodiversity.org.au/name/afd/70430093",
"https://test-idafd.biodiversity.org.au/name/afd/70430554",
"https://test-idafd.biodiversity.org.au/name/afd/70432923",
"https://test-idafd.biodiversity.org.au/name/afd/70433252",
"https://test-idafd.biodiversity.org.au/name/afd/70433493",
"https://test-idafd.biodiversity.org.au/name/afd/70433509",
"https://test-idafd.biodiversity.org.au/name/afd/70433515",
"https://test-idafd.biodiversity.org.au/name/afd/70433519",
"https://test-idafd.biodiversity.org.au/name/afd/70434370",
"https://test-idafd.biodiversity.org.au/name/afd/70437941",
"https://test-idafd.biodiversity.org.au/name/afd/70438048",
"https://test-idafd.biodiversity.org.au/name/afd/70440034",
"https://test-idafd.biodiversity.org.au/name/afd/70440498",
"https://test-idafd.biodiversity.org.au/name/afd/70444906",
"https://test-idafd.biodiversity.org.au/name/afd/70445431",
"https://test-idafd.biodiversity.org.au/name/afd/70448887",
"https://test-idafd.biodiversity.org.au/name/afd/70449358",
"https://test-idafd.biodiversity.org.au/name/afd/70449359",
"https://test-idafd.biodiversity.org.au/name/afd/70449361",
"https://test-idafd.biodiversity.org.au/name/afd/70449558",
"https://test-idafd.biodiversity.org.au/name/afd/70449562",
"https://test-idafd.biodiversity.org.au/name/afd/70451548",
"https://test-idafd.biodiversity.org.au/name/afd/70451556",
"https://test-idafd.biodiversity.org.au/name/afd/70456220",
"https://test-idafd.biodiversity.org.au/name/afd/70456222",
"https://test-idafd.biodiversity.org.au/name/afd/70456224",
"https://test-idafd.biodiversity.org.au/name/afd/70456226",
"https://test-idafd.biodiversity.org.au/name/afd/70456227",
"https://test-idafd.biodiversity.org.au/name/afd/70456228",
"https://test-idafd.biodiversity.org.au/name/afd/70456237",
"https://test-idafd.biodiversity.org.au/name/afd/70457940",
"https://test-idafd.biodiversity.org.au/name/afd/70463103",
"https://test-idafd.biodiversity.org.au/name/afd/70464994",
"https://test-idafd.biodiversity.org.au/name/afd/70465807",
"https://test-idafd.biodiversity.org.au/name/afd/70465833",
"https://test-idafd.biodiversity.org.au/name/afd/70465843",
"https://test-idafd.biodiversity.org.au/name/afd/70466502",
"https://test-idafd.biodiversity.org.au/name/afd/70467686",
"https://test-idafd.biodiversity.org.au/name/afd/70467687",
"https://test-idafd.biodiversity.org.au/name/afd/70467688",
"https://test-idafd.biodiversity.org.au/name/afd/70468229",
"https://test-idafd.biodiversity.org.au/name/afd/70496191",
] |
from collections import defaultdict
import xmltodict

# Collect, per user, the set of badge names awarded between begin_year and
# end_year (inclusive), then dump them as one CSV line per user.
begin_year = 2008
end_year = 2010

user_badges = defaultdict(set)
with open('Badges.xml') as xml_file:
    for line in xml_file:
        # Only <row .../> elements carry badge records; skip everything else.
        if not line.strip().startswith('<row'):
            continue
        attrs = xmltodict.parse(line)['row']
        # The @Date attribute is ISO-formatted; the year is the first field.
        award_year = int(attrs['@Date'].split('-')[0])
        if begin_year <= award_year <= end_year:
            user_badges[attrs['@UserId']].add(attrs['@Name'])

with open('badges.csv', 'w') as output:
    for user, badge_names in user_badges.items():
        output.write(user + ',' + ','.join(badge_names) + '\n')
|
import os
import itertools
import logging as L
import numpy as np
from perf_compare import execute
L.basicConfig(format='%(levelname)s:%(message)s', level=L.DEBUG)


class Autotune():
    """Grid-search autotuner for templated CUDA sources.

    For every combination of placeholder values, regenerate the source files
    from their templates, recompile via ./make.sh, time the benchmark command,
    and append the fastest configuration to result.log.
    """

    def __init__(self, template_list, key_values, cmd):
        """
        template_list: ['GroupCOOSparseMatrix.h.t', 'cnnBench2.cu.t']
        key_values: {'$COLUMN_PER_GROUP$': [2, 4, 8],
                     '$BS$': [32, 64]}
        cmd: shell command that runs the benchmark (timed by execute()).
        """
        self.template_list = template_list
        self.key_values = key_values
        self.cmd = cmd

    def _compile(self):
        # Rebuild the generated sources; make.sh is expected in the CWD.
        L.info('Compiling ...')
        os.system('./make.sh')

    def _gen_unrolling_src(self, cpg):
        """Build the unrolled-accumulator code fragments for `cpg` columns
        per group, keyed by their template placeholder."""
        template = {}
        template['$c_definition$'] = ''.join(
            'float c%d =0.0; ' % i for i in range(cpg))
        c_unroll_write = []
        for i in range(cpg):
            if i == 0:
                c_unroll_write.append('if (index == 0) c0 += a*b; ')
            else:
                c_unroll_write.append('else if (index == %d) c%d += a*b; ' % (i, i))
        template['$c_unroll_write$'] = ''.join(c_unroll_write)
        template['$c_unroll_write_to_C$'] = ''.join(
            'if (Cj0+%d < wB) C[Ci * wB + Cj0 + %d] = c%d; ' % (i, i, i)
            for i in range(cpg))
        return template

    def _replace_src(self, kv):
        """Instantiate every template with the values in `kv` (placeholder ->
        value), writing each result next to its template minus the '.t'.
        Note: `kv` is extended in place with the unrolling fragments, matching
        the original behavior."""
        L.info('Generate source codes with configured values ...')
        for template in self.template_list:
            with open(template, 'r') as f:
                content = f.read()
            cpg = int(kv['$COLUMN_PER_GROUP$'])
            kv.update(self._gen_unrolling_src(cpg))
            for k in kv:
                content = content.replace(k, str(kv[k]))
            new_filename = template[0:-2]  # strip the trailing '.t'
            with open(new_filename, 'w') as newf:
                newf.write(content)

    def run(self):
        """Sweep every value combination and log the fastest one."""
        # list() so the keys are indexable under Python 3, where dict.keys()
        # returns a non-subscriptable view (the original `keys[i]` raised
        # TypeError there).
        keys = list(self.key_values.keys())
        all_values = [self.key_values[k] for k in keys]
        exps = []
        for combo in itertools.product(*all_values):
            exps.append({keys[i]: v for i, v in enumerate(combo)})
        results = []
        for ed in exps:
            self._replace_src(ed)
            self._compile()
            try:
                ms = execute(self.cmd)
            except Exception:
                # Benchmark crashed: penalize with a huge time instead of
                # aborting the whole sweep. (Was a bare `except:`, which also
                # swallowed KeyboardInterrupt/SystemExit.)
                ms = 10000000.0
            results.append(ms)
        # Renamed from `min`, which shadowed the builtin.
        min_ms = np.min(np.array(results))
        minidx = np.argmin(np.array(results))
        L.info('exps: {}'.format(exps))
        L.info('results: {}'.format(results))
        with open('result.log', 'a+') as f:
            f.write('%s\n%s: %f\n' % (self.cmd, exps[minidx], min_ms))
if __name__ == '__main__':
    # Templates to instantiate and the (single, fixed) sweep configuration.
    template_list = ['constants.h.t', 'group_spgemm_kernels.cu.t']
    key_values = {'$COLUMN_PER_GROUP$': [4],
                  '$BS$': [128]}
    # bc.conf holds one benchmark command per line.
    with open('bc.conf', 'r') as f:
        ls = f.readlines()
    for l in ls:
        cmd = l[0:-1]  # drop the trailing newline
        # print() is valid on both Python 2 and 3; the bare `print cmd`
        # statement was a SyntaxError under Python 3.
        print(cmd)
        at = Autotune(template_list, key_values, cmd)
        at.run()
|
# Read four exam grades from the user and print their arithmetic mean.
notas = [
    input('Digite a primeira nota:'),
    input('Digite a segunda nota:'),
    input('Digite a terceira nota:'),
    input('Digite a quarta nota:'),
]
media = sum(int(nota) for nota in notas) / 4
print('A média aritmética é ', media)
import time
from CreeDictionary.utils.profiling import timed


def test_timed_decorator(capsys):
    """The @timed decorator should print the formatted timing message
    (function name + elapsed seconds) once the wrapped call returns."""
    @timed(msg="{func_name} finished in {second:.1f} seconds")
    def quick_nap():
        time.sleep(0.1)

    quick_nap()
    captured = capsys.readouterr()
    assert captured.out == "quick_nap finished in 0.1 seconds\n"
|
# Ask for a product price and show it after a 5% discount.
preço = float(input('Qual é o preço do produto RS'))
valor_desconto = preço * 5 / 100
preço_final = preço - valor_desconto
print('O produto que custava {:.2f} na promoção com desconto de 5% vai custar {:.2f} RS'.format(preço, preço_final))
# Executable to append voter-codes to voter records

# Import external modules
import argparse
import random
# Import app modules
import security

# Handle command-line arguments.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
    '--file',
    help='Path to file containing voter records as tab-separated-values',
    required=True)
parsed = arg_parser.parse_args()

with open(parsed.file) as inStream:
    # Echo the header row with an extra 'code' column appended.
    header = inStream.readline().strip('\n\r').split('\t')
    print('\t'.join(header + ['code']))
    # Append a fresh voter code to every remaining record.
    for record in inStream:
        record = record.strip('\n\r')
        print(record + '\t' + security.newVoterCode())
from irods.session import iRODSSession
import ssl
import os


class TuRODSSession(iRODSSession):
    """iRODS session preconfigured from IRODS_* environment variables,
    with the SSL/encryption settings required by the SARA/TUD endpoint."""

    def __init__(self, client_user=None):
        env = os.environ.get
        # Server-auth context pinned to the SARA/TUD certificate bundle.
        tls_context = ssl.create_default_context(
            purpose=ssl.Purpose.SERVER_AUTH,
            cafile="/etc/irods/crt-sara-tud.crt",
            capath=None,
            cadata=None)
        connection_kwargs = dict(
            host=env('IRODS_HOST', ''),
            port=env('IRODS_PORT', ''),
            user=env('IRODS_USER', ''),
            password=env('IRODS_PWD', ''),
            zone=env('IRODS_ZONE', ''),
            client_user=client_user,
            client_server_negotiation='request_server_negotiation',
            client_server_policy='CS_NEG_REQUIRE',
            encryption_algorithm='AES-256-CBC',
            encryption_key_size=32,
            encryption_num_hash_rounds=16,
            encryption_salt_size=8,
            ssl_context=tls_context)
        super(TuRODSSession, self).__init__(**connection_kwargs)
import numpy as np
from nntoolbox.losses import PinballLoss
import torch


class TestPinball:
    def test_pinball(self):
        """Sanity-check PinballLoss against the reference value adopted from
        https://www.tensorflow.org/addons/api_docs/python/tfa/losses/PinballLoss
        """
        predictions = torch.from_numpy(np.array([1., 1., 1., 0.]))
        targets = torch.from_numpy(np.array([0., 0., 1., 1.]))
        criterion = PinballLoss(tau=0.1)
        expected = 0.475
        assert abs(criterion(predictions, targets).item() - expected) < 1e-3
"""Tests for causal inference methods."""
import numpy as np
import pytest
import whynot as wn
from whynot.algorithms import ols, propensity_score_matching, propensity_weighted_ols
def generate_dataset(num_samples, num_features, true_ate=1.0, seed=1234):
    """Generate a synthetic observational dataset with confounding.

    Returns (covariates, treatment, outcome): an (n, d) covariate matrix,
    a binary treatment vector, and a continuous outcome whose average
    treatment effect equals true_ate (the covariates are zero-mean).

    NOTE: the np.random call order is kept identical to the original so a
    fixed seed reproduces the same dataset.
    """
    np.random.seed(seed)
    covariates = np.concatenate(
        [0.25 * np.random.randn(num_samples, 1) for _ in range(num_features)],
        axis=1,
    )
    covariate_sum = np.sum(covariates, axis=1)
    # Logistic treatment-assignment probability driven by the covariates.
    logit = covariate_sum + np.random.randn(num_samples)
    treat_prob = np.exp(logit) / (1.0 + np.exp(logit))
    treatment = np.random.binomial(1, treat_prob)
    # Outcome is confounded by treatment; ATE is true_ate by construction.
    outcome = (covariate_sum + true_ate) * treatment + np.random.randn(num_samples)
    return covariates, treatment.ravel(), outcome.ravel()
@pytest.mark.parametrize(
    "estimator",
    [ols, propensity_score_matching, propensity_weighted_ols],
    ids=["ols", "propensity_score_matching", "propensity_weighted_ols"],
)
def test_estimator(estimator):
    """Verify each parametrized estimator recovers the true treatment effect.

    The ATE point estimate must fall inside its own confidence interval, the
    interval must cover the true ATE, and the estimate must be close to it.
    """
    num_samples = 5000
    num_features = 3
    true_ate = 8
    covariates, treatment, outcome = generate_dataset(
        num_samples=num_samples, num_features=num_features, true_ate=true_ate
    )
    # Bug fix: use the parametrized `estimator` rather than hard-coding
    # propensity_score_matching — previously all three parametrized cases
    # silently exercised the same estimator.
    result = estimator.estimate_treatment_effect(covariates, treatment, outcome)
    assert result.ci[0] <= result.ate <= result.ci[1]
    assert result.ci[0] <= true_ate <= result.ci[1]
    assert true_ate - 0.1 <= result.ate <= true_ate + 0.1
def test_causal_suite():
    """Integration test: causal_suite should report results for every
    expected estimation method."""
    covariates, treatment, outcomes = generate_dataset(
        num_samples=200, num_features=15
    )
    suite_results = wn.causal_suite(covariates, treatment, outcomes)
    for method in ("ols", "propensity_score_matching", "propensity_weighted_ols"):
        assert method in suite_results
|
import pandas as pd
import numpy as np
from PIL import Image,ImageDraw,ImageFont
from pandas.core.frame import DataFrame
import requests
import time
from requests.api import options
import streamlit as st
from PIL import Image
import requests
from PIL import Image
import requests
import base64
# Load the two product catalogues: data1.csv holds washing machines,
# data2.csv dishwashers.  The first column of each CSV is a leftover
# pandas index from the scrape, so it is dropped.
data = pd.read_csv("data1.csv")
data2 = data.drop(data.columns[0],axis=1)
data_b = pd.read_csv("data2.csv")
data2_b = data_b.drop(data_b.columns[0],axis=1)
# Stock product photos (washing machine, dishwasher, fridge) fetched over
# HTTP at startup — NOTE(review): this blocks app start on network access.
url_cam = "https://www.bekokibris.com/wp-content/uploads/2020/04/BK9102EYS1.jpg"
url_bul = "https://statics.vestel.com.tr/productimages/20264045_r1_900_1254.jpg"
url_buzdo = "https://cdn.akakce.com/samsung/samsung-rb50rs334sa-a-kombi-no-frost-x.jpg"
im = Image.open(requests.get(url_cam, stream=True).raw)
#im = im.resize((500,500))
im2 = Image.open(requests.get(url_bul, stream=True).raw)
#im2 = im2.resize((500,500))
im3 = Image.open(requests.get(url_buzdo, stream=True).raw)
#im3 = im3.resize((500,500))
# Streamlit page chrome: title, icon, and CSS tweaks injected via markdown.
st.set_page_config(page_title='Customer Recommendation Project', page_icon=':house_with_garden')
# Hide Streamlit's default hamburger menu and footer.
st.markdown(""" <style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style> """, unsafe_allow_html=True)
# Remove all page padding so the content fits on a single page.
padding = 0
st.markdown(f""" <style>
.reportview-container .main .block-container{{
padding-top: {padding}rem;
padding-right: {padding}rem;
padding-left: {padding}rem;
padding-bottom: {padding}rem;
}} </style> """, unsafe_allow_html=True)
# Background image for both the main view and the sidebar.
st.markdown(
"""
<style>
.reportview-container {
background: url("https://www.birbeymetal.com.tr/wp-content/uploads/2019/02/Savin-NY-Website-Background-Web.jpg")
}
.sidebar .sidebar-content {
background: url("https://www.birbeymetal.com.tr/wp-content/uploads/2019/02/Savin-NY-Website-Background-Web.jpg")
}
</style>
""",
unsafe_allow_html=True
)
# Sidebar appliance picker ("What are you looking for?"): washing machine
# or dishwasher; a blank first option means "nothing selected yet".
options_m = [' ','Çamaşır Makinesi', 'Bulaşık Makinesi']
machine = st.sidebar.selectbox('Ne arıyorsunuz? 👉', options=options_m)
# Language toggle rendered in the right-most (widest) column.
dil = ["TR", "EN"]
col1, col2, col3, col4, col5,col6,col7,col8,col9,col10,col11,col12 = st.columns([1,1,1,1,1,1,1,1,1,1,1,5])
with col12:
    dil_secenek = st.radio("Language",dil)
# Lay the radio buttons out horizontally.
st.write('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)
dataf = data2
dataf_b = data2_b

##ÇAMAŞIR (washing machines)
# Score every feature of each washing machine on a small ordinal scale
# (3 = best tier, 2 = middle, 1 = rest; binary features: 2 = present,
# 1 = absent), then total the per-row scores into a "puan" column.
brand1 = ["Bosch", "Siemens","Samsung","Electrolux"]
brand2 = ["Arçelik", "Vestel","LG","Profilo","Beko"]
data4 = data2.copy()

def _score_brand_cm(brand):
    # Premium brands -> 3, mid-tier -> 2, anything else -> 1.
    if brand in brand1:
        return 3
    if brand in brand2:
        return 2
    return 1

def _score_tiered_cm(value, high, mid):
    # Three-level ordinal mapping: `high` -> 3, `mid` -> 2, otherwise 1.
    if value == high:
        return 3
    if value == mid:
        return 2
    return 1

def _score_flag_cm(value):
    # Feature present ("VAR") -> 2, otherwise 1.
    return 2 if value == "VAR" else 1

# Vectorized replacements for the original per-row loops (which also
# triggered pandas chained-assignment warnings).
data4['brand'] = data2['brand'].apply(_score_brand_cm)
data4['capacity'] = data2['capacity'].apply(_score_tiered_cm, args=("Yüksek Kapasite", "Orta Kapasite"))
data4['cycle'] = data2['cycle'].apply(_score_tiered_cm, args=("Yüksek Devir", "Orta Devir"))
data4['size'] = data2['size'].apply(_score_tiered_cm, args=("Standart üstü", "Standart Boyut"))
data4['energy_usage'] = data2['energy_usage'].apply(_score_tiered_cm, args=("Çok önemli", "Önemli"))
# Binary features.  NOTE: the original scored 'vapor' twice with two
# identical loops; the duplicate was dropped (scoring is idempotent, so
# behavior is unchanged).
for _col in ('blanket', 'wifi', 'load_sensor', 'delay', 'control_panel',
             'vapor', 'anti_alergy', 'baby_p', 'sensitive_p', 'child_lock'):
    data4[_col] = data2[_col].apply(_score_flag_cm)

# Total score per machine: sum of every scored feature column.  (The
# original loop's `if j != "child_lock"` branch did exactly the same thing
# in both arms, so it was removed.)
_feature_cols_cm = [c for c in data4.columns if c not in ("full_name", "price", "image")]
data4["puan"] = data4[_feature_cols_cm].sum(axis=1)
puan = 0
dataf["puan"] = data4["puan"]

len_lst1 = []
len_lst2 = []
##BULAŞIK (dishwashers)
# Same ordinal scoring scheme as the washing machines, applied to the
# dishwasher catalogue, ending in an integer "puan" total per row.
brand1 = ["Bosch", "Siemens","Samsung","Electrolux"]
brand2 = ["Arçelik", "Vestel","LG","Profilo","Beko"]
data4_b = data2_b.copy()

def _score_brand_bm(brand):
    # Premium brands -> 3, mid-tier -> 2, anything else -> 1.
    if brand in brand1:
        return 3
    if brand in brand2:
        return 2
    return 1

def _score_tiered_bm(value, high, mid):
    # Three-level ordinal mapping: `high` -> 3, `mid` -> 2, otherwise 1.
    if value == high:
        return 3
    if value == mid:
        return 2
    return 1

def _score_flag_bm(value):
    # Feature present ("VAR") -> 2, otherwise 1.
    return 2 if value == "VAR" else 1

# Vectorized replacements for the original per-row loops.
data4_b['brand'] = data2_b['brand'].apply(_score_brand_bm)
data4_b['capacity'] = data2_b['capacity'].apply(_score_tiered_bm, args=("Yüksek Kapasite", "Orta Kapasite"))
data4_b['type_'] = data2_b['type_'].apply(_score_tiered_bm, args=("Ankastre", "Solo"))
data4_b['size'] = data2_b['size'].apply(_score_tiered_bm, args=("Standart üstü", "Standart Boyut"))
data4_b['energy_usage'] = data2_b['energy_usage'].apply(_score_tiered_bm, args=("Çok önemli", "Önemli"))
data4_b['number_of_program'] = data2_b['number_of_program'].apply(_score_tiered_bm, args=("9+", "5-8 Program"))
data4_b['water_consumption'] = data2_b['water_consumption'].apply(_score_tiered_bm, args=("Düşük Tüketim", "Orta Tüketim"))
data4_b['wifi'] = data2_b['wifi'].apply(_score_flag_bm)
data4_b['control_panel'] = data2_b['control_panel'].apply(_score_flag_bm)
# Drawer-style third rack ("Çekmeceli") counts as the premium option.
data4_b['box'] = data2_b['box'].apply(lambda v: 2 if v == "Çekmeceli" else 1)

# Total score per dishwasher.  (The original loop's
# `if j != "water_consumption"` branch was identical in both arms and has
# been removed.)
_feature_cols_bm = [c for c in data4_b.columns if c not in ("full_name", "price", "image")]
data4_b["puan"] = data4_b[_feature_cols_bm].sum(axis=1)
puan = 0
dataf_b["puan"] = data4_b["puan"]
dataf_b["puan"] = dataf_b.puan.astype(int)
dataf_b["price"] = dataf_b.price.astype(float)
if dil_secenek == "TR":
if machine ==" ":
col1, col2, col3, col4, col5,col6,col7,col8,col9,col10,col11,col12 = st.columns([1,1,1,1,1,1,1,1,1,1,1,5])
with col12:
if dil_secenek == "TR":
button = st.button("Beğen 👍")
if button:
st.write("Teşekkür ederiz 💗")
file1 = open("counter.txt","r")
count = file1.read()
count_int = count.replace("'","")
count_int = int(count_int) + 1
with open('counter.txt', 'w') as f:
f.write(str(count_int))
st.title("Proje hakkında")
st.markdown("<b><i>Tüketici Ürün Rehberi </i></b>, beyaz eşya ihtiyacı bulunan tüketicilerin, kendileri için en iyi ürünü seçmesine yardım etmeyi amaçlayan bir Python projesidir.", unsafe_allow_html=True)
st.markdown("İnsanlar, etkileşimde bulundukları e-ticaret web sitelerinin, kim olduklarını ve neyle ilgilendiklerini hatırlamalarını ve önceki etkinliklerine dayalı olarak yeni içerik ve ürünler ile kendi ihtiyaçlarına uygun önerilerde bulunulmasını bekler. Bu talepleri karşılayamayan herhangi bir uygulama veya web sitesi, kullanıcılarının hızla azaldığını görecektir.")
st.markdown("Tüketici Ürün Rehberi, belirli bir kullanıcının ihtiyaçlarına göre satın almak istediği eşyalar için öneriler oluşturmak amacı ile tasarlanmış bir yazılım aracıdır.")
st.markdown(" ")
st.title("Proje Geliştiricileri")
st.markdown(" ")
col1, col2, col3, col4, col5,col6,col7 = st.columns([1,1,1,1,1,1,1])
with col1:
st.markdown("<b><i>Mert Türkyılmaz</i></b>", unsafe_allow_html=True)
st.markdown("[](https://www.linkedin.com/in/mertturkyilmaz/)")
st.markdown("[](https://github.com/mertturkyilmaz)")
with col4:
st.markdown("<b><i>Sarper Yılmaz</i></b>", unsafe_allow_html=True)
st.markdown("[](https://www.linkedin.com/in/sarperyilmaz/)")
st.markdown("[](https://github.com/sarperyilmaz)")
with col7:
st.markdown("<b><i>Doğukan Doğru</i></b>", unsafe_allow_html=True)
st.markdown("[](https://www.linkedin.com/in/do%C4%9Fukando%C4%9Fru/)")
st.markdown("[](https://github.com/dogudogru)")
elif machine =="Çamaşır Makinesi":
with st.sidebar:
capacity_options = [' ','Düşük Kapasite','Orta Kapasite', 'Yüksek Kapasite']
capacity_help = '''Düşük kapasite: 0-6 KG , Orta Kapasite: 7-10 KG, Yüksek Kapasite: 10+ KG'''.strip()
capacity = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin kapasitesi ne kadar olmalı?',options=capacity_options,help=capacity_help)
cycle_options = [' ',"Düşük Devir","Orta Devir","Yüksek Devir"]
cycle_help = '''Düşük devir: 1000'e kadar,
Orta devir: 1000 - 1200,
Yüksek Kapasite: 1200+'''.strip(",")
cycle = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin devir sayısı ne olmalı?',options=cycle_options,help=cycle_help)
size_options = [' ',"Küçük boyut","Standart Boyut","Standard üstü"]
size = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin büyüklüğü ne kadar olmalı?',options=size_options)
energy_usage_options = [' ','Çok önemli', 'Önemli', 'Az önemli', 'Önemsiz']
energy_usage_help = '''Çok Önemli: A+++ A++, Önemli : A+ A, Az Önemli: B C, Önemsiz: D E F G)'''.strip()
energy_usage = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin tükettiği enerji miktarı sizin için önemli mi?',options=energy_usage_options,help=energy_usage_help)
soru_list = [capacity,cycle,size,energy_usage]
soru_list1 = ["capacity","cycle","size","energy_usage"]
soru_list2 = [capacity,cycle,size,energy_usage]
if all([i == " " for i in soru_list2]):
st.title('Bakalım sizin için nelerimiz var?')
col1, col2, col3, col4, col5 = st.columns([1,1,1,1,1])
data3 = data2.sample(frac=1).drop_duplicates(['brand']).sample(10).reset_index()
im1 = Image.open(requests.get(data3.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(data3.image[1], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(data3.image[2], stream=True).raw).resize((100,150))
im4 = Image.open(requests.get(data3.image[3], stream=True).raw).resize((100,150))
im5 = Image.open(requests.get(data3.image[4], stream=True).raw).resize((100,150))
im6 = Image.open(requests.get(data3.image[5], stream=True).raw).resize((100,150))
im7 = Image.open(requests.get(data3.image[6], stream=True).raw).resize((100,150))
im8 = Image.open(requests.get(data3.image[7], stream=True).raw).resize((100,150))
im9 = Image.open(requests.get(data3.image[8], stream=True).raw).resize((100,150))
im10 = Image.open(requests.get(data3.image[9], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.markdown(data3.brand[0])
b6 = st.image(im6, width=120)
st.markdown(data3.brand[5])
with col2:
b2 = st.image(im2, width=120)
st.markdown(data3.brand[1])
b7 = st.image(im7, width=120)
st.markdown(data3.brand[6])
with col3:
b3 = st.image(im3, width=120)
st.markdown(data3.brand[2])
b8 = st.image(im8, width=120)
st.markdown(data3.brand[7])
with col4:
b4 = st.image(im4, width=120)
st.markdown(data3.brand[3])
b9 = st.image(im9, width=120)
st.markdown(data3.brand[8])
with col5:
b5 = st.image(im5, width=120)
st.markdown(data3.brand[4])
b10 = st.image(im10, width=120)
st.markdown(data3.brand[9])
elif any([i != " " for i in soru_list2]):
for m in soru_list2:
if m == " ":
pass
else:
m_index = soru_list2.index(m)
len_lst1.append(soru_list1[m_index])
len_lst2.append(m)
for k in range(0,len(len_lst2)):
dataf = dataf[dataf[len_lst1[k]] == len_lst2[k]]
if len(dataf) == 0:
st.title("Seçilen Kriterlere Uygun Bir Ürün Bulunamadı")
elif len(dataf) == 1:
st.title("Seçilen Kriterlere Uygun Bir Ürün Bulundu")
dataf = dataf.reset_index()
im1 = Image.open(requests.get(dataf.image[0], stream=True).raw).resize((100,150))
b1 = st.image(im1, width=120)
st.title(dataf.brand[0])
st.title("Fiyat")
st.title(dataf.price[0])
elif len(dataf) == 2:
st.title("Seçilen Kriterlere Uygun İki Ürün Bulundu")
col1, col2 = st.columns([1,1])
dataf = dataf.reset_index()
im1 = Image.open(requests.get(dataf.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(dataf.image[1], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.title(dataf.brand[0])
st.title("Fiyat")
st.title(dataf.price[0])
with col2:
b2 = st.image(im2, width=120)
st.title(dataf.brand[1])
st.title("Fiyat")
st.title(dataf.price[1])
elif len(dataf) == 3:
st.title("Seçilen Kriterlere Uygun Üç Ürün Bulundu")
col1, col2, col3 = st.columns([1,1,1])
dataf = dataf.reset_index()
im1 = Image.open(requests.get(dataf.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(dataf.image[1], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(dataf.image[2], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.title(dataf.brand[0])
st.title("Fiyat")
st.title(dataf.price[0])
with col2:
b2 = st.image(im2, width=120)
st.title(dataf.brand[1])
st.title("Fiyat")
st.title(dataf.price[1])
with col3:
b3 = st.image(im3, width=120)
st.title(dataf.brand[2])
st.title("Fiyat")
st.title(dataf.price[2])
elif len(dataf) >3:
st.title("Seçilen Kriterlere En Uygun Ürünler")
ucuz = dataf.sort_values(by="price", ascending=True).reset_index()
fp1 = dataf[dataf["puan"] > dataf["puan"].quantile(0.25)].sort_values(by="puan", ascending=False).reset_index()
fp1 = fp1.drop(["index"],axis=1)
fp2 = fp1[fp1["price"] <dataf["price"].quantile(0.75)].sort_values(by="puan", ascending=False).reset_index()
fp2 = fp2.drop(["index"],axis=1)
fp3 = fp2.sort_values(by="puan", ascending=False).reset_index()
fp3 = fp3.drop(["index"],axis=1)
if len(fp3.puan) == 2:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp3.image[1], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp3.full_name[1] )
st.markdown("Fiyat : " + str(fp3.price[1]) )
elif len(fp3.puan) == 1:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp1.full_name[0] )
st.markdown("Fiyat : " + str(fp1.price[0]) )
elif len(fp3.puan) > 2:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp3.full_name[1] )
st.markdown("Fiyat : " + str(fp3.price[1]) )
elif machine =="Bulaşık Makinesi":
capacity_options = [' ','Düşük Kapasite','Orta Kapasite', 'Yüksek Kapasite']
capacity_help = '''Düşük kapasite: 12 Kişilik ve Altı , Orta Kapasite: 13 Kişilik, Yüksek Kapasite: 14 Kişilik ve Üstü'''.strip()
capacity = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin kapasitesi ne kadar olmalı?',options=capacity_options,help=capacity_help)
type_options = [' ',"Solo","Ankastre"]
type_help = '''Kullanım Tipi'''.strip(",")
type_ = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin kullanım tipi nasıl olmalı?',options=type_options,help=type_help)
size_options = [' ',"Küçük boyut","Standart Boyut","Standard üstü"]
size = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin büyüklüğü ne kadar olmalı?',options=size_options)
energy_usage_options = [' ','Çok önemli', 'Önemli', 'Az önemli', 'Önemsiz']
energy_usage_help = '''Çok Önemli: A+++ A++, Önemli : A+ A, Az Önemli: B C, Önemsiz: D E F G)'''.strip()
energy_usage = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin tükettiği enerji miktarı sizin için önemli mi?',options=energy_usage_options,help=energy_usage_help)
box_options = [' ',"Sepetli","Çekmeceli"]
box_help = '''Çatal Kaşık Bölmesi Tipi'''.strip(",")
box = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin çatal kaşık bölmesi nasıl olmalı?',options=box_options,help=box_help)
soru_list = [capacity,type_,size,energy_usage,box]
soru_list1 = ["capacity","type_","size","energy_usage","box"]
soru_list2 = [capacity,type_,size,energy_usage,box]
if all([i == " " for i in soru_list2]):
st.title('Bakalım sizin için nelerimiz var?')
col1, col2, col3, col4, col5 = st.columns([1,1,1,1,1])
data2_b = data2_b[data2_b.image != "YOK"]
data3 = data2_b.sample(frac=1).drop_duplicates(['brand']).sample(10).reset_index()
im1 = Image.open(requests.get(data3.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(data3.image[1], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(data3.image[2], stream=True).raw).resize((100,150))
im4 = Image.open(requests.get(data3.image[3], stream=True).raw).resize((100,150))
im5 = Image.open(requests.get(data3.image[4], stream=True).raw).resize((100,150))
im6 = Image.open(requests.get(data3.image[5], stream=True).raw).resize((100,150))
im7 = Image.open(requests.get(data3.image[6], stream=True).raw).resize((100,150))
im8 = Image.open(requests.get(data3.image[7], stream=True).raw).resize((100,150))
im9 = Image.open(requests.get(data3.image[8], stream=True).raw).resize((100,150))
im10 = Image.open(requests.get(data3.image[9], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.markdown(data3.brand[0])
b6 = st.image(im6, width=120)
st.markdown(data3.brand[5])
with col2:
b2 = st.image(im2, width=120)
st.markdown(data3.brand[1])
b7 = st.image(im7, width=120)
st.markdown(data3.brand[6])
with col3:
b3 = st.image(im3, width=120)
st.markdown(data3.brand[2])
b8 = st.image(im8, width=120)
st.markdown(data3.brand[7])
with col4:
b4 = st.image(im4, width=120)
st.markdown(data3.brand[3])
b9 = st.image(im9, width=120)
st.markdown(data3.brand[8])
with col5:
b5 = st.image(im5, width=120)
st.markdown(data3.brand[4])
b10 = st.image(im10, width=120)
st.markdown(data3.brand[9])
elif any([i != " " for i in soru_list2]):
for m in soru_list2:
if m == " ":
pass
else:
m_index = soru_list2.index(m)
len_lst1.append(soru_list1[m_index])
len_lst2.append(m)
for k in range(0,len(len_lst2)):
dataf_b = dataf_b[dataf_b[len_lst1[k]] == len_lst2[k]]
if len(dataf_b) == 0:
st.title("Seçilen Kriterlere Uygun Bir Ürün Bulunamadı")
elif len(dataf_b) == 1:
st.title("Seçilen Kriterlere Uygun Bir Ürün Bulundu")
dataf_b = dataf_b.reset_index()
im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150))
b1 = st.image(im1, width=120)
st.title(dataf_b.brand[0])
st.title("Fiyat")
st.title(dataf_b.price[0])
elif len(dataf_b) == 2:
st.title("Seçilen Kriterlere Uygun İki Ürün Bulundu")
col1, col2 = st.columns([1,1])
dataf_b = dataf_b.reset_index()
im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(dataf_b.image[1], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.title(dataf_b.brand[0])
st.title("Fiyat")
st.title(dataf_b.price[0])
with col2:
b2 = st.image(im2, width=120)
st.title(dataf_b.brand[1])
st.title("Fiyat")
st.title(dataf_b.price[1])
elif len(dataf_b) == 3:
st.title("Seçilen Kriterlere Uygun Üç Ürün Bulundu")
col1, col2, col3 = st.columns([1,1,1])
dataf_b = dataf_b.reset_index()
im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(dataf_b.image[1], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(dataf_b.image[2], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.title(dataf_b.brand[0])
st.title("Fiyat")
st.title(dataf_b.price[0])
with col2:
b2 = st.image(im2, width=120)
st.title(dataf_b.brand[1])
st.title("Fiyat")
st.title(dataf_b.price[1])
with col3:
b3 = st.image(im3, width=120)
st.title(dataf_b.brand[2])
st.title("Fiyat")
st.title(dataf_b.price[2])
elif len(dataf_b) >3:
st.title("Seçilen Kriterlere En Uygun Ürünler")
ucuz = dataf_b.sort_values(by="price", ascending=True).reset_index()
fp1 = dataf_b[dataf_b["puan"] > dataf_b["puan"].quantile(0.25)].sort_values(by="puan", ascending=False).reset_index()
fp1 = fp1.drop(["index"],axis=1)
fp2 = fp1[fp1["price"] <dataf_b["price"].quantile(0.75)].sort_values(by="puan", ascending=False).reset_index()
fp2 = fp2.drop(["index"],axis=1)
fp3 = fp2.sort_values(by="puan", ascending=False).reset_index()
fp3 = fp3.drop(["index"],axis=1)
if len(fp3.puan) == 2:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp3.image[1], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp3.full_name[1] )
st.markdown("Fiyat : " + str(fp3.price[1]) )
elif len(fp3.puan) == 1:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp1.full_name[0] )
st.markdown("Fiyat : " + str(fp1.price[0]) )
elif len(fp3.puan) > 2:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp3.full_name[1] )
st.markdown("Fiyat : " + str(fp3.price[1]) )
if dil_secenek == "EN":
    if machine == " ":
        # Twelve columns; only the last (widest) one is used, which pushes
        # the Like button to the right-hand edge of the page.
        col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12 = st.columns([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5])
        with col12:
            # NOTE: the original re-checked `dil_secenek == "EN"` here,
            # which is already guaranteed by the enclosing branch.
            button = st.button("Like 👍")
            if button:
                st.write("Appreciated 💗")
                # Read the persisted like counter, strip stray quote
                # characters, increment, and write it back.  Context
                # managers guarantee both handles are closed (the
                # original left the read handle open).
                with open("counter.txt", "r") as counter_file:
                    count = counter_file.read()
                count_int = int(count.replace("'", "")) + 1
                with open('counter.txt', 'w') as f:
                    f.write(str(count_int))
st.title("About")
st.markdown("With <b><i> Customer Recommendation Project</i></b>, we aim to help consumers choose best white goods for them.", unsafe_allow_html=True)
st.markdown("People expect the e-commerce websites they engage with to remember who they are and what they’re interested in, and make relevant, individualized, and accurate recommendations for new content and new products based on their previous activities. Any app or website that fails to deliver on these demands will quickly see its users flocking out the digital door.")
st.markdown("Customer recommendation system is a software tool designed to generate and provide suggestions for items or content a specific user would like to purchase or engage with based on their needs.")
st.markdown(" ")
st.title("Project Developers")
st.markdown(" ")
col1, col2, col3, col4, col5,col6,col7 = st.columns([1,1,1,1,1,1,1])
with col1:
st.markdown("<b><i>Mert Türkyılmaz</i></b>", unsafe_allow_html=True)
st.markdown("[](https://www.linkedin.com/in/mertturkyilmaz/)")
st.markdown("[](https://github.com/mertturkyilmaz)")
with col4:
st.markdown("<b><i>Sarper Yılmaz</i></b>", unsafe_allow_html=True)
st.markdown("[](https://www.linkedin.com/in/sarperyilmaz/)")
st.markdown("[](https://github.com/sarperyilmaz)")
with col7:
st.markdown("<b><i>Doğukan Doğru</i></b>", unsafe_allow_html=True)
st.markdown("[](https://www.linkedin.com/in/do%C4%9Fukando%C4%9Fru/)")
st.markdown("[](https://github.com/dogudogru)")
elif machine =="Çamaşır Makinesi":
with st.sidebar:
capacity_options = [' ','Düşük Kapasite','Orta Kapasite', 'Yüksek Kapasite']
capacity_help = '''Düşük kapasite: 0-6 KG , Orta Kapasite: 7-10 KG, Yüksek Kapasite: 10+ KG'''.strip()
capacity = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin kapasitesi ne kadar olmalı?',options=capacity_options,help=capacity_help)
cycle_options = [' ',"Düşük Devir","Orta Devir","Yüksek Devir"]
cycle_help = '''Düşük devir: 1000'e kadar,
Orta devir: 1000 - 1200,
Yüksek Kapasite: 1200+'''.strip(",")
cycle = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin devir sayısı ne olmalı?',options=cycle_options,help=cycle_help)
size_options = [' ',"Küçük boyut","Standart Boyut","Standard üstü"]
size = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin büyüklüğü ne kadar olmalı?',options=size_options)
energy_usage_options = [' ','Çok önemli', 'Önemli', 'Az önemli', 'Önemsiz']
energy_usage_help = '''Çok Önemli: A+++ A++, Önemli : A+ A, Az Önemli: B C, Önemsiz: D E F G)'''.strip()
energy_usage = st.sidebar.selectbox('Almak istediğiniz çamaşır makinesinin tükettiği enerji miktarı sizin için önemli mi?',options=energy_usage_options,help=energy_usage_help)
soru_list = [capacity,cycle,size,energy_usage]
soru_list1 = ["capacity","cycle","size","energy_usage"]
soru_list2 = [capacity,cycle,size,energy_usage]
if all([i == " " for i in soru_list2]):
st.title('Bakalım sizin için nelerimiz var?')
col1, col2, col3, col4, col5 = st.columns([1,1,1,1,1])
data3 = data2.sample(frac=1).drop_duplicates(['brand']).sample(10).reset_index()
im1 = Image.open(requests.get(data3.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(data3.image[1], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(data3.image[2], stream=True).raw).resize((100,150))
im4 = Image.open(requests.get(data3.image[3], stream=True).raw).resize((100,150))
im5 = Image.open(requests.get(data3.image[4], stream=True).raw).resize((100,150))
im6 = Image.open(requests.get(data3.image[5], stream=True).raw).resize((100,150))
im7 = Image.open(requests.get(data3.image[6], stream=True).raw).resize((100,150))
im8 = Image.open(requests.get(data3.image[7], stream=True).raw).resize((100,150))
im9 = Image.open(requests.get(data3.image[8], stream=True).raw).resize((100,150))
im10 = Image.open(requests.get(data3.image[9], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.markdown(data3.brand[0])
b6 = st.image(im6, width=120)
st.markdown(data3.brand[5])
with col2:
b2 = st.image(im2, width=120)
st.markdown(data3.brand[1])
b7 = st.image(im7, width=120)
st.markdown(data3.brand[6])
with col3:
b3 = st.image(im3, width=120)
st.markdown(data3.brand[2])
b8 = st.image(im8, width=120)
st.markdown(data3.brand[7])
with col4:
b4 = st.image(im4, width=120)
st.markdown(data3.brand[3])
b9 = st.image(im9, width=120)
st.markdown(data3.brand[8])
with col5:
b5 = st.image(im5, width=120)
st.markdown(data3.brand[4])
b10 = st.image(im10, width=120)
st.markdown(data3.brand[9])
elif any([i != " " for i in soru_list2]):
for m in soru_list2:
if m == " ":
pass
else:
m_index = soru_list2.index(m)
len_lst1.append(soru_list1[m_index])
len_lst2.append(m)
for k in range(0,len(len_lst2)):
dataf = dataf[dataf[len_lst1[k]] == len_lst2[k]]
if len(dataf) == 0:
st.title("Seçilen Kriterlere Uygun Bir Ürün Bulunamadı")
elif len(dataf) == 1:
st.title("Seçilen Kriterlere Uygun Bir Ürün Bulundu")
dataf = dataf.reset_index()
im1 = Image.open(requests.get(dataf.image[0], stream=True).raw).resize((100,150))
b1 = st.image(im1, width=120)
st.title(dataf.brand[0])
st.title("Fiyat")
st.title(dataf.price[0])
elif len(dataf) == 2:
st.title("Seçilen Kriterlere Uygun İki Ürün Bulundu")
col1, col2 = st.columns([1,1])
dataf = dataf.reset_index()
im1 = Image.open(requests.get(dataf.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(dataf.image[1], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.title(dataf.brand[0])
st.title("Fiyat")
st.title(dataf.price[0])
with col2:
b2 = st.image(im2, width=120)
st.title(dataf.brand[1])
st.title("Fiyat")
st.title(dataf.price[1])
elif len(dataf) == 3:
st.title("Seçilen Kriterlere Uygun Üç Ürün Bulundu")
col1, col2, col3 = st.columns([1,1,1])
im1 = Image.open(requests.get(dataf.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(dataf.image[1], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(dataf.image[2], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.title(dataf.brand[0])
st.title("Fiyat")
st.title(dataf.price[0])
with col2:
b2 = st.image(im2, width=120)
st.title(dataf.brand[1])
st.title("Fiyat")
st.title(dataf.price[1])
with col3:
b3 = st.image(im3, width=120)
st.title(dataf.brand[2])
st.title("Fiyat")
st.title(dataf.price[2])
elif len(dataf) >3:
st.title("Seçilen Kriterlere En Uygun Ürünler")
ucuz = dataf.sort_values(by="price", ascending=True).reset_index()
fp1 = dataf[dataf["puan"] > dataf["puan"].quantile(0.25)].sort_values(by="puan", ascending=False).reset_index()
fp1 = fp1.drop(["index"],axis=1)
fp2 = fp1[fp1["price"] <dataf["price"].quantile(0.75)].sort_values(by="puan", ascending=False).reset_index()
fp2 = fp2.drop(["index"],axis=1)
fp3 = fp2.sort_values(by="puan", ascending=False).reset_index()
fp3 = fp3.drop(["index"],axis=1)
if len(fp3.puan) == 2:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp3.image[1], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp3.full_name[1] )
st.markdown("Fiyat : " + str(fp3.price[1]) )
elif len(fp3.puan) == 1:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp1.full_name[0] )
st.markdown("Fiyat : " + str(fp1.price[0]) )
elif len(fp3.puan) > 2:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp3.full_name[1] )
st.markdown("Fiyat : " + str(fp3.price[1]) )
elif machine =="Bulaşık Makinesi":
capacity_options = [' ','Düşük Kapasite','Orta Kapasite', 'Yüksek Kapasite']
capacity_help = '''Düşük kapasite: 12 Kişilik ve Altı , Orta Kapasite: 13 Kişilik, Yüksek Kapasite: 14 Kişilik ve Üstü'''.strip()
capacity = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin kapasitesi ne kadar olmalı?',options=capacity_options,help=capacity_help)
type_options = [' ',"Solo","Ankastre"]
type_help = '''Kullanım Tipi'''.strip(",")
type_ = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin kullanım tipi nasıl olmalı?',options=type_options,help=type_help)
size_options = [' ',"Küçük boyut","Standart Boyut","Standard üstü"]
size = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin büyüklüğü ne kadar olmalı?',options=size_options)
energy_usage_options = [' ','Çok önemli', 'Önemli', 'Az önemli', 'Önemsiz']
energy_usage_help = '''Çok Önemli: A+++ A++, Önemli : A+ A, Az Önemli: B C, Önemsiz: D E F G)'''.strip()
energy_usage = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin tükettiği enerji miktarı sizin için önemli mi?',options=energy_usage_options,help=energy_usage_help)
box_options = [' ',"Sepetli","Çekmeceli"]
box_help = '''Çatal Kaşık Bölmesi Tipi'''.strip(",")
box = st.sidebar.selectbox('Almak istediğiniz bulaşık makinesinin çatal kaşık bölmesi nasıl olmalı?',options=box_options,help=box_help)
soru_list = [capacity,type_,size,energy_usage,box]
soru_list1 = ["capacity","type_","size","energy_usage","box"]
soru_list2 = [capacity,type_,size,energy_usage,box]
if all([i == " " for i in soru_list2]):
st.title('Bakalım sizin için nelerimiz var?')
col1, col2, col3, col4, col5 = st.columns([1,1,1,1,1])
data2_b = data2_b[data2_b.image != "YOK"]
data3 = data2_b.sample(frac=1).drop_duplicates(['brand']).sample(10).reset_index()
im1 = Image.open(requests.get(data3.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(data3.image[1], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(data3.image[2], stream=True).raw).resize((100,150))
im4 = Image.open(requests.get(data3.image[3], stream=True).raw).resize((100,150))
im5 = Image.open(requests.get(data3.image[4], stream=True).raw).resize((100,150))
im6 = Image.open(requests.get(data3.image[5], stream=True).raw).resize((100,150))
im7 = Image.open(requests.get(data3.image[6], stream=True).raw).resize((100,150))
im8 = Image.open(requests.get(data3.image[7], stream=True).raw).resize((100,150))
im9 = Image.open(requests.get(data3.image[8], stream=True).raw).resize((100,150))
im10 = Image.open(requests.get(data3.image[9], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.markdown(data3.brand[0])
b6 = st.image(im6, width=120)
st.markdown(data3.brand[5])
with col2:
b2 = st.image(im2, width=120)
st.markdown(data3.brand[1])
b7 = st.image(im7, width=120)
st.markdown(data3.brand[6])
with col3:
b3 = st.image(im3, width=120)
st.markdown(data3.brand[2])
b8 = st.image(im8, width=120)
st.markdown(data3.brand[7])
with col4:
b4 = st.image(im4, width=120)
st.markdown(data3.brand[3])
b9 = st.image(im9, width=120)
st.markdown(data3.brand[8])
with col5:
b5 = st.image(im5, width=120)
st.markdown(data3.brand[4])
b10 = st.image(im10, width=120)
st.markdown(data3.brand[9])
elif any([i != " " for i in soru_list2]):
for m in soru_list2:
if m == " ":
pass
else:
m_index = soru_list2.index(m)
len_lst1.append(soru_list1[m_index])
len_lst2.append(m)
for k in range(0,len(len_lst2)):
dataf_b = dataf_b[dataf_b[len_lst1[k]] == len_lst2[k]]
if len(dataf_b) == 0:
st.title("Seçilen Kriterlere Uygun Bir Ürün Bulunamadı")
elif len(dataf_b) == 1:
st.title("Seçilen Kriterlere Uygun Bir Ürün Bulundu")
dataf_b = dataf_b.reset_index()
im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150))
b1 = st.image(im1, width=120)
st.title(dataf_b.brand[0])
st.title("Fiyat")
st.title(dataf_b.price[0])
elif len(dataf_b) == 2:
st.title("Seçilen Kriterlere Uygun İki Ürün Bulundu")
col1, col2 = st.columns([1,1])
dataf_b = dataf_b.reset_index()
im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(dataf_b.image[1], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.title(dataf_b.brand[0])
st.title("Fiyat")
st.title(dataf_b.price[0])
with col2:
b2 = st.image(im2, width=120)
st.title(dataf_b.brand[1])
st.title("Fiyat")
st.title(dataf_b.price[1])
elif len(dataf_b) == 3:
st.title("Seçilen Kriterlere Uygun Üç Ürün Bulundu")
col1, col2, col3 = st.columns([1,1,1])
im1 = Image.open(requests.get(dataf_b.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(dataf_b.image[1], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(dataf_b.image[2], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
st.title(dataf_b.brand[0])
st.title("Fiyat")
st.title(dataf_b.price[0])
with col2:
b2 = st.image(im2, width=120)
st.title(dataf_b.brand[1])
st.title("Fiyat")
st.title(dataf_b.price[1])
with col3:
b3 = st.image(im3, width=120)
st.title(dataf_b.brand[2])
st.title("Fiyat")
st.title(dataf_b.price[2])
elif len(dataf_b) >3:
st.title("Seçilen Kriterlere En Uygun Ürünler")
ucuz = dataf_b.sort_values(by="price", ascending=True).reset_index()
fp1 = dataf_b[dataf_b["puan"] > dataf_b["puan"].quantile(0.25)].sort_values(by="puan", ascending=False).reset_index()
fp1 = fp1.drop(["index"],axis=1)
fp2 = fp1[fp1["price"] <dataf_b["price"].quantile(0.75)].sort_values(by="puan", ascending=False).reset_index()
fp2 = fp2.drop(["index"],axis=1)
fp3 = fp2.sort_values(by="puan", ascending=False).reset_index()
fp3 = fp3.drop(["index"],axis=1)
if len(fp3.puan) == 2:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp3.image[1], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp3.full_name[1] )
st.markdown("Fiyat : " + str(fp3.price[1]) )
elif len(fp3.puan) == 1:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp1.full_name[0] )
st.markdown("Fiyat : " + str(fp1.price[0]) )
elif len(fp3.puan) > 2:
col1, col2 = st.columns([1,1])
im1 = Image.open(requests.get(ucuz.image[0], stream=True).raw).resize((100,150))
im2 = Image.open(requests.get(fp3.image[0], stream=True).raw).resize((100,150))
im3 = Image.open(requests.get(fp1.image[0], stream=True).raw).resize((100,150))
with col1:
b1 = st.image(im1, width=120)
b2 = st.image(im2, width=120)
b3 = st.image(im3, width=120)
with col2:
st.title("En Ucuz")
st.markdown("Ürün Adı : " + ucuz.full_name[0], unsafe_allow_html=True)
st.markdown("Fiyat : " + str(ucuz.price[0]))
st.title("Fiyat Performans")
st.markdown("Ürün Adı : " + fp3.full_name[0] )
st.markdown("Fiyat : " + str(fp3.price[0]) )
st.title(" ")
st.title("Çok Satılan")
st.markdown("Ürün Adı : " + fp3.full_name[3] )
st.markdown("Fiyat : " + str(fp3.price[3]) )
|
import smart_imports
smart_imports.all()
E = 0.001
class RaceInfo(typing.NamedTuple):
    """Read-only snapshot of one race's demographic indicators for display."""
    race: rels.relations.Record        # the race record itself
    percents: float                    # current share of this race
    optimal_percents: float            # share the current pressure trends toward
    persons_percents: float            # share among persons (NOTE(review): presumably place persons — confirm)
    delta: float                       # change to be applied on the next update step
class Races(object):
    """Racial demographics of a place as a mapping race -> fraction.

    Fractions are kept normalized (summing to 1.0) by get_next_races().
    """

    def __init__(self, races=None):
        # Default: a uniform distribution over all known races.
        if races is None:
            races = {race: 1.0 / len(game_relations.RACE.records) for race in game_relations.RACE.records}
        self._races = races

    def serialize(self):
        """Return a JSON-friendly dict keyed by the race's integer value."""
        return {race.value: percents for race, percents in self._races.items()}

    @classmethod
    def deserialize(cls, data):
        """Inverse of serialize(): rebuild race keys from their integer values."""
        return cls(races={game_relations.RACE(int(race_id)): percents for race_id, percents in data.items()})

    def get_race_percents(self, race):
        return self._races.get(race, 0)

    def get_optimal_pressure(self, persons, demographics_pressure_modifires):
        """Return normalized demographic pressure per race exerted by persons.

        Falls back to a copy of the current distribution when the total
        pressure is numerically zero (below E).
        """
        trends = {race: 0.0 for race in game_relations.RACE.records}

        for person in persons:
            pressure = person.attrs.demographics_pressure + demographics_pressure_modifires.get(person.race, 0)
            delta = politic_power_storage.persons.total_power_fraction(person.id) * pressure
            trends[person.race] += delta

        # normalize trends
        normalizer = sum(trends.values())

        if not trends or normalizer < E:
            return copy.copy(self._races)

        return {race: float(power) / normalizer for race, power in trends.items()}

    def get_next_races(self, persons, demographics_pressure_modifires):
        """Return the (re-normalized) distribution after one change step."""
        trends = self.get_optimal_pressure(persons, demographics_pressure_modifires)

        new_races = {race: max(0.0, percents + c.PLACE_RACE_CHANGE_DELTA * trends[race]) for race, percents in self._races.items()}

        # normalize
        normalizer = sum(new_races.values())
        new_races = {race: percents / normalizer for race, percents in new_races.items()}

        return new_races

    def update(self, persons, demographics_pressure_modifires):
        self._races = self.get_next_races(persons, demographics_pressure_modifires)

    @property
    def dominant_race(self):
        """Race with the largest share, or None when no data is present."""
        if self._races:
            return max(self._races.items(), key=lambda x: x[1])[0]
        return None

    def get_next_delta(self, persons, demographics_pressure_modifires):
        """Per-race difference between the next distribution and the current one."""
        next_races = self.get_next_races(persons, demographics_pressure_modifires)
        return {race: next_races[race] - self._races[race] for race in game_relations.RACE.records}

    def demographics(self, persons, demographics_pressure_modifires):
        """Return RaceInfo records for every race, sorted by share, descending."""
        races = []

        trends = self.get_optimal_pressure(persons, demographics_pressure_modifires)
        next_delta = self.get_next_delta(persons, demographics_pressure_modifires)
        persons_percents = map_logic.get_person_race_percents(persons)

        for race in game_relations.RACE.records:
            races.append(RaceInfo(race=race,
                                  percents=self._races[race],
                                  optimal_percents=trends[race],
                                  delta=next_delta[race],
                                  persons_percents=persons_percents[race.value]))

        return sorted(races, key=lambda r: -r.percents)

    def __eq__(self, other):
        # BUG FIX: previously compared against the non-existent attribute
        # `other.races`, which raised AttributeError on every comparison;
        # the real attribute is `_races`.
        return (self.__class__ == other.__class__ and
                self._races == other._races)

    def __ne__(self, other):
        return not self.__eq__(other)
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for saving metrics in memory using dictionaries."""
import bisect
import collections
from typing import Any, Dict, Mapping, Optional
from tensorflow_federated.python.simulation import metrics_manager
class DictionaryMetricsManager(metrics_manager.MetricsManager):
  """A manager for keeping metrics in memory using an ordered dictionary.

  Note that this class stores all metrics in memory, and may be prohibitively
  expensive in large-scale simulations, especially those storing large tensor
  metrics.
  """

  def __init__(self):
    """Returns an initialized `DictionaryMetricsManager`.

    This class will maintain metrics in a dictionary held in memory, where the
    keys will be integer round numbers, and the values are the metrics for the
    given round.
    """
    self._latest_round_num = None
    self._metrics = collections.OrderedDict()

  def save_metrics(self, metrics: Mapping[str, Any], round_num: int) -> None:
    """Updates the stored metrics data with metrics for a specific round.

    Args:
      metrics: A nested structure of metrics collected during `round_num`.
      round_num: Integer round at which `metrics` was collected.

    Raises:
      ValueError: If `round_num` is negative.
      ValueError: If `round_num` is less than or equal to the latest round
        number used to save metrics.
    """
    if not isinstance(round_num, int) or round_num < 0:
      raise ValueError(
          f'round_num must be a nonnegative integer, received {round_num}.')
    # BUG FIX: the previous truthiness check (`if self._latest_round_num`)
    # treated a saved round 0 as "no rounds saved yet", so duplicate or
    # out-of-order saves were silently accepted after round 0.
    if self._latest_round_num is not None and round_num <= self._latest_round_num:
      raise ValueError(f'Attempting to append metrics for round {round_num}, '
                       'but metrics already exist through round '
                       f'{self._latest_round_num}.')
    self._metrics[round_num] = metrics
    self._latest_round_num = round_num

  def clear_metrics(self, round_num: int) -> None:
    """Clear out metrics at and after a given starting `round_num`.

    Note that if `clear_metrics(round_num=0)` is called, all metrics are
    cleared in a more performant manner. Rather than removing all keys
    associated to round numbers after `round_num`, we simply clear the entire
    dictionary.

    Args:
      round_num: A nonnegative integer indicating the starting round number
        for clearing metrics from the manager's associated dictionary.

    Raises:
      ValueError: If `round_num` is negative.
    """
    if round_num < 0:
      raise ValueError('Attempting to clear metrics after round '
                       f'{round_num}, which is negative.')
    # save_metrics enforces strictly increasing keys, so the key list is
    # sorted and bisect applies directly.
    round_numbers = list(self._metrics.keys())
    removal_index = bisect.bisect_left(round_numbers, round_num)
    if removal_index == 0:
      self._metrics.clear()
      self._latest_round_num = None
    else:
      for x in round_numbers[removal_index:]:
        del self._metrics[x]
      self._latest_round_num = round_numbers[removal_index - 1]

  @property
  def metrics(self) -> Dict[int, Any]:
    """Retrieve the stored experiment metrics data for all rounds."""
    # Shallow copy so callers cannot mutate the internal mapping.
    return self._metrics.copy()

  @property
  def latest_round_num(self) -> Optional[int]:
    """The last round number passed to `save_metrics`.

    If no metrics have been written, this will be `None`, otherwise it will
    be a nonnegative integer.
    """
    return self._latest_round_num
|
from gevent.hub import PYPY

# Select the backend implementation: CFFI-based on PyPy, the C extension
# elsewhere.
if PYPY:
    from gevent import corecffi as _core
else:
    from gevent import corecext as _core

# Re-export every public name of the selected backend at module level.
globals().update(
    (name, getattr(_core, name))
    for name in dir(_core)
    if not name.startswith('__')
)

__all__ = _core.__all__
|
"""
You are given a perfect binary tree where all leaves are on the same level, and every parent has two children. The binary tree has the following definition:
struct Node {
int val;
Node *left;
Node *right;
Node *next;
}
Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
Initially, all next pointers are set to NULL.
Follow up:
You may only use constant extra space.
Recursive approach is fine, you may assume implicit stack space does not count as extra space for this problem.
Example 1:
Input: root = [1,2,3,4,5,6,7]
Output: [1,#,2,3,#,4,5,6,7,#]
Explanation: Given the above perfect binary tree, your function should populate each next pointer to point to its next right node.
The serialized output is in level order as connected by the next pointers, with '#' signifying the end of each level.
Constraints:
The number of nodes in the given tree is less than 4096.
-1000 <= node.val <= 1000
"""
"""
# Definition for a Node.
class Node(object):
def __init__(self, val=0, left=None, right=None, next=None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution(object):
    def connect(self, root):
        """
        Populate each `next` pointer of a perfect binary tree so it points
        to the node immediately to its right on the same level.

        :type root: Node
        :rtype: Node
        """
        # Nothing to do for an empty tree.
        if not root:
            return root
        # `leftmost` always points at the first node of the current level.
        leftmost = root
        # In a perfect tree the level below exists iff the leftmost node
        # has a left child.
        while leftmost.left:
            head = leftmost
            leftmost = leftmost.left
            # Walk the current level via the next pointers wired up on the
            # previous pass and connect the children's next pointers.
            while head.next:
                # Children of the same parent.
                head.left.next = head.right
                # Bridge to the neighbouring parent's left child.
                head.right.next = head.next.left
                head = head.next
            # Last parent of the level: only the inner link is needed; its
            # right child's next stays None (the default).
            head.left.next = head.right
        return root
"""
思路:使用已建立的next指针,考虑左右节点的两种不同情况,详见注释。算法时间复杂度为O(n),空间复杂度为O(1)。
"""
|
"""
.. automodule:: eumjeol_util
이 모듈은 Krcorpus에서 음절관련 기능을 담당하는 부분이다.
음절은 한국어에서 한글자를 의미하는 것으로 음절을 음소로 분해하거나
음소를 다시 음절을 합치는 등의 유틸리티 기능이 있다.
"""
import traceback
#: no final consonant (jongsung)
JONGSUNG_TYPE_NONE = 1
#: final consonant is the liquid ㄹ
JONGSUNG_TYPE_LIEUL = 2
#: final consonant other than ㄹ
JONGSUNG_TYPE_COMMON = 3

# Precomposed Hangul syllables occupy U+AC00 .. U+D7A3.
_HANGUL_CODE_START = 44032
_HANGUL_CODE_END = 55199
# Counts of initial (cho), medial (jung) and final (jong) jamo.
_CHOSUNG = 19
_JUNGSUNG = 21
_JONGSUNG = 28

_CHOSUNG_LIST = [u'ㄱ', u'ㄲ', u'ㄴ', u'ㄷ', u'ㄸ', u'ㄹ', u'ㅁ', u'ㅂ', u'ㅃ', u'ㅅ',
                 u'ㅆ', u'ㅇ', u'ㅈ', u'ㅉ', u'ㅊ', u'ㅋ', u'ㅌ', u'ㅍ', u'ㅎ']
_JUNGSUNG_LIST = [u'ㅏ', u'ㅐ', u'ㅑ', u'ㅒ', u'ㅓ', u'ㅔ', u'ㅕ', u'ㅖ', u'ㅗ', u'ㅘ',
                  u'ㅙ', u'ㅚ', u'ㅛ', u'ㅜ', u'ㅝ', u'ㅞ', u'ㅟ', u'ㅠ', u'ㅡ', u'ㅢ',
                  u'ㅣ']
_JONGSUNG_LIST = [u'', u'ㄱ', u'ㄲ', u'ㄳ', u'ㄴ', u'ㄵ', u'ㄶ', u'ㄷ', u'ㄹ', u'ㄺ',
                  u'ㄻ', u'ㄼ', u'ㄽ', u'ㄾ', u'ㄿ', u'ㅀ', u'ㅁ', u'ㅂ', u'ㅄ', u'ㅅ',
                  u'ㅆ', u'ㅇ', u'ㅈ', u'ㅊ', u'ㅋ', u'ㅌ', u'ㅍ', u'ㅎ']

#: "bright" (yang) vowels
YANG_VOWEL = [u"ㅏ", u"ㅑ", u"ㅗ", u"ㅛ"]


def parse_eumjeol(eumjeol):
    """Decompose a Hangul syllable into its jamo.

    Args:
        eumjeol (str): single character to decompose.

    Returns:
        [cho, jung, jong] where jong is "" when there is no final consonant,
        e.g. ["ㄱ", "ㅏ", "ㄴ"] for "간".
        [None, None, None] for "" or " ".
        None when the character is not a precomposed Hangul syllable.
    """
    if eumjeol in ["", " "]:
        return [None, None, None]

    eumjeol_int = ord(eumjeol)
    # BUG FIX: the original test used `and` with inverted comparisons
    # (`start > x and end < x`), which can never be true, so non-Hangul
    # input fell through and crashed on a negative list index below.
    if eumjeol_int < _HANGUL_CODE_START or eumjeol_int > _HANGUL_CODE_END:
        return None

    base = eumjeol_int - _HANGUL_CODE_START
    # Syllable code = ((cho * 21) + jung) * 28 + jong, offset from U+AC00.
    cho, temp = divmod(base, _JUNGSUNG * _JONGSUNG)
    jung, jong = divmod(temp, _JONGSUNG)
    return [_CHOSUNG_LIST[cho], _JUNGSUNG_LIST[jung], _JONGSUNG_LIST[jong]]
def change_jaso(eumjeol, cho, jung, jong):
    """Rebuild a syllable, replacing selected jamo.

    Pass None for any component that should stay as in `eumjeol`; pass ""
    for `jong` to drop the final consonant.

    Ex):
        change_jaso("간", "ㅁ", None, None) => "만"
        change_jaso("간", None, None, "") => "가"
    """
    current = parse_eumjeol(eumjeol)
    requested = (cho, jung, jong)
    # Keep the original jamo wherever no replacement was requested.
    merged = [new if new is not None else old
              for old, new in zip(current, requested)]
    return build_eumjeol(merged[0], merged[1], merged[2])
def get_jongsung_type(eumjeol):
    """Classify the final consonant (jongsung) of a syllable.

    Args:
        eumjeol (str): syllable to inspect.

    Returns:
        | JONGSUNG_TYPE_NONE   : no final consonant
        | JONGSUNG_TYPE_LIEUL  : final consonant is ㄹ
        | JONGSUNG_TYPE_COMMON : any other final consonant

    Ex):
        get_jongsung_type("가") => JONGSUNG_TYPE_NONE
        get_jongsung_type("갈") => JONGSUNG_TYPE_LIEUL
    """
    jong = parse_eumjeol(eumjeol)[2]
    if jong == "":
        return JONGSUNG_TYPE_NONE
    if jong == u"ㄹ":
        return JONGSUNG_TYPE_LIEUL
    return JONGSUNG_TYPE_COMMON
def has_jongsung(eumjeol):
    """Return True when the syllable has a final consonant (jongsung).

    BUG FIX: the original returned the unrelated constant
    JONGSUNG_TYPE_COMMON (3) instead of True; any caller comparing the
    result with `is True` or `== True` would misbehave. Truthiness is
    preserved, so boolean-context callers are unaffected.
    """
    jong = (parse_eumjeol(eumjeol))[2]
    return jong != ""
def check_phoneme_restriction(eumjeol, phoneme):
    """Check whether `eumjeol` satisfies the phoneme restriction `phoneme`.

    Restriction codes (matched as substrings of `phoneme`):
        L      final ㄹ
        VO     no final consonant
        FS     final consonant other than ㄹ
        L|FS   any final consonant
        VO|L   no final, or final ㄹ
        NUL    unrestricted
        YANG1  vowel is ㅏ or ㅗ
        YANG2  vowel is ㅏ, ㅑ or ㅗ
        EUM1   vowel other than ㅏ/ㅗ
    """
    # A missing syllable passes every restriction by convention.
    if eumjeol is None:
        return True

    [_, jung, jong] = parse_eumjeol(eumjeol)

    # The dictionary-standard "yang" vowel set is wider (ㅏ,ㅗ,ㅑ,ㅛ,ㅘ,ㅚ,ㅐ),
    # but ㅐ-final verb stems (개어, 내어) pattern with "eum" vowels, so the
    # sets used here are deliberately narrow.
    is_yang1 = jung in (u'ㅏ', u'ㅗ')
    is_yang2 = jung in (u'ㅏ', u'ㅑ', u'ㅗ')

    if phoneme == "NUL":
        return True
    if jong == "" and "VO" in phoneme:
        # vowel ending (no final consonant)
        return True
    # NOTE(review): the code tests "LQ" although the table above says "L" —
    # confirm against the restriction grammar actually used by callers.
    if jong == u"ㄹ" and "LQ" in phoneme:
        # liquid (ㄹ) final
        return True
    if jong not in ("", u"ㄹ") and "FS" in phoneme:
        return True
    if is_yang1 and "YANG1" in phoneme:
        return True
    if is_yang2 and "YANG2" in phoneme:
        return True
    if not is_yang1 and "EUM1" in phoneme:
        return True
    return False
def build_eumjeol(cho, jung, jong):
    """Compose a Hangul syllable from its jamo (inverse of parse_eumjeol)."""
    # Syllable code = ((cho * 21) + jung) * 28 + jong, offset from U+AC00.
    code_point = (_HANGUL_CODE_START
                  + _CHOSUNG_LIST.index(cho) * _JUNGSUNG * _JONGSUNG
                  + _JUNGSUNG_LIST.index(jung) * _JONGSUNG
                  + _JONGSUNG_LIST.index(jong))
    return chr(code_point)
if __name__ == "__main__":
    # Smoke test: decompose a sample syllable and print its jamo.
    try:
        jaso_list = parse_eumjeol(u"한")
        print(jaso_list)
    except Exception:
        # Print the traceback instead of crashing the demo run.
        tb = traceback.format_exc()
        print(tb)
|
#!/usr/bin/env python3
# coding=utf-8
"""
debounced buttons for PyBadge
HW: Adafruit PyBadge
"""
from adafruit_pybadger import pybadger
from adafruit_debouncer import Debouncer
##########################################
# main class
class PyBadgeButtons(object):
    """PyBadgeButtons - all PyBadge hardware buttons, debounced.

    Each button is exposed both as an attribute (self.a, self.b, ...) and
    in the `buttons` dict keyed by name.
    """

    def __init__(self):
        super(PyBadgeButtons, self).__init__()
        # https://learn.adafruit.com/debouncer-library-python-circuitpython-buttons-sensors/advanced-debouncing
        self.a = Debouncer(lambda: pybadger.button.a)
        self.b = Debouncer(lambda: pybadger.button.b)
        self.up = Debouncer(lambda: pybadger.button.up)
        self.down = Debouncer(lambda: pybadger.button.down)
        self.left = Debouncer(lambda: pybadger.button.left)
        self.right = Debouncer(lambda: pybadger.button.right)
        self.start = Debouncer(lambda: pybadger.button.start)
        self.select = Debouncer(lambda: pybadger.button.select)
        # Name -> debouncer map, built as one literal instead of eight
        # separate item assignments.
        self.buttons = {
            "a": self.a,
            "b": self.b,
            "up": self.up,
            "down": self.down,
            "left": self.left,
            "right": self.right,
            "start": self.start,
            "select": self.select,
        }

    def update(self):
        """Update all debouncer objects."""
        # Only the debouncers are needed; iterate values() directly rather
        # than unpacking unused key names from items().
        for button in self.buttons.values():
            button.update()
|
# install_certifi.py
#
# sample script to install or update a set of default Root Certificates
# for the ssl module. Uses the certificates provided by the certifi package:
# https://pypi.python.org/pypi/certifi
import os
import os.path
import ssl
import stat
import subprocess
import sys
# Mode 0o775 (rwxrwxr-x): owner and group get read/write/execute,
# everyone else read/execute.
STAT_0o775 = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
              | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
              | stat.S_IROTH | stat.S_IXOTH)
def main():
    """Install/refresh the certifi CA bundle as the default OpenSSL cafile.

    Upgrades certifi via pip, then replaces the interpreter's default
    cafile with a relative symlink to certifi's bundle. Requires write
    access to the OpenSSL directory.
    """
    openssl_dir, openssl_cafile = os.path.split(
        ssl.get_default_verify_paths().openssl_cafile)

    print(" -- pip install --upgrade certifi")
    subprocess.check_call([
        sys.executable, "-E", "-s", "-m", "pip", "install", "--upgrade",
        "certifi"
    ])

    # Imported after the upgrade so the freshly installed version is used.
    import certifi

    # change working directory to the default SSL directory
    os.chdir(openssl_dir)
    # Relative path so the symlink survives moving the tree as a whole.
    relpath_to_certifi_cafile = os.path.relpath(certifi.where())
    print(" -- removing any existing file or link")
    try:
        os.remove(openssl_cafile)
    except FileNotFoundError:
        # Nothing to remove on first run.
        pass
    print(" -- creating symlink to certifi certificate bundle")
    os.symlink(relpath_to_certifi_cafile, openssl_cafile)
    print(" -- setting permissions")
    os.chmod(openssl_cafile, STAT_0o775)
    print(" -- update complete")


if __name__ == '__main__':
    main()
|
from __future__ import print_function
import time
import argparse
import grpc
from jaeger_client import Config
from grpc_opentracing import open_tracing_client_interceptor
from grpc_opentracing.grpcext import intercept_channel
import command_line_pb2
def run():
    """Send a single traced Echo RPC to the local server, then shut down."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--log_payloads',
        action='store_true',
        help='log request/response objects to open-tracing spans')
    args = parser.parse_args()

    # Jaeger tracer with a constant sampler: every span is reported.
    config = Config(
        config={
            'sampler': {
                'type': 'const',
                'param': 1,
            },
            'logging': True,
        },
        service_name='trivial-client')
    tracer = config.initialize_tracer()
    interceptor = open_tracing_client_interceptor(
        tracer, log_payloads=args.log_payloads)

    # Wrap the plain channel so every RPC is traced.
    channel = intercept_channel(
        grpc.insecure_channel('localhost:50051'), interceptor)
    stub = command_line_pb2.CommandLineStub(channel)

    response = stub.Echo(command_line_pb2.CommandRequest(text='Hello, hello'))
    print(response.text)

    # Give the tracer time to flush spans before and after closing.
    time.sleep(2)
    tracer.close()
    time.sleep(2)


if __name__ == '__main__':
    run()
|
import os
import sys
from email.mime.text import MIMEText
import smtplib
import tempfile

smtp_server = "localhost"

# Build the message body in a temporary spool file.
# BUG FIX: the original used the Python 2 statement `print >>file, ...`,
# which under Python 3 parses as a right-shift expression and fails at
# runtime with a TypeError; print(..., file=...) is the Python 3 form.
with tempfile.NamedTemporaryFile(mode='w+t') as email_content:
    print("This is my content", file=email_content)
    print(smtp_server, file=email_content)
    print("Thank you for reading", file=email_content)

    # Rewind and read the body back in one pass (the original concatenated
    # it line by line, which is quadratic).
    email_content.seek(0)
    email_body = email_content.read()

msg = MIMEText(email_body)
msg['Subject'] = "This is my email message"
msg['From'] = "sender@example.com"
msg['To'] = "recipient1@example.com,recipient2@example.com"

# Send via the local SMTP server; sendmail needs the recipients as a list.
s = smtplib.SMTP(smtp_server, 25)
s.sendmail(msg['From'], msg['To'].split(","), msg.as_string())
s.quit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import ast
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))

# Long description comes straight from the README, decoded as UTF-8.
with open(path.join(here, 'README.md'), 'rb') as readme_file:
    readme = readme_file.read().decode('utf-8')

# Pull __version__ out of the package source without importing it, to
# avoid executing package-level side effects at build time.
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('webanalyzer/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))

# requirements.txt: plain entries become install_requires; VCS entries
# (git+...) are moved into dependency_links with the prefix stripped.
with open(path.join(here, 'requirements.txt'), 'rb') as f:
    all_reqs = f.read().decode('utf-8').split('\n')

install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')]

setup(
    name='webanalyzer',
    version=version,
    description="",
    long_description=readme,
    long_description_content_type='text/markdown',
    author="fate0",
    author_email='fate0@fatezero.org',
    url='https://github.com/webanalyzer/webanalyzer.py',
    packages=find_packages(),
    package_dir={},
    entry_points={
        'console_scripts': [
            'webanalyzer=webanalyzer.cli:main'
        ]
    },
    include_package_data=True,
    install_requires=install_requires,
    dependency_links=dependency_links,
    # NOTE(review): `license` says GPLv2 but the classifier below claims
    # Apache — one of the two is wrong; confirm the intended license.
    license="GPLv2",
    zip_safe=False,
    keywords='webanalyzer',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
|
"""
Administration for photos and galleries.
"""
from django.contrib import admin
from django.db.models import Count
from django.utils.translation import gettext_lazy as _
from .forms import PhotoForm
from .models import Gallery, Photo
class PhotoInline(admin.TabularInline):
    """Inline editor for photos inside the gallery admin."""

    model = Photo
    form = PhotoForm
    ordering = ["created"]

    def get_extra(self, request, obj=None, **kwargs):
        """Show three blank forms when creating, none when editing."""
        return 3 if not obj else 0
class GalleryAdmin(admin.ModelAdmin):
    """
    Administration for galleries.
    """

    list_display = (
        "author",
        "title",
        "status",
        # Having "description" here raises SystemCheckError (admin.E108).
        # We need to remove description from list_display for Django 2.1-2.2
        # See https://code.djangoproject.com/ticket/30543
        # "description",
        "shot_date",
        "modified",
        "photo_count",
    )
    list_display_links = ("title",)
    list_editable = ("status",)
    list_filter = ("status",)
    date_hierarchy = "shot_date"
    prepopulated_fields = {"slug": ("title",)}
    inlines = [PhotoInline]

    def photo_count(self, obj):
        # Annotated in get_queryset() below; one query for the whole list.
        return obj.photo_count

    photo_count.short_description = _("Photo count")

    def get_queryset(self, request):
        """
        Add number of photos to each gallery.
        """
        qs = super().get_queryset(request)
        return qs.annotate(photo_count=Count("photos"))

    def save_model(self, request, obj, form, change):
        """
        Set currently authenticated user as the author of the gallery.
        """
        obj.author = request.user
        obj.save()

    def save_formset(self, request, form, formset, change):
        """
        For each photo set its author to the currently authenticated user.
        """
        instances = formset.save(commit=False)
        # BUG FIX: with commit=False, formset.save() does NOT delete
        # objects marked for deletion — they must be removed explicitly,
        # otherwise inline deletions are silently ignored.
        for deleted in formset.deleted_objects:
            deleted.delete()
        for instance in instances:
            if isinstance(instance, Photo):
                instance.author = request.user
                instance.save()
        # Required after save(commit=False) so m2m relations are persisted.
        formset.save_m2m()
# Photos are managed inline on the gallery page, so only Gallery is
# registered directly.
admin.site.register(Gallery, GalleryAdmin)
|
class Caesar:
    """
    Summary:
        Caesar-style text encryption over a fixed 91-character alphabet.

    Attributes:
        crypto()
    """

    def __init__(self, msg: str, key: int, mode: bool):
        """
        The constructor for Caesar class.

        :param msg: The message to be encrypted or decrypted.
        :param key: Encryption key (must be same for encrypting and decrypting).
        :param mode: If True was used while encrypting then False must be passed while decrypting.
        """
        self.msg = msg
        self.key = key
        self.mode = mode

    def crypto(self):
        """
        Encrypt or decrypt the message (direction chosen by `mode`).

        Characters not present in the cipher alphabet are silently dropped.

        :return: Encrypted or Decrypted message (based on mode).
        :raises ValueError: when msg, key or mode has the wrong type.
        """
        # Exact type checks are deliberate: e.g. a bool key must be
        # rejected even though bool is a subclass of int.
        if type(self.mode) is not bool:
            # BUG FIX: the message previously read "True of False".
            raise ValueError(f"'mode' must be a 'boolean' value (True or False) and not {str(type(self.mode))[1:-1]}")
        if type(self.msg) is not str:
            raise ValueError(f"'msg' must be a 'string' value and not {str(type(self.msg))[1:-1]}")
        if type(self.key) is not int:
            raise ValueError(f"'key' must be an 'integer' value and not {str(type(self.key))[1:-1]}")

        # BUG FIX: "\d" is an invalid escape sequence (SyntaxWarning on
        # modern Python); "\\d" produces the identical two characters.
        base = "V[3@wof9<A>z?1JH)aXqnm=6G*NBp]{#xR!; |gTy.E$DC}5j4&\\dIsWF^ie/u7~lvZh_PS82kYcKULr%(0:MtOb-+Q"
        # Encrypting shifts forward, decrypting shifts backward.
        shift = self.key if self.mode else -self.key
        return "".join(
            base[(base.find(ch) + shift) % len(base)]
            for ch in self.msg
            if ch in base
        )
|
from django.urls import path
from onlineLibrary.books.views import home, add_book, book_details, edit_book, delete_book
# CRUD routes for the books app; <int:pk> identifies the target book for
# the detail/edit/delete views.
urlpatterns = [
    path('', home, name='home'),
    path('add', add_book, name='add book'),
    path('details/<int:pk>', book_details, name='details'),
    path('edit/<int:pk>', edit_book, name='edit book'),
    path('delete/<int:pk>', delete_book, name='delete book'),
]
|
import cv2

# Read the source image from disk.
image = cv2.imread("../images/logo_platzi.png")
# Write a copy back out.
cv2.imwrite("logo_platzi2.png", image)
# Display the image in a window until any key is pressed.
cv2.imshow("Logo de la razon de media vida nuestra", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import pandas as pd
import numpy as np
import sys,os
import time
import biosppy
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy
from sliding.ecg_slider import ECGSlider
from sliding.slider import Slider
from statistic.wasserstein_distance import WassersteinDistance, WassersteinDistanceDeviation
from transform.indexed_transform import IndexedTransformer
from transform.interpolate import SplineInterpolate
from transform.pca import PCATransformer
from transform.scale import ScaleTransform
from transform.series_to_curve import CurveProjection, IndicesWindow
from transform.transformer import SequentialTransformer
from transform.triangle_pattern import TrianglePattern
from func.Functions import std_mean_change
from tqdm import tqdm
#here additional functions located
dist = WassersteinDistance()
def wasserstein_computation(smooth_data,size,p1,p2,n_repeats,periods=10,padding=10):
    """Compute windowed Wasserstein statistics over an ECG signal.

    For every sliding window the series is projected onto a curve, the
    curve is split in half, and the Wasserstein distance between the two
    halves is measured. The deviation statistic repeats the measurement
    `n_repeats` times with random per-sample weights and keeps the median.

    Args:
        smooth_data: 1-D ECG samples (assumed already smoothed -- TODO confirm).
        size: window size (in indices) for the curve projection.
        p1, p2: order parameters for the distance / deviation statistics.
        n_repeats: number of random-weight repetitions per window.
        periods, padding: forwarded to ECGSlider.

    Returns:
        (was, was_deviation_median, was_index, curves): per-window distance,
        median weighted deviation, center index of each window, and the
        projected curves.
    """
    slider = ECGSlider(smooth_data, periods,padding).iterator()
    # Distances are rebuilt with the requested orders; the module-level
    # `dist` is shadowed deliberately.
    dist = WassersteinDistance(p1)
    dist_dev = WassersteinDistanceDeviation(p2)
    projection_step=1
    curve_transform = SequentialTransformer(
        CurveProjection(
            window=IndicesWindow.range(size=size, step=2),
            step=projection_step
        ),
        PCATransformer(10)  # keep 10 principal components
    )
    was=[]
    was_deviation_median=[]
    was_index=[]
    curves=[]
    for index, window_data in tqdm(slider):
        was_deviation_wind = []
        window_curve = curve_transform(window_data)
        curves.append(window_curve)
        # Compare the first half of the projected curve with the second.
        h = len(window_curve) // 2
        h2 = len(window_curve)
        was_i = dist(window_curve[0:h], window_curve[h:h2])
        # Attribute the statistic to the center of the window.
        was_index_i = (index[0] + index[-1]) // 2
        for i in range(n_repeats):
            # Random weights, piecewise-constant over blocks of 20 samples.
            rand = np.random.normal(1.0, 1.0, len(smooth_data) // 20)
            weights = np.asarray([rand[i//20] for i in range(len(smooth_data))])
            w_i = weights[index]
            was_dev_i = dist_dev(window_curve[0:h], window_curve[h:h2], w_i[0:h], w_i[h:h2])
            # Clamp tiny negative values caused by numerical noise.
            was_deviation_wind.append(max(0, was_dev_i))
        was.append(max(0,was_i))
        was_deviation_median.append(np.median(was_deviation_wind))
        # NOTE(review): np.max of a scalar is a no-op -- was_index_i is
        # already a single value.
        was_index.append(np.max(was_index_i))
    return was,was_deviation_median,was_index,curves
def statistic(was_index,was,was_deviation_median,sig_series):
    """Post-process windowed Wasserstein stats into threshold/peak outputs.

    Args:
        was_index: center indices of the analysis windows.
        was: per-window Wasserstein distances.
        was_deviation_median: per-window median deviations.
        sig_series: original signal; only its length is used here (to size
            the threshold line).

    Returns:
        (sep_line1, f_i_d, final_indices, tr_was, fin_ind): the threshold
        line, peak values above the threshold, signal indices of all
        triangle-pattern peaks, the peak values themselves, and the signal
        indices of the above-threshold peaks.
    """
    # Combined statistic and its 95%-quantile threshold.
    new_data=std_mean_change(was_deviation_median,was)
    line=np.quantile(new_data,0.95)
    sep_line1=[line]*len(sig_series)
    # Detect triangle-shaped local peaks in both series.
    triangle = IndexedTransformer(TrianglePattern(7), padding=1, step=1)
    tr_indices, tr_was = triangle(np.asarray(was))
    tr_indices_dev, tr_was_dev = triangle(np.asarray(new_data))
    # Map peak positions back to signal indices.
    final_indices = np.asarray(was_index)[tr_indices]
    f_i_d=np.array([])
    ind=np.array([])
    # Keep only peaks above the threshold. np.append(x, arr) PREPENDS x,
    # so both arrays are accumulated in reverse order here.
    for i,j in enumerate(tr_was):
        #if max(tr_was)>=min(tr_was):
        if j>line:
            f_i_d=np.append(j,f_i_d)
            ind=np.append(i,ind)
    # `ind` is in descending order, so prepending here leaves fin_ind in
    # ascending order.
    fin_ind=np.array([])
    for i in ind:
        fin_ind=np.append(final_indices[int(i)],fin_ind)
    # Restore ascending order of the kept peak values.
    f_i_d =f_i_d[::-1]
    return sep_line1,f_i_d,final_indices,tr_was,fin_ind
import numpy as np
from typing import Tuple
from .constants import (
DEFAULT_MAX_ITER,
DEFAULT_CONC_PARAM,
DEFAULT_SPLITSIZE,
DEFAULT_SAMPLE_SIZE,
DEFAULT_START_FRAME,
)
class StateArrayParameters:
    """ Struct encapsulating settings for a complete state array analysis.

    init
    ----
        pixel_size_um : size of camera pixels in microns
        frame_interval : time between frames in seconds
        focal_depth : focal depth in microns
        splitsize : maximum number of jumps to tolerate per trajectory
            before splitting into smaller trajectories
        sample_size : maximum number of trajectories to consider when
            running state arrays. If exceeded, we subsample.
        start_frame : disregard all jumps observed before this frame
        max_iter : maximum number of iterations of state array inference
            to run
        conc_param : concentration parameter for prior distribution over
            state occupations; number of pseudocounts per element
            in the state array
        progress_bar : show a progress bar, where relevant
        num_workers : number of parallel processes to use
    """
    def __init__(self, pixel_size_um: float, frame_interval: float,
        focal_depth: float=np.inf, splitsize: int=DEFAULT_SPLITSIZE,
        sample_size: int=DEFAULT_SAMPLE_SIZE, start_frame: int=DEFAULT_START_FRAME,
        max_iter: int=DEFAULT_MAX_ITER, conc_param: float=DEFAULT_CONC_PARAM,
        progress_bar: bool=False, num_workers: int=1, **kwargs):
        # Extra keyword arguments are accepted and silently ignored so that
        # larger settings dicts can be splatted in.
        self.pixel_size_um = pixel_size_um
        self.frame_interval = frame_interval
        self.focal_depth = focal_depth
        self.splitsize = splitsize
        self.sample_size = sample_size
        self.start_frame = start_frame
        self.max_iter = max_iter
        self.conc_param = conc_param
        self.progress_bar = progress_bar
        self.num_workers = num_workers

    @property
    def parameters(self) -> Tuple[str]:
        """Names of the attributes that define this parameter set."""
        return ("pixel_size_um", "frame_interval", "focal_depth", "splitsize",
            "sample_size", "start_frame", "max_iter", "conc_param")

    @property
    def units(self) -> dict:
        """Human-readable unit for each parameter (for display)."""
        return dict(pixel_size_um="µm", frame_interval="sec", focal_depth="µm",
            splitsize="jumps", sample_size="tracks", start_frame="frames",
            max_iter="iterations", conc_param="pseudocounts per state")

    def __eq__(self, other) -> bool:
        """ Test for equality of two StateArrayParameters objects """
        # Robustness fix: comparing against an unrelated type previously
        # raised AttributeError; return NotImplemented so Python can fall
        # back to the other operand's __eq__ (and ultimately to identity).
        if not isinstance(other, StateArrayParameters):
            return NotImplemented
        return all(map(lambda a: getattr(self, a) == getattr(other, a), self.parameters))

    def __repr__(self) -> str:
        """ String representation of this StateArrayParameters object """
        return "StateArrayParameters:\n  {}".format("\n  ".join([
            f"{a}:\t{getattr(self, a)}" for a in self.parameters
        ]))
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def forth_index(request):
    """Tutorial view: renders forth_home.html with example context data.

    The commented-out blocks below are earlier tutorial examples kept for
    reference; only "example seven" is active.
    """
    # Example 1: display a basic string on the page.
    # string = u'我交朋友不在乎你们有没有钱,反正都没我有钱,大家好,我是王思聪了解一下'
    # return render(request,'app_forth/forth_home.html',{'string':string})

    # Example 2: basic for-loop and rendering of list contents.
    # tutorialList = [u'HTML是么',u"CSS再说一次", u"jQuery给我", u"Python试试", "Django看"]
    # return render(request,'app_forth/forth_home.html',{'tutorialList':tutorialList})

    # Example 3: display dictionary contents.
    # info_dict = {'site':u'自学开始了','content':u'这是我懂啦攻击力科技考虑到撒娇管理会计'}
    # return render(request,'app_forth/forth_home.html',{'info_dict':info_dict})

    # Example 4: conditionals and detailed for-loop handling in the template.
    # list = map(str,range(100))
    # return render(request,'app_forth/forth_home.html',{'list':list})

    # Example 6: logical operations in the template.
    # var = 93
    # num = 13
    # return render(request,'app_forth/forth_home.html',{'var':var,'num':num})

    # Example 7 (active): access the current URL, current user, etc. in the
    # template.
    tt = 'jjj'
    return render(request,'app_forth/forth_home.html',{'ttt':tt})
# Example 5: resolving this view's URL from within a template.
def add(request, a, b):
    """Return the sum of two URL-captured integers as plain text."""
    total = int(a) + int(b)
    return HttpResponse(str(total))
|
def trailingZero(n):
    """Count trailing zeros of n! via Legendre's formula: sum of n // 5**k.

    BUG FIX: the original used true division (n / i) and accumulated the
    fractional parts before truncating once at the end; e.g.
    trailingZero(124) returned int(24.8 + 4.96) == 29 instead of the
    correct 24 + 4 == 28. Floor division fixes this.
    """
    count = 0
    i = 5
    while n // i >= 1:
        count += n // i
        i *= 5
    return count


if __name__ == "__main__":
    print(trailingZero(12))
|
import socket
import selectors
selector = selectors.DefaultSelector()
def server():
    """Create the listening socket and register it with the selector."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without waiting for TIME_WAIT to expire.
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('localhost', 5000))
    listener.listen()
    selector.register(listener, selectors.EVENT_READ, accept_connection)
def accept_connection(serv_socket):
    """Accept a pending client and register it for reading."""
    conn, addr = serv_socket.accept()
    print('Connection from', addr)
    selector.register(conn, selectors.EVENT_READ, send_message)
def send_message(client_socket):
    """Reply to a client request, or tear the connection down on EOF."""
    data = client_socket.recv(4096)
    if not data:
        # Empty read means the peer closed: deregister and close our side.
        selector.unregister(client_socket)
        client_socket.close()
        return
    client_socket.send('hello world\n'.encode())
def event_loop():
    """Run the selector loop forever, dispatching ready callbacks."""
    while True:
        events = selector.select()  # (key, events)
        for key, _ in events:
            # The callback was stashed in key.data at registration time.
            callback = key.data
            callback(key.fileobj)


if __name__ == '__main__':
    server()
    event_loop()
|
# Copyright 2019 SUSE Linux GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""List access rules config."""
from keystone.common import cache
from keystone.common import driver_hints
from keystone.common import manager
from keystone.common import provider_api
import keystone.conf
CONF = keystone.conf.CONF
MEMOIZE = cache.get_memoization_decorator(group='access_rules_config')
PROVIDERS = provider_api.ProviderAPIs
class Manager(manager.Manager):
    """Manager for the access rules config backend.

    Loads the driver named by CONF.access_rules_config.driver and exposes
    it as `access_rules_config_api`.
    """

    driver_namespace = 'keystone.access_rules_config'
    _provides_api = 'access_rules_config_api'

    def __init__(self):
        super(Manager, self).__init__(CONF.access_rules_config.driver)

    def list_access_rules_config(self, service=None):
        """List access rules config.

        :param str service: filter by service type
        :returns: a list of configured access rules. Access rules are
            permission objects composing of a service, a URL path, and an
            HTTP method.
        """
        # In permissive mode every HTTP method on every path is allowed
        # for each catalog service, so the rules are synthesized here
        # instead of being read from the driver.
        if CONF.access_rules_config.permissive:
            hints = driver_hints.Hints()
            if service:
                hints.add_filter('service', service)
            rules = {}
            services = PROVIDERS.catalog_api.list_services(hints=hints)
            # NOTE(review): this re-filters by type even though a hint was
            # added above — presumably in case the driver ignored the hint;
            # confirm whether both filters are needed.
            if service:
                services = [svc for svc in services if svc['type'] == service]
            for svc in services:
                rules[svc['type']] = []
                for method in ['HEAD', 'GET', 'POST', 'PUT', 'PATCH', 'DELETE']:
                    rules[svc['type']].append({
                        "path": "**",
                        "method": method
                    })
            return rules
        return self.driver.list_access_rules_config(service)

    @MEMOIZE
    def check_access_rule(self, service, request_path, request_method):
        """Check access rule.

        :param str service: service type of rule to check
        :param str request_path: endpoint path to check
        :param str request_method: API HTTP method to check
        :returns: boolean indicating whether the rule matches one of the
            configured access rules
        """
        # Results are memoized per argument tuple; a config change requires
        # cache invalidation to take effect here.
        if CONF.access_rules_config.permissive:
            return True
        return self.driver.check_access_rule(service, request_path,
                                             request_method)
|
"""Implementation details to interact with the serialization means."""
import asyncio
import datetime
import json
import math
import dolon.db_conn as db_conn
import dolon.exceptions as exceptions
import dolon.impl.constants as constants
# Aliases.
DbConnection = db_conn.DbConnection
_PREFETCH_SIZE = 100
async def process_message(db, payload):
    """Process a tracing message, storing it to the db.

    :param db: The database object to use.
    :param payload: A dict (or JSON string) representing the message to
        store. Can be either a tracing run creation in the form of::

            msg = {
                "msg_type": "create_trace_run",
                "app_name": app_name,
                "uuid": identifier,
                "column_names": ["v1", 'v2']
            }

        or for the insertion of a tracing row::

            msg = {
                "msg_type": "row",
                "uuid": identifier,
                "row_data": [12.2, 123.1]
            }

    :raises: InvalidMessage
    """
    if not isinstance(payload, dict):
        assert isinstance(payload, str)
        msg = json.loads(payload)
    else:
        msg = payload
    # Intentionally no logging here: it slows the consumer enough to miss
    # messages.
    msg_type = msg.get('msg_type')
    try:
        if msg_type == "create_trace_run":
            identifier = msg.get('uuid')
            app_name = msg.get('app_name')
            column_names = msg.get('column_names')
            if not all([identifier, app_name, column_names]):
                raise exceptions.InvalidMessage(
                    f"Message not supported: {str(payload)}"
                )
            column_names = list(column_names)
            await _create_tracer(db, identifier, app_name, *column_names)
        elif msg_type == 'row':
            identifier = msg.get('uuid')
            row_data = msg.get('row_data')
            # Bug fix: validate row messages the same way as creation
            # messages instead of failing later with an opaque TypeError.
            if not identifier or row_data is None:
                raise exceptions.InvalidMessage(
                    f"Message not supported: {str(payload)}"
                )
            await _insert_row(db, identifier, *row_data)
        else:
            raise exceptions.InvalidMessage(
                f"Message not supported: {str(payload)}"
            )
    except exceptions.InvalidMessage:
        # Bug fix: already the right exception type; do not wrap it in
        # another InvalidMessage (which chained the message to itself).
        raise
    except Exception as ex:
        raise exceptions.InvalidMessage(
            f"Message not supported: {str(payload)}"
        ) from ex
async def get_trace_as_json(uuid):
    """Return all the tracing rows for the passed-in uuid as json.

    :param str uuid: The identifier for the trace run.

    :returns: A dict mapping every non-time field name to a list of
        [index, value] pairs (prefixed by a ["Time", field_name] header).
    :rtype: dict
    """
    csv_text = await get_trace(uuid)
    header, *rows = csv_text.split('\n')
    field_names = header.split(',')

    # One list of values per field; numeric strings become floats.
    columns = [[] for _ in field_names]
    for row in rows:
        for index, value in enumerate(row.split(',')):
            try:
                columns[index].append(float(value))
            except ValueError:
                columns[index].append(value)

    trace_as_json = {}
    for field_name, values in zip(field_names, columns):
        if field_name == 'time':
            continue
        pairs = [["Time", field_name]]
        pairs.extend([position, value] for position, value in enumerate(values))
        trace_as_json[field_name] = pairs
    return trace_as_json
async def get_trace_run_info(uuid):
    """Return descriptive info for the passed-in uuid.

    :param str uuid: The identifier for the trace run.

    :returns: Descriptive info (app_name, counter, started, duration)
        for the passed-in uuid.
    :rtype: dict
    """
    app_name = None
    counter = None
    from_time = None
    to_time = None
    # Bug fix: creation_time used to be unbound (NameError below) when the
    # uuid did not match any tracing run; default it explicitly.
    creation_time = None
    async with DbConnection() as db:
        conn_pool = db.get_conn_pool()
        async with conn_pool.acquire() as conn:
            stmt = await conn.prepare(constants.SQL_SELECT_APP_NAME)
            async with conn.transaction():
                async for record in stmt.cursor(uuid,
                                                prefetch=_PREFETCH_SIZE):
                    app_name = record['app_name']
                    creation_time = _format_datetime(record['creation_time'])
            stmt = await conn.prepare(constants.SQL_SELECT_RUN_INFO)
            async with conn.transaction():
                async for record in stmt.cursor(uuid,
                                                prefetch=_PREFETCH_SIZE):
                    counter = record['counter']
                    from_time = record['from_time']
                    to_time = record['to_time']
            if to_time is None or from_time is None:
                # There are no rows for this run.
                counter = 0
                started = creation_time
                duration = 'n/a'
            else:
                started = creation_time
                duration = _get_duration(to_time, from_time)
            return {
                'app_name': app_name,
                'counter': f'{counter:,}',
                'started': started,
                'duration': duration
            }
async def get_latest_trace(app_name):
    """Return the latest trace for the passed-in app_name.

    :param str app_name: The application whose latest run is wanted.

    :return: The csv view of the latest run, or None when the application
        has no runs at all.
    """
    async with DbConnection() as db:
        conn_pool = db.get_conn_pool()
        # Bug fix: uuid used to be unbound (NameError) when no run existed
        # for app_name; default it and bail out gracefully.
        uuid = None
        async with conn_pool.acquire() as conn:
            stmt = await conn.prepare(constants.SQL_SELECT_LATEST_RUN)
            async with conn.transaction():
                async for record in stmt.cursor(app_name,
                                                prefetch=_PREFETCH_SIZE):
                    uuid = record['uuid']
        if uuid is None:
            return None
        return await _get_trace(uuid, db)
async def get_trace(uuid):
    """Return all the tracing rows for the passed-in uuid.

    Thin convenience wrapper that opens a connection and delegates to
    ``_get_trace``.

    :param str uuid: The identifier for the trace run.

    :returns: A csv view of the tracing run.
    """
    async with DbConnection() as db:
        trace = await _get_trace(uuid, db)
    return trace
async def _create_tracer(db, identifier, app_name, *column_names):
    """Creates a new run.

    :param db: The database object to use.
    :param str identifier: The uuid of the run expressed as string.
    :param str app_name: The application that is being traced.
    :param column_names: The names of the traced columns, one per value.
    """
    conn_pool = db.get_conn_pool()
    async with conn_pool.acquire() as conn:
        await conn.execute(
            constants.SQL_INSERT_RUN,
            identifier,
            app_name,
            list(column_names))
async def _insert_row(db, uuid, *row_data):
    """Insert a single tracing row into the database.

    :param db: The database object to use.
    :param str uuid: The identifier for the trace run.
    :param row_data: The values for each data point of the row.
    """
    pool = db.get_conn_pool()
    async with pool.acquire() as connection:
        await connection.execute(
            constants.SQL_INSERT_ROW, uuid, list(row_data))
async def get_trace_run_name(uuid):
    """Return the name of the trace run for the passed-in uuid.

    :param str uuid: The identifier for the trace run.

    :returns: A csv filename built from the app name and creation time,
        or None when the uuid does not match any run.
    :rtype: str

    :raises ValueError: If uuid is not a well-formed UUID.
    """
    import uuid as uuid_module
    # Security fix: uuid used to be interpolated unchecked into the SQL
    # statement; validating it as a real UUID first means the f-string
    # below cannot be used for SQL injection.
    uuid = str(uuid_module.UUID(uuid))
    sql = f"select app_name, creation_time from " \
          f"tracing_run where uuid='{uuid}';"
    async with DbConnection() as db:
        async for value in db.execute_query(sql):
            app_name = str(value["app_name"]).replace(" ", "")
            # Second precision; equivalent to the old
            # strftime('...%S.%f')[:-7] trick without the slicing.
            t = value["creation_time"].strftime('%Y-%m-%d-%H:%M:%S')
            return f"{app_name}-{t}.csv"
    # Bug fix: previously returned an unbound csv_filename (NameError)
    # when no row matched the uuid.
    return None
async def _get_trace(uuid, db):
    """Returns all the tracing rows for the passed in uuid.

    :param str uuid: The identifier for the trace run.
    :param db: The database object to use.

    :returns: A csv view of the tracing run as one newline-joined string;
        the first line holds the column names.
    :rtype: str
    """
    conn_pool = db.get_conn_pool()
    assert uuid
    assert conn_pool
    col_names = []
    async with conn_pool.acquire() as conn:
        # First pass: discover the column names recorded for this run.
        stmt = await conn.prepare(constants.SQL_SELECT_COL_NAMES)
        async with conn.transaction():
            async for record in stmt.cursor(uuid, prefetch=_PREFETCH_SIZE):
                col_names.append(record['col_name'])
        # Map each element of the row_data array to its column name.
        # NOTE(review): col_name is interpolated into the SQL unquoted;
        # assumes column names stored in this database are trusted.
        clauses = [
            f"row_data[{index + 1}] as {col_name}"
            for index, col_name in enumerate(col_names)
        ]
        sql = "select to_char(date_time, 'YYYY-MM-DD HH24:MI:SS') as timestamp , " + \
            ','.join(clauses) + \
            " from tracing_row where uuid=$1 order by 1"
        stmt = await conn.prepare(sql)
        # First csv line: header with the column names.
        lines = ['time,' + ','.join(col_names)]
        async with conn.transaction():
            async for record in stmt.cursor(uuid, prefetch=_PREFETCH_SIZE):
                values = [str(v) for v in list(dict(record).values())]
                lines.append(','.join(values))
        return '\n'.join(lines)
async def get_all_tracers():
    """Return every tracer with its runs as a list of dicts."""
    async with DbConnection() as db:
        names = await _get_tracer_names(db)
        return [
            {
                "tracer_name": name,
                'runs': await _get_tracer_runs(db, name)
            }
            for name in names
        ]
async def _get_tracer_names(db):
    """Return the application names of all tracer runs."""
    names = []
    pool = db.get_conn_pool()
    async with pool.acquire() as connection:
        statement = await connection.prepare(
            constants.SQL_SELECT_ALL_TRACER_RUNS)
        async with connection.transaction():
            async for row in statement.cursor(prefetch=_PREFETCH_SIZE):
                names.append(row['app_name'])
    return names
async def _get_tracer_runs(db, app_name):
    """Return the runs (creation time and uuid) for an application."""
    runs = []
    pool = db.get_conn_pool()
    async with pool.acquire() as connection:
        statement = await connection.prepare(constants.SQL_SELECT_RUNS)
        async with connection.transaction():
            async for row in statement.cursor(app_name,
                                              prefetch=_PREFETCH_SIZE):
                runs.append({
                    'creation_time': _format_datetime(row['creation_time']),
                    'uuid': row['uuid'],
                })
    return runs
def _get_duration(start_time, end_time):
    """Return the duration between two timestamps as a human string.

    The order of the arguments does not matter (absolute difference).

    :param datetime.datetime start_time: Start time.
    :param datetime.datetime end_time: End time.

    :return: The duration as a string, e.g. ``'2 hours, 3 min, 4 secs'``.
    :rtype: str
    """
    diff = int(math.fabs(int((start_time - end_time).total_seconds())))
    # divmod replaces the repeated subtract-and-divide bookkeeping.
    days, diff = divmod(diff, constants.DAY_IN_SECONDS)
    hours, diff = divmod(diff, constants.HOUR_IN_SECONDS)
    minutes, seconds = divmod(diff, constants.MINUTE_IN_SECONDS)
    tokens = []
    if days:
        tokens.append(f"{days} days")
    if hours:
        tokens.append(f"{hours} hours")
    if minutes:
        tokens.append(f"{minutes} min")
    if seconds:
        tokens.append(f"{seconds} secs")
    # Bug fix: a zero-length duration used to produce an empty string.
    return ', '.join(tokens) if tokens else '0 secs'
def _format_datetime(date_to_format):
"""Formats the passed in date time.
:param: datetime.datetime date_to_format: The date to format.
:return: The formatted date.
:rtype: str
"""
current_year = datetime.datetime.now().year
if date_to_format.year == current_year:
return date_to_format.strftime("%a, %b %d, %H:%M")
else:
return date_to_format.strftime("%a, %b %d %Y, %H:%M")
if __name__ == '__main__':
    import os
    # Demo entry point: point the driver at a local postgres instance and
    # resolve a sample run's csv filename.
    _CONN_STR = 'postgresql://postgres:postgres123@localhost:5432/mnemic'
    os.environ["POSTGRES_CONN_STR"] = _CONN_STR
    # asyncio.run replaces the deprecated get_event_loop/run_until_complete
    # pattern and guarantees the loop is closed afterwards.
    asyncio.run(get_trace_run_name('1e7c356f-16b3-485c-a4f0-06872919285e'))
|
# Read the puzzle input: one instruction per line in "op arg" form.
with open("day08.in") as f:
    INSTRUCTIONS = [line.strip() for line in f.readlines()]
def execute(instrs):
    """Run the handheld-console program until it loops or terminates.

    :param instrs: list of "op arg" strings (ops: nop, acc, jmp).
    :return: (accumulator, True) if the program ran off the end normally,
        (accumulator, False) if an instruction was about to run twice,
        or None if a jump left the valid program range.
    """
    pc = 0
    acc = 0
    visited = set()
    # Bug fix: also require pc >= 0 -- a negative jump used to index the
    # instruction list from the end instead of aborting.
    while pc not in visited and 0 <= pc < len(instrs):
        visited.add(pc)
        op, arg = instrs[pc].split()
        if op == "nop":
            pc += 1
        elif op == "acc":
            acc += int(arg)
            pc += 1
        elif op == "jmp":
            pc += int(arg)
    if pc in visited:
        return (acc, False)
    elif pc == len(instrs):
        return (acc, True)
    else:
        return None
# Part 1: accumulator value just before the first repeated instruction.
print(execute(INSTRUCTIONS))

# Part 2: flip exactly one nop<->jmp and find the program that terminates.
for i in range(len(INSTRUCTIONS)):
    instr = INSTRUCTIONS[i]
    op, arg = instr.split()
    if op == "nop":
        INSTRUCTIONS[i] = "jmp " + arg
    elif op == "jmp":
        INSTRUCTIONS[i] = "nop " + arg
    else:
        continue
    result = execute(INSTRUCTIONS)
    INSTRUCTIONS[i] = instr  # always restore before the next attempt
    # Bug fix: execute() may return None (jump out of range); unpacking
    # it directly used to raise TypeError and abort the search.
    if result is not None:
        acc, completed = result
        if completed:
            print(acc, completed)
            break
|
import os, sys, torch
import os.path as osp
import numpy as np
import torchvision.datasets as dset
import torchvision.transforms as transforms
from copy import deepcopy
from PIL import Image
import random
from config_utils import load_config
import pdb
import torch, copy, random
import torch.utils.data as data
# Number of classes for each dataset name supported by get_datasets below.
Dataset2Class = {'cifar10' : 10,
                 'cifar100': 100,
                 'imagenet-1k-s':1000,
                 'imagenet-1k' : 1000}
class CUTOUT(object):
  """Cutout augmentation: zero out one random square patch of a tensor.

  The patch is centered at a uniformly random pixel and clipped at the
  image borders, so the effective masked area can be smaller than
  length x length. The input tensor is modified in place.
  """

  def __init__(self, length):
    # Side length of the (un-clipped) square patch.
    self.length = length

  def __repr__(self):
    return ('{name}(length={length})'.format(name=self.__class__.__name__, **self.__dict__))

  def __call__(self, img):
    height, width = img.size(1), img.size(2)
    # Pick the patch center, then clip the square to the image bounds.
    center_y = np.random.randint(height)
    center_x = np.random.randint(width)
    half = self.length // 2
    top = np.clip(center_y - half, 0, height)
    bottom = np.clip(center_y + half, 0, height)
    left = np.clip(center_x - half, 0, width)
    right = np.clip(center_x + half, 0, width)

    mask = np.ones((height, width), np.float32)
    mask[top:bottom, left:right] = 0.
    img *= torch.from_numpy(mask).expand_as(img)
    return img
# PCA eigenvalues/eigenvectors of ImageNet RGB pixel values, used by the
# Lighting transform below (AlexNet-style "fancy PCA" color augmentation).
imagenet_pca = {
  'eigval': np.asarray([0.2175, 0.0188, 0.0045]),
  'eigvec': np.asarray([
    [-0.5675, 0.7192, 0.4009],
    [-0.5808, -0.0045, -0.8140],
    [-0.5836, -0.6948, 0.4203],
  ])
}
class Lighting(object):
  """AlexNet-style PCA ('fancy PCA') lighting-noise augmentation."""

  def __init__(self, alphastd,
               eigval=imagenet_pca['eigval'],
               eigvec=imagenet_pca['eigvec']):
    # Standard deviation of the per-channel PCA noise; 0 disables the op.
    self.alphastd = alphastd
    assert eigval.shape == (3,)
    assert eigvec.shape == (3, 3)
    self.eigval = eigval
    self.eigvec = eigvec

  def __call__(self, img):
    if self.alphastd == 0.:
      return img
    # Sample one noise coefficient per principal component.
    rnd = np.random.randn(3) * self.alphastd
    rnd = rnd.astype('float32')
    v = rnd
    old_dtype = np.asarray(img).dtype
    v = v * self.eigval
    v = v.reshape((3, 1))
    # Per-channel RGB offset: eigvec @ (alpha * eigval).
    inc = np.dot(self.eigvec, v).reshape((3,))
    img = np.add(img, inc)
    if old_dtype == np.uint8:
      img = np.clip(img, 0, 255)
      img = Image.fromarray(img.astype(old_dtype), 'RGB')
    # NOTE(review): for non-uint8 inputs this returns an ndarray rather
    # than a PIL Image -- confirm downstream transforms accept that.
    return img

  def __repr__(self):
    return self.__class__.__name__ + '()'
class SearchDataset(data.Dataset):
def __init__(self, name, data, train_split, valid_split, check=True):
self.datasetname = name
if isinstance(data, (list, tuple)): # new type of SearchDataset
assert len(data) == 2, 'invalid length: {:}'.format( len(data) )
self.train_data = data[0]
self.valid_data = data[1]
self.train_split = train_split.copy()
self.valid_split = valid_split.copy()
self.mode_str = 'V2' # new mode
else:
self.mode_str = 'V1' # old mode
self.data = data
self.train_split = train_split.copy()
self.valid_split = valid_split.copy()
if check:
intersection = set(train_split).intersection(set(valid_split))
assert len(intersection) == 0, 'the splitted train and validation sets should have no intersection'
self.length = len(self.train_split)
def __repr__(self):
return ('{name}(name={datasetname}, train={tr_L}, valid={val_L}, version={ver})'.format(name=self.__class__.__name__, datasetname=self.datasetname, tr_L=len(self.train_split), val_L=len(self.valid_split), ver=self.mode_str))
def __len__(self):
return self.length
def __getitem__(self, index):
assert index >= 0 and index < self.length, 'invalid index = {:}'.format(index)
train_index = self.train_split[index]
valid_index = random.choice( self.valid_split )
if self.mode_str == 'V1':
train_image, train_label = self.data[train_index]
valid_image, valid_label = self.data[valid_index]
elif self.mode_str == 'V2':
train_image, train_label = self.train_data[train_index]
valid_image, valid_label = self.valid_data[valid_index]
else: raise ValueError('invalid mode : {:}'.format(self.mode_str))
return train_image, train_label, valid_image, valid_label
def get_datasets(name, root, cutout):
  """Create the train/test datasets plus input shape and class count.

  :param str name: dataset name; supported end-to-end: 'cifar10',
      'cifar100', 'imagenet-1k', 'imagenet-1k-s'.
  :param str root: root directory of the dataset files.
  :param int cutout: cutout patch length; values <= 0 disable cutout.
  :return: (train_data, test_data, xshape, class_num), where xshape is
      the (1, C, H, W) shape of a single input.
  :raises TypeError: if the dataset name is not supported.
  """
  # Per-channel mean/std used for normalization.
  if name == 'cifar10':
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
  elif name == 'cifar100':
    mean = [x / 255 for x in [129.3, 124.1, 112.4]]
    std = [x / 255 for x in [68.2, 65.4, 70.4]]
  elif name.startswith('imagenet-1k'):
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
  elif name.startswith('ImageNet16'):
    # NOTE(review): ImageNet16 gets transforms below but no loader in the
    # construction step, so such names still fail there -- confirm intent.
    mean = [x / 255 for x in [122.68, 116.66, 104.01]]
    std = [x / 255 for x in [63.22, 61.26 , 65.09]]
  else:
    # Typo fix: the error message said "Unknow".
    raise TypeError("Unknown dataset : {:}".format(name))

  # Data augmentation pipelines.
  if name == 'cifar10' or name == 'cifar100':
    lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4),
             transforms.ToTensor(), transforms.Normalize(mean, std)]
    if cutout > 0:
      lists += [CUTOUT(cutout)]
    train_transform = transforms.Compose(lists)
    test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
    xshape = (1, 3, 32, 32)
  elif name.startswith('ImageNet16'):
    lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(16, padding=2), transforms.ToTensor(), transforms.Normalize(mean, std)]
    if cutout > 0:
      lists += [CUTOUT(cutout)]
    train_transform = transforms.Compose(lists)
    test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
    xshape = (1, 3, 16, 16)
  elif name == 'tiered':
    # NOTE(review): unreachable -- 'tiered' is rejected by the mean/std
    # dispatch above before this branch can run.
    lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(80, padding=4), transforms.ToTensor(), transforms.Normalize(mean, std)]
    if cutout > 0:
      lists += [CUTOUT(cutout)]
    train_transform = transforms.Compose(lists)
    test_transform = transforms.Compose([transforms.CenterCrop(80), transforms.ToTensor(), transforms.Normalize(mean, std)])
    xshape = (1, 3, 32, 32)
  elif name.startswith('imagenet-1k'):
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if name == 'imagenet-1k':
      # Full augmentation: color jitter plus PCA lighting noise.
      xlists = [transforms.RandomResizedCrop(224)]
      xlists.append(
        transforms.ColorJitter(
          brightness=0.4,
          contrast=0.4,
          saturation=0.4,
          hue=0.2))
      xlists.append(Lighting(0.1))
    elif name == 'imagenet-1k-s':
      # "Simple" variant: only a less aggressive random resized crop.
      xlists = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0))]
    else:
      raise ValueError('invalid name : {:}'.format(name))
    xlists.append(transforms.RandomHorizontalFlip(p=0.5))
    xlists.append(transforms.ToTensor())
    xlists.append(normalize)
    train_transform = transforms.Compose(xlists)
    test_transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])
    xshape = (1, 3, 224, 224)
  else:
    raise TypeError("Unknown dataset : {:}".format(name))

  # Dataset construction (downloads CIFAR if missing).
  if name == 'cifar10':
    train_data = dset.CIFAR10(root, train=True, transform=train_transform, download=True)
    test_data = dset.CIFAR10(root, train=False, transform=test_transform, download=True)
    assert len(train_data) == 50000 and len(test_data) == 10000
  elif name == 'cifar100':
    train_data = dset.CIFAR100(root, train=True, transform=train_transform, download=True)
    test_data = dset.CIFAR100(root, train=False, transform=test_transform, download=True)
    assert len(train_data) == 50000 and len(test_data) == 10000
  elif name.startswith('imagenet-1k'):
    train_data = dset.ImageFolder(osp.join(root, 'train'), train_transform)
    test_data = dset.ImageFolder(osp.join(root, 'val'), test_transform)
    assert len(train_data) == 1281167 and len(test_data) == 50000, 'invalid number of images : {:} & {:} vs {:} & {:}'.format(len(train_data), len(test_data), 1281167, 50000)
  else:
    raise TypeError("Unknown dataset : {:}".format(name))
  class_num = Dataset2Class[name]
  return train_data, test_data, xshape, class_num
def get_nas_search_loaders(train_data, valid_data, dataset, config_root, batch_size, workers):
  """Build the (search, train, valid) data loaders used by NAS search.

  :param train_data: training dataset.
  :param valid_data: validation/test dataset.
  :param str dataset: 'cifar10', 'cifar100' or 'ImageNet16-120'.
  :param str config_root: directory containing the split config files.
  :param batch_size: an int, or a (train_batch, test_batch) pair.
  :param int workers: number of loader worker processes.
  :return: (search_loader, train_loader, valid_loader) tuple.
  :raises ValueError: for any other dataset name.
  """
  if isinstance(batch_size, (list,tuple)):
    batch, test_batch = batch_size
  else:
    batch, test_batch = batch_size, batch_size
  if dataset == 'cifar10':
    cifar_split = load_config('{:}/cifar-split.txt'.format(config_root), None, None)
    train_split, valid_split = cifar_split.train, cifar_split.valid # search over the proposed training and validation set
    # A copy of the training data that applies the *validation* transform.
    xvalid_data = deepcopy(train_data)
    if hasattr(xvalid_data, 'transforms'): # to avoid a print issue
      xvalid_data.transforms = valid_data.transform
    xvalid_data.transform = deepcopy( valid_data.transform )
    search_data = SearchDataset(dataset, train_data, train_split, valid_split)
    # data loader
    search_loader = torch.utils.data.DataLoader(search_data, batch_size=batch, shuffle=True , num_workers=workers, pin_memory=True)
    train_loader = torch.utils.data.DataLoader(train_data , batch_size=batch, sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split), num_workers=workers, pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(xvalid_data, batch_size=test_batch, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split), num_workers=workers, pin_memory=True)
  elif dataset == 'cifar100':
    # Search pairs the full training set with the xvalid half of the test
    # split (V2 SearchDataset layout).
    cifar100_test_split = load_config('{:}/cifar100-test-split.txt'.format(config_root), None, None)
    search_train_data = train_data
    search_valid_data = deepcopy(valid_data) ; search_valid_data.transform = train_data.transform
    search_data = SearchDataset(dataset, [search_train_data,search_valid_data], list(range(len(search_train_data))), cifar100_test_split.xvalid)
    search_loader = torch.utils.data.DataLoader(search_data, batch_size=batch, shuffle=True , num_workers=workers, pin_memory=True)
    train_loader = torch.utils.data.DataLoader(train_data , batch_size=batch, shuffle=True , num_workers=workers, pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(valid_data , batch_size=test_batch, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_test_split.xvalid), num_workers=workers, pin_memory=True)
  elif dataset == 'ImageNet16-120':
    # Same V2 layout as cifar100, with the ImageNet16-120 test split.
    imagenet_test_split = load_config('{:}/imagenet-16-120-test-split.txt'.format(config_root), None, None)
    search_train_data = train_data
    search_valid_data = deepcopy(valid_data) ; search_valid_data.transform = train_data.transform
    search_data = SearchDataset(dataset, [search_train_data,search_valid_data], list(range(len(search_train_data))), imagenet_test_split.xvalid)
    search_loader = torch.utils.data.DataLoader(search_data, batch_size=batch, shuffle=True , num_workers=workers, pin_memory=True)
    train_loader = torch.utils.data.DataLoader(train_data , batch_size=batch, shuffle=True , num_workers=workers, pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(valid_data , batch_size=test_batch, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_test_split.xvalid), num_workers=workers, pin_memory=True)
  else:
    raise ValueError('invalid dataset : {:}'.format(dataset))
  return search_loader, train_loader, valid_loader
|
# Generated by Django 3.1.7 on 2021-04-04 11:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the DerslerModel table
    # ("DerslerTablo") holding a course entry with five short summaries
    # (kısa_özet1..5) and a title/detail text for each.

    dependencies = [
        ('study', '0010_auto_20210404_1402'),
    ]

    operations = [
        migrations.CreateModel(
            name='DerslerModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('baslik', models.CharField(max_length=20)),
                ('resim', models.ImageField(upload_to='ders_resimleri')),
                ('icerik', models.TextField(max_length=150)),
                ('ücret', models.CharField(max_length=20)),
                ('tür', models.CharField(max_length=30)),
                ('kısa_özet1', models.CharField(max_length=50)),
                ('kısa_özet2', models.CharField(max_length=50)),
                ('kısa_özet3', models.CharField(max_length=50)),
                ('kısa_özet4', models.CharField(max_length=50)),
                ('kısa_özet5', models.CharField(max_length=50)),
                ('kısa_özet1_tanıtım', models.TextField()),
                ('kısa_özet1_tanıtım_baslik', models.TextField()),
                ('kısa_özet2_tanıtım', models.TextField()),
                ('kısa_özet2_tanıtım_baslik', models.TextField()),
                ('kısa_özet3_tanıtım', models.TextField()),
                ('kısa_özet3_tanıtım_baslik', models.TextField()),
                ('kısa_özet4_tanıtım', models.TextField()),
                ('kısa_özet4_tanıtım_baslik', models.TextField()),
                ('kısa_özet5_tanıtım', models.TextField()),
                ('kısa_özet5_tanıtım_baslik', models.TextField()),
            ],
            options={
                'verbose_name': 'Dersler',
                'verbose_name_plural': 'Ders-Ekle',
                'db_table': 'DerslerTablo',
            },
        ),
    ]
|
# -*- coding: utf-8 -*-
"""The Symantec AV log file event formatter."""
from __future__ import unicode_literals
from plaso.formatters import interface
from plaso.formatters import manager
from plaso.lib import errors
class SymantecAVFormatter(interface.ConditionalEventFormatter):
  """Formatter for a Symantec AV log file event."""

  DATA_TYPE = 'av:symantec:scanlog'

  # Maps the numeric event identifier (as a string) to its Symantec name.
  EVENT_NAMES = {
      '1': 'GL_EVENT_IS_ALERT',
      '2': 'GL_EVENT_SCAN_STOP',
      '3': 'GL_EVENT_SCAN_START',
      '4': 'GL_EVENT_PATTERN_UPDATE',
      '5': 'GL_EVENT_INFECTION',
      '6': 'GL_EVENT_FILE_NOT_OPEN',
      '7': 'GL_EVENT_LOAD_PATTERN',
      '8': 'GL_STD_MESSAGE_INFO',
      '9': 'GL_STD_MESSAGE_ERROR',
      '10': 'GL_EVENT_CHECKSUM',
      '11': 'GL_EVENT_TRAP',
      '12': 'GL_EVENT_CONFIG_CHANGE',
      '13': 'GL_EVENT_SHUTDOWN',
      '14': 'GL_EVENT_STARTUP',
      '16': 'GL_EVENT_PATTERN_DOWNLOAD',
      '17': 'GL_EVENT_TOO_MANY_VIRUSES',
      '18': 'GL_EVENT_FWD_TO_QSERVER',
      '19': 'GL_EVENT_SCANDLVR',
      '20': 'GL_EVENT_BACKUP',
      '21': 'GL_EVENT_SCAN_ABORT',
      '22': 'GL_EVENT_RTS_LOAD_ERROR',
      '23': 'GL_EVENT_RTS_LOAD',
      '24': 'GL_EVENT_RTS_UNLOAD',
      '25': 'GL_EVENT_REMOVE_CLIENT',
      '26': 'GL_EVENT_SCAN_DELAYED',
      '27': 'GL_EVENT_SCAN_RESTART',
      '28': 'GL_EVENT_ADD_SAVROAMCLIENT_TOSERVER',
      '29': 'GL_EVENT_REMOVE_SAVROAMCLIENT_FROMSERVER',
      '30': 'GL_EVENT_LICENSE_WARNING',
      '31': 'GL_EVENT_LICENSE_ERROR',
      '32': 'GL_EVENT_LICENSE_GRACE',
      '33': 'GL_EVENT_UNAUTHORIZED_COMM',
      '34': 'GL_EVENT_LOG_FWD_THRD_ERR',
      '35': 'GL_EVENT_LICENSE_INSTALLED',
      '36': 'GL_EVENT_LICENSE_ALLOCATED',
      '37': 'GL_EVENT_LICENSE_OK',
      '38': 'GL_EVENT_LICENSE_DEALLOCATED',
      '39': 'GL_EVENT_BAD_DEFS_ROLLBACK',
      '40': 'GL_EVENT_BAD_DEFS_UNPROTECTED',
      '41': 'GL_EVENT_SAV_PROVIDER_PARSING_ERROR',
      '42': 'GL_EVENT_RTS_ERROR',
      '43': 'GL_EVENT_COMPLIANCE_FAIL',
      '44': 'GL_EVENT_COMPLIANCE_SUCCESS',
      '45': 'GL_EVENT_SECURITY_SYMPROTECT_POLICYVIOLATION',
      '46': 'GL_EVENT_ANOMALY_START',
      '47': 'GL_EVENT_DETECTION_ACTION_TAKEN',
      '48': 'GL_EVENT_REMEDIATION_ACTION_PENDING',
      '49': 'GL_EVENT_REMEDIATION_ACTION_FAILED',
      '50': 'GL_EVENT_REMEDIATION_ACTION_SUCCESSFUL',
      '51': 'GL_EVENT_ANOMALY_FINISH',
      '52': 'GL_EVENT_COMMS_LOGIN_FAILED',
      '53': 'GL_EVENT_COMMS_LOGIN_SUCCESS',
      '54': 'GL_EVENT_COMMS_UNAUTHORIZED_COMM',
      '55': 'GL_EVENT_CLIENT_INSTALL_AV',
      '56': 'GL_EVENT_CLIENT_INSTALL_FW',
      '57': 'GL_EVENT_CLIENT_UNINSTALL',
      '58': 'GL_EVENT_CLIENT_UNINSTALL_ROLLBACK',
      '59': 'GL_EVENT_COMMS_SERVER_GROUP_ROOT_CERT_ISSUE',
      '60': 'GL_EVENT_COMMS_SERVER_CERT_ISSUE',
      '61': 'GL_EVENT_COMMS_TRUSTED_ROOT_CHANGE',
      '62': 'GL_EVENT_COMMS_SERVER_CERT_STARTUP_FAILED',
      '63': 'GL_EVENT_CLIENT_CHECKIN',
      '64': 'GL_EVENT_CLIENT_NO_CHECKIN',
      '65': 'GL_EVENT_SCAN_SUSPENDED',
      '66': 'GL_EVENT_SCAN_RESUMED',
      '67': 'GL_EVENT_SCAN_DURATION_INSUFFICIENT',
      '68': 'GL_EVENT_CLIENT_MOVE',
      '69': 'GL_EVENT_SCAN_FAILED_ENHANCED',
      '70': 'GL_EVENT_MAX_event_name',
      '71': 'GL_EVENT_HEUR_THREAT_NOW_WHITELISTED',
      '72': 'GL_EVENT_INTERESTING_PROCESS_DETECTED_START',
      '73': 'GL_EVENT_LOAD_ERROR_COH',
      '74': 'GL_EVENT_LOAD_ERROR_SYKNAPPS',
      '75': 'GL_EVENT_INTERESTING_PROCESS_DETECTED_FINISH',
      '76': 'GL_EVENT_HPP_SCAN_NOT_SUPPORTED_FOR_OS',
      '77': 'GL_EVENT_HEUR_THREAT_NOW_KNOWN'}

  # Maps the numeric category identifier to its category name.
  CATEGORY_NAMES = {
      '1': 'GL_CAT_INFECTION',
      '2': 'GL_CAT_SUMMARY',
      '3': 'GL_CAT_PATTERN',
      '4': 'GL_CAT_SECURITY'}

  # Names of the configured secondary/tertiary actions (action1/action2).
  ACTION_1_2_NAMES = {
      '1': 'Quarantine infected file',
      '2': 'Rename infected file',
      '3': 'Delete infected file',
      '4': 'Leave alone (log only)',
      '5': 'Clean virus from file',
      '6': 'Clean or delete macros'}

  # Names of the action actually taken (action0).
  ACTION_0_NAMES = {
      '1': 'Quarantined',
      '2': 'Renamed',
      '3': 'Deleted',
      '4': 'Left alone',
      '5': 'Cleaned',
      '6': ('Cleaned or macros deleted (no longer used as of '
            'Symantec AntiVirus 9.x)'),
      '7': 'Saved file as...',
      '8': 'Sent to Intel (AMS)',
      '9': 'Moved to backup location',
      '10': 'Renamed backup file',
      '11': 'Undo action in Quarantine View',
      '12': 'Write protected or lack of permissions - Unable to act on file',
      '13': 'Backed up file'}

  FORMAT_STRING_SEPARATOR = '; '

  FORMAT_STRING_PIECES = [
      'Event Name: {event_map}',
      'Category Name: {category_map}',
      'Malware Name: {virus}',
      'Malware Path: {file}',
      'Action0: {action0_map}',
      'Action1: {action1_map}',
      'Action2: {action2_map}',
      'Description: {description}',
      'Scan ID: {scanid}',
      'Event Data: {event_data}',
      'Remote Machine: {remote_machine}',
      'Remote IP: {remote_machine_ip}']

  FORMAT_STRING_SHORT_PIECES = [
      '{file}',
      '{virus}',
      '{action0_map}',
      '{action1_map}',
      '{action2_map}']

  SOURCE_LONG = 'Symantec AV Log'
  SOURCE_SHORT = 'LOG'

  def GetMessages(self, unused_formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
      unused_formatter_mediator (FormatterMediator): mediates the
          interactions between formatters and other components, such as
          storage and Windows EventLog resources. Unused here.
      event (EventObject): event.

    Returns:
      tuple(str, str): formatted message string and short message string.

    Raises:
      WrongFormatter: if the event object cannot be formatted by the
          formatter.
    """
    if self.DATA_TYPE != event.data_type:
      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
          event.data_type))

    event_values = event.CopyToDict()

    # Readability fix: the 'event' value used to be assigned to a local
    # named event, shadowing the event argument for the rest of the method.
    event_identifier = event_values.get('event', None)
    if event_identifier:
      event_values['event_map'] = self.EVENT_NAMES.get(
          event_identifier, 'Unknown')

    category = event_values.get('cat', None)
    if category:
      event_values['category_map'] = self.CATEGORY_NAMES.get(
          category, 'Unknown')

    action = event_values.get('action0', None)
    if action:
      event_values['action0_map'] = self.ACTION_0_NAMES.get(action, 'Unknown')

    action = event_values.get('action1', None)
    if action:
      event_values['action1_map'] = self.ACTION_1_2_NAMES.get(
          action, 'Unknown')

    action = event_values.get('action2', None)
    if action:
      event_values['action2_map'] = self.ACTION_1_2_NAMES.get(
          action, 'Unknown')

    return self._ConditionalFormatMessages(event_values)


manager.FormattersManager.RegisterFormatter(SymantecAVFormatter)
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, dict_output, _arg_split
@click.command('search_repositories')
@click.argument("q", type=str)
@click.option(
    "--page",
    help="page requested",
    # Consistency fix: the default was the string "1" on a type=int option;
    # use a real int so the declared type and the default agree.
    default=1,
    show_default=True,
    type=int
)
@click.option(
    "--page_size",
    help="page size requested",
    # Consistency fix: was the string "10" on a type=int option.
    default=10,
    show_default=True,
    type=int
)
@pass_context
@custom_exception
@dict_output
def cli(ctx, q, page=1, page_size=10):
    """Search for repositories in a Galaxy Tool Shed.

Output:

    dictionary containing search hits as well as metadata for the
        search.
        For example::

            {u'hits': [{u'matched_terms': [],
                        u'repository': {u'approved': u'no',
                                        u'description': u'Convert export file to fastq',
                                        u'full_last_updated': u'2015-01-18 09:48 AM',
                                        u'homepage_url': u'',
                                        u'id': u'bdfa208f0cf6504e',
                                        u'last_updated': u'less than a year',
                                        u'long_description': u'This is a simple too to convert Solexas Export files to FASTQ files.',
                                        u'name': u'export_to_fastq',
                                        u'remote_repository_url': u'',
                                        u'repo_owner_username': u'louise',
                                        u'times_downloaded': 164},
                        u'score': 4.92},
                       {u'matched_terms': [],
                        u'repository': {u'approved': u'no',
                                        u'description': u'Convert BAM file to fastq',
                                        u'full_last_updated': u'2015-04-07 11:57 AM',
                                        u'homepage_url': u'',
                                        u'id': u'175812cd7caaf439',
                                        u'last_updated': u'less than a month',
                                        u'long_description': u'Use Picards SamToFastq to convert a BAM file to fastq. Useful for storing reads as BAM in Galaxy and converting to fastq when needed for analysis.',
                                        u'name': u'bam_to_fastq',
                                        u'remote_repository_url': u'',
                                        u'repo_owner_username': u'brad-chapman',
                                        u'times_downloaded': 138},
                        u'score': 4.14}],
             u'hostname': u'https://testtoolshed.g2.bx.psu.edu/',
             u'page': u'1',
             u'page_size': u'2',
             u'total_results': u'64'}
    """
    return ctx.ti.repositories.search_repositories(q, page=page, page_size=page_size)
|
# -*- coding:utf-8 -*-
#!/usr/bin/env python
import gevent
from gevent import monkey
monkey.patch_all(thread=False)
from requests import get
from filetype import guess
from os import rename
from os import makedirs
from os.path import exists
from json import loads
from contextlib import closing
from lxml import etree
from threading import Thread
import json
# Path of the ini-style configuration file used to persist crawl state.
fp = 'config.conf'
# r = redis.Redis(connection_pool=pool)
import copy
import re
import time
import random
import json
import logging
# Count of pictures downloaded so far (informational only).
downloadedPIC = 0
# Random string persisted in the config file between runs.
random_str = ''
import configparser
cf = configparser.ConfigParser()
cf.read('./config.conf')
# Work queue: one {"dir": ..., "link": ...} entry per gallery.
datalist = []
# File downloader.
def Down_load(file_url, file_full_name, now_photo_count, all_photo_count):
    """Stream file_url to file_full_name, printing a progress bar.

    :param file_url: direct URL of the image to download.
    :param file_full_name: local path to write the file to.
    :param now_photo_count: index of the current photo (display only).
    :param all_photo_count: total number of photos (display only).
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36","Referer":"https://www.mzitu.com/"}
    # Download the image in chunks.
    with closing(get(file_url, headers=headers, stream=True)) as response:
        chunk_size = 1024  # maximum bytes per chunk
        content_size = int(response.headers['content-length'])  # total size
        data_count = 0  # bytes transferred so far
        with open(file_full_name, "wb") as file:
            for data in response.iter_content(chunk_size=chunk_size):
                file.write(data)
                # Bug fix: update the transferred byte count *before*
                # drawing the bar, so the display includes this chunk
                # (it previously lagged one chunk behind and started at 0%).
                data_count = data_count + len(data)
                done_block = int((data_count / content_size) * 50)
                now_jd = (data_count / content_size) * 100
                print("\r %s:[%s%s] %d%% %d/%d" % (file_full_name, done_block * '█ ', ' ' * (50 - 1 - done_block), now_jd, now_photo_count, all_photo_count), end=" ")
    # The extension-guessing step was disabled upstream; the leftover
    # rename(file_full_name, file_full_name) was a no-op and is removed.
    # file_type = guess(file_full_name)
#
def generate_random_str(randomlength=16):
    """Generate a random alphanumeric string of the given length.

    :param int randomlength: desired length of the string.
    :return: a string of ASCII letters and digits.
    """
    # Bug fix: the alphabet contained 'g'/'G' twice and was missing
    # 'j'/'J' ("...ghig..." instead of "...ghij...").
    base_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    return ''.join(random.choice(base_str) for _ in range(randomlength))
def crawler_link(url):
    """Recursively walk the listing pages collecting gallery entries.

    Appends {"dir": <gallery title>, "link": <gallery url>} dicts to the
    global datalist; once the last page is reached, persists the queue,
    marks the listing finished in the config file, reloads the queue from
    disk and starts downloading via loop_picture().

    :param str url: listing page URL to crawl.
    """
    global datalist
    print('当前已获取条目数量'+str(len(datalist)))
    # Fetch the gallery listing page.
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36","Referer":"https://www.mzitu.com/"}
    respond = get(url, headers=headers)
    html = etree.HTML(respond.text)
    photo_data = html.xpath('//ul[@id="pins"]/li/a[1]')
    # URL of the next listing page, if any.
    nextLink= ''
    if len(html.xpath('//a[@class="next page-numbers"]'))>0 :
        nextLink = html.xpath('//a[@class="next page-numbers"]/attribute::href')[0]
    # Collect (title, link) for every gallery on this page.
    for photo in photo_data:
        dir = photo.xpath('img/attribute::alt')[0]
        # Strip punctuation/whitespace so the title is a safe folder name.
        dir = re.compile('[!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\s]+').sub('',dir)
        link = photo.xpath('attribute::href')[0]
        datalist.append({"dir":dir,"link":link})
    if nextLink!='' :
        # time.sleep(0.2)
        # NOTE(review): deep recursion over many listing pages may hit the
        # interpreter recursion limit -- confirm expected page counts.
        crawler_link(nextLink)
    else:
        # Last page reached: persist and mark the listing as complete.
        save_data()
        cf.set('str', 'list_finish', 'true')
        with open(fp, 'w') as fw:
            cf.write(fw)
        print('全站已经采集完毕共计'+str(len(datalist)))
        # Reload the persisted queue (one JSON document per line) and
        # begin downloading.
        data = open('./datalist.txt', mode='r')
        data = data.read()
        datalist = data.split('\n')
        loop_picture()
def save_data():
    """Persist the pending download queue to ./datalist.txt, one JSON
    object per line.

    Fix: use a context manager so the file is closed even if the write
    raises (the original only closed on the happy path; its 'w+' read
    access was never used).
    """
    with open('./datalist.txt', mode='w') as data_file:
        data_file.write('\n'.join(json.dumps(item) for item in datalist))
def loop_picture():
    """Consume the global download queue in batches of four albums.

    Each queue entry is a JSON string {"dir": ..., "link": ...}.  An album
    that fails is re-queued at the tail; the queue file is rewritten after
    every album so an interrupted run can resume.
    """
    global datalist
    while len(datalist) > 0:
        # Snapshot the first four entries; a plain slice copy suffices
        # (entries are immutable strings — deepcopy was unnecessary).
        batch = list(datalist[0:4])
        for raw_photo in batch:
            print(raw_photo)
            photo = json.loads(raw_photo)
            url = photo['link']
            folder = photo['dir']
            # Ensure the album's target directory exists.
            if not exists('./pic/' + str(folder)):
                makedirs('./pic/' + str(folder))
            try:
                gevent.joinall([
                    gevent.spawn(crawler_photo, url, folder),
                ])
            except Exception:
                # Re-queue failures at the tail so the batch keeps going.
                # (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit and made Ctrl-C loop forever.)
                datalist.append(json.dumps({"dir": folder, "link": url}))
            datalist.pop(0)
            save_data()
# Download every page of a single album (gallery).
def crawler_photo(url, dir):
    """Download all wallpaper pages of the album at `url` into ./pic/<dir>/.

    Fix: walk the album's pagination iteratively instead of recursively
    (deep albums could approach the recursion limit), and guard the
    last-anchor label lookup, which the original indexed without checking
    it was non-empty.  Stops when the last pagination anchor is the
    cross-album「下一组»」button.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36","Referer":"https://www.mzitu.com/"}
    while url:
        respond = get(url, headers=headers)
        html = etree.HTML(respond.text)
        file_url = html.xpath('//img[@class="blur"]/attribute::src')
        if len(file_url) > 0 and file_url[0]:
            src = file_url[0]
            # Keep the server's file name for the local copy.
            file_name_only = src.split('/')[-1]
            file_full_name = './pic/' + dir + '/' + file_name_only
            Down_load(src, file_full_name, 1, 1)
        next_href = html.xpath('//div[@class="pagenavi"]//a[last()]/attribute::href')
        last_label = html.xpath('//div[@class="pagenavi"]//a[last()]/span/text()')
        if next_href and last_label and last_label[0] != '下一组»':
            url = next_href[0]
            time.sleep(0.5)  # be polite to the server between pages
        else:
            url = ''
# Replaces the blocking standard-library I/O implementation with a
# gevent non-blocking one.
if __name__ == '__main__':
    # Reuse a previously generated session string if present; otherwise
    # create one and persist it to the config file `fp`.
    if cf.has_option('str','random_str'):
        random_str = cf.get("str", "random_str")
    else:
        random_str = generate_random_str()
        cf.set('str','random_str',random_str)
        with open(fp, 'w') as fw:
            cf.write(fw)
    # 'list_finish' marks that the gallery index was fully crawled on a
    # previous run; if so, reload the saved queue and resume downloading.
    if cf.has_option('str','list_finish'):
        print('您已经采集过目录了,正在恢复到上一次的进度。。。')
        # NOTE(review): this file handle is rebound before being closed and
        # is never closed explicitly.
        data = open('./datalist.txt',mode='r')
        data = data.read()
        datalist = data.split('\n')
        gevent.joinall([
            gevent.spawn(loop_picture),
        ])
    else:
        # Fresh run: crawl the whole index first (the crawler then starts
        # the download loop itself).
        print('正在开始采集数据目录,此阶段大约需要10分钟,请勿退出或关闭窗口')
        gevent.joinall([
            gevent.spawn(crawler_link, 'https://www.mzitu.com'),
        ])
    print('\n下载成功!')
|
# Command prefix that runs tooling inside the pipenv-managed virtualenv.
VENV_PREFIX = "pipenv run"
# Targets shared by several tasks — presumably the lint/format/test paths;
# TODO confirm against the task runner that consumes these.
_COMMON_TARGETS = ["pycon_archive_past_website", "tests"]
# The same targets as one space-separated string, ready for a shell command line.
COMMON_TARGETS_AS_STR = " ".join(_COMMON_TARGETS)
|
# post processing, add sequence and additional annoation info if available
from six.moves.urllib.parse import urlencode
from galaxy.datatypes.images import create_applet_tag_peek
def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
    """Post-process the tool run: attach a LAJ applet tag as the primary
    output dataset's peek.

    Builds the applet parameters from the tool's input datasets and writes
    the result back through the Galaxy model context.
    """
    primary_data = next(iter(out_data.values()))
    # Default params for LAJ type; some are overridden from the inputs below.
    params = {
        "alignfile1": "display?id=%s" % primary_data.id,
        "buttonlabel": "Launch LAJ",
        "title": "LAJ in Galaxy",
        "posturl": "history_add_to?%s" % urlencode({'history_id': primary_data.history_id, 'ext': 'lav', 'name': 'LAJ Output', 'info': 'Added by LAJ', 'dbkey': primary_data.dbkey})
    }
    # Optional inputs, mapped to the applet parameter each one fills; every
    # one is used only when its dataset finished OK and is non-empty.
    optional_params = {
        "seq_file1": "file1seq1",
        "seq_file2": "file1seq2",
        "exonfile": "exonfile",
        "repeatfile": "repeatfile",
        "annotationfile": "annotationfile",
        "underlayfile": "underlayfile",
        "highlightfile": "highlightfile",
    }
    for name, data in inp_data.items():
        if name == "maf_input":
            # The alignment input always overrides the default display URL.
            params["alignfile1"] = "display?id=%s" % data.id
        elif name in optional_params and data.state == data.states.OK and data.has_data():
            params[optional_params[name]] = "display?id=%s" % data.id
    if "file1seq1" not in params and "file1seq2" not in params:
        params["noseq"] = "true"
    class_name = "edu.psu.cse.bio.laj.LajApplet.class"
    archive = "/static/laj/laj.jar"
    primary_data.peek = create_applet_tag_peek(class_name, archive, params)
    app.model.context.add(primary_data)
    app.model.context.flush()
|
import json
from datetime import datetime
from django.test import TestCase
from django.utils import timezone
from twitter_stream import settings
from twitter_stream.models import Tweet
class TweetCreateFromJsonTest(TestCase):
    """Validates Tweet.create_from_json() against captured API payloads."""

    # Scalar Tweet fields compared 1:1 against the expected data, split
    # around created_at, which may need timezone conversion first.
    _FIELDS_BEFORE_DATE = (
        'tweet_id', 'text', 'truncated', 'lang',
        'user_id', 'user_screen_name', 'user_name', 'user_verified',
    )
    _FIELDS_AFTER_DATE = (
        'user_utc_offset', 'user_time_zone', 'filter_level',
        'latitude', 'longitude', 'user_geo_enabled', 'user_location',
        'favorite_count', 'retweet_count',
        'user_followers_count', 'user_friends_count',
        'in_reply_to_status_id', 'retweeted_status_id',
    )

    def validate_json(self, tweet_json, correct_data):
        """
        create_from_json() should return a Tweet object with
        the fields set to their proper values.
        Checks that all the fields match up.
        The tweet_json is raw JSON text from the Twitter api and documentation,
        The correct_data is corresponding manually-extracted data.
        """
        raw_tweet = json.loads(tweet_json)
        tweet = Tweet.create_from_json(raw_tweet)
        self.assertIsInstance(tweet, Tweet)
        # check for model validity
        tweet.clean_fields()
        for field in self._FIELDS_BEFORE_DATE:
            self.assertEqual(getattr(tweet, field), correct_data[field], '%s matches' % field)
        # May need to convert the date depending on timezone settings
        if settings.USE_TZ:
            correct_data['created_at'] = timezone.make_aware(correct_data['created_at'], timezone.get_current_timezone())
        self.assertEqual(tweet.created_at, correct_data['created_at'], 'created_at matches')
        for field in self._FIELDS_AFTER_DATE:
            self.assertEqual(getattr(tweet, field), correct_data[field], '%s matches' % field)

    @classmethod
    def add_test(cls, name, json, correct_data):
        """Register a test_<name> method that validates `json` against
        `correct_data`.

        Note: the `json` parameter shadows the json module inside this
        method only; the lambda captures the raw JSON string.
        """
        setattr(cls, "test_%s" % name, lambda self: self.validate_json(json, correct_data))
# This example has lots of stuff that is null
# Example tweet from https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid
TweetCreateFromJsonTest.add_test('null_fields', r"""{
"coordinates": null,
"favorited": false,
"truncated": false,
"created_at": "Wed Jun 06 20:07:10 +0000 2012",
"id_str": "210462857140252672",
"entities": {
"urls": [
{
"expanded_url": "https://dev.twitter.com/terms/display-guidelines",
"url": "https://t.co/Ed4omjYs",
"indices": [
76,
97
],
"display_url": "dev.twitter.com/terms/display-\u2026"
}
],
"hashtags": [
{
"text": "Twitterbird",
"indices": [
19,
31
]
}
],
"user_mentions": [
]
},
"in_reply_to_user_id_str": null,
"contributors": [
14927800
],
"text": "Along with our new #Twitterbird, we've also updated our Display Guidelines: https://t.co/Ed4omjYs ^JC",
"retweet_count": 66,
"in_reply_to_status_id_str": null,
"id": 210462857140252672,
"geo": null,
"retweeted": true,
"possibly_sensitive": false,
"in_reply_to_user_id": null,
"place": null,
"user": {
"profile_sidebar_fill_color": "DDEEF6",
"profile_sidebar_border_color": "C0DEED",
"profile_background_tile": false,
"name": "Twitter API",
"profile_image_url": "http://a0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png",
"created_at": "Wed May 23 06:01:13 +0000 2007",
"location": "San Francisco, CA",
"follow_request_sent": false,
"profile_link_color": "0084B4",
"is_translator": false,
"id_str": "6253282",
"entities": {
"url": {
"urls": [
{
"expanded_url": null,
"url": "http://dev.twitter.com",
"indices": [
0,
22
]
}
]
},
"description": {
"urls": [
]
}
},
"default_profile": true,
"contributors_enabled": true,
"favourites_count": 24,
"url": "http://dev.twitter.com",
"profile_image_url_https": "https://si0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png",
"utc_offset": -28800,
"id": 6253282,
"profile_use_background_image": true,
"listed_count": 10774,
"profile_text_color": "333333",
"lang": "en",
"followers_count": 1212963,
"protected": false,
"notifications": null,
"profile_background_image_url_https": "https://si0.twimg.com/images/themes/theme1/bg.png",
"profile_background_color": "C0DEED",
"verified": true,
"geo_enabled": true,
"time_zone": "Pacific Time (US & Canada)",
"description": "The Real Twitter API. I tweet about API changes, service issues and happily answer questions about Twitter and our API. Don't get an answer? It's on my website.",
"default_profile_image": false,
"profile_background_image_url": "http://a0.twimg.com/images/themes/theme1/bg.png",
"statuses_count": 3333,
"friends_count": 31,
"following": true,
"show_all_inline_media": false,
"screen_name": "twitterapi"
},
"in_reply_to_screen_name": null,
"source": "web",
"in_reply_to_status_id": null
}""", {
# Basic tweet info
'tweet_id': 210462857140252672,
'text': "Along with our new #Twitterbird, we've also updated "
"our Display Guidelines: https://t.co/Ed4omjYs ^JC",
'truncated': False,
'lang': None,
# Basic user info
'user_id': 6253282,
'user_screen_name': 'twitterapi',
'user_name': 'Twitter API',
'user_verified': True,
# Timing parameters
'created_at': datetime(2012, 6, 6, hour=20, minute=7, second=10, microsecond=0),
'user_utc_offset': -28800,
'user_time_zone': "Pacific Time (US & Canada)",
# none, low, or medium
'filter_level': None,
# Geo parameters
'latitude': None,
'longitude': None,
'user_geo_enabled': True,
'user_location': "San Francisco, CA",
# Engagement - not likely to be very useful for streamed tweets but whatever
'favorite_count': None,
'retweet_count': 66,
'user_followers_count': 1212963,
'user_friends_count': 31,
'in_reply_to_status_id': None,
'retweeted_status_id': None
})
# A captured tweet (anonymized)
# This example has location data
TweetCreateFromJsonTest.add_test('location_data', r"""{
"contributors": null,
"coordinates": {
"coordinates": [
-118.722583202,
34.983424651
],
"type": "Point"
},
"created_at": "Tue Feb 11 18:43:27 +0000 2014",
"entities": {
"hashtags": [],
"symbols": [],
"urls": [],
"user_mentions": []
},
"favorite_count": 0,
"favorited": false,
"filter_level": "medium",
"geo": {
"coordinates": [
34.983424651,
-118.722583202
],
"type": "Point"
},
"id": 458121938375806432,
"id_str": "458121938375806432",
"in_reply_to_screen_name": null,
"in_reply_to_status_id": null,
"in_reply_to_status_id_str": null,
"in_reply_to_user_id": null,
"in_reply_to_user_id_str": null,
"lang": "en",
"place": {
"attributes": {},
"bounding_box": {
"coordinates": [
[
[
-118.0,
34.0
],
[
-118.0,
34.0
],
[
-118.0,
34.0
],
[
-118.0,
34.0
]
]
],
"type": "Polygon"
},
"contained_within": [],
"country": "United States",
"country_code": "US",
"full_name": "Place, CA",
"id": "540563418",
"name": "Place",
"place_type": "city",
"url": "https://api.twitter.com/1.1/geo/id/540563418.json"
},
"retweet_count": 0,
"retweeted": false,
"source": "<a href=\"http://twitter.com/download/iphone\" rel=\"nofollow\">Twitter for iPhone</a>",
"text": "Blah blah blah blah blah blah blah blah!",
"truncated": false,
"user": {
"contributors_enabled": false,
"created_at": "Thu Jul 26 14:02:08 +0000 2012",
"default_profile": true,
"default_profile_image": false,
"description": null,
"favourites_count": 2,
"follow_request_sent": null,
"followers_count": 4,
"following": null,
"friends_count": 13,
"geo_enabled": true,
"id": 687069798,
"id_str": "687069798",
"is_translation_enabled": false,
"is_translator": false,
"lang": "en",
"listed_count": 0,
"location": "",
"name": "some_user_name",
"notifications": null,
"profile_background_color": "C0DEED",
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme1/bg.png",
"profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme1/bg.png",
"profile_background_tile": false,
"profile_image_url": "http://pbs.twimg.com/profile_images/fake_fake_fake.jpeg",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/fake_fake_fake.jpeg",
"profile_link_color": "0084B4",
"profile_sidebar_border_color": "C0DEED",
"profile_sidebar_fill_color": "DDEEF6",
"profile_text_color": "333333",
"profile_use_background_image": true,
"protected": false,
"screen_name": "some_screen_name",
"statuses_count": 7,
"time_zone": "Pacific Time (US & Canada)",
"url": null,
"utc_offset": null,
"verified": false
}
}""", {
# Basic tweet info
'tweet_id': 458121938375806432,
'text': "Blah blah blah blah blah blah blah blah!",
'truncated': False,
'lang': "en",
# Basic user info
'user_id': 687069798,
'user_screen_name': 'some_screen_name',
'user_name': 'some_user_name',
'user_verified': False,
# Timing parameters
'created_at': datetime(2014, 2, 11, hour=18, minute=43, second=27, microsecond=0),
'user_utc_offset': None,
'user_time_zone': "Pacific Time (US & Canada)",
# none, low, or medium
'filter_level': 'medium',
# Geo parameters
'latitude': 34.983424651,
'longitude': -118.722583202,
'user_geo_enabled': True,
'user_location': "",
# Engagement - not likely to be very useful for streamed tweets but whatever
'favorite_count': 0,
'retweet_count': 0,
'user_followers_count': 4,
'user_friends_count': 13,
'in_reply_to_status_id': None,
'retweeted_status_id': None
})
# A captured tweet (anonymized)
# This example is a retweet
TweetCreateFromJsonTest.add_test('retweet', r"""{
"contributors": null,
"coordinates": null,
"created_at": "Tue Feb 11 18:43:27 +0000 2014",
"entities": {
"hashtags": [],
"symbols": [],
"urls": [],
"user_mentions": [
{
"id": 600695731,
"id_str": "600695731",
"indices": [
3,
12
],
"name": "somebody",
"screen_name": "somebody124"
}
]
},
"favorite_count": 0,
"favorited": false,
"filter_level": "medium",
"geo": null,
"id": 664439253345490274,
"id_str": "664439253345490274",
"in_reply_to_screen_name": null,
"in_reply_to_status_id": null,
"in_reply_to_status_id_str": null,
"in_reply_to_user_id": null,
"in_reply_to_user_id_str": null,
"lang": "en",
"place": null,
"retweet_count": 0,
"retweeted": false,
"retweeted_status": {
"contributors": null,
"coordinates": null,
"created_at": "Tue Feb 11 18:28:05 +0000 2014",
"entities": {
"hashtags": [],
"symbols": [],
"urls": [],
"user_mentions": []
},
"favorite_count": 12,
"favorited": false,
"geo": null,
"id": 552293876248595761,
"id_str": "552293876248595761",
"in_reply_to_screen_name": null,
"in_reply_to_status_id": null,
"in_reply_to_status_id_str": null,
"in_reply_to_user_id": null,
"in_reply_to_user_id_str": null,
"lang": "en",
"place": null,
"retweet_count": 10,
"retweeted": false,
"source": "<a href=\"http://twitter.com/download/iphone\" rel=\"nofollow\">Twitter for iPhone</a>",
"text": "I am an amazing tweet blah blah blah blah blah blah blah",
"truncated": false,
"user": {
"contributors_enabled": false,
"created_at": "Thu Jan 26 21:45:50 +0000 2012",
"default_profile": false,
"default_profile_image": false,
"description": "my user description goes here",
"favourites_count": 12772,
"follow_request_sent": null,
"followers_count": 5201,
"following": null,
"friends_count": 836,
"geo_enabled": false,
"id": 557753453,
"id_str": "557753453",
"is_translation_enabled": false,
"is_translator": false,
"lang": "en",
"listed_count": 10,
"location": "some place",
"name": "my name",
"notifications": null,
"profile_background_color": "090A0A",
"profile_background_image_url": "http://pbs.twimg.com/profile_background_images/fake_fake_fake.jpeg",
"profile_background_image_url_https": "https://pbs.twimg.com/profile_background_images/fake_fake_fake.jpeg",
"profile_background_tile": true,
"profile_banner_url": "https://pbs.twimg.com/profile_banners/fake_fake_fake",
"profile_image_url": "http://pbs.twimg.com/profile_images/fake_fake_fake.jpeg",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/fake_fake_fake.jpeg",
"profile_link_color": "2CC7C7",
"profile_sidebar_border_color": "000000",
"profile_sidebar_fill_color": "E6E4E4",
"profile_text_color": "404040",
"profile_use_background_image": false,
"protected": false,
"screen_name": "my_screen_name",
"statuses_count": 15670,
"time_zone": "Central Time (US & Canada)",
"url": null,
"utc_offset": -21600,
"verified": false
}
},
"source": "<a href=\"http://twitter.com/download/iphone\" rel=\"nofollow\">Twitter for iPhone</a>",
"text": "RT @my_screen_name: I am an amazing tweet blah blah blah blah blah blah blah",
"truncated": false,
"user": {
"contributors_enabled": false,
"created_at": "Fri Nov 13 23:51:33 +0000 2009",
"default_profile": false,
"default_profile_image": false,
"description": "An inspiring quote, #belieber",
"favourites_count": 6009,
"follow_request_sent": null,
"followers_count": 442,
"following": null,
"friends_count": 380,
"geo_enabled": true,
"id": 165087803,
"id_str": "165087803",
"is_translation_enabled": false,
"is_translator": false,
"lang": "en",
"listed_count": 2,
"location": "",
"name": "My Real Name",
"notifications": null,
"profile_background_color": "642D8B",
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme10/bg.gif",
"profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme10/bg.gif",
"profile_background_tile": true,
"profile_banner_url": "https://pbs.twimg.com/profile_banners/fake_fake_fake",
"profile_image_url": "http://pbs.twimg.com/profile_images/fake_fake_fake.jpeg",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/fake_fake_fake.jpeg",
"profile_link_color": "FF0000",
"profile_sidebar_border_color": "65B0DA",
"profile_sidebar_fill_color": "7AC3EE",
"profile_text_color": "3D1957",
"profile_use_background_image": true,
"protected": false,
"screen_name": "screen_name",
"statuses_count": 8006,
"time_zone": "Central Time (US & Canada)",
"url": null,
"utc_offset": -21600,
"verified": false
}
}""", {
# Basic tweet info
'tweet_id': 664439253345490274,
'text': "RT @my_screen_name: I am an amazing tweet blah blah blah blah blah blah blah",
'truncated': False,
'lang': "en",
# Basic user info
'user_id': 165087803,
'user_screen_name': "screen_name",
'user_name': 'My Real Name',
'user_verified': False,
# Timing parameters
'created_at': datetime(2014, 2, 11, hour=18, minute=43, second=27, microsecond=0),
'user_utc_offset': -21600,
'user_time_zone': "Central Time (US & Canada)",
# none, low, or medium
'filter_level': 'medium',
# Geo parameters
'latitude': None,
'longitude': None,
'user_geo_enabled': True,
'user_location': "",
# Engagement - not likely to be very useful for streamed tweets but whatever
'favorite_count': 0,
'retweet_count': 0,
'user_followers_count': 442,
'user_friends_count': 380,
'in_reply_to_status_id': None,
'retweeted_status_id': 552293876248595761
})
# A captured tweet (anonymized)
# This example has negative counts
# a la https://dev.twitter.com/docs/streaming-apis/processing#Missing_counts
TweetCreateFromJsonTest.add_test('negative_counts', r"""{
"contributors": null,
"coordinates": null,
"created_at": "Tue Feb 11 18:43:27 +0000 2014",
"entities": {
"hashtags": [],
"symbols": [],
"urls": [],
"user_mentions": [
{
"id": 600695731,
"id_str": "600695731",
"indices": [
3,
12
],
"name": "somebody",
"screen_name": "somebody124"
}
]
},
"favorite_count": -1,
"favorited": false,
"filter_level": "medium",
"geo": null,
"id": 664439253345490274,
"id_str": "664439253345490274",
"in_reply_to_screen_name": null,
"in_reply_to_status_id": null,
"in_reply_to_status_id_str": null,
"in_reply_to_user_id": null,
"in_reply_to_user_id_str": null,
"lang": "en",
"place": null,
"retweet_count": -1,
"retweeted": false,
"retweeted_status": null,
"source": "<a href=\"http://twitter.com/download/iphone\" rel=\"nofollow\">Twitter for iPhone</a>",
"text": "RT @my_screen_name: I am an amazing tweet blah blah blah blah blah blah blah",
"truncated": false,
"user": {
"contributors_enabled": false,
"created_at": "Fri Nov 13 23:51:33 +0000 2009",
"default_profile": false,
"default_profile_image": false,
"description": "An inspiring quote, #belieber",
"favourites_count": -1,
"follow_request_sent": null,
"followers_count": -1,
"following": null,
"friends_count": -1,
"geo_enabled": true,
"id": 165087803,
"id_str": "165087803",
"is_translation_enabled": false,
"is_translator": false,
"lang": "en",
"listed_count": -1,
"location": "",
"name": "My Real Name",
"notifications": null,
"profile_background_color": "642D8B",
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme10/bg.gif",
"profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme10/bg.gif",
"profile_background_tile": true,
"profile_banner_url": "https://pbs.twimg.com/profile_banners/fake_fake_fake",
"profile_image_url": "http://pbs.twimg.com/profile_images/fake_fake_fake.jpeg",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/fake_fake_fake.jpeg",
"profile_link_color": "FF0000",
"profile_sidebar_border_color": "65B0DA",
"profile_sidebar_fill_color": "7AC3EE",
"profile_text_color": "3D1957",
"profile_use_background_image": true,
"protected": false,
"screen_name": "screen_name",
"statuses_count": -1,
"time_zone": "Central Time (US & Canada)",
"url": null,
"utc_offset": -21600,
"verified": false
}
}""", {
# Basic tweet info
'tweet_id': 664439253345490274,
'text': "RT @my_screen_name: I am an amazing tweet blah blah blah blah blah blah blah",
'truncated': False,
'lang': "en",
# Basic user info
'user_id': 165087803,
'user_screen_name': "screen_name",
'user_name': 'My Real Name',
'user_verified': False,
# Timing parameters
'created_at': datetime(2014, 2, 11, hour=18, minute=43, second=27, microsecond=0),
'user_utc_offset': -21600,
'user_time_zone': "Central Time (US & Canada)",
# none, low, or medium
'filter_level': 'medium',
# Geo parameters
'latitude': None,
'longitude': None,
'user_geo_enabled': True,
'user_location': "",
# Engagement - not likely to be very useful for streamed tweets but whatever
'favorite_count': None,
'retweet_count': None,
'user_followers_count': None,
'user_friends_count': None,
'in_reply_to_status_id': None,
'retweeted_status_id': None
})
|
from sqlalchemy import (engine_from_config, MetaData, Table,
Column, ForeignKey, PrimaryKeyConstraint, Index)
from sqlalchemy.types import (SmallInteger, String, Integer,
DateTime, Float, Enum, BINARY, Text, Date)
from geoalchemy import (GeometryExtensionColumn, Point, GeometryDDL,
MultiPolygon)
# Module-level database engine; populated by connect().
engine = None
# Shared metadata that every table below attaches to.
metadata = MetaData()
def connect(engine_config):
    '''Call this before trying to use anything else.

    Creates the module-level engine from a config mapping (keys prefixed
    with "db.") and binds the shared metadata to it.
    '''
    global engine
    engine = engine_from_config(engine_config, prefix='db.')
    metadata.bind = engine
# Allowed values for an occurrence's classification (and re-classification).
classification_enum = Enum('unknown', 'invalid', 'historic', 'vagrant',
    'irruptive', 'core', 'introduced')
# Basis-of-record values as supplied by the upstream data sources.
basis_enum = Enum('Preserved specimen', 'Human observation',
    'Machine observation')
species = Table('species', metadata,
    Column('id', Integer(), primary_key=True),
    Column('scientific_name', String(256), nullable=False),
    Column('common_name', String(256), nullable=True),
    Column('num_dirty_occurrences', Integer(), nullable=False, default=0),
    Column('needs_vetting_since', DateTime(), nullable=True, default=None)
)
sources = Table('sources', metadata,
    Column('id', Integer(), primary_key=True),
    Column('name', String(256), nullable=False),
    Column('url', String(256), nullable=False),
    Column('last_import_time', DateTime(), nullable=True)
)
occurrences = Table('occurrences', metadata,
    Column('id', Integer(), primary_key=True),
    GeometryExtensionColumn('location', Point(2, srid=4326), nullable=False),
    Column('uncertainty', Integer(), nullable=False),
    Column('date', Date(), nullable=True),
    Column('classification', classification_enum, nullable=False),
    Column('basis', basis_enum, nullable=True),
    # NOTE(review): these SmallInteger FK columns reference Integer primary
    # keys (species.id / sources.id); widen if ids can exceed 16 bits.
    Column('species_id', SmallInteger(), ForeignKey('species.id'), nullable=False),
    Column('source_id', SmallInteger(), ForeignKey('sources.id'), nullable=False),
    Column('source_record_id', BINARY(16), nullable=True),
    Column('source_classification', classification_enum, nullable=False)
)
# GeometryDDL registers DDL listeners so geometry columns are created/dropped
# correctly for each table that has one.
GeometryDDL(occurrences)
sensitive_occurrences = Table('sensitive_occurrences', metadata,
    Column('occurrence_id', Integer(), ForeignKey('occurrences.id'), nullable=False),
    GeometryExtensionColumn('sensitive_location', Point(2, srid=4326), nullable=False)
)
GeometryDDL(sensitive_occurrences)
vettings = Table('vettings', metadata,
    Column('id', Integer(), primary_key=True),
    Column('user_id', Integer(), ForeignKey('users.id'), nullable=False),
    Column('species_id', Integer(), ForeignKey('species.id'), nullable=False),
    Column('comment', Text(), nullable=False),
    Column('classification', classification_enum, nullable=False),
    GeometryExtensionColumn('area', MultiPolygon(2, srid=4326), nullable=False)
)
GeometryDDL(vettings)
# table only available after using shp2pgsql on BLA shapefile:
# shp2pgsql TaxonPolys1.shp birdlife_import | sudo -u postgres psql edgar
birdlife_import = Table('birdlife_import', metadata,
    Column('spno', SmallInteger()),
    Column('rnge', Integer()),
    Column('brrnge', Integer()),
    # srid=-1: the shapefile import carries no declared spatial reference.
    GeometryExtensionColumn('the_geom', MultiPolygon(2, srid=-1))
)
GeometryDDL(birdlife_import)
|
import os
import platform
import re
import subprocess
import sys
import sysconfig
from distutils.version import LooseVersion
from pathlib import Path
from typing import List
from setuptools import Extension
class CMakeExtension(Extension):
    """A setuptools Extension whose artifact is produced by a CMake build."""

    def __init__(self, name: str, sourcedir: str = "") -> None:
        # No source files are handed to setuptools; CMake builds everything.
        super().__init__(name, sources=[])
        resolved = Path(sourcedir).resolve()
        self.sourcedir = str(resolved)
def prepare_cmake_extensions(extensions: List[Extension]) -> None:
    """Verify that CMake is available for any CMakeExtension in `extensions`.

    Raises RuntimeError if cmake cannot be executed, or (on Windows) if it
    is older than 3.1.0.  No-op when the list holds no CMakeExtension.
    """
    cmake_extensions = [x for x in extensions if isinstance(x, CMakeExtension)]
    if not cmake_extensions:
        return
    try:
        out = subprocess.check_output(["cmake", "--version"])
    except OSError:  # pragma: no cover
        raise RuntimeError(
            "CMake must be installed to build the following extensions: "
            + ", ".join(e.name for e in cmake_extensions)
        )
    # TODO: Add Windows test coverage
    if platform.system() == "Windows":  # pragma: no cover
        match = re.search(r"version\s*([\d.]+)", out.decode())
        # distutils.LooseVersion is deprecated and removed with distutils
        # (PEP 632); compare numeric version tuples instead.  Tuple ordering
        # matches LooseVersion for plain dotted versions, e.g. (3, 1) < (3, 1, 0).
        cmake_version = tuple(int(p) for p in match.group(1).split(".") if p)  # type: ignore
        if cmake_version < (3, 1, 0):
            raise RuntimeError("CMake >= 3.1.0 is required on Windows")
def build_cmake_extension(
    ext: CMakeExtension, ext_full_path: str, dist_version: str, build_temp: str, debug: bool,
) -> None:
    """Configure and build one CMakeExtension with cmake.

    Runs `cmake <sourcedir> <args>` followed by `cmake --build .` inside
    `build_temp`, directing the built library into the directory that
    contains `ext_full_path`.  `debug` selects the Debug/Release config.
    Raises subprocess.CalledProcessError if either cmake invocation fails.
    """
    extdir = Path(ext_full_path).parent.resolve()
    cmake_args = [
        f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}",
        f"-DPYTHON_EXECUTABLE={sys.executable}",
    ]
    cfg = "Debug" if debug else "Release"
    build_args = ["--config", cfg]
    # TODO: Add Windows test coverage
    if platform.system() == "Windows":  # pragma: no cover
        # Multi-config generators need the per-config output directory variant.
        cmake_args += ["-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)]
        if sys.maxsize > 2 ** 32:
            # 64-bit interpreter -> request a 64-bit build.
            cmake_args += ["-A", "x64"]
        build_args += ["--", "/m"]
    else:
        cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
        build_args += ["--", "-j4"]
    cmake_args += ["-DPYTHON_INCLUDE_DIR={}".format(sysconfig.get_path("include"))]
    env = os.environ.copy()
    # Embed the distribution version into the compiled module via CXXFLAGS.
    env["CXXFLAGS"] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get("CXXFLAGS", ""), dist_version)
    Path(build_temp).mkdir(parents=True, exist_ok=True)
    subprocess.check_call(["cmake", ext.sourcedir] + cmake_args, cwd=build_temp, env=env)
    subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=build_temp)
|
from student_code.simple_baseline_net import SimpleBaselineNet
from student_code.experiment_runner_base import ExperimentRunnerBase
from student_code.vqa_dataset import VqaDataset
import torch
from torchvision import transforms
class SimpleBaselineExperimentRunner(ExperimentRunnerBase):
    """
    Sets up the Simple Baseline model for training. This class is specifically responsible for creating the model and optimizing it.
    """
    def __init__(self, train_image_dir, train_question_path, train_annotation_path,
                 test_image_dir, test_question_path, test_annotation_path, batch_size, num_epochs,
                 num_data_loader_workers, cache_location, lr, log_validation):
        # Resize + tensor + per-channel normalization (ImageNet statistics).
        transform = transforms.Compose([transforms.Resize((224, 224)),
                                        transforms.ToTensor(),
                                        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                             std=[0.229, 0.224, 0.225]),
                                        ])
        train_dataset = VqaDataset(image_dir=train_image_dir,
                                   question_json_file_path=train_question_path,
                                   annotation_json_file_path=train_annotation_path,
                                   image_filename_pattern="COCO_train2014_{}.jpg",
                                   transform=transform,
                                   # None: vocabularies are built from the training split.
                                   question_word_to_id_map=None,
                                   answer_to_id_map=None,
                                   )
        val_dataset = VqaDataset(image_dir=test_image_dir,
                                 question_json_file_path=test_question_path,
                                 annotation_json_file_path=test_annotation_path,
                                 image_filename_pattern="COCO_val2014_{}.jpg",
                                 transform=transform,
                                 # Reuse the training vocabularies so ids line up.
                                 question_word_to_id_map=train_dataset.question_word_to_id_map,
                                 answer_to_id_map=train_dataset.answer_to_id_map,
                                 )
        model = SimpleBaselineNet()
        super().__init__(train_dataset, val_dataset, model, batch_size, num_epochs, num_data_loader_workers)
        # Separate learning rates: a large one for the word embedding and a
        # small one for the classifier head, as in the original setup.
        self.optimizer = torch.optim.SGD([{'params': model.word_feature.parameters(), 'lr': 0.8},
                                          {'params': model.output_layer.parameters(), 'lr': 0.01}])
        # Create the loss once here rather than on every optimization step
        # (the original rebuilt CrossEntropyLoss inside _optimize each call).
        self.loss_fn = torch.nn.CrossEntropyLoss()

    def _optimize(self, predicted_answers, true_answer_ids):
        """One training step: loss, backprop, gradient/weight clipping, update.

        Returns the loss tensor for logging.
        """
        self.optimizer.zero_grad()
        loss = self.loss_fn(predicted_answers, true_answer_ids)
        loss.backward()
        # Clip gradients and clamp weights to keep the embeddings bounded.
        torch.nn.utils.clip_grad_value_(self._model.parameters(), 20)
        with torch.no_grad():
            self._model.output_layer.weight.clamp_(min=-20, max=20)
            self._model.word_feature.weight.clamp_(min=-1500, max=1500)
        self.optimizer.step()
        return loss
|
from .antispoof_processor import ProcessAntiSpoof
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from toolz import merge, curry
from sklearn.preprocessing import LabelEncoder
@curry
def elast(data, y, t):
    """OLS slope (elasticity) of column `y` on column `t` in `data`."""
    t_centered = data[t] - data[t].mean()
    y_centered = data[y] - data[y].mean()
    # cov(t, y) / var(t), both un-normalized, so the factors cancel.
    return np.sum(t_centered * y_centered) / np.sum(t_centered ** 2)
def elast_ci(df, y, t, z=1.96):
    """Return the z-level confidence interval for the elasticity of y on t.

    Fits y = intercept + slope * t by OLS (slope via `elast`) and derives
    the slope's standard error from the residuals.
    """
    n_obs = df.shape[0]
    treat_mean = df[t].mean()
    slope = elast(df, y, t)
    intercept = df[y].mean() - slope * treat_mean
    residuals = df[y] - (intercept + slope * df[t])
    # SE of the OLS slope: residual variance over treatment variation.
    se = np.sqrt(((1 / (n_obs - 2)) * np.sum(residuals ** 2)) / np.sum((df[t] - treat_mean) ** 2))
    return np.array([slope - z * se, slope + z * se])
def cumulative_elast_curve_ci(dataset, prediction, y, t, min_periods=30, steps=100):
    """Elasticity confidence intervals over growing top-k slices.

    Rows are ranked by `prediction` (descending); the CI is recomputed for
    prefixes of increasing size, from `min_periods` up to the full dataset.
    """
    size = dataset.shape[0]
    ranked = dataset.sort_values(prediction, ascending=False).reset_index(drop=True)
    cutoffs = list(range(min_periods, size, size // steps)) + [size]
    return np.array([elast_ci(ranked.head(cutoff), y, t) for cutoff in cutoffs])
def cumulative_gain_ci(dataset, prediction, y, t, min_periods=30, steps=100):
    """Cumulative gain curve (with CIs): elasticity CI scaled by sample share.

    Same prefix scheme as cumulative_elast_curve_ci, but each interval is
    weighted by the fraction of the dataset covered by the prefix.
    """
    size = dataset.shape[0]
    ordered_df = dataset.sort_values(prediction, ascending=False).reset_index(drop=True)
    # Bug fix: guard against a zero stride when size < steps (range() would
    # raise ValueError otherwise).
    stride = max(size // steps, 1)
    n_rows = list(range(min_periods, size, stride)) + [size]
    return np.array([elast_ci(ordered_df.head(rows), y, t) * (rows / size) for rows in n_rows])
#!/usr/bin/env python
"""
This simple script scans website pai.pt for a particular category and prints
the list of emails found.
The emails found are printed one per line in the output (along with status messages)
and also written to a csv file called emails.csv with <email,category> structure
It uses BeautifulSoup to parse html responses given by the server.
Usage:
python main.py <category> [<first_page>]
e.g.:
python main.py http://www.pai.pt/ourivesarias-joalharias/
python main.py http://www.pai.pt/ourivesarias-joalharias/ 10
Copyright:
Tiago Almeida
tiago.b.almeida@gmail.com
http://stackoverflow.com/users/1590025/jumpifzero
License: MIT
"""
import re
import time
import csv
import sys
from urllib import urlopen
import bs4
__author__ = "Tiago Almeida"
__copyright__ = "Copyright 2015, Tiago Almeida"
__credits__ = ["Everyone who contributed to python and BeautifulSoup"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Tiago Almeida"
__email__ = "tiago.b.almeida@gmail.com"
__status__ = "Prototype"
def extract_emails_from_page(soup):
    """
    Returns a list of emails found in page (duplicates included), scanning
    the page's raw text with a regex. Returns [] if the page cannot be
    stringified or contains no email-like text.
    """
    # Raw string: the original pattern relied on invalid escape sequences
    # ('\w', '\-') in a plain string, which is deprecated.
    email_pattern = re.compile(r'([\w\-\.+]+@(\w[\w\-]+\.)+[\w\-]+)')
    try:
        page_content = str(soup)
    except Exception:  # narrowed from a bare except, which also caught SystemExit
        print('Error parsing page. Skipped\n')
        return []
    # findall returns (full_match, last_group) tuples; keep the full email.
    return [match[0] for match in email_pattern.findall(page_content)]
def write_emails_to_set(emails_lst, s):
    """Add every email from emails_lst into the set s (mutated in place)."""
    s.update(emails_lst)
def write_emails_to_file(result_emails, category):
    """
    Generates emails.csv with one <email,category> row per email.

    Uses a context manager so the file is closed even if a write raises
    (the original leaked the handle on error). 'wb' mode is kept because
    this script targets Python 2 (see urllib/xrange usage elsewhere).
    """
    with open('emails.csv', 'wb') as f:
        csvWriter = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for email in result_emails:
            csvWriter.writerow([email, category])
def print_emails(emails_list):
    """Print each collected email address on its own line."""
    for address in emails_list:
        print(address)
def extract_emails_from_category(initial_url, first_page=int(1)):
    """
    Returns a set of emails contained in all pages of a category.

    initial_url: string like http://www.pai.pt/ourivesarias-joalharias/
                 and it should point to the top level page of a category
    first_page: optional. Allows skipping the first <first_page> pages

    NOTE(review): Python 2 only — relies on urllib.urlopen, xrange and
    unicode(); porting to Python 3 would need urllib.request and str.
    """
    result_emails = set()  # we will return this
    # Regex matching the "<number>/;" fragment of the last-page link URL.
    lp_regex = re.compile('[0-9]+/;')
    # Open URL
    soup = bs4.BeautifulSoup(urlopen(initial_url), "html5lib")
    # extract the link to the last page. It is inside div.paging-bottom > ul > li with text ">>"
    navigation = soup.find_all("div",id="paging-bottom")
    if not navigation:
        print("This page is weird. It has no navigation. Aborting\n")
        return result_emails
    txt_elem = navigation[0].ul.find_all(text=">>")[0]
    # link to last page (the anchor wrapping the ">>" text)
    link = txt_elem.parent
    # Get its url.. smthg like /ourivesarias-joalharias/134/;jsessionid=67E1932531B84B3E77AAF47A29B263CE
    url = link['href']
    # Pick the number of the last page
    match = lp_regex.search(url)
    if match:
        # Slice off the trailing "/;" to keep just the digits.
        last_page = match.group()[0:-2]
        last_page_i = int(last_page)
    else:
        print("This category has no navigation to the last page\n")
        last_page_i = first_page
    # Sanity Check: never iterate backwards.
    if last_page_i < first_page:
        last_page_i = first_page
    print("Working on category %s" % initial_url)
    # Now that we have the last page. Time to iterate on each one and get the emails
    # NOTE(review): xrange(first, last) excludes the last page — confirm
    # whether the final page is intentionally skipped.
    for page in xrange( first_page, last_page_i ):
        page_url = initial_url + str(page) + '/' #This is fragile
        print("Scanning page %d of %d (%s)." % (page, last_page_i, page_url))
        try:
            emails = extract_emails_from_page(bs4.BeautifulSoup( unicode(urlopen(page_url).read(),'utf-8','ignore'), "html5lib"))
            write_emails_to_set(emails, result_emails)
            # Be polite to the server between page fetches.
            time.sleep(5)
        except IOError:
            print("Coult not fetch url %s. Skipped\n" % page_url)
    return result_emails
def main():
    """Entry point: read <category url> [first page] from argv and scrape it."""
    if len(sys.argv) <= 1:
        print("usage: %s <category url> [<skip_first_pages>]" % sys.argv[0])
        return
    initial_url = sys.argv[1]
    try:
        first_page = int(sys.argv[2])
    except IndexError:
        first_page = 1
    collected = extract_emails_from_category(initial_url, first_page)
    write_emails_to_file(collected, initial_url)
    print_emails(collected)
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
# Generated by Django 3.1.5 on 2021-01-13 03:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: add ``count_shops`` to BusinessCircle."""

    # Must be applied after the previous supermap migration.
    dependencies = [
        ('supermap', '0003_auto_20210112_1506'),
    ]

    operations = [
        # Nullable shop counter defaulting to 0.
        # NOTE(review): FloatField for a *count* looks odd — IntegerField seems
        # intended, but changing it now would require a fresh schema migration;
        # confirm with the model owner before altering.
        migrations.AddField(
            model_name='businesscircle',
            name='count_shops',
            field=models.FloatField(blank=True, default=0, null=True),
        ),
    ]
|
from . import AWSObject, AWSProperty
from .validators import *
from .constants import *
# -------------------------------------------
class KinesisStream(AWSObject):
    """# AWS::Kinesis::Stream - CloudFormationResourceSpecification version: 1.4.0
    {
        "Attributes": {
            "Arn": {
                "PrimitiveType": "String"
            }
        },
        "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html",
        "Properties": {
            "Name": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-name",
                "PrimitiveType": "String",
                "Required": false,
                "UpdateType": "Immutable"
            },
            "ShardCount": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-shardcount",
                "PrimitiveType": "Integer",
                "Required": true,
                "UpdateType": "Immutable"
            },
            "Tags": {
                "Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-tags",
                "DuplicatesAllowed": true,
                "ItemType": "Tag",
                "Required": false,
                "Type": "List",
                "UpdateType": "Mutable"
            }
        }
    }
    """

    # CloudFormation resource type emitted for instances of this class.
    resource_type = "AWS::Kinesis::Stream"

    # Property table: name -> (validator or type, required, update type, doc URL).
    # ``basestring`` and ``positive_integer``/``Tag`` come from the star imports
    # above; NOTE(review): ``basestring`` implies this module targets Python 2.
    props = {
        'Name': (basestring, False, 'Immutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-name'),
        'ShardCount': (positive_integer, True, 'Immutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-shardcount'),
        'Tags': ([Tag], False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-tags')
    }
|
import pytest
from meltano.core.m5o.m5o_file_parser import MeltanoAnalysisFileParser
class TestMeltanoAnalysisFileParser:
    """Exercises parsing and compilation of m5o model packages."""

    @pytest.fixture
    def subject(self, project):
        """A parser bound to the test project."""
        return MeltanoAnalysisFileParser(project)

    def test_parse(self, add_model, subject):
        topics = subject.parse_packages()
        assert len(topics) == 4
        # Topics we know the expected namespace for; others are only counted.
        expected_namespaces = {
            "carbon": "model-carbon-intensity",
            "sfdc": "model-salesforce",
        }
        for topic in topics:
            namespace = expected_namespaces.get(topic["name"])
            if namespace is not None:
                assert len(topic["designs"]) == 1
                assert topic["namespace"] == namespace

    def test_compile(self, project, add_model, subject):
        topics = subject.parse_packages()
        subject.compile(topics)
        models_dir = project.run_dir("models")
        subfolders = [entry.name for entry in models_dir.glob("**/*") if entry.is_dir()]
        compiled = [entry.name for entry in models_dir.glob("**/*.topic.m5oc")]
        # Each known package must produce both its folder and compiled topic.
        for model_folder, topic_file in [
            ("model-gitflix", "gitflix.topic.m5oc"),
            ("model-carbon-intensity", "carbon.topic.m5oc"),
            ("model-salesforce", "sfdc.topic.m5oc"),
        ]:
            assert model_folder in subfolders
            assert topic_file in compiled
|
from flask import Flask
from delivery.extensions import config
def create_app():
    """Application factory: build the Flask app and apply the configuration."""
    application = Flask(__name__)
    config.init_app(application)
    return application
|
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
# Embedded-solutions 2017-2020, www.microdaq.org
import microdaq
# Connect to the MicroDAQ measurement device at its fixed IP address.
mdaq = microdaq.Device("10.10.1.1")
# Read one sample from analog-input channels 1..4
# (input range -10V..+10V, single-ended wiring — the False flag).
data = mdaq.ai_read([1, 2, 3, 4], [-10, 10], False)
# Print one line per channel with the measured voltage.
for i, volt in enumerate(data):
    print("Channel[%d]: %f V" % (i, volt))
|
# -*- coding: utf-8 -*-
"""The module finding similarity ratio between two strings."""
|
import keras
import numpy as np
from keras import optimizers
from keras.models import Sequential, load_model
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
import random
def perturb_image(xs, img):
    """Apply pixel perturbations to copies of an image.

    xs:  one perturbation vector [x, y, r, g, b, x, y, r, g, b, ...] or a
         2-D array with one such vector per row. Plain Python lists are
         accepted (the original crashed on lists via ``xs.ndim``).
    img: source image array; it is never modified — perturbations are
         applied to tiled copies.

    Returns an array of shape (n_vectors,) + img.shape with the perturbed
    copies.
    """
    # Accept lists as well as arrays (pdAttack below passes a list).
    xs = np.asarray(xs)
    # If this function is passed just one perturbation vector,
    # pack it in a list to keep the computation the same.
    if xs.ndim < 2:
        xs = np.array([xs])
    # Copy the image n == len(xs) times so that we can
    # create n new perturbed images.
    tile = [len(xs)] + [1] * (xs.ndim + 1)
    imgs = np.tile(img, tile)
    # Pixel coordinates and colors must be integral for indexing/assignment.
    xs = xs.astype(int)
    for x, img_copy in zip(xs, imgs):
        # Split x into an array of 5-tuples (perturbation pixels)
        # i.e., [[x,y,r,g,b], ...]
        pixels = np.split(x, len(x) // 5)
        for pixel in pixels:
            # At each pixel's x,y position, assign its rgb value.
            x_pos, y_pos, *rgb = pixel
            img_copy[x_pos, y_pos] = rgb
    return imgs

def pdAttack(data, numP):
    """Perturb ``numP`` random pixels in every training image.

    data: ((x_train, y_train), (x_test, y_test)) tuples of numpy arrays,
          x_train images shaped (N, H, W, 3) with 0-255 colors.
    numP: number of random pixels to perturb per training image.

    Returns the same structure with a perturbed *copy* of x_train.

    Fixes two bugs in the original:
    * ``np.copy`` on the nested tuples built a shallow object array, so the
      caller's x_train was mutated despite the copy — now copied explicitly.
    * the return value of ``perturb_image`` was discarded (it perturbs
      copies, not its argument), so no pixel ever actually changed.
    """
    (x_train, y_train), (x_test, y_test) = data
    x_train = np.array(x_train, copy=True)
    # Draw coordinates from the actual image size instead of hard-coded 0..31
    # (backward compatible for the original 32x32 CIFAR images).
    height, width = x_train.shape[1], x_train.shape[2]
    for i in range(len(x_train)):
        temp = x_train[i]
        for _ in range(numP):
            x1 = random.randint(0, height - 1)  # random pixel coordinate
            y1 = random.randint(0, width - 1)
            r1 = random.randint(0, 255)         # random RGB color
            g1 = random.randint(0, 255)
            b1 = random.randint(0, 255)
            # perturb_image returns perturbed copies; keep the result.
            temp = perturb_image(np.array([x1, y1, r1, g1, b1]), temp)[0]
        x_train[i] = temp
    return (x_train, y_train), (x_test, y_test)
|
#!/usr/bin/env python3
import rospy, tf, actionlib
import actionlib_msgs.msg
from rosplan_planning_system.ActionInterfacePy.RPActionInterface import RPActionInterface
from std_srvs.srv import Empty
from geometry_msgs.msg import PoseStamped
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from high_level_robot_api import robot as robot_class
class RPMoveBasePy(RPActionInterface):
    """ROSPlan action interface that executes symbolic 'goto' actions by
    driving the robot to a hard-coded subsymbolic pose via move_base."""

    def __init__(self):
        # Parent sets up the generic ROSPlan action machinery.
        RPActionInterface.__init__(self)
        # Waypoint frame and namespace come from the parameter server.
        self.waypoint_frameid = rospy.get_param('~waypoint_frameid', 'map')
        self.wp_namespace = rospy.get_param('~wp_namespace', '/rosplan_demo_waypoints/wp')
        # High-level robot handle with navigation and manipulation enabled.
        self.robot = robot_class.Robot(enabled_components=['navigation', 'manipulation'])
        # Symbolic location -> subsymbolic pose: [x, y, z, q1, q2, q3, q4].
        self.dic = {'left_table': [18.022, 4.014, 0.0, 0.0, 0.0, 1.0, 0.008],
                    'right_table': [20.0, 8.0, 0.0, 0.0, 0.0, -0.9999762992466819, 0.006884834414179017] }

    def concreteCallback(self, action_msg):
        """Dispatch one planned action; returns True iff the goal is reached."""
        # Find the target location in the action parameters (last 'to' wins).
        symbolic_pose = None
        for parameter in action_msg.parameters:
            if parameter.key == 'to':
                symbolic_pose = parameter.value
        if not symbolic_pose:
            rospy.logerr('cannot find navigation goal inside action parameters, to? key does not exist')
            return False
        target = self.dic[symbolic_pose]
        # Tuck the arm inside the robot footprint before driving.
        self.robot.manipulation.go_to_pose('transport', wait=True)
        # Drive to the target pose; quaternion is the tail of the pose list.
        if self.robot.navigation.go_to_2d_pose(x=target[0], y=target[1], quaternion=target[3:], timeout=40.0):
            rospy.loginfo('goal was achieved!')
            return True
        rospy.loginfo('goal was not achieved')
        return False
def main():
    """Start the ROS node and run the move-base action interface loop."""
    rospy.init_node('rosplan_interface_movebase', anonymous=False)
    RPMoveBasePy().runActionInterface()
|
import csv
import glob
import math
import os
import sys
from random import random, seed
import socket
from timeit import default_timer as timer
import time
from statistics import mean
from pathlib import Path
import networkx as nx
import numpy as np
from scapy.layers.inet import IP, UDP
from scapy.utils import PcapWriter, PcapReader
import tkinter as tk
from tkinter import filedialog
import zat
from zat.log_to_dataframe import LogToDataFrame
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import cm
import matplotlib.transforms as mtrans
class Adding_More_Information_Netflow():
@staticmethod
def adding_name_info_and_rename_labels_of_benign_devices_for_netflow(path_to_filtered_files, path_to_name_info):
    """Stamp every <file>_summary.csv with its device's human-readable name.

    path_to_filtered_files: root folder containing ``scan_order.txt``
        (one "scenario,file" entry per line) and the per-scenario summaries.
    path_to_name_info: CSV with columns ``scenario_name`` and ``name``.

    Each summary CSV is rewritten in place with a ``name`` column added and
    the columns reordered to the canonical layout.
    (Removed the original's three no-op self-assignments of the parameters.)
    """
    name_info_df = pd.read_csv(path_to_name_info)
    scan_file_order_path = path_to_filtered_files + "/scan_order.txt"
    with open(scan_file_order_path, 'r') as inputfile:
        scanned_files = inputfile.readlines()
    scanned_files_list = [x.strip() for x in scanned_files]
    scanned_files_list = [(x.split(",")[0], x.split(",")[1]) for x in scanned_files_list]
    for index, (scenario, file_name) in enumerate(scanned_files_list):
        print("Scenario " + str(index + 1) + "/" + str(len(scanned_files_list)))
        # Device name for this scenario from the lookup table.
        name = name_info_df[name_info_df["scenario_name"] == scenario]["name"].values[0]
        path_to_csv_file = path_to_filtered_files + "/" + scenario + "/" + file_name + "/" + file_name + "_summary.csv"
        summary_csv_df = pd.read_csv(path_to_csv_file)
        summary_csv_df["name"] = name
        # Canonical column order shared by all summary files.
        columns_list = ["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol", "scenario", "file",
                        "connection_length", "label", "detailed_label",
                        "detailed_label_count", "name", "status"]
        summary_csv_df = summary_csv_df.reindex(columns=columns_list)
        summary_csv_df.to_csv(path_to_csv_file, index=False)
@staticmethod
def create_summary_from_separate_files_for_netflow(path_to_iot_scenarios_folder, folder_to_filtered_files, filename_addition):
    """Build one <file>_summary.csv per scanned pcap: aggregate packets into
    netflow-style connections and attach Zeek/Bro labels when available.

    path_to_iot_scenarios_folder: root of the original IoT scenario folders
        (each may contain a ``bro`` subfolder with a ``*.labeled`` conn log).
    folder_to_filtered_files: root of the filtered output tree; must contain
        ``scan_order.txt`` with one "scenario,file" entry per line.
    filename_addition: suffix of the filtered pcap to read
        (``<file>_<filename_addition>.pcap``).

    Side effects: appends each processed entry to ``log_order.txt`` (so the
    ``restart_*`` variant can resume) and writes the summary CSV next to
    the pcap.

    Bug fixed: in the no-labels branch the original called
    ``combined_df.reindex`` on a name that is undefined in that branch,
    raising NameError for every scenario without Bro labels.
    """
    scan_file_order_path = folder_to_filtered_files + "/" + "scan_order.txt"
    scanned_files = []
    with open(scan_file_order_path, 'r') as inputfile:
        scanned_files = inputfile.readlines()
    scanned_files_list = [x.strip() for x in scanned_files]
    scanned_files_list = list(map(lambda x: (x.split(",")[0], x.split(",")[1]), scanned_files_list))
    scanned_files_list = sorted(list(set(scanned_files_list)))
    for index, (scenario_name, file_name) in enumerate(scanned_files_list):
        print("Scenario name: " + scenario_name)
        print("File name : " + file_name)
        print("Number: " + str(index + 1) + "/" + str(len(scanned_files_list)))
        # Record progress so a crashed run can be resumed later.
        log_order_path = folder_to_filtered_files + "/" + "log_order.txt"
        with open(log_order_path, 'a') as log_order_file:
            log_order_file.write(scenario_name + "," + file_name + "\n")
        print("Reading PCAP File")
        path_to_csv_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_summary.csv"
        path_to_pcap_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_" + filename_addition + ".pcap"
        path_to_original_folder = path_to_iot_scenarios_folder + "/" + scenario_name
        # Aggregate packet counts per connection, keyed by
        # (src_ip, dst_ip, protocol, src_port, dst_port, tos).
        file_packet_dic = {}
        with PcapReader(path_to_pcap_file) as packets:
            for packet_count, packet in enumerate(packets):
                # Parse scapy's text dump into {layer: {field: value}}.
                packet_string = packet.show(dump=True)
                packet_string = packet_string.split("\n")
                packet_string = [x.replace(" ", "") for x in packet_string]
                current_layer = "none"
                packet_dic = {}
                for line in packet_string:
                    if len(line) > 0:
                        if line[0] == '#':
                            # '###[ Layer ]###' header line starts a new layer.
                            new_layer = line.split('[')[1].split(']')[0]
                            current_layer = new_layer
                            packet_dic[current_layer] = {}
                        elif (line[0] != '\\') & (line[0] != '|'):
                            key = line.split("=")[0]
                            value = line.split("=")[1]
                            packet_dic[current_layer][key] = value
                src_ip = packet_dic["IP"]["src"]
                dst_ip = packet_dic["IP"]["dst"]
                ip_protocol = packet_dic["IP"]["proto"].upper()
                if ip_protocol == "UDP" and "UDP" in packet_dic:
                    src_port = packet_dic["UDP"]["sport"]
                    dst_port = packet_dic["UDP"]["dport"]
                elif ip_protocol == "TCP" and "TCP" in packet_dic:
                    src_port = packet_dic["TCP"]["sport"]
                    dst_port = packet_dic["TCP"]["dport"]
                elif ip_protocol == "ICMP" and "ICMP" in packet_dic:
                    # ICMP has no ports; encode type/code in dst_port.
                    src_port = 0
                    dst_port = str(packet_dic["ICMP"]["type"]) + "/" + str(packet_dic["ICMP"]["code"])
                else:
                    src_port = 0
                    dst_port = 0
                # Scapy may render well-known ports as service names;
                # translate them back to numbers when possible.
                if not isinstance(src_port, int):
                    if not all(char.isdigit() for char in src_port):
                        try:
                            src_port = socket.getservbyname(src_port, ip_protocol)
                        except OSError:
                            # Unknown service name: keep the textual port.
                            pass
                # (The original's "or ()" in this condition was a no-op and
                # has been dropped.)
                if not isinstance(dst_port, int):
                    if not all(char.isdigit() for char in dst_port):
                        try:
                            dst_port = socket.getservbyname(dst_port, ip_protocol)
                        except OSError:
                            pass
                ip_tos = packet_dic["IP"]["tos"]
                connection_key = (src_ip, dst_ip, ip_protocol, src_port, dst_port, ip_tos)
                file_packet_dic[connection_key] = file_packet_dic.get(connection_key, 0) + 1
        # Flatten the aggregate into per-connection columns.
        src_ip_list = []
        dst_ip_list = []
        ip_protocol_list = []
        src_port_list = []
        dst_port_list = []
        ip_tos_list = []
        connection_length_list = []
        for (src_ip, dst_ip, ip_protocol, src_port, dst_port, ip_tos), connection_length in file_packet_dic.items():
            src_ip_list.append(src_ip)
            dst_ip_list.append(dst_ip)
            ip_protocol_list.append(ip_protocol)
            src_port_list.append(src_port)
            dst_port_list.append(dst_port)
            ip_tos_list.append(ip_tos)
            connection_length_list.append(connection_length)
        data = {"src_ip": src_ip_list, "dst_ip": dst_ip_list, "ip_protocol": ip_protocol_list, "src_port": src_port_list,
                "dst_port": dst_port_list, "ip_tos": ip_tos_list, "connection_length": connection_length_list}
        old_info_df = pd.DataFrame(data)
        old_info_df["scenario"] = scenario_name
        old_info_df["file"] = file_name
        print("Adding Logg Data")
        # Look for a Bro/Zeek folder with a labeled conn log in the scenario.
        sub_folders = [f.path for f in os.scandir(path_to_original_folder) if f.is_dir()]
        bro_folder_found = False
        labeled_files = []
        for sub_folder in sub_folders:
            base_name = str(os.path.basename(sub_folder))
            if base_name == "bro":
                labeled_files = glob.glob(sub_folder + "/*.labeled")
                bro_folder_found = True
                break
        if bro_folder_found and len(labeled_files) > 0:
            logg_file = labeled_files[0]
            # Renamed local: the original shadowed the module-level ``zat``.
            log_converter = LogToDataFrame()
            bro_original_df = log_converter.create_dataframe(logg_file)
            # The combined "tunnel_parents label detailed-label" column holds
            # the label in field 1 and the detailed label in field 2.
            bro_original_df["label"] = bro_original_df["tunnel_parents label detailed-label"].apply(
                lambda x: x.split(" ")[1].strip())
            bro_original_df["detailed_label"] = bro_original_df["tunnel_parents label detailed-label"].apply(
                lambda x: x.split(" ")[2].strip())
            bro_original_df = bro_original_df.rename(columns={"id.orig_h": "src_ip", "id.resp_h": "dst_ip", "id.orig_p" : "src_port", "id.resp_p" : "dst_port", "proto" : "ip_protocol"})
            bro_original_df = bro_original_df.drop(
                columns=['uid', 'service', 'duration', 'orig_bytes', 'resp_bytes', 'conn_state', 'local_orig',
                         'local_resp', 'missed_bytes', 'history', 'orig_pkts', 'orig_ip_bytes',
                         'resp_pkts', 'resp_ip_bytes', 'tunnel_parents label detailed-label'])
            bro_original_df["ip_protocol"] = bro_original_df["ip_protocol"].str.upper()
            bro_original_df.sort_values(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], inplace=True)
            # Count how often each detailed label appears per connection.
            bro_original_df = bro_original_df.groupby(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])[
                'detailed_label'].value_counts().to_frame()
            bro_original_df = bro_original_df.rename(columns={"detailed_label": "detailed_label_count"})
            bro_original_df = bro_original_df.reset_index()
            # Normalize the join keys to stripped strings on both sides.
            bro_original_df["src_ip"] = bro_original_df["src_ip"].apply(lambda x: str(x).strip())
            bro_original_df["dst_ip"] = bro_original_df["dst_ip"].apply(lambda x: str(x).strip())
            bro_original_df["src_port"] = bro_original_df["src_port"].apply(lambda x: str(x).strip())
            bro_original_df["dst_port"] = bro_original_df["dst_port"].apply(lambda x: str(x).strip())
            bro_original_df["ip_protocol"] = bro_original_df["ip_protocol"].apply(lambda x: str(x).strip())
            bro_original_df["src_ip"] = bro_original_df["src_ip"].astype(str)
            bro_original_df["dst_ip"] = bro_original_df["dst_ip"].astype(str)
            bro_original_df["src_port"] = bro_original_df["src_port"].astype(str)
            bro_original_df["dst_port"] = bro_original_df["dst_port"].astype(str)
            bro_original_df["ip_protocol"] = bro_original_df["ip_protocol"].astype(str)
            bro_original_df = bro_original_df.sort_values(by=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])
            bro_original_df = bro_original_df.set_index(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])
            old_info_df = old_info_df.sort_values(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])
            old_info_df = old_info_df.set_index(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])
            # Pass 1: match connections in their original direction.
            merged_df = old_info_df.merge(on=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], right=bro_original_df, how="inner")
            merged_df = merged_df.reset_index()
            old_info_df = old_info_df.reset_index()
            detailed_label_df = merged_df.drop_duplicates(subset=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], keep=False)
            detailed_label_df["status"] = "Found"
            # Connections matching several labels are marked as mixed.
            deleted_df = merged_df[merged_df.duplicated(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], keep=False)]
            deleted_df["status"] = "Mixed"
            # Pass 2: unmatched connections are retried with src/dst swapped
            # (they may be the response direction of a labeled flow).
            to_check_df = pd.concat(
                [old_info_df, merged_df.drop_duplicates(subset=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], keep='last')]).drop_duplicates(
                subset=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], keep=False)
            to_check_df = to_check_df.reset_index()
            to_check_df = to_check_df.rename(columns={"src_ip": "dst_ip", "dst_ip": "src_ip", "src_port" : "dst_port", "dst_port" : "src_port"}).drop(
                columns=["detailed_label", "detailed_label_count"])
            to_check_df["src_ip"] = to_check_df["src_ip"].apply(lambda x: str(x).strip())
            to_check_df["dst_ip"] = to_check_df["dst_ip"].apply(lambda x: str(x).strip())
            to_check_df["src_port"] = to_check_df["src_port"].apply(lambda x: str(x).strip())
            to_check_df["dst_port"] = to_check_df["dst_port"].apply(lambda x: str(x).strip())
            to_check_df["ip_protocol"] = to_check_df["ip_protocol"].apply(lambda x: str(x).strip())
            to_check_df["src_ip"] = to_check_df["src_ip"].astype(str)
            to_check_df["dst_ip"] = to_check_df["dst_ip"].astype(str)
            to_check_df["src_port"] = to_check_df["src_port"].astype(str)
            to_check_df["dst_port"] = to_check_df["dst_port"].astype(str)
            to_check_df["ip_protocol"] = to_check_df["ip_protocol"].astype(str)
            to_check_df = to_check_df.set_index(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])
            merged_df_2 = to_check_df.merge(on=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], right=bro_original_df, how="left")
            merged_df_2 = merged_df_2.reset_index()
            # Swap the endpoints back to the packet's original direction.
            merged_df_2 = merged_df_2.rename(columns={"src_ip": "dst_ip", "dst_ip": "src_ip", "src_port" : "dst_port", "dst_port" : "src_port"})
            detailed_label_2_df = merged_df_2.dropna()
            detailed_label_2_df["status"] = "Response"
            deleted_2_df = merged_df_2[merged_df_2.duplicated(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], keep=False)]
            deleted_2_df["status"] = "Mixed"
            unknown_df = merged_df_2[merged_df_2.isnull().any(axis=1)]
            unknown_df["status"] = "Unknown"
            # Combine both passes; anything labeled twice becomes "Mixed".
            combined_detailed_label_df = detailed_label_df.append(detailed_label_2_df)
            combined_detailed_label_2_df = combined_detailed_label_df.drop_duplicates(subset=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"],
                                                                                      keep=False)
            deleted_3_df = combined_detailed_label_df[
                combined_detailed_label_df.duplicated(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], keep=False)]
            combined_deleted_df = deleted_df.append(deleted_2_df).append(deleted_3_df)
            combined_deleted_df = combined_deleted_df.drop_duplicates(subset=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol", 'detailed_label'],
                                                                      keep='last')
            combined_deleted_df["status"] = "Mixed"
            combined_df = combined_detailed_label_2_df.append(combined_deleted_df).append(unknown_df)
            # Normalize the label columns for the final CSV.
            combined_df["detailed_label"] = combined_df.detailed_label.astype(str)
            combined_df["detailed_label"] = combined_df["detailed_label"].fillna(value="Unknown")
            combined_df["detailed_label_count"] = combined_df["detailed_label_count"].fillna(value="0")
            combined_df["detailed_label"] = combined_df["detailed_label"].replace(to_replace="nan", value="Unknown")
            combined_df["detailed_label"] = combined_df["detailed_label"].replace(to_replace="-", value="Benign")
            combined_df["label"] = np.where(combined_df["detailed_label"] == "Benign", "Benign", "Malicious")
            combined_df["label"] = np.where(combined_df["detailed_label"] == "Unknown", "Unknown",
                                            combined_df["label"])
            columns_list = ["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol", "ip_tos", "scenario", "file",
                            "connection_length", "label", "detailed_label", "detailed_label_count", "status"]
            combined_df = combined_df.reindex(columns=columns_list)
            combined_df.to_csv(path_to_csv_file, index=False)
        else:
            # No Bro labels available: emit the connections with Unknown labels.
            old_info_df["label"] = "Unknown"
            old_info_df["detailed_label"] = "Unknown"
            old_info_df["detailed_label_count"] = 0
            old_info_df["status"] = "Unknown"
            columns_list = ["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol", "ip_tos", "scenario", "file",
                            "connection_length", "label", "detailed_label", "detailed_label_count", "status"]
            # Bug fix: the original reindexed ``combined_df`` here, which is
            # undefined in this branch and raised NameError.
            old_info_df = old_info_df.reindex(columns=columns_list)
            old_info_df.to_csv(path_to_csv_file, index=False)
@staticmethod
def restart_creating_summary_from_separate_files_for_netflow(path_to_iot_scenarios_folder, folder_to_filtered_files,
filename_addition):
path_to_iot_scenarios_folder = path_to_iot_scenarios_folder
folder_to_filtered_files = folder_to_filtered_files
filename_addition = filename_addition
scan_file_order_path = folder_to_filtered_files + "/scan_order.txt"
log_order_path = folder_to_filtered_files + "/log_order.txt"
with open(scan_file_order_path, 'r') as inputfile:
scanned_files = inputfile.readlines()
with open(log_order_path, 'r') as inputfile:
logged_files = inputfile.readlines()
scanned_files_list = [x.strip() for x in scanned_files]
logged_files_list = [x.strip() for x in logged_files]
folders_still_to_scan = []
for scanned_file in scanned_files_list:
if scanned_file not in logged_files_list:
folders_still_to_scan.append(scanned_file)
folders = folders_still_to_scan
folders = list(map(lambda x: (x.split(",")[0], x.split(",")[1]), folders))
for index, (scenario_name, file_name) in enumerate(folders):
print("Scenario name: " + scenario_name)
print("File name : " + file_name)
print("Number: " + str(index + 1) + "/" + str(len(folders)))
log_order_path = folder_to_filtered_files + "/" + "log_order.txt"
with open(log_order_path, 'a') as log_order_file:
log_order_file.write(scenario_name + "," + file_name + "\n")
log_order_file.close()
print("Reading PCAP File")
path_to_csv_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_summary.csv"
path_to_pcap_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_" + filename_addition + ".pcap"
path_to_original_folder = path_to_iot_scenarios_folder + "/" + scenario_name
path_to_old_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_old.csv"
path_to_bro_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_bro.csv"
path_to_merge_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_merge.csv"
file_packet_dic = {}
with PcapReader(path_to_pcap_file) as packets:
for packet_count, packet in enumerate(packets):
packet_string = packet.show(dump=True)
packet_for_print = packet_string
packet_string = packet_string.split("\n")
packet_string = [x.replace(" ", "") for x in packet_string]
current_layer = "none"
packet_dic = {}
for line in packet_string:
if len(line) > 0:
if line[0] == '#':
new_layer = line.split('[')[1].split(']')[0]
current_layer = new_layer
packet_dic[current_layer] = {}
elif (line[0] != '\\') & (line[0] != '|'):
key = line.split("=")[0]
value = line.split("=")[1]
packet_dic[current_layer][key] = value
src_ip = packet_dic["IP"]["src"]
dst_ip = packet_dic["IP"]["dst"]
ip_protocol = packet_dic["IP"]["proto"].upper()
if ip_protocol == "UDP" and "UDP" in packet_dic:
src_port = packet_dic["UDP"]["sport"]
dst_port = packet_dic["UDP"]["dport"]
elif ip_protocol == "TCP" and "TCP" in packet_dic:
src_port = packet_dic["TCP"]["sport"]
dst_port = packet_dic["TCP"]["dport"]
elif ip_protocol == "ICMP" and "ICMP" in packet_dic:
src_port = 0
dst_port = str(packet_dic["ICMP"]["type"]) + "/" + str(packet_dic["ICMP"]["code"])
else:
src_port = 0
dst_port = 0
if not isinstance(src_port, int):
if not all(char.isdigit() for char in src_port):
try:
src_port = socket.getservbyname(src_port, ip_protocol)
except:
src_port = src_port
if not isinstance(dst_port, int) or ():
if not all(char.isdigit() for char in dst_port):
try:
dst_port = socket.getservbyname(dst_port, ip_protocol)
except:
dst_port = dst_port
ip_tos = packet_dic["IP"]["tos"]
if (src_ip, dst_ip, ip_protocol, src_port, dst_port, ip_tos) in file_packet_dic:
old_value = file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port, ip_tos)]
new_value = old_value + 1
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port, ip_tos)] = new_value
else:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port, ip_tos)] = 1
packets.close()
src_ip_list = []
dst_ip_list = []
ip_protocol_list = []
src_port_list = []
dst_port_list = []
ip_tos_list = []
connection_length_list = []
for (src_ip, dst_ip, ip_protocol, src_port, dst_port, ip_tos), connection_length in file_packet_dic.items():
src_ip_list.append(src_ip)
dst_ip_list.append(dst_ip)
ip_protocol_list.append(ip_protocol)
src_port_list.append(src_port)
dst_port_list.append(dst_port)
ip_tos_list.append(ip_tos)
connection_length_list.append(connection_length)
data = {"src_ip": src_ip_list, "dst_ip": dst_ip_list, "ip_protocol": ip_protocol_list,
"src_port": src_port_list,
"dst_port": dst_port_list, "ip_tos": ip_tos_list, "connection_length": connection_length_list}
old_info_df = pd.DataFrame(data)
old_info_df["scenario"] = scenario_name
old_info_df["file"] = file_name
print("Adding Logg Data")
sub_folders = [f.path for f in os.scandir(path_to_original_folder) if f.is_dir()]
bro_folder_found = False
for sub_folder in sub_folders:
base_name = str(os.path.basename(sub_folder))
if base_name == "bro":
labeled_files = glob.glob(sub_folder + "/*.labeled")
bro_folder_found = True
break
if bro_folder_found and len(labeled_files) > 0:
logg_file = labeled_files[0]
zat = LogToDataFrame()
bro_original_df = zat.create_dataframe(logg_file)
bro_original_df["label"] = bro_original_df["tunnel_parents label detailed-label"].apply(
lambda x: x.split(" ")[1].strip())
bro_original_df["detailed_label"] = bro_original_df["tunnel_parents label detailed-label"].apply(
lambda x: x.split(" ")[2].strip())
bro_original_df = bro_original_df.rename(
columns={"id.orig_h": "src_ip", "id.resp_h": "dst_ip", "id.orig_p": "src_port",
"id.resp_p": "dst_port", "proto": "ip_protocol"})
bro_original_df = bro_original_df.drop(
columns=['uid', 'service', 'duration', 'orig_bytes', 'resp_bytes', 'conn_state', 'local_orig',
'local_resp', 'missed_bytes', 'history', 'orig_pkts', 'orig_ip_bytes',
'resp_pkts', 'resp_ip_bytes', 'tunnel_parents label detailed-label'])
bro_original_df["ip_protocol"] = bro_original_df["ip_protocol"].str.upper()
bro_original_df.sort_values(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], inplace=True)
bro_original_df = bro_original_df.groupby(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])[
'detailed_label'].value_counts().to_frame()
bro_original_df = bro_original_df.rename(columns={"detailed_label": "detailed_label_count"})
bro_original_df = bro_original_df.reset_index()
bro_original_df["src_ip"] = bro_original_df["src_ip"].apply(lambda x: str(x).strip())
bro_original_df["dst_ip"] = bro_original_df["dst_ip"].apply(lambda x: str(x).strip())
bro_original_df["src_port"] = bro_original_df["src_port"].apply(lambda x: str(x).strip())
bro_original_df["dst_port"] = bro_original_df["dst_port"].apply(lambda x: str(x).strip())
bro_original_df["ip_protocol"] = bro_original_df["ip_protocol"].apply(lambda x: str(x).strip())
# bro_original_df["src_ip"] = bro_original_df["src_ip"].astype(str)
# bro_original_df["dst_ip"] = bro_original_df["dst_ip"].astype(str)
# bro_original_df["src_port"] = bro_original_df["src_port"].astype(str)
# bro_original_df["dst_port"] = bro_original_df["dst_port"].astype(str)
# bro_original_df["ip_protocol"] = bro_original_df["ip_protocol"].astype(str)
bro_original_df = bro_original_df.sort_values(
by=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])
# bro_original_df = bro_original_df.set_index(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])
old_info_df = old_info_df.sort_values(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])
# old_info_df = old_info_df.set_index(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])
merged_df = old_info_df.merge(on=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"],
right=bro_original_df, how="inner")
merged_df = merged_df.reset_index()
old_info_df = old_info_df.reset_index()
detailed_label_df = merged_df.drop_duplicates(
subset=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], keep=False)
detailed_label_df["status"] = "Found"
deleted_df = merged_df[
merged_df.duplicated(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], keep=False)]
deleted_df["status"] = "Mixed"
to_check_df = pd.concat(
[old_info_df,
merged_df.drop_duplicates(subset=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"],
keep='last')]).drop_duplicates(
subset=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], keep=False)
to_check_df = to_check_df.reset_index()
to_check_df = to_check_df.rename(
columns={"src_ip": "dst_ip", "dst_ip": "src_ip", "src_port": "dst_port",
"dst_port": "src_port"}).drop(
columns=["detailed_label", "detailed_label_count"])
to_check_df["src_ip"] = to_check_df["src_ip"].apply(lambda x: str(x).strip())
to_check_df["dst_ip"] = to_check_df["dst_ip"].apply(lambda x: str(x).strip())
to_check_df["src_port"] = to_check_df["src_port"].apply(lambda x: str(x).strip())
to_check_df["dst_port"] = to_check_df["dst_port"].apply(lambda x: str(x).strip())
to_check_df["ip_protocol"] = to_check_df["ip_protocol"].apply(lambda x: str(x).strip())
to_check_df["src_ip"] = to_check_df["src_ip"].astype(str)
to_check_df["dst_ip"] = to_check_df["dst_ip"].astype(str)
to_check_df["src_port"] = to_check_df["src_port"].astype(str)
to_check_df["dst_port"] = to_check_df["dst_port"].astype(str)
to_check_df["ip_protocol"] = to_check_df["ip_protocol"].astype(str)
to_check_df = to_check_df.set_index(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"])
merged_df_2 = to_check_df.merge(on=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"],
right=bro_original_df, how="left")
#merged_df_2 = merged_df_2.reset_index()
merged_df_2 = merged_df_2.rename(
columns={"src_ip": "dst_ip", "dst_ip": "src_ip", "src_port": "dst_port", "dst_port": "src_port"})
detailed_label_2_df = merged_df_2.dropna()
detailed_label_2_df["status"] = "Response"
deleted_2_df = merged_df_2[
merged_df_2.duplicated(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"], keep=False)]
deleted_2_df["status"] = "Mixed"
unknown_df = merged_df_2[merged_df_2.isnull().any(axis=1)]
unknown_df["status"] = "Unknown"
combined_detailed_label_df = detailed_label_df.append(detailed_label_2_df)
combined_detailed_label_2_df = combined_detailed_label_df.drop_duplicates(
subset=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"],
keep=False)
# combined_detailed_label_2_df["status"] = "Keep"
deleted_3_df = combined_detailed_label_df[
combined_detailed_label_df.duplicated(["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol"],
keep=False)]
combined_deleted_df = deleted_df.append(deleted_2_df).append(deleted_3_df)
combined_deleted_df = combined_deleted_df.drop_duplicates(
subset=["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol", 'detailed_label'],
keep='last')
combined_deleted_df["status"] = "Mixed"
combined_df = combined_detailed_label_2_df.append(combined_deleted_df).append(unknown_df)
combined_df["detailed_label"] = combined_df.detailed_label.astype(str)
combined_df["detailed_label"] = combined_df["detailed_label"].fillna(value="Unknown")
combined_df["detailed_label_count"] = combined_df["detailed_label_count"].fillna(value="0")
combined_df["detailed_label"] = combined_df["detailed_label"].replace(to_replace="nan", value="Unknown")
combined_df["detailed_label"] = combined_df["detailed_label"].replace(to_replace="-", value="Benign")
combined_df["label"] = np.where(combined_df["detailed_label"] == "Benign", "Benign", "Malicious")
combined_df["label"] = np.where(combined_df["detailed_label"] == "Unknown", "Unknown",
combined_df["label"])
columns_list = ["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol", "ip_tos", "scenario", "file",
"connection_length", "label", "detailed_label", "detailed_label_count", "status"]
combined_df = combined_df.reindex(columns=columns_list)
combined_df.to_csv(path_to_csv_file, index=False)
else:
old_info_df["label"] = "Unknown"
old_info_df["detailed_label"] = "Unknown"
old_info_df["detailed_label_count"] = 0
old_info_df["status"] = "Unknown"
columns_list = ["src_ip", "dst_ip", "src_port", "dst_port", "ip_protocol", "ip_tos", "scenario", "file",
"connection_length", "label", "detailed_label", "detailed_label_count", "status"]
old_info_df = combined_df.reindex(columns=columns_list)
old_info_df.to_csv(path_to_csv_file, index=False)
|
def negate(x: float) -> float:
    """Return the additive inverse of *x*."""
    return -x
def add(x: float, y: float) -> float:
    """Return the sum of *x* and *y*."""
    return x + y
def subtract(x: float, y: float) -> float:
    """Return *x* minus *y*."""
    return x - y
def multiply(x: float, y: float) -> float:
    """Return the product of *x* and *y*."""
    return x * y
def divide(x: float, y: float) -> float:
    """Return *x* divided by *y*.

    :raises ZeroDivisionError: if *y* is 0 (deliberately propagated).
    """
    return x / y
|
# 6-svep.py
# CircuitPython script: sweep a single NeoPixel's brightness from black to white
# over and over (all three RGB channels are driven with the same value).
import time
import board
import neopixel

pixel_pin = board.A0  # which pin the pixel is connected to
num_pixels = 1  # how many pixels
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.3, auto_write=False)


def lysa(color):
    pixels.fill(color)  # fill the pixel with the color
    pixels.show()  # light the pixel


color = 0  # color starts at 0
color_max = 255  # color_max is 255
color_min = 0  # color_min is 0
while True:  # loop while true is true (i.e. forever)
    current_color = (color, color, color)
    lysa(current_color)  # show the color
    # When the maximum is reached, wrap back to the minimum.
    # NOTE(review): because the increment below still runs after the reset,
    # the sweep restarts at 1, so 0 is only ever shown on the first pass.
    if(color == color_max):
        color = color_min
    color = color + 1  # add 1 to color
from Jumpscale import j
from Jumpscale.core.InstallTools import Tools
import os
import sys
# Convenience aliases to the Jumpscale core helpers (these intentionally
# shadow the direct InstallTools imports above).
Tools = j.core.tools
MyEnv = j.core.myenv
class SSHAgent(j.application.JSBaseClass):
    """Jumpscale client that proxies the ssh-agent state exposed by ``MyEnv.sshagent``."""

    __jslocation__ = "j.clients.sshagent"

    def _init(self):
        # Mirror the attributes and bound methods of the core sshagent object
        # onto this client so callers can use j.clients.sshagent directly.
        if MyEnv.sshagent:
            self._default_key = None  # lazily resolved by the ``key_default`` property
            self.ssh_socket_path = MyEnv.sshagent.ssh_socket_path
            self.available = MyEnv.sshagent.available
            self.keys_list = MyEnv.sshagent.keys_list
            self.key_names = MyEnv.sshagent.key_names
            self.key_paths = MyEnv.sshagent.key_paths
            self.key_default_name = MyEnv.sshagent.key_default
            self.profile_js_configure = MyEnv.sshagent.profile_js_configure
            self.kill = MyEnv.sshagent.kill
            self.start = MyEnv.sshagent.start
        else:
            raise RuntimeError("cannot use sshagent, maybe not initted?")

    @property
    def key_default(self):
        """
        see if we can find the default sshkey using sshagent

        j.clients.sshagent.key_default

        :raises RuntimeError: sshkey not found in sshagent
        :raises RuntimeError: more than one sshkey is found in sshagent
        :return: j.clients.sshkey.new() ...
        :rtype: sshkey object or None
        """
        if not self._default_key:
            raise RuntimeError("not implemented yet")
            # NOTE(review): the next line is unreachable (it follows a raise) and
            # references an undefined name ``key`` — leftover from the
            # commented-out lookup loop below. Remove or finish the implementation.
            self._default_key = j.clients.sshkey.get(name=self.key_default_name, pubkey=key)
            # for path, key in self.keys_list(True):
            #     name = j.sal.fs.getBaseName(path).lower()
            #     if name == MyEnv.config["SSH_KEY_DEFAULT"]:
            #         if Tools.exists(path):
            #             self._default_key = j.clients.sshkey.get(name=self.key_default_name, pubkey=key)
            #         else:
            #             self._default_key = j.clients.sshkey.get(name=self.key_default_name, pubkey=key, path=path)
            #
            #     return self._default_key
            # return None
        return self._default_key

    def key_path_get(self, keyname="", die=True):
        """
        Returns Path of public key that is loaded in the agent

        :param keyname: name of key loaded to agent to get its path, if empty will check if there is 1 loaded, defaults to ""
        :type keyname: str, optional
        :param die: Raise error if True, else do nothing, defaults to True
        :type die: bool, optional
        :raises RuntimeError: Key not found with given keyname
        :return: path of public key
        :rtype: str
        """
        # Normalize to a bare file name so both paths and names match below.
        keyname = j.sal.fs.getBaseName(keyname)
        for item in self.keys_list():
            if item.endswith(keyname):
                return item
        if die:
            raise RuntimeError("Did not find key with name:%s, check its loaded in ssh-agent with ssh-add -l" % keyname)

    def key_pub_get(self, keyname=None):
        """
        Returns Content of public key that is loaded in the agent

        :param keyname: name of key loaded to agent to get content from, if not specified is default
        :type keyname: str
        :raises RuntimeError: Key not found with given keyname
        :return: Content of public key
        :rtype: str
        """
        key = self._paramiko_key_get(keyname)
        # NOTE(review): ``j.shell()`` drops into an interactive debug shell and the
        # method never returns the key content promised by the docstring —
        # this looks like an unfinished implementation.
        j.shell()

    def _paramiko_keys_get(self):
        # Query the running ssh-agent (via paramiko) for all loaded keys.
        import paramiko.agent

        a = paramiko.agent.Agent()
        return [key for key in a.get_keys()]

    def _paramiko_key_get(self, keyname=None):
        # Resolve a single paramiko AgentKey by name, defaulting to the
        # environment's configured default key name.
        if not keyname:
            keyname = j.core.myenv.sshagent.key_default
        for key in self._paramiko_keys_get():
            # ISSUE, is always the same name, there is no way how to figure out which sshkey to use?
            if key.name == keyname:
                # maybe we can get this to work using comparing of the public keys?
                return key
        raise RuntimeError("could not find key:%s" % keyname)

    def sign(self, data, keyname=None, hash=True):
        """
        will sign the data with the ssh-agent loaded

        :param data: the data to sign (str is encoded to bytes first)
        :param keyname: name of the key used to sign; default key when None
        :param hash: when True, return the sha256 digest of the signature
            instead of the raw signature bytes
        :return: signature (or its sha256 digest) as bytes
        """
        if not j.data.types.bytes.check(data):
            data = data.encode()
        self._init()
        import hashlib

        key = self._paramiko_key_get(keyname)
        # The agent signs the sha1 digest of the payload, not the raw payload.
        data_sha1 = hashlib.sha1(data).digest()
        res = key.sign_ssh_data(data_sha1)
        if hash:
            m = hashlib.sha256()
            m.update(res)
            return m.digest()
        else:
            return res

    def _start(self):
        """
        start ssh-agent, kills other agents if more than one are found

        :raises RuntimeError: Couldn't start ssh-agent
        :raises RuntimeError: ssh-agent was not started while there was no error
        :raises RuntimeError: Could not find pid items in ssh-add -l
        """
        socketpath = self.ssh_socket_path
        # Kill any ssh-agent process that is not bound to our socket path.
        ssh_agents = j.sal.process.getPidsByFilter("ssh-agent")
        for pid in ssh_agents:
            p = j.sal.process.getProcessObject(pid)
            if socketpath not in p.cmdline():
                j.sal.process.kill(pid)
        if not Tools.exists(socketpath):
            j.sal.fs.createDir(j.sal.fs.getParent(socketpath))
            # ssh-agent not loaded
            self._log_info("start ssh agent")
            rc, out, err = Tools.execute("ssh-agent -a %s" % socketpath, die=False, showout=False, timeout=20)
            if rc > 0:
                raise RuntimeError("Could not start ssh-agent, \nstdout:%s\nstderr:%s\n" % (out, err))
            else:
                if not Tools.exists(socketpath):
                    err_msg = "Serious bug, ssh-agent not started while there was no error, " "should never get here"
                    raise RuntimeError(err_msg)
                # get pid from out of ssh-agent being started
                piditems = [item for item in out.split("\n") if item.find("pid") != -1]
                # print(piditems)
                if len(piditems) < 1:
                    self._log_debug("results was: %s", out)
                    raise RuntimeError("Cannot find items in ssh-add -l")
                self._init()
                pid = int(piditems[-1].split(" ")[-1].strip("; "))
                # Persist the agent pid so it can be found/killed later.
                socket_path = j.sal.fs.joinPaths("/tmp", "ssh-agent-pid")
                j.sal.fs.writeFile(socket_path, str(pid))
                # self.sshagent_init()
                j.clients.sshkey._sshagent = None
            self._available = None
            return
        # Socket already existed: just invalidate the cached agent handle.
        j.clients.sshkey._sshagent = None

    def test(self):
        """
        kosmos 'j.clients.sshagent.test()'
        """
        self._log_info("sshkeys:%s" % j.clients.sshkey.listnames())
        if self.available():
            self._log_info("sshkeys:%s" % self.key_paths)
        j.clients.sshagent.kill()  # goal is to kill & make sure it get's loaded automatically
        j.clients.sshagent.start()
        # lets generate an sshkey with a passphrase
        passphrase = "12345"
        path = "/root/.ssh/test_key"
        skey = j.clients.sshkey.get(name="test", path=path, passphrase=passphrase)
        skey.save()
        # this will reload the key from the db
        skey_loaded = j.clients.sshkey.get(name="test")
        assert skey_loaded.data._ddict == skey.data._ddict
        skey.generate(reset=True)
        skey.load()
        assert skey.is_loaded()
        if not j.core.platformtype.myplatform.platform_is_osx:
            # on mac does not seem to work
            skey.unload()
            assert skey.is_loaded() is False
        # Second key, without a passphrase.
        path = "/root/.ssh/test_key_2"
        skey2 = j.clients.sshkey.get(name="test2", path=path)
        skey2.generate(reset=True)
        skey2.load()
        assert skey2.is_loaded()
        skey2.unload()
        assert skey2.is_loaded() is False
        assert self.available()
        self.kill()
        self.start()
        assert self.available()
        # Clean up after test
        self.kill()
        skey.delete_from_sshdir()
        skey2.delete_from_sshdir()
        skey.delete()
        skey2.delete()
|
import os
import lmdb # install lmdb by "pip install lmdb"
from PIL import Image
def checkImageIsValid(file):
    """Return True when *file* can be opened and fully decoded as an image."""
    try:
        # load() forces a full decode; open() alone only reads the header.
        Image.open(file).load()
    except OSError:
        return False
    return True
def writeCache(env, cache):
    """Persist every (key, value) pair of *cache* into LMDB *env* in one write transaction."""
    with env.begin(write=True) as txn:
        for key, value in cache.items():
            txn.put(key.encode(), value)
def is_alpha_numeric(label, alphabet):
    """Return True if every character of *label* appears in *alphabet*.

    An empty *label* is vacuously True (same as the original loop).
    """
    return all(ch in alphabet for ch in label)
def main(outputPath, checkValid=True):
    """Build an LMDB text-recognition dataset from the IC13 Task3 ground truth.

    Reads ``gt.txt`` (lines of ``<image>, "<label>"``), keeps alphanumeric
    labels of length >= 3, and stores image bytes, label and path under
    ``image_/label_/path_<index>`` keys, flushing the cache every 1000 items.

    :param outputPath: directory for the LMDB environment
    :param checkValid: when True, skip files that cannot be decoded as images
    """
    root_path = "/root/IC13_Task3_Test/"
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    env = lmdb.open(outputPath, map_size=1099511627776)
    cache = {}
    cnt = 1
    # Iterating the file directly fixes the original bug where a missing image
    # path hit ``continue`` without advancing the line -> infinite loop; the
    # ``with`` block also closes the ground-truth file the original leaked.
    with open("/root/IC13_Task3_Test/gt.txt", 'r') as gt_file:
        for line in gt_file:
            tmp = line.split(',')
            path_key = tmp[0].split('.')[0]
            img_path = root_path + tmp[0]
            label = tmp[1].strip().strip('"')
            if not is_alpha_numeric(label, alphabet) or len(label) < 3:
                # print("ignore", label)
                continue
            if not os.path.exists(img_path):
                print('%s does not exist' % img_path)
                continue
            if checkValid:
                if not checkImageIsValid(img_path):
                    print('%s is not a valid image' % img_path)
                    continue
            with open(img_path, 'rb') as f:
                imageBin = f.read()
            imageKey = "image_{:09d}".format(cnt)
            labelKey = "label_{:09d}".format(cnt)
            pathKey = "path_{:09d}".format(cnt)
            cache[imageKey] = imageBin
            cache[labelKey] = (label).encode(encoding="utf-8")
            cache[pathKey] = (path_key).encode(encoding="utf-8")
            if cnt % 1000 == 0:
                writeCache(env, cache)
                cache = {}
                print('Written %d' % (cnt))
            cnt += 1
    nSamples = cnt - 1
    cache['num-samples'] = str(nSamples).encode()
    writeCache(env, cache)
    print('Created dataset with %d samples' % nSamples)
if __name__ == '__main__':
    main("/root/project/data/lmdb_ic13")  # change this output path for the test vs. train set
|
# NOTE(review): legacy Python 2 script (bare ``print`` statements) exercising the
# cxxnet wrapper on MNIST: train one round, then check that predict/extract give
# identical results from the iterator and from a raw batch, and that weights can
# be snapshotted and restored.
import sys
sys.path.append('../../wrapper/')
import cxxnet
import numpy as np

# Training iterator: MNIST train images/labels, shuffled, 1x1x784 inputs, batch 100.
data = cxxnet.DataIter("""
iter = mnist
path_img = "./data/train-images-idx3-ubyte.gz"
path_label = "./data/train-labels-idx1-ubyte.gz"
shuffle = 1
iter = end
input_shape = 1,1,784
batch_size = 100
""")
print 'init data iter'
# Evaluation iterator: MNIST test set, no shuffling.
deval = cxxnet.DataIter("""
iter = mnist
path_img = "./data/t10k-images-idx3-ubyte.gz"
path_label = "./data/t10k-labels-idx1-ubyte.gz"
iter = end
input_shape = 1,1,784
batch_size = 100
""")
# Network definition: 784 -> fullc(100) -> sigmoid -> fullc(10) -> softmax.
cfg = """
netconfig=start
layer[+1:fc1] = fullc:fc1
nhidden = 100
init_sigma = 0.01
layer[+1:sg1] = sigmoid:se1
layer[sg1->fc2] = fullc:fc2
nhidden = 10
init_sigma = 0.01
layer[+0] = softmax
netconfig=end
input_shape = 1,1,784
batch_size = 100
random_type = gaussian
"""
# Optimizer / device settings for one training round on CPU.
param = {}
param['eta'] = 0.1
param['dev'] = 'cpu'
param['momentum'] = 0.9
param['metric[label]'] = 'error'
net = cxxnet.train(cfg, data, 1, param, eval_data = deval)
# Snapshot all layer weights so they can be restored and re-checked at the end.
weights = []
for layer in ['fc1', 'fc2']:
    for tag in ['wmat', 'bias']:
        weights.append((layer, tag, net.get_weight(layer, tag)))
data.before_first()
data.next()
# extract
print 'predict'
pred = net.predict(data)
print 'predict finish'
dbatch = data.get_data()
print dbatch.shape
print 'get data'
# Predicting from the raw batch should match predicting from the iterator
# (both sums below should print ~0).
pred2 = net.predict(dbatch)
print np.sum(np.abs(pred - pred2))
print np.sum(np.abs(net.extract(data, 'sg1') - net.extract(dbatch, 'sg1')))
# evaluate
deval.before_first()
werr = 0
wcnt = 0
while deval.next():
    label = deval.get_label()
    pred = net.predict(deval)
    werr += np.sum(label[:,0] != pred[:])
    wcnt += len(label[:,0])
print 'eval-error=%f' % (float(werr) / wcnt)
# training
data.before_first()
while data.next():
    label = data.get_label()
    batch = data.get_data()
    net.update(batch, label)
# evaluate
deval.before_first()
werr = 0
wcnt = 0
while deval.next():
    label = deval.get_label()
    pred = net.predict(deval)
    werr += np.sum(label[:,0] != pred[:])
    wcnt += len(label[:,0])
print 'eval-error2=%f' % (float(werr) / wcnt)
# Restore the snapshotted weights; the next evaluation should match the first.
for layer, tag, wt in weights:
    net.set_weight(wt, layer, tag)
# evaluate
deval.before_first()
werr = 0
wcnt = 0
while deval.next():
    label = deval.get_label()
    pred = net.predict(deval)
    werr += np.sum(label[:,0] != pred[:])
    wcnt += len(label[:,0])
print 'eval-error-after-setback=%f' % (float(werr) / wcnt)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup package."""
from setuptools import setup, find_packages
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
import sys
import os
import importlib.util
class BuildPy(build_py):
    """Custom ``build_py`` command to always build mo files for wheels."""

    def run(self):
        """Run the python build process."""
        # Compile gettext catalogs first so the generated ``.mo`` files are
        # present when the normal build step collects package data below.
        self.run_command('compile_catalog')
        build_py.run(self)
class Sdist(sdist):
    """Custom `sdist` command to ensure that we don't include `.mo` files in source."""

    def _clean_mo_files(self, path):
        # Recursively delete compiled translation catalogs under ``path`` so the
        # source distribution ships only the editable ``.po`` sources.
        for root, dirs, files in os.walk(path):
            for f in files:
                if f.lower().endswith('.mo'):
                    os.remove(os.path.join(root, f))

    def run(self):
        """Run the `sdist` build process."""
        self._clean_mo_files("rummage/lib/gui/localization/locale")
        sdist.run(self)
def get_version():
    """Get `__version__` and `__version_info__` without importing the entire module."""
    # Load only the metadata module from its file so importing the package
    # (and its heavy dependencies) is avoided at setup time.
    meta_path = os.path.join(os.path.dirname(__file__), 'rummage', 'lib', '__meta__.py')
    spec = importlib.util.spec_from_file_location("__meta__", meta_path)
    meta = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(meta)
    version_info = meta.__version_info__
    return version_info._get_canonical(), version_info._get_dev_status()
def get_requirements(req):
    """Load the list of dependencies from requirements file *req*.

    Blank lines and comment lines are skipped. (The original appended the
    empty string for blank lines and kept indented ``#`` comments because it
    tested ``startswith`` before stripping.)
    """
    install_requires = []
    with open(req) as f:
        for line in f:
            requirement = line.strip()
            if requirement and not requirement.startswith("#"):
                install_requires.append(requirement)
    return install_requires
def get_description():
    """Get long description from ``README.md``.

    The file is decoded explicitly as UTF-8: relying on the locale default
    breaks on platforms where it is not UTF-8 (e.g. Windows cp1252) once the
    README contains non-ASCII characters.
    """
    with open("README.md", 'r', encoding="utf-8") as f:
        desc = f.read()
    return desc
# Resolve the package version and PyPI development-status trove from metadata.
VER, DEVSTATUS = get_version()

# GUI entry points: a plain ``rummage`` command plus one suffixed with the
# running interpreter's major.minor version (e.g. ``rummage3.8``).
entry_points = {
    'gui_scripts': [
        'rummage=rummage.__main__:main',
        'rummage%d.%d=rummage.__main__:main' % sys.version_info[:2]
    ]
}

setup(
    name='rummage',
    # Hook in the custom commands defined above (catalog compilation / cleanup).
    cmdclass={
        'build_py': BuildPy,
        'sdist': Sdist
    },
    python_requires=">=3.6",
    version=VER,
    keywords='grep search find replace gui',
    description='A GUI search and replace app.',
    long_description=get_description(),
    long_description_content_type='text/markdown',
    author='Isaac Muse',
    author_email='Isaac.Muse@gmail.com',
    url='https://github.com/facelessuser/Rummage',
    packages=find_packages(exclude=['tests', 'tools']),
    setup_requires=get_requirements("requirements/setup.txt"),
    install_requires=get_requirements("requirements/project.txt"),
    extras_require={
        'extras': get_requirements("requirements/extras.txt")
    },
    zip_safe=False,
    entry_points=entry_points,
    include_package_data=True,
    license='MIT License',
    classifiers=[
        'Development Status :: %s' % DEVSTATUS,
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
|
import os
import pytest
from endpoint import app
# Wikimedia sample images used as classifier inputs in the tests below.
HOT_DOG_URL='https://upload.wikimedia.org/wikipedia/commons/thumb/f/fb/Hotdog_-_Evan_Swigart.jpg/320px-Hotdog_-_Evan_Swigart.jpg'
TACO_URL='https://upload.wikimedia.org/wikipedia/commons/thumb/7/73/001_Tacos_de_carnitas%2C_carne_asada_y_al_pastor.jpg/320px-001_Tacos_de_carnitas%2C_carne_asada_y_al_pastor.jpg'
@pytest.fixture
def client():
    """Yield a Flask test client with TESTING mode enabled."""
    app.config['TESTING'] = True
    with app.test_client() as test_client:
        yield test_client
def test_empty_request(client):
    """A POST with no payload must not succeed."""
    response = client.post('/classify')
    assert response.status_code != 200
def test_invalid_url(client):
    """An unresolvable image URL must not succeed."""
    response = client.post('/classify', data={'url': 'https://this-is-not-a-domain.xyz'})
    assert response.status_code != 200
def test_hot_dog_url(client):
    """The hot-dog sample image is classified as 'hot_dogs'."""
    response = client.post('/classify', data={'url': HOT_DOG_URL})
    assert response.get_json()['class'] == 'hot_dogs'
def test_taco_url(client):
    """The taco sample image is classified as 'tacos'."""
    response = client.post('/classify', data={'url': TACO_URL})
    assert response.get_json()['class'] == 'tacos'
import numpy as np
from .core import Signal, signal
@signal
class Operation(Signal):
    """Base class for binary signal operations; holds the two operand signals."""

    left: Signal   # first operand
    right: Signal  # second operand
class Plus(Operation):
    """Add a signal and a signal or value.

    >>> one = Value(1)
    >>> two = Value(2)
    >>> result = 3 + one + two + 3
    >>> result.render_frame()
    >>> assert np.all(result.output == 9.0)
    """

    def __call__(self):
        # Element-wise add, written directly into this signal's output buffer.
        np.add(self.left.output, self.right.output, out=self.output)


# Register Plus for both operand orders — addition is commutative.
signal("__add__")(Plus)
signal("__radd__")(Plus)
@signal("__sub__")
class Minus(Operation):
"""Subtract a signal or value from a signal.
>>> one = Value(1)
>>> result = one - one - 1
>>> result.render_frame()
>>> assert np.all(result.output == -1.0)
"""
def __call__(self):
np.subtract(self.left.output, self.right.output, out=self.output)
@signal("__rsub__")
class RMinus(Operation):
"""Subtract a signal from a value.
>>> one = Value(1)
>>> result = 2 - one
>>> result.render_frame()
>>> assert np.all(result.output == 1.0)
"""
def __call__(self):
np.subtract(self.right.output, self.left.output, out=self.output)
class Mul(Operation):
    """Multiply a signal and a signal or value.

    >>> two = Value(2)
    >>> three = Value(3)
    >>> result = 1 * two * three * 4
    >>> result.render_frame()
    >>> assert np.all(result.output == 24.0)
    """

    def __call__(self):
        # Element-wise multiply into this signal's output buffer.
        np.multiply(self.left.output, self.right.output, out=self.output)


# Register Mul for both operand orders — multiplication is commutative.
signal("__mul__")(Mul)
signal("__rmul__")(Mul)
@signal("__mod__")
class Mod(Operation):
"""Calculate the modulus of a signal.
>>> two = Value(2)
>>> three = Value(3)
>>> result = three % two
>>> result.render_frame()
>>> assert np.all(result.output == 1)
"""
def __call__(self):
np.mod(self.left.output, self.right.output, out=self.output)
@signal("__lt__")
class LT(Operation):
"""Calculate the modulus of a signal.
>>> two = Value(2)
>>> three = Value(3)
>>> result = three < two
>>> result.render_frame()
>>> assert np.all(result.output == 0)
"""
def __call__(self):
np.less(self.left.output, self.right.output, out=self.output)
@signal("__gt__")
class GT(Operation):
"""Calculate the modulus of a signal.
>>> two = Value(2)
>>> three = Value(3)
>>> result = three > two
>>> result.render_frame()
>>> assert np.all(result.output == 1)
"""
def __call__(self):
np.greater(self.left.output, self.right.output, out=self.output)
@signal("__abs__")
class Abs(Signal):
"""Subtract a signal from a value.
>>> one = Value(-1)
>>> result = abs(one)
>>> result.render_frame()
>>> assert np.all(result.output == 1.0)
"""
signal: Signal
def __call__(self):
np.abs(self.signal.output, out=self.output)
@signal("__pow__")
class Pow(Operation):
"""Calculate the modulus of a signal.
>>> two = Value(2)
>>> three = Value(3)
>>> result = three ** two
>>> result.render_frame()
>>> assert np.all(result.output == 3 ** 2)
"""
def __call__(self):
np.power(self.left.output, self.right.output, out=self.output)
@signal("__rpow__")
class RPow(Operation):
"""Calculate the modulus of a signal.
>>> two = Value(2)
>>> result = 2 ** two
>>> result.render_frame()
>>> assert np.all(result.output == 2 ** 2)
"""
def __call__(self):
np.power(self.right.output, self.left.output, out=self.output)
|
import numpy as np
import time
import cv2
import copy
import os
import os.path as path
import imageio
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
import argparse
from numpy.linalg import inv
import torch
from train_network import data_transform
import train_network
import tools
# Command-line setup for the reconstruction-network test driver.
desc = 'Test reconstruction network'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-d', '--device_no',
                    type=int,
                    choices=[0, 1, 2, 3, 4, 5, 6, 7],
                    help='GPU device number [0-7]',
                    default=0)
# NOTE(review): ``type=bool`` is an argparse pitfall — any non-empty string
# (including "False") parses as True. ``action='store_true'`` would be the
# usual fix, but that changes the CLI shape; verify against callers.
parser.add_argument('-avg', '--average_dof',
                    type=bool,
                    help='give the average bof within a sample',
                    default=False)
args = parser.parse_args()
device_no = args.device_no

# Case-id splits are loaded at import time from the ``infos`` directory.
train_ids = np.loadtxt('infos/train_ids.txt').astype(np.int64)
val_ids = np.loadtxt('infos/val_ids.txt').astype(np.int64)
test_ids = np.loadtxt('infos/test_ids.txt').astype(np.int64)
all_ids = np.concatenate((train_ids, val_ids, test_ids), axis=0)

# Ultrasound region mask, loaded as grayscale; used by ``mask_us`` below.
mask_img = cv2.imread('data/US_mask.png', 0)
# frames_folder = '/home/guoh9/tmp/US_vid_frames'
# pos_folder = '/home/guoh9/tmp/US_vid_pos'
# frames_folder = '/zion/guoh9/US_recon/US_vid_frames'
# pos_folder = '/zion/guoh9/US_recon/US_vid_pos'
# frames_folder = 'data/US_vid_frames'
# pos_folder = 'data/US_vid_pos'
# cali_folder = 'data/US_cali_mats'
data_folder = 'data'
def read_aurora(file_path):
    """Read an Aurora position file and reorganize it into a matrix.

    The first line (header) and the last line are excluded; on every kept
    line the leading token is dropped and the remaining columns are parsed
    as float32.

    :param file_path: path of the Aurora position file
    :return: (frame_number x 9) float32 matrix, one positioning vector per row
    """
    # ``with`` closes the file; the original leaked the handle.
    with open(file_path, 'r') as f:
        lines = f.readlines()
    pos_np = []
    for line_index in range(1, len(lines) - 1):  # exclude the first line and last line
        values = lines[line_index].split()
        pos_np.append(np.asarray(values[1:]).astype(np.float32))
    return np.asarray(pos_np)
def save_all_aurora_pos():
    """
    This function uses read_aurora function to convert Aurora.pos file into (N x 9) matrix
    Save such txt files for all 640 cases
    """
    # Cases are enumerated from the frames folder; each case's Aurora.pos is
    # read from the shared project folder and the parsed matrix written as txt.
    check_folder = '/home/guoh9/tmp/US_vid_frames'
    project_folder = '/zion/common/data/uronav_data'
    dst_folder = '/home/guoh9/tmp/US_vid_pos'
    case_list = os.listdir(check_folder)
    case_list.sort()
    for case_index in range(len(case_list)):
        case_id = case_list[case_index]
        pos_path = path.join(project_folder, case_id, '{}_Aurora.pos'.format(case_id))
        pos_np = read_aurora(file_path=pos_path)
        # print(pos_np.shape)
        dst_path = path.join(dst_folder, '{}.txt'.format(case_id))
        np.savetxt(dst_path, pos_np)
        print('{} {} saved'.format(case_id, pos_np.shape))
    print('ALL FINISHED')
def save_vid_gifs():
    """
    Convert the frames of video to a gif
    """
    project_folder = '/home/guoh9/tmp/US_vid_frames'
    dst_folder = '/home/guoh9/tmp/US_vid_gif'
    case_list = os.listdir(project_folder)
    case_list.sort()
    kargs = {'duration': 0.05}  # seconds per frame in the generated gif
    for case in case_list:
        case_folder = os.path.join(project_folder, case)
        frames_list = os.listdir(case_folder)
        frames_list.sort()
        imgs = []
        # Collect every frame of this case in order, then write one gif.
        for frame in frames_list:
            frame_path = path.join(case_folder, frame)
            frame_img = cv2.imread(frame_path)
            imgs.append(frame_img)
        imageio.mimsave(path.join(dst_folder, '{}.gif'.format(case)), imgs, **kargs)
        print('{}.gif saved'.format(case))
    print('ALL CASES FINISHED!!!')
def segmentation_us(input_img):
    """Experimental US segmentation: threshold the log2 intensity image and
    morphologically close it, saving the result to ``closed.jpg``.

    NOTE(review): returns None and leaves ``closed_copy`` unused — this looks
    like work in progress rather than a finished API.
    """
    # mask_img = cv2.imread('data/US_mask.png', 0)
    # mask_img[mask_img > 50] = 255
    # mask_img[mask_img <= 50] = 0
    #
    # # input_img[mask_img > 50] = 255
    # input_img[mask_img <= 50] = 0
    #
    # cv2.imshow('mask', input_img)
    # cv2.waitKey(0)
    img = np.log2(input_img, dtype=np.float32)
    img = cv2.medianBlur(img, 5)
    ret, thresh = cv2.threshold(img, 0.5, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    closed_copy = copy.copy(closed)
    cv2.imwrite('closed.jpg', closed)
def mask_us(input_img):
    """
    Use the manually created mask to segment useful US areas
    :param input_img: ultrasound frame; NOTE: modified in place
    :return: masked US image (the same array object)
    """
    # mask_img[mask_img > 50] = 255
    # mask_img[mask_img <= 50] = 0
    # input_img[mask_img > 50] = 255
    # Zero out pixels where the module-level ``mask_img`` is very dark (<= 20).
    input_img[mask_img <= 20] = 0
    return input_img
def params_to_mat44(trans_params, cam_cali_mat):
    """Convert an Aurora pose line into an inverted, calibrated 4 x 4 transform.

    :param trans_params: transformation parameters from Aurora.pos; only the
        last 7 entries are used — 3 translations followed by an
        (x, y, z, w) quaternion for rotation
    :param cam_cali_mat: 4 x 4 camera calibration matrix
    :return: 4 x 4 transformation matrix
    """
    if trans_params.shape[0] == 9:
        trans_params = trans_params[2:]  # drop the two leading bookkeeping values
    translation = trans_params[:3]
    quaternion = trans_params[3:]
    # Assemble the homogeneous pose from the quaternion rotation + translation.
    pose = np.zeros((4, 4))
    pose[:3, :3] = R.from_quat(quaternion).as_matrix()
    pose[:3, 3] = translation
    pose[3, 3] = 1
    # Apply the calibration, then invert to map into frame coordinates.
    return inv(np.dot(cam_cali_mat, pose))
def plot_2d_in_3d(trans_params, frame_color='b', input_img=np.ones((480, 640)),
                  cam_cali_mat=None):
    """
    Plot a 2D frame into 3D space for sequence visualization
    :param input_img: input image frame (only its height/width are used)
    :param trans_params: Aurora position file line of position
    :param frame_color: matplotlib color for the frame outline
    :param cam_cali_mat: 4 x 4 camera calibration matrix; identity when None
    """
    if cam_cali_mat is None:
        # BUG FIX: params_to_mat44 requires cam_cali_mat; the original call
        # omitted it and raised TypeError. Identity keeps old call sites working.
        cam_cali_mat = np.eye(4)
    h, w = input_img.shape
    # Homogeneous coordinates of the image rectangle's four corners.
    corner_pts = np.asarray([[0, 0, 0],
                             [0, w, 0],
                             [h, w, 0],
                             [h, 0, 0]])
    corner_pts = np.concatenate((corner_pts, np.ones((4, 1))), axis=1)
    corner_pts = np.transpose(corner_pts)
    print('imgshape {}'.format(input_img.shape))
    print('corner_pts:\n{}'.format(corner_pts))
    trans_mat = params_to_mat44(trans_params=trans_params, cam_cali_mat=cam_cali_mat)
    print('trans_mat:\n{}'.format(trans_mat))
    transformed_corner_pts = np.dot(trans_mat, corner_pts)
    print('transformed_corner_pts:\n{}'.format(transformed_corner_pts))
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    # Draw the four edges of the transformed rectangle (index -1 wraps around).
    for i in range(-1, 3):
        xs = transformed_corner_pts[0, i], transformed_corner_pts[0, i+1]
        ys = transformed_corner_pts[1, i], transformed_corner_pts[1, i+1]
        zs = transformed_corner_pts[2, i], transformed_corner_pts[2, i+1]
        ax.plot(xs, ys, zs, color=frame_color)
    plt.show()
def plot_2d_in_3d_test(trans_params1, trans_params2,
                       frame_color='b', input_img=np.ones((480, 640)),
                       cam_cali_mat=None):
    """
    Plot a 2D frame into 3D space for sequence visualization
    :param input_img: input image frame (only its height/width are used)
    :param trans_params1: Aurora pose line of the plotted frame
    :param trans_params2: Aurora pose line of the reference frame
    :param frame_color: matplotlib color for the frame outline
    :param cam_cali_mat: 4 x 4 camera calibration matrix; identity when None
    """
    if cam_cali_mat is None:
        # BUG FIX: params_to_mat44 requires cam_cali_mat; the original calls
        # omitted it and raised TypeError. Identity keeps old call sites working.
        cam_cali_mat = np.eye(4)
    h, w = input_img.shape
    # Homogeneous coordinates of the image rectangle's four corners.
    corner_pts = np.asarray([[0, 0, 0],
                             [0, w, 0],
                             [h, w, 0],
                             [h, 0, 0]])
    corner_pts = np.concatenate((corner_pts, np.ones((4, 1))), axis=1)
    corner_pts = np.transpose(corner_pts)
    print('imgshape {}'.format(input_img.shape))
    print('corner_pts:\n{}'.format(corner_pts))
    trans_mat1 = params_to_mat44(trans_params=trans_params1, cam_cali_mat=cam_cali_mat)
    trans_mat2 = params_to_mat44(trans_params=trans_params2, cam_cali_mat=cam_cali_mat)
    print('trans_mat1 shape {}, trans_mat2 shape {}'.format(trans_mat1.shape, trans_mat2.shape))
    print('trans_mat1 shape\n{}\ntrans_mat2 shape\n{}'.format(trans_mat1, trans_mat2))
    # Sanity check: frame2 = frame1 @ relative, so recomposing must reproduce it.
    relative_mat = np.dot(inv(trans_mat1), trans_mat2)
    original_mat2 = np.dot(trans_mat1, relative_mat)
    print('relative_mat\n{}'.format(relative_mat))
    print('original_mat2\n{}'.format(original_mat2))
    transformed_corner_pts = np.dot(trans_mat1, corner_pts)
    print('transformed_corner_pts:\n{}'.format(transformed_corner_pts))
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    # Draw the four edges of the transformed rectangle (index -1 wraps around).
    for i in range(-1, 3):
        xs = transformed_corner_pts[0, i], transformed_corner_pts[0, i+1]
        ys = transformed_corner_pts[1, i], transformed_corner_pts[1, i+1]
        zs = transformed_corner_pts[2, i], transformed_corner_pts[2, i+1]
        ax.plot(xs, ys, zs, color=frame_color)
    plt.show()
def visualize_frames(case_id):
    """List a case's frames/positions and build a blue-to-red color ramp per frame.

    NOTE(review): ``frames_folder`` and ``pos_folder`` are commented out at
    module level, so this raises NameError as written; the loop body also only
    sleeps — looks like an abandoned debug stub.
    """
    case_frames_path = path.join(frames_folder, 'Case{:04}'.format(case_id))
    frames_list = os.listdir(case_frames_path)
    frames_list.sort()
    case_pos_path = path.join(pos_folder, 'Case{:04}.txt'.format(case_id))
    case_pos = np.loadtxt(case_pos_path)
    print('frames_list {}, case_pos {}'.format(len(frames_list), case_pos.shape))
    frames_num = case_pos.shape[0]
    # Linear color ramp: red rises 0->255 while blue falls 255->0 across frames.
    colors_R = np.linspace(0, 255, frames_num).astype(np.int16).reshape((frames_num, 1))
    colors_G = np.zeros((frames_num, 1))
    colors_B = np.linspace(255, 0, frames_num).astype(np.int16).reshape((frames_num, 1))
    colors = np.concatenate((colors_R, colors_G, colors_B), axis=1)
    for frame_id in range(frames_num):
        frame_pos = case_pos[frame_id, :]
        frame_color = tuple(colors[frame_id, :])
        time.sleep(30)
class VisualizeSequence():
    """Render every tracked ultrasound frame of one case as a quad in 3-D.

    Loads the case's frame images, Aurora positions and camera calibration,
    then draws each frame's outline (color-graded blue -> red over time) into
    a single matplotlib 3-D axes.

    NOTE(review): all work, including the blocking plt.show(), happens inside
    __init__ — constructing the object blocks until the window is closed.
    """

    def __init__(self, case_id):
        """:param case_id: numeric case id, formatted into 'CaseNNNN' paths"""
        super(VisualizeSequence, self).__init__()
        self.case_frames_path = path.join(frames_folder, 'Case{:04}'.format(case_id))
        self.frames_list = os.listdir(self.case_frames_path)
        self.frames_list.sort()
        # Calibration: image upper-left corner (pixels) -> ultrasound tracker (mm).
        self.cam_cali_mat = np.loadtxt('/zion/common/data/uronav_data/Case{:04}/'
                                       'Case{:04}_USCalib.txt'.format(case_id, case_id))
        case_pos_path = path.join(pos_folder, 'Case{:04}.txt'.format(case_id))
        self.case_pos = np.loadtxt(case_pos_path)
        print('frames_list {}, case_pos {}'.format(len(self.frames_list), self.case_pos.shape))
        self.frames_num = self.case_pos.shape[0]
        # One RGB color per frame, fading blue (first) -> red (last).
        colors_R = np.linspace(0, 1, self.frames_num).reshape((self.frames_num, 1))
        colors_G = np.zeros((self.frames_num, 1))
        colors_B = np.linspace(1, 0, self.frames_num).reshape((self.frames_num, 1))
        self.colors = np.concatenate((colors_R, colors_G, colors_B), axis=1)
        self.fig = plt.figure()
        self.ax = self.fig.gca(projection='3d')

        def plot_frame3d(trans_params, frame_color=(255, 0, 0),
                         input_img=np.ones((480, 640)), plot_img=False):
            """
            Plot a 2D frame into 3D space for sequence visualization
            :param input_img: input image frame
            :param trans_params: Aurora position file line of position
            """
            h, w = input_img.shape
            # corner_pts = np.asarray([[0, 0, 0],
            #                          [0, w, 0],
            #                          [h, w, 0],
            #                          [h, 0, 0]])
            # Frame corners in image space (homogeneous columns after transpose).
            corner_pts = np.asarray([[-h, 0, 0],
                                     [-h, -w, 0],
                                     [0, -w, 0],
                                     [0, 0, 0]])
            corner_pts = np.concatenate((corner_pts, np.ones((4, 1))), axis=1)
            corner_pts = np.transpose(corner_pts)
            print('imgshape {}'.format(input_img.shape))
            print('corner_pts:\n{}'.format(corner_pts))
            print('h {}, w {}'.format(h, w))
            trans_mat = params_to_mat44(trans_params=trans_params,
                                        cam_cali_mat=self.cam_cali_mat)
            # trans_mat = trans_mat.transpose()
            # trans_mat = np.dot(self.cam_cali_mat, trans_mat)
            # trans_mat = inv(trans_mat)
            # trans_mat = np.dot(trans_mat, inv(self.cam_cali_mat))
            # trans_mat = np.dot(trans_mat, self.cam_cali_mat)
            print('trans_mat:\n{}'.format(trans_mat))
            transformed_corner_pts = np.dot(trans_mat, corner_pts)
            # time.sleep(30)
            print('transformed_corner_pts:\n{}'.format(transformed_corner_pts))
            # dst = np.linalg.norm(transformed_corner_pts[:, 0] - transformed_corner_pts[:, 1])
            # dst2 = np.linalg.norm(transformed_corner_pts[:, 1] - transformed_corner_pts[:, 2])
            # print(dst, dst2)
            # Draw the 4 edges; edges 0 and 2 are drawn thicker.
            for i in range(-1, 3):
                xs = transformed_corner_pts[0, i], transformed_corner_pts[0, i + 1]
                ys = transformed_corner_pts[1, i], transformed_corner_pts[1, i + 1]
                zs = transformed_corner_pts[2, i], transformed_corner_pts[2, i + 1]
                if i == 0 or i == 2:
                    linewidth = 10
                else:
                    linewidth = 1
                self.ax.plot(xs, ys, zs, color=frame_color, lw=linewidth)
            if plot_img:
                # Interpolate corner coordinates into a surface grid and
                # texture it with the normalized grayscale image.
                w_weights, h_weights = np.meshgrid(np.linspace(0, 1, w),
                                                   np.linspace(0, 1, h))
                X = (1 - w_weights - h_weights) * transformed_corner_pts[0, 0] + \
                    h_weights * transformed_corner_pts[0, 3] + w_weights * transformed_corner_pts[0, 1]
                Y = (1 - w_weights - h_weights) * transformed_corner_pts[1, 0] + \
                    h_weights * transformed_corner_pts[1, 3] + w_weights * transformed_corner_pts[1, 1]
                Z = (1 - w_weights - h_weights) * transformed_corner_pts[2, 0] + \
                    h_weights * transformed_corner_pts[2, 3] + w_weights * transformed_corner_pts[2, 1]
                input_img = cv2.cvtColor(input_img, cv2.COLOR_GRAY2RGB)
                input_img = input_img / 255
                self.ax.plot_surface(X, Y, Z, rstride=20, cstride=20, facecolors=input_img)

        # Plot every frame of the sequence, then show the figure (blocking).
        for frame_id in range(self.frames_num):
            frame_pos = self.case_pos[frame_id, :]
            frame_color = tuple(self.colors[frame_id, :])
            frame_img = cv2.imread(path.join(self.case_frames_path, '{:04}.jpg'.format(frame_id)), 0)
            plot_frame3d(trans_params=frame_pos, frame_color=frame_color,
                         input_img=frame_img, plot_img=False)
            print('{} frame'.format(frame_id))
        plt.show()
def get_6dof_label(trans_params1, trans_params2, cam_cali_mat):
    """
    Given two Aurora position lines of two frames, return the relative 6 degrees of freedom label
    Aurora position line gives the transformation from the ultrasound tracker to Aurora
    :param trans_params1: Aurora position line of the first frame
    :param trans_params2: Aurora position line of the second frame
    :param cam_cali_mat: Camera calibration matrix of this case, which is the transformation from
    the ultrasound image upper left corner (in pixel) to the ultrasound tracker (in mm).
    :return: the relative 6 degrees of freedom (3 translations and 3 rotations xyz) as training label
    Note that this dof is based on the position of the first frame
    """
    mat_first = params_to_mat44(trans_params1, cam_cali_mat=cam_cali_mat)
    mat_second = params_to_mat44(trans_params2, cam_cali_mat=cam_cali_mat)
    # Relative transform expressed in the first frame's coordinates.
    rel_mat = np.dot(mat_second, inv(mat_first))
    eulers = R.from_matrix(rel_mat[:3, :3]).as_euler('xyz')
    # 3 translations followed by 3 xyz Euler rotations.
    return np.concatenate((rel_mat[:3, 3], eulers), axis=0)
def get_next_pos(trans_params1, dof, cam_cali_mat):
    """
    Given the first frame's Aurora position line and relative 6dof, return second frame's position line
    :param trans_params1: Aurora position line of the first frame
    :param dof: 6 degrees of freedom based on the first frame
    :param cam_cali_mat: Camera calibration matrix of this case
    :return: Aurora position line of the second frame
    """
    first_mat = params_to_mat44(trans_params1, cam_cali_mat=cam_cali_mat)
    # Rebuild the homogeneous relative transform from the 6-dof vector
    # (translations in dof[:3], xyz Euler angles in dof[3:]).
    rel_mat = np.identity(4)
    rel_mat[:3, :3] = R.from_euler('xyz', dof[3:]).as_matrix()
    rel_mat[:3, 3] = dof[:3]
    second_mat = np.dot(inv(cam_cali_mat), inv(np.dot(rel_mat, first_mat)))
    # Aurora line format: 3 translations followed by a quaternion.
    result = np.zeros(7)
    result[:3] = second_mat[:3, 3]
    result[3:] = R.from_matrix(second_mat[:3, :3]).as_quat()
    return result
def center_crop(input_img, crop_size=480):
    """Crop a centered square patch out of a 2-D image.

    :param input_img: 2-D grayscale image array of shape (h, w)
    :param crop_size: requested square side length; clamped to min(h, w) so the
        crop always fits inside the image. (The previous hard-coded clamp to
        480 produced negative start indices — and therefore wrong, shrunken
        patches — for images smaller than the requested crop. For the 480x640
        frames used here the behavior is unchanged.)
    :return: the center (crop_size, crop_size) view of input_img
    """
    h, w = input_img.shape
    # Never crop larger than the image itself.
    crop_size = min(crop_size, h, w)
    x_start = (h - crop_size) // 2
    y_start = (w - crop_size) // 2
    return input_img[x_start:x_start + crop_size, y_start:y_start + crop_size]
class TestNetwork():
    """Evaluate the trained frame-to-frame motion network on a single case.

    All work happens inside __init__: load the case's frames (.npy cube),
    calibration and Aurora positions, run the network over batches of
    neighbouring-slice samples, merge the per-sample 6-dof predictions into
    one relative-motion sequence, integrate it into absolute Aurora-style
    positions, compute error metrics against the ground truth and plot both
    trajectories.

    Depends on module-level globals set in the __main__ block: batch_size,
    neighbour_slice, network_type, input_type, output_type, normalize_dof,
    device, model_ft, model_string, plus helpers from tools / train_network
    and data_folder / data_transform / params_to_mat44 defined elsewhere in
    this file.
    """

    def __init__(self, case_id):
        """
        :param case_id: case number (int/float, formatted as 'CaseNNNN') or a
            full case-name string such as 'Demo'
        """
        super(TestNetwork, self).__init__()
        if isinstance(case_id, int) or isinstance(case_id, float):
            self.case_name = 'Case{:04}'.format(case_id)
        else:
            self.case_name = case_id
        self.case_folder = path.join(data_folder, self.case_name)
        """ Instead of loading frames JPEG, here load the entire npy file """
        self.case_frames_npy_path = path.join(self.case_folder, '{}_frames.npy'.format(self.case_name))
        self.case_frames_npy = np.load(self.case_frames_npy_path)
        # print('case_frames_npy shape {}'.format(self.case_frames_npy.shape))
        self.cam_cali_mat_path = path.join(self.case_folder, '{}_USCalib.txt'.format(self.case_name))
        self.cam_cali_mat = np.loadtxt(self.cam_cali_mat_path)
        self.case_pos_path = path.join(self.case_folder, '{}_pos.txt'.format(self.case_name))
        self.case_pos = np.loadtxt(self.case_pos_path)
        # self.case_pos = self.case_pos[:10, :]
        ''' IF we resample the video to 100 frames'''
        # if self.case_pos.shape[0] >= 110 or self.case_pos.shape[0] <= 150:
        #     self.slice_ids = np.linspace(0, self.case_pos.shape[0]-1, 90).astype(np.uint64)
        #     self.case_pos = self.case_pos[self.slice_ids]
        # else:
        #     self.slice_ids = np.linspace(0, self.case_pos.shape[0]-1, self.case_pos.shape[0]).astype(np.uint64)
        # No resampling: slice_ids is the identity mapping over all frames.
        self.slice_ids = np.linspace(0, self.case_pos.shape[0]-1, self.case_pos.shape[0]).astype(np.uint64)
        # print(self.slice_ids)
        self.frames_num = self.case_pos.shape[0]
        # One RGB color per frame, fading blue (first) -> red (last).
        colors_R = np.linspace(0, 1, self.frames_num).reshape((self.frames_num, 1))
        colors_G = np.zeros((self.frames_num, 1))
        colors_B = np.linspace(1, 0, self.frames_num).reshape((self.frames_num, 1))
        self.colors = np.concatenate((colors_R, colors_G, colors_B), axis=1)
        self.fig = plt.figure()
        self.ax = self.fig.gca(projection='3d')

        # def divide_batch(slice_num=end_frame_index, batch_size=32):
        #     """
        #     Divide all the slices into batches for torch parallel computing
        #     :param slice_num: number of slices in a video
        #     :param batch_size: default 32
        #     :return: a list of array, each array is a batch that contains the index of frames
        #     """
        #     end_frame_index = slice_ids.shape[0] - neighbour_slice + 1
        #     print(end_frame_index)
        #     time.sleep(30)
        #     batches_num = slice_ids.shape[0] // batch_size
        #     last_batch_size = slice_ids.shape[0] % batch_size
        #     print('slice_num {}, batch_size {}'.format(slice_ids.shape[0], batch_size))
        #     print('batches_num {}, last_batch_size {}'.format(batches_num, last_batch_size))
        #     batch_ids = []
        #     for i in range(batches_num):
        #         # this_batch_id = np.arange(i * batch_size, (i + 1) * batch_size)
        #         this_batch_id = slice_ids[i * batch_size: (i + 1) * batch_size]
        #         batch_ids.append(this_batch_id)
        #     if last_batch_size != 0:
        #         last_batch_id = np.arange(batches_num * batch_size, batches_num * batch_size + last_batch_size)
        #         # last_batch_id = np.flip(last_batch_id)
        #         batch_ids.append(last_batch_id)
        #     print(batch_ids)
        #     time.sleep(30)
        #     return batch_ids
        def divide_batch(slice_num, batch_size=32):
            """
            Divide all the slices into batches for torch parallel computing
            :param slice_num: number of slices in a video
            :param batch_size: default 32
            :return: a list of array, each array is a batch that contains the index of frames
            """
            batches_num = slice_num // batch_size
            last_batch_size = slice_num % batch_size
            print('slice_num {}, batch_size {}'.format(slice_num, batch_size))
            print('batches_num {}, last_batch_size {}'.format(batches_num, last_batch_size))
            batch_ids = []
            for i in range(batches_num):
                # this_batch_id = np.arange(i * batch_size, (i + 1) * batch_size)
                this_batch_id = self.slice_ids[i * batch_size: (i + 1) * batch_size]
                # this_batch_id = np.flip(this_batch_id)
                batch_ids.append(this_batch_id)
            if last_batch_size != 0:
                # last_batch_id = np.arange(batches_num * batch_size, batches_num * batch_size + last_batch_size)
                last_batch_id = self.slice_ids[batches_num * batch_size:slice_num]
                # last_batch_id = np.flip(last_batch_id)
                batch_ids.append(last_batch_id)
            # print(batch_ids)
            # time.sleep(30)
            return batch_ids

        def get_batch_dofs():
            """
            Give the batches as input
            :return: (frames_num - neighbour_slice + 1) x (neighbour_slice - 1) x 6
            contains the relative motion between two slices within a sample group.
            For example, if a neighbouring sample contains 10 slices, then there are 9 relative
            motions within this group
            """
            end_frame_index = self.frames_num - neighbour_slice + 1
            print('end_frame_index/frame_num {}/{}'.format(end_frame_index, self.frames_num))
            batch_groups = divide_batch(slice_num=end_frame_index, batch_size=batch_size)
            # time.sleep(30)
            # Zero seed row so np.concatenate works inside the loop; the seed
            # is stripped again after the loop.
            if output_type == 'sum_dof':
                result_dof = np.zeros((1, 6))
            else:
                result_dof = np.zeros((1, neighbour_slice - 1, 6))
            for batch_index in range(len(batch_groups)):
                this_batch = batch_groups[batch_index]
                batch_imgs = []
                for group_index in range(len(this_batch)):
                    group_id = this_batch[group_index]
                    sample_slices = []
                    # print(group_id)
                    # NOTE(review): frame_index advances by neighbour_slice per
                    # batch rather than by batch_size — verify this indexing is
                    # intentional (it is only equivalent when the two are equal).
                    frame_index = batch_index * neighbour_slice + group_index
                    for i in range(neighbour_slice):
                        frame_id = int(self.slice_ids[frame_index + i])
                        # print('frame_id {}'.format(frame_id))
                        """ Load JPEG images seperately"""
                        # frame_path = path.join(self.case_frames_path, '{:04}.jpg'.format(frame_id))
                        # frame_img = cv2.imread(frame_path, 0)
                        # frame_img = data_transform(frame_img, masked_full=False)
                        """ Load npy cube images """
                        frame_img = self.case_frames_npy[:, :, frame_id]
                        frame_img = data_transform(frame_img, normalize=True)
                        # print('cubeimage shape {}'.format(self.case_frames_npy.shape))
                        # cv2.imshow('frame{:04}'.format(frame_id), frame_img)
                        # cv2.waitKey(0)
                        # frame_img = data_transform(frame_img)
                        sample_slices.append(frame_img)
                    # 'diff_img' feeds frame-to-frame differences instead of raw frames.
                    if input_type == 'diff_img':
                        diff_imgs = []
                        for sample_id in range(1, len(sample_slices)):
                            diff_imgs.append(sample_slices[sample_id] - sample_slices[sample_id - 1])
                        sample_slices = np.asarray(diff_imgs)
                    else:
                        sample_slices = np.asarray(sample_slices)
                    batch_imgs.append(sample_slices)
                batch_imgs = np.asarray(batch_imgs)
                # 3-D networks expect an extra (channel) dimension.
                if network_type in train_network.networks3D:
                    batch_imgs = np.expand_dims(batch_imgs, axis=1)
                batch_imgs = torch.from_numpy(batch_imgs).float().to(device)
                outputs, maps = model_ft(batch_imgs)
                """ Visualize attention heatmaps """
                # tools.visualize_attention(case_id=self.case_id,
                #                           batch_ids=this_batch,
                #                           batch_imgs=batch_imgs,
                #                           maps=maps, weights=fc_weights)
                # print('this_batch {}'.format(this_batch))
                # print('maps shape {}'.format(maps.shape))
                # print('fc_weights shape {}'.format(fc_weights.shape))
                # print('input shape {}'.format(batch_imgs.shape))
                # print('outputs shape {}'.format(outputs.shape))
                outputs = outputs.data.cpu().numpy()
                # Reshape the raw network output according to output_type.
                if output_type == 'average_dof':
                    outputs = np.expand_dims(outputs, axis=1)
                    outputs_reshape = np.repeat(outputs, neighbour_slice - 1, axis=1)
                elif output_type == 'sum_dof':
                    outputs_reshape = outputs
                else:
                    outputs_reshape = np.reshape(outputs, (outputs.shape[0],
                                                           int(outputs.shape[1] / 6),
                                                           int(outputs.shape[1] / (neighbour_slice - 1))))
                result_dof = np.concatenate((result_dof, outputs_reshape), axis=0)
            # Drop the zero seed row.
            if output_type == 'sum_dof':
                result_dof = result_dof[1:, :]
            else:
                result_dof = result_dof[1:, :, :]
            return result_dof

        def get_format_dofs(batch_dofs, merge_option='average_dof'):
            """
            Based on the network outputs, here reformat the result into one row for each frame
            (Because there are many overlapping frames due to the input format)
            :return:
            1) gen_dofs is (slice_num - 1) x 6dof. It is the relative 6dof motion comparing to
            the former frame
            2) pos_params is slice_num x 7params. It is the absolute position, exactly the same
            format as Aurora.pos file
            """
            print('Use <{}> formatting dofs'.format(merge_option))
            if merge_option == 'one':
                # Keep the first prediction per frame; take the last frame's
                # motion from the final sample.
                gen_dofs = np.zeros((self.frames_num - 1, 6))
                gen_dofs[:batch_dofs.shape[0], :] = batch_dofs[:, 0, :]
                gen_dofs[batch_dofs.shape[0], :] = batch_dofs[-1, 1, :]
                print('gen_dof shape {}'.format(gen_dofs.shape))
                print('not average method')
            elif merge_option == 'baton':
                # 'baton' chains each sample's summed dof from the position
                # reached neighbour_slice-1 frames earlier.
                print('baton batch_dofs shape {}'.format(batch_dofs.shape))
                print('slice_num {}'.format(self.frames_num))
                print('neighboring {}'.format(neighbour_slice))
                gen_dofs = []
                slice_params = []
                for slice_idx in range(self.frames_num):
                    if slice_idx == 0:
                        this_params = self.case_pos[slice_idx, :]
                        slice_params.append(this_params)
                    elif slice_idx < neighbour_slice:
                        # NOTE(review): the /4 spreads the first sample's summed
                        # dof over the warm-up frames — looks hard-coded for
                        # neighbour_slice == 5; confirm.
                        this_dof = batch_dofs[0, :] / 4
                        this_params = tools.get_next_pos(trans_params1=slice_params[slice_idx-1],
                                                         dof=this_dof,
                                                         cam_cali_mat=self.cam_cali_mat)
                        gen_dofs.append(this_dof)
                        slice_params.append(this_params)
                    else:
                        baton_idx = slice_idx - neighbour_slice + 1
                        baton_params = slice_params[baton_idx]
                        sample_dof = batch_dofs[baton_idx, :]
                        this_params = tools.get_next_pos(trans_params1=baton_params,
                                                         dof=sample_dof,
                                                         cam_cali_mat=self.cam_cali_mat)
                        this_dof = tools.get_6dof_label(trans_params1=slice_params[slice_idx-1],
                                                        trans_params2=this_params,
                                                        cam_cali_mat=self.cam_cali_mat)
                        gen_dofs.append(this_dof)
                        slice_params.append(this_params)
                gen_dofs = np.asarray(gen_dofs)
                slice_params = np.asarray(slice_params)
                print('gen_dof shape {}'.format(gen_dofs.shape))
                print('slice_params shape {}'.format(slice_params.shape))
                # time.sleep(30)
            else:
                # Default ('average'): average all overlapping predictions
                # contributed to each frame transition.
                frames_pos = []
                for start_sample_id in range(batch_dofs.shape[0]):
                    for relative_id in range(batch_dofs.shape[1]):
                        this_pos_id = start_sample_id + relative_id + 1
                        # print('this_pos_id {}'.format(this_pos_id))
                        this_pos = batch_dofs[start_sample_id, relative_id, :]
                        this_pos = np.expand_dims(this_pos, axis=0)
                        if len(frames_pos) < this_pos_id:
                            frames_pos.append(this_pos)
                        else:
                            frames_pos[this_pos_id - 1] = np.concatenate((frames_pos[this_pos_id - 1],
                                                                          this_pos), axis=0)
                gen_dofs = []
                for i in range(len(frames_pos)):
                    gen_dof = np.mean(frames_pos[i], axis=0)
                    """This is for Linear Motion"""
                    # gen_dof = train_network.dof_stats[:, 0]
                    # gen_dof = np.asarray([-0.07733258, -1.28508398, 0.37141262,
                    #                       -0.57584312, 0.20969176, 0.51404395]) + 0.1
                    gen_dofs.append(gen_dof)
                gen_dofs = np.asarray(gen_dofs)
                print('batch_dofs {}'.format(batch_dofs.shape))
                print('gen_dofs {}'.format(gen_dofs.shape))
                # time.sleep(30)
                # for dof_id in range(6):
                #     gen_dofs[:, dof_id] = tools.smooth_array(gen_dofs[:, dof_id])
                # time.sleep(30)
            return gen_dofs

        def dof2params(format_dofs):
            """Integrate per-frame relative dofs into absolute Aurora-style params."""
            gen_param_results = []
            for i in range(format_dofs.shape[0]):
                if i == 0:
                    base_param = self.case_pos[i, :]
                else:
                    base_param = gen_param_results[i-1]
                gen_dof = format_dofs[i, :]
                gen_param = tools.get_next_pos(trans_params1=base_param,
                                               dof=gen_dof, cam_cali_mat=self.cam_cali_mat)
                gen_param_results.append(gen_param)
                # time.sleep(30)
            gen_param_results = np.asarray(gen_param_results)
            pos_params = np.zeros((self.frames_num, 7))
            # NOTE(review): indexing case_pos from column 2 implies 9-column
            # Aurora rows (7 pose values after 2 leading columns) — confirm
            # against the .pos file format.
            pos_params[0, :] = self.case_pos[0, 2:]
            pos_params[1:, :] = gen_param_results
            print('pos_params shape {}'.format(pos_params.shape))
            # time.sleep(30)
            return pos_params

        def plot_frame3d(trans_params, frame_color=(255, 0, 0),
                         input_img=np.ones((480, 640)), plot_img=False):
            """
            Plot a 2D frame into 3D space for sequence visualization
            :param input_img: input image frame
            :param trans_params: Aurora position file line of position
            """
            h, w = input_img.shape
            # corner_pts = np.asarray([[0, 0, 0],
            #                          [0, w, 0],
            #                          [h, w, 0],
            #                          [h, 0, 0]])
            # Frame corners in image space (homogeneous columns after transpose).
            corner_pts = np.asarray([[-h, 0, 0],
                                     [-h, -w, 0],
                                     [0, -w, 0],
                                     [0, 0, 0]])
            corner_pts = np.concatenate((corner_pts, np.ones((4, 1))), axis=1)
            corner_pts = np.transpose(corner_pts)
            print('imgshape {}'.format(input_img.shape))
            print('corner_pts:\n{}'.format(corner_pts))
            print('h {}, w {}'.format(h, w))
            trans_mat = params_to_mat44(trans_params=trans_params,
                                        cam_cali_mat=self.cam_cali_mat)
            # trans_mat = trans_mat.transpose()
            # trans_mat = np.dot(self.cam_cali_mat, trans_mat)
            # trans_mat = inv(trans_mat)
            # trans_mat = np.dot(trans_mat, inv(self.cam_cali_mat))
            # trans_mat = np.dot(trans_mat, self.cam_cali_mat)
            print('trans_mat:\n{}'.format(trans_mat))
            transformed_corner_pts = np.dot(trans_mat, corner_pts)
            print('transformed_corner_pts:\n{}'.format(transformed_corner_pts))
            print('transformed_corner_pts shape {}'.format(transformed_corner_pts.shape))
            # dst = np.linalg.norm(transformed_corner_pts[:, 0] - transformed_corner_pts[:, 1])
            # dst2 = np.linalg.norm(transformed_corner_pts[:, 1] - transformed_corner_pts[:, 2])
            # print(dst, dst2)
            # time.sleep(30)
            # Draw the 4 edges; edges 0 and 2 are drawn thicker.
            for i in range(-1, 3):
                xs = transformed_corner_pts[0, i], transformed_corner_pts[0, i + 1]
                ys = transformed_corner_pts[1, i], transformed_corner_pts[1, i + 1]
                zs = transformed_corner_pts[2, i], transformed_corner_pts[2, i + 1]
                if i == 0 or i == 2:
                    linewidth = 10
                else:
                    linewidth = 1
                self.ax.plot(xs, ys, zs, color=frame_color, lw=linewidth)
            if plot_img:
                # Interpolate corner coordinates into a surface grid and
                # texture it with the normalized grayscale image.
                w_weights, h_weights = np.meshgrid(np.linspace(0, 1, w),
                                                   np.linspace(0, 1, h))
                X = (1 - w_weights - h_weights) * transformed_corner_pts[0, 0] + \
                    h_weights * transformed_corner_pts[0, 3] + w_weights * transformed_corner_pts[0, 1]
                Y = (1 - w_weights - h_weights) * transformed_corner_pts[1, 0] + \
                    h_weights * transformed_corner_pts[1, 3] + w_weights * transformed_corner_pts[1, 1]
                Z = (1 - w_weights - h_weights) * transformed_corner_pts[2, 0] + \
                    h_weights * transformed_corner_pts[2, 3] + w_weights * transformed_corner_pts[2, 1]
                input_img = cv2.cvtColor(input_img, cv2.COLOR_GRAY2RGB)
                input_img = input_img / 255
                self.ax.plot_surface(X, Y, Z, rstride=10, cstride=10, facecolors=input_img)

        def params2corner_pts(params, input_img=np.ones((480, 640))):
            """
            Transform the Aurora params to corner points coordinates of each frame
            :param params: slice_num x 7(or 9) params matrix
            :param input_img: just use for size
            :return: slice_num x 4 x 3. 4 corner points 3d coordinates (x, y, z)
            """
            h, w = input_img.shape
            corner_pts = np.asarray([[-h, 0, 0],
                                     [-h, -w, 0],
                                     [0, -w, 0],
                                     [0, 0, 0]])
            corner_pts = np.concatenate((corner_pts, np.ones((4, 1))), axis=1)
            corner_pts = np.transpose(corner_pts)
            transformed_pts = []
            for frame_id in range(params.shape[0]):
                trans_mat = params_to_mat44(trans_params=params[frame_id, :],
                                            cam_cali_mat=self.cam_cali_mat)
                transformed_corner_pts = np.dot(trans_mat, corner_pts)
                # Keep the xyz rows and put the 4 points on the first axis.
                transformed_corner_pts = np.moveaxis(transformed_corner_pts[:3, :], 0, 1)
                transformed_pts.append(transformed_corner_pts)
            transformed_pts = np.asarray(transformed_pts)
            return transformed_pts

        def draw_img_sequence(corner_pts):
            """Texture each frame quad with its image on the 3-D axes.

            NOTE(review): reads self.case_frames_path / self.frames_list, which
            TestNetwork.__init__ never sets — calling this would raise
            AttributeError; its call in visualize_sequences is commented out.
            """
            for frame_id in range(corner_pts.shape[0]):
                w_weights, h_weights = np.meshgrid(np.linspace(0, 1, 224),
                                                   np.linspace(0, 1, 224))
                # print('corner_pts shape {}'.format(corner_pts.shape))
                # time.sleep(30)
                X = (1 - w_weights - h_weights) * corner_pts[frame_id, 0, 0] + \
                    h_weights * corner_pts[frame_id, 3, 0] + w_weights * corner_pts[frame_id, 1, 0]
                Y = (1 - w_weights - h_weights) * corner_pts[frame_id, 0, 1] + \
                    h_weights * corner_pts[frame_id, 3, 1] + w_weights * corner_pts[frame_id, 1, 1]
                Z = (1 - w_weights - h_weights) * corner_pts[frame_id, 0, 2] + \
                    h_weights * corner_pts[frame_id, 3, 2] + w_weights * corner_pts[frame_id, 1, 2]
                img_path = path.join(self.case_frames_path, self.frames_list[frame_id])
                input_img = cv2.imread(img_path, 0)
                input_img = train_network.data_transform(input_img)
                print('frame_path\n{}'.format(self.frames_list[frame_id]))
                # time.sleep(30)
                input_img = cv2.cvtColor(input_img, cv2.COLOR_GRAY2RGB)
                input_img = input_img / 255
                # First and last frames are rendered with a finer stride.
                if frame_id == 0 or frame_id == corner_pts.shape[0] - 1:
                    stride = 2
                else:
                    stride = 10
                # self.ax.plot_surface(X, Y, Z, rstride=20, cstride=20, facecolors=input_img)
                self.ax.plot_surface(X, Y, Z, rstride=stride, cstride=stride,
                                     facecolors=input_img, zorder=0.1)

        def draw_one_sequence(corner_pts, name, colorRGB=(255, 0, 0), line_width=3, constant=True):
            """Draw one trajectory of frame quads into the shared 3-D axes.

            :param corner_pts: slice_num x 4 x 3 corner coordinates
            :param name: legend label for this sequence
            :param colorRGB: line color, 0-255 per channel
            :param line_width: matplotlib line width
            :param constant: constant color if True, otherwise a gradient
            """
            colorRGB = tuple(channel/255 for channel in colorRGB)
            seg_num = corner_pts.shape[0] + 1
            if constant:
                constant_color = np.asarray(colorRGB)
                constant_color = np.expand_dims(constant_color, axis=0)
                colors = np.repeat(constant_color, seg_num, axis=0)
            else:
                colors_R = np.linspace(0, colorRGB[0], seg_num).reshape((seg_num, 1))
                colors_G = np.linspace(0, colorRGB[1], seg_num).reshape((seg_num, 1))
                colors_B = np.linspace(1, colorRGB[2], seg_num).reshape((seg_num, 1))
                colors = np.concatenate((colors_R, colors_G, colors_B), axis=1)
            # for frame_id in range(int(corner_pts.shape[0] * 0.5), corner_pts.shape[0]):
            #     if frame_id == int(corner_pts.shape[0] * 0.5):
            for frame_id in range(corner_pts.shape[0]):
                if frame_id == 0:
                    """ First frame draw full bounds"""
                    for pt_id in range(-1, 3):
                        xs = corner_pts[frame_id, pt_id, 0], corner_pts[frame_id, pt_id + 1, 0]
                        ys = corner_pts[frame_id, pt_id, 1], corner_pts[frame_id, pt_id + 1, 1]
                        zs = corner_pts[frame_id, pt_id, 2], corner_pts[frame_id, pt_id + 1, 2]
                        self.ax.plot(xs, ys, zs, color=tuple(colors[frame_id, :]), lw=line_width, zorder=1)
                elif frame_id == corner_pts.shape[0] - 1:
                    """ Connect to the former frame """
                    for pt_id in range(-1, 3):
                        xs = corner_pts[frame_id, pt_id, 0], corner_pts[frame_id - 1, pt_id, 0]
                        ys = corner_pts[frame_id, pt_id, 1], corner_pts[frame_id - 1, pt_id, 1]
                        zs = corner_pts[frame_id, pt_id, 2], corner_pts[frame_id - 1, pt_id, 2]
                        self.ax.plot(xs, ys, zs, color=tuple(colors[frame_id, :]), lw=line_width)
                    """ Last frame draw full bounds"""
                    for pt_id in range(-1, 3):
                        xs = corner_pts[frame_id, pt_id, 0], corner_pts[frame_id, pt_id + 1, 0]
                        ys = corner_pts[frame_id, pt_id, 1], corner_pts[frame_id, pt_id + 1, 1]
                        zs = corner_pts[frame_id, pt_id, 2], corner_pts[frame_id, pt_id + 1, 2]
                        self.ax.plot(xs, ys, zs, color=tuple(colors[-1, :]), lw=line_width)
                        # Attach the legend label to exactly one edge.
                        if pt_id == -1:
                            self.ax.plot(xs, ys, zs, color=tuple(colors[-1, :]), lw=line_width, label=name)
                else:
                    """ Connect to the former frame """
                    for pt_id in range(-1, 3):
                        xs = corner_pts[frame_id, pt_id, 0], corner_pts[frame_id - 1, pt_id, 0]
                        ys = corner_pts[frame_id, pt_id, 1], corner_pts[frame_id - 1, pt_id, 1]
                        zs = corner_pts[frame_id, pt_id, 2], corner_pts[frame_id - 1, pt_id, 2]
                        self.ax.plot(xs, ys, zs, color=tuple(colors[frame_id, :]), lw=line_width, zorder=1)
            # if plot_img and frame_id==0:

        def visualize_sequences():
            """Plot ground-truth vs predicted trajectories and save the figures."""
            # draw_img_sequence(corner_pts=self.gt_pts1)
            draw_one_sequence(corner_pts=self.gt_pts1, name='Groundtruth',
                              colorRGB=(0, 153, 76), line_width=3)
            draw_one_sequence(corner_pts=self.trans_pts1, name='DCL-Net ({:.4f}mm)'.format(self.trans_pts1_error),
                              colorRGB=(255, 0, 0))
            plt.axis('off')
            self.ax.set_xticklabels([])
            self.ax.set_yticklabels([])
            self.ax.set_zticklabels([])
            plt.legend(loc='lower left')
            plt.tight_layout()
            # views_id = np.linspace(0, 360, 36)
            # for ii in views_id:
            #     self.ax.view_init(elev=10., azim=ii)
            #     plt.savefig('views/{}_img.jpg'.format(ii))
            #     # plt.savefig('views/{}.jpg'.format(ii))
            #     print('{} saved'.format(ii))
            self.ax.view_init(elev=10., azim=0)
            # plt.savefig('views/{}_img.jpg'.format(0))
            plt.savefig('views/all_cases/{}_{}.jpg'.format(model_string, case_id))
            plt.title(self.case_name)
            plt.savefig('results/plots/{}_vis.pdf'.format(self.case_name))
            plt.show()

        def get_gt_dofs():
            """Relative ground-truth 6-dofs between consecutive Aurora rows."""
            gt_dofs = []
            for slice_id in range(1, self.frames_num):
                params1 = self.case_pos[slice_id-1, :]
                params2 = self.case_pos[slice_id, :]
                this_dof = tools.get_6dof_label(trans_params1=params1,
                                                trans_params2=params2,
                                                cam_cali_mat=self.cam_cali_mat)
                gt_dofs.append(this_dof)
            gt_dofs = np.asarray(gt_dofs)
            print('gt_dof shape {}, frames_num {}'.format(gt_dofs.shape, self.frames_num))
            return gt_dofs

        def visualize_dofs():
            """Plot per-axis predicted vs ground-truth dof curves.

            NOTE(review): uses self.case_id, which is never assigned in this
            class (only self.case_name is) — calling this would raise
            AttributeError; its call below is commented out.
            """
            frees = ['tX', 'tY', 'tZ', 'aX', 'aY', 'aZ']
            fig, axes = plt.subplots(2, 3, figsize=(15, 10))
            fig.suptitle('Case{:04}'.format(self.case_id))
            for dof_id in range(len(frees)):
                plot_x = dof_id // 3
                plot_y = dof_id % 3
                axes[plot_x, plot_y].plot(self.gt_dofs[:, dof_id], color='g', label='Groundtruth', alpha=0.5)
                axes[plot_x, plot_y].plot(self.format_dofs[:, dof_id], color='r', label='CNN', alpha=0.5)
                corrcoef = np.corrcoef(self.gt_dofs[:, dof_id], self.format_dofs[:, dof_id])[0, 1]
                axes[plot_x, plot_y].set_title('{}: corrcoef {:.4f}'.format(frees[dof_id], corrcoef))
                axes[plot_x, plot_y].legend(loc='lower left')
                # axes[plot_x, plot_y].show()
                np.savetxt('figures/dof_values/{}_{}_gt.txt'.format(self.case_id, frees[dof_id]),
                           self.gt_dofs[:, dof_id])
                np.savetxt('figures/dof_values/{}_{}_{}_pd.txt'.format(model_string, self.case_id, frees[dof_id]),
                           self.format_dofs[:, dof_id])
            plt.savefig('figures/dof_pred/Case{:04}.jpg'.format(self.case_id))
            # plt.show()

        # --- run the whole evaluation pipeline ---
        self.batch_dofs = get_batch_dofs()
        if output_type == 'sum_dof':
            self.format_dofs = get_format_dofs(self.batch_dofs, merge_option='baton')
        else:
            self.format_dofs = get_format_dofs(self.batch_dofs, merge_option='average')
        # Undo the training-time dof normalization using dof_stats (col 1:
        # scale, col 0: offset).
        if normalize_dof:
            self.format_dofs = self.format_dofs * train_network.dof_stats[:, 1] \
                               + train_network.dof_stats[:, 0]
        print('format_dofs\n{}'.format(np.around(self.format_dofs, decimals=2)))
        self.gt_dofs = get_gt_dofs()
        # print('mean gt_dof\n{}'.format(np.mean(self.gt_dofs, axis=0)))
        # time.sleep(30)
        # np.savetxt('infos/gt_dofs.txt', self.gt_dofs)
        # np.savetxt('infos/format_dofs.txt', self.format_dofs)
        # print('saved')
        # time.sleep(30)
        # self.gt_means = np.mean(self.gt_dofs, axis=0)
        # np.savetxt('infos/linear_motion.txt', self.gt_means)
        # self.format_dofs = np.zeros((1, 6))
        # self.format_dofs[0, :] = np.loadtxt('infos/linear_motion.txt')
        # self.format_dofs = np.repeat(self.format_dofs, self.gt_dofs.shape[0], axis=0)
        # print('shapes gt {}, linear {}'.format(self.gt_dofs.shape, self.format_dofs.shape))
        # visualize_dofs()
        self.result_params = dof2params(self.format_dofs)
        print('frame_position shape {}'.format(self.result_params.shape))
        print('self.case_pos shape {}'.format(self.case_pos.shape))
        self.imgs_pts1 = tools.params2corner_pts(params=self.case_pos, cam_cali_mat=self.cam_cali_mat,
                                                 shrink=1)
        self.gt_pts1 = tools.params2corner_pts(params=self.case_pos, cam_cali_mat=self.cam_cali_mat)
        self.trans_pts1 = tools.params2corner_pts(params=self.result_params, cam_cali_mat=self.cam_cali_mat)
        np.save('results/trans_pts/{}_{}.npy'.format(model_string, self.case_name), self.trans_pts1)
        np.save('results/trans_pts/GT_{}.npy'.format(self.case_name), self.gt_pts1)
        # time.sleep(30)
        # Error metrics computed by the project's tools helpers.
        self.trans_pts1_error = tools.evaluate_dist(pts1=self.gt_pts1, pts2=self.trans_pts1)
        self.final_drift = tools.final_drift(pts1=self.gt_pts1[-1, :, :], pts2=self.trans_pts1[-1, :, :])
        self.cor_coe = tools.evaluate_correlation(dof1=self.format_dofs, dof2=self.gt_dofs, abs=True)
        print('self.gt_pts1 shape {}'.format(self.gt_pts1.shape))
        print('self.trans_pts1 shape {}'.format(self.trans_pts1.shape))
        print('{} distance error {:.4f}mm'.format(self.case_name, self.trans_pts1_error))
        print('{} final drift {:.4f}mm'.format(self.case_name, self.final_drift))
        # print('Case{:04} correlation: {}'.format(self.case_id, self.cor_coe))
        print('*' * 50)
        visualize_sequences()
#
if __name__ == '__main__':
    # --- configuration consumed by TestNetwork through module globals ---
    batch_size = 5
    neighbour_slice = 5          # frames per network input sample
    network_type = 'resnext50'
    input_type = 'org_img'       # 'org_img' (raw frames) or 'diff_img'
    output_type = 'average_dof'  # 'average_dof' / 'sum_dof' / per-pair dofs
    normalize_dof = True
    # NOTE(review): device_no is not defined in this part of the file —
    # presumably a module-level constant defined earlier; confirm.
    device = torch.device("cuda:{}".format(device_no))
    model_string = '0312-185335'
    model_folder = 'pretrained_networks'
    model_path = path.join(model_folder, '3d_best_Generator_{}.pth'.format(model_string))
    model_ft = train_network.define_model(model_type=network_type,
                                          pretrained_path=model_path,
                                          input_type=input_type,
                                          output_type=output_type,
                                          neighbour_slice=neighbour_slice)
    print('torch model loaded')
    params = model_ft.state_dict()
    # Final fully-connected layer weights (used by the commented-out
    # attention visualization inside TestNetwork).
    fc_weights = params['fc.weight'].data.cpu().numpy()
    # print(fc_weights.shape)
    since = time.time()
    # Constructing TestNetwork runs the entire evaluation + visualization.
    case = TestNetwork(case_id='Demo')
    time_elapsed = time.time() - since
    print('One case testing complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    """ Following parts are for our full dataset testing """
    # errors = []
    # final_drifts = []
    # frame_nums = []
    # corr_coefs = []
    #
    # for i in test_ids:
    # # for i in val_ids:
    # # for i in train_ids:
    #     case = TestNetwork(case_id=i)
    #     results_pos = case.result_params
    #     case_error = case.trans_pts1_error
    #     case_corr = case.cor_coe
    #     errors.append(case_error)
    #     final_drifts.append(case.final_drift)
    #     frame_nums.append([i, case.frames_num])
    #     corr_coefs.append(case_corr)
    #     # np.savetxt('results/pos/Case{:04}_Aurora_result.pos'.format(i), results_pos, fmt='%.6f')
    # errors = np.asarray(errors)
    # avg_error = np.mean(errors)
    # np.savetxt('results/{}.txt'.format(model_string), errors)
    # np.savetxt('infos/frame_nums.txt', np.asarray(frame_nums))
    # np.savetxt('data/{}_corrcoef.txt'.format(model_string), np.asarray(corr_coefs))
    # print('mean corrcoef {}'.format(np.mean(np.asarray(corr_coefs))))
    #
    # final_drifts = np.asarray(final_drifts)
    # avg_final_drift = np.mean(final_drifts)
    # np.savetxt('results/{}_final_drifts.txt'.format(model_string), final_drifts)
    # print(errors)
    # print('neighbour_slice {} average error {:.4f}'.format(neighbour_slice, avg_error))
    # print('neighbour_slice {} average final drift {:.4f}'.format(neighbour_slice, avg_final_drift))
    #
    # print('This model is {}'.format(model_string))
    #
    # med = np.median(errors)
    # maximum = np.max(errors)
    # minimum = np.min(errors)
    # average = np.mean(errors)
    # print('Error: med {:.2f}, max {:.2f}, min {:.2f}, avg {:.2f}'.format(med, maximum, minimum, average))
    #
    # med = np.median(final_drifts)
    # maximum = np.max(final_drifts)
    # minimum = np.min(final_drifts)
    # average = np.mean(final_drifts)
    # print('Drift: med {:.2f}, max {:.2f}, min {:.2f}, avg {:.2f}'.format(med, maximum, minimum, average))
|
import json
import logging
import os
import socketio
from threading import Thread
from flask import Flask
from urllib.parse import parse_qs, urlparse
from jose import jwt
import jose.exceptions
from models import Option
from monitoring.constants import LOG_SOCKETIO
from monitoring.database import Session
# Read the dynamic-DNS ("dyndns") options from the database to decide which
# CORS origin(s) the socket.io server should accept.
session = Session()
noip_config = session.query(Option).filter_by(name="network", section="dyndns").first()
if noip_config:
    noip_config = json.loads(noip_config.value)
# Restrict CORS to the configured hostname only when host restriction is
# enabled and a hostname is set; otherwise accept any origin.
if noip_config and noip_config.get("restrict_host", False) and noip_config.get("hostname", None):
    allowed_origins = f"https://{noip_config['hostname']}"
else:
    allowed_origins = "*"
# NOTE(review): allowed_origins is a plain string at this point, so this
# length check looks like a leftover from when it was a list — for "*" it
# re-assigns the same single character; confirm it can be removed.
if len(allowed_origins) == 1:
    allowed_origins = allowed_origins[0]
sio = socketio.Server(async_mode="threading", cors_allowed_origins=allowed_origins)
logger = logging.getLogger(LOG_SOCKETIO)
logging.getLogger("werkzeug").setLevel(logging.DEBUG)
def start_socketio():
    """Start the Flask application wrapped by the socket.io middleware.

    Listens on MONITOR_HOST:MONITOR_PORT (environment); runs on a background
    thread so the main thread stays free for the health check.
    """
    logger.info("Server CORS allowed on '%s'", allowed_origins)
    flask_app = Flask(__name__)
    # socket.io middleware wraps the plain Flask WSGI application
    flask_app.wsgi_app = socketio.WSGIApp(sio, flask_app.wsgi_app)
    run_kwargs = {
        "threaded": True,
        # debug=False avoids starting the application twice in development
        "debug": False,
        "host": os.environ["MONITOR_HOST"],
        "port": int(os.environ["MONITOR_PORT"]),
    }
    # Background thread keeps the main thread unblocked (health check).
    server_thread = Thread(target=flask_app.run, kwargs=run_kwargs)
    server_thread.start()
@sio.on("connect")
def connect(sid, environ):
    """Authenticate a new socket.io client.

    The client sends a JWT in the "token" query parameter; its "origin" claim
    must match the HTTP referer. Returns False to reject the connection.

    Fix: a missing "token" query parameter (or a token without the expected
    claims) previously raised an unhandled KeyError — both when decoding and
    again inside the except-branch log line. Such requests are now rejected
    like any other failed authentication.
    """
    logger.debug('Client info "%s": %s', sid, environ)
    query_string = parse_qs(environ["QUERY_STRING"])
    remote_address = environ.get("HTTP_X_REAL_IP", environ.get("REMOTE_ADDR", ""))
    # Absent token becomes an empty string, which fails JWT decoding below.
    token = query_string.get("token", [""])[0]
    try:
        device_info = jwt.decode(token, os.environ.get("SECRET"), algorithms="HS256")
        logger.info("Connecting with device info: %s", device_info)
        referer = urlparse(environ.get("HTTP_REFERER", ""))
        origin = urlparse(device_info["origin"])
        if origin.scheme != referer.scheme or origin.netloc != referer.netloc:
            logger.info("Authentication failed from origin '%s'!= '%s'", origin, referer)
            return False
        logger.info("New connection from '%s' =>'%s'", device_info["ip"], device_info["origin"])
        logger.debug("New connection from '%s': %s =>'%s'", sid, environ, device_info)
    except (jose.exceptions.JWTError, KeyError):
        # KeyError: token lacked the expected claims ("origin"/"ip").
        logger.error("Authentication failed from '%s'! token='%s'", remote_address, token)
        return False
@sio.on("disconnect")
def disconnect(sid):
    """Log that a socket.io client has gone away."""
    # `logger` is the module-level logging.getLogger(LOG_SOCKETIO) instance.
    logger.info('Disconnected "%s"', sid)
def send_message(message_type, message):
    """Emit a socket.io event named `message_type` carrying `message`."""
    logger.debug("Sending message: %s -> %s", message_type, message)
    sio.emit(message_type, message)

# Thin, named wrappers around send_message, one per monitored state change.
def send_alert_state(arm_state):
    """Broadcast an alert state change."""
    send_message("alert_state_change", arm_state)

def send_arm_state(arm_state):
    """Broadcast an arm state change."""
    send_message("arm_state_change", arm_state)

def send_sensors_state(sensors_state):
    """Broadcast a sensors state change."""
    send_message("sensors_state_change", sensors_state)

def send_syren_state(syren_state):
    """Broadcast a syren state change."""
    send_message("syren_state_change", syren_state)

def send_system_state(system_state):
    """Broadcast a system state change."""
    send_message("system_state_change", system_state)

def send_power_state(power_state):
    """Broadcast a power state change."""
    send_message("power_state_change", power_state)

def send_card_registered():
    """Notify clients that a card was registered."""
    send_message("card_registered", True)
|
"""
During the 70s and 80s, some handheld calculators used a very different notation for arithmetic called Reverse Polish
notation [http://en.wikipedia.org/wiki/Reverse_Polish_notation] (RPN). Instead of putting operators (+, *, -, etc.)
between their operands (as in 3 + 4), they were placed behind them: to calculate 3 + 4, you first inputted the operands
(3 4) and then added them together by pressing +.
Internally, this was implemented using a stack: whenever you enter a number, it's pushed onto the stack, and whenever
you enter an operator, the top two elements are popped off for the calculation. Here's an example of an RPN calculator
calculating 3 4 * 6 2 - +:
[3] --> 3
[4] --> 3 4
[*] --> 12 ( 3 * 4 = 12)
[6] --> 12 6
[2] --> 12 6 2
[-] --> 12 4 ( 6 - 2 = 4)
[+] --> 16 (12 + 4 = 16)
Your task is to implement a program that reads a string in Reverse Polish notation and prints the result of the
calculation. Your program should support positive and negative integers and the operators +, -, *. (For extra credit,
you can implement extra functions, such as decimal numbers, division, exponentiation, etc.)
"""
import re
def evaluate_rpn(expression):
    """
    Evaluate `expression` in Reverse Polish notation and return the result.

    Supports the required operators +, - and * plus the extra-credit
    operators / (true division) and ^ (exponentiation). Operands may be
    positive or negative integers or decimal numbers.

    The previous implementation repeatedly matched `<num> <num> <op>` with a
    regex and ran it through `eval()`; this version uses the classic stack
    algorithm instead, which is O(n), needs no string rebuilding, and avoids
    `eval` entirely.

    Raises:
        ValueError: if the expression contains an unknown token or the wrong
            number of operands/operators.
    """
    operations = {
        "+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "*": lambda a, b: a * b,
        "/": lambda a, b: a / b,
        "^": lambda a, b: a ** b,
    }
    stack = []
    for token in expression.split():
        if token in operations:
            if len(stack) < 2:
                raise ValueError("not enough operands for operator {!r}".format(token))
            right = stack.pop()
            left = stack.pop()
            stack.append(operations[token](left, right))
        else:
            try:
                # Prefer int so whole-number arithmetic stays exact;
                # fall back to float for decimals.
                stack.append(int(token))
            except ValueError:
                try:
                    stack.append(float(token))
                except ValueError:
                    raise ValueError("unknown token {!r}".format(token))
    if len(stack) != 1:
        raise ValueError("malformed expression: {!r}".format(expression))
    return stack[0]


def main():
    """Evaluate the demo expression and print the result."""
    cmd = '3 4 / 6 2 ^ +'
    print(evaluate_rpn(cmd))


if __name__ == "__main__":
    main()
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Module contains class PandasDataframe.
PandasDataframe is a parent abstract class for any dataframe class
for pandas storage format.
"""
from collections import OrderedDict
import numpy as np
import pandas
import datetime
from pandas.core.indexes.api import ensure_index, Index, RangeIndex
from pandas.core.dtypes.common import is_numeric_dtype, is_list_like
from pandas._libs.lib import no_default
from typing import List, Hashable, Optional, Callable, Union, Dict
from modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler
from modin.error_message import ErrorMessage
from modin.core.storage_formats.pandas.parsers import (
find_common_type_cat as find_common_type,
)
from modin.core.dataframe.base.dataframe.dataframe import ModinDataframe
from modin.core.dataframe.base.dataframe.utils import (
Axis,
JoinType,
)
from modin.pandas.indexing import is_range_like
from modin.pandas.utils import is_full_grab_slice, check_both_not_none
from modin.logging import LoggerMetaClass
def lazy_metadata_decorator(apply_axis=None, axis_arg=-1, transpose=False):
    """
    Lazily propagate metadata for the ``PandasDataframe``.
    This decorator first adds the minimum required reindexing operations
    to each partition's queue of functions to be lazily applied for
    each PandasDataframe in the arguments by applying the function
    run_f_on_minimally_updated_metadata. The decorator also sets the
    flags for deferred metadata synchronization on the function result
    if necessary.
    Parameters
    ----------
    apply_axis : str, default: None
        The axes on which to apply the reindexing operations to the `self._partitions` lazily.
        Case None: No lazy metadata propagation.
        Case "both": Add reindexing operations on both axes to partition queue.
        Case "opposite": Add reindexing operations complementary to given axis.
        Case "rows": Add reindexing operations on row axis to partition queue.
    axis_arg : int, default: -1
        The index or column axis.
    transpose : bool, default: False
        Boolean for if a transpose operation is being used.
    Returns
    -------
    Wrapped Function.
    """
    def decorator(f):
        from functools import wraps
        @wraps(f)
        def run_f_on_minimally_updated_metadata(self, *args, **kwargs):
            # Gather `self` plus every PandasDataframe passed positionally,
            # by keyword, or nested one level deep inside list arguments, so
            # deferred labels can be flushed on each before `f` runs.
            for obj in (
                [self]
                + [o for o in args if isinstance(o, PandasDataframe)]
                + [v for v in kwargs.values() if isinstance(v, PandasDataframe)]
                + [
                    d
                    for o in args
                    if isinstance(o, list)
                    for d in o
                    if isinstance(d, PandasDataframe)
                ]
                + [
                    d
                    for _, o in kwargs.items()
                    if isinstance(o, list)
                    for d in o
                    if isinstance(d, PandasDataframe)
                ]
            ):
                if apply_axis == "both":
                    if obj._deferred_index and obj._deferred_column:
                        obj._propagate_index_objs(axis=None)
                    elif obj._deferred_index:
                        obj._propagate_index_objs(axis=0)
                    elif obj._deferred_column:
                        obj._propagate_index_objs(axis=1)
                elif apply_axis == "opposite":
                    # The axis may arrive positionally (at `axis_arg`) or as
                    # the `axis` keyword; propagate labels on the other axis.
                    if "axis" not in kwargs:
                        axis = args[axis_arg]
                    else:
                        axis = kwargs["axis"]
                    if axis == 0 and obj._deferred_column:
                        obj._propagate_index_objs(axis=1)
                    elif axis == 1 and obj._deferred_index:
                        obj._propagate_index_objs(axis=0)
                elif apply_axis == "rows":
                    obj._propagate_index_objs(axis=0)
            result = f(self, *args, **kwargs)
            # Carry the deferred-synchronization flags from `self` to the
            # result (swapped when the wrapped operation transposes).
            if apply_axis is None and not transpose:
                result._deferred_index = self._deferred_index
                result._deferred_column = self._deferred_column
            elif apply_axis is None and transpose:
                result._deferred_index = self._deferred_column
                result._deferred_column = self._deferred_index
            elif apply_axis == "opposite":
                if axis == 0:
                    result._deferred_index = self._deferred_index
                else:
                    result._deferred_column = self._deferred_column
            elif apply_axis == "rows":
                result._deferred_column = self._deferred_column
            return result
        return run_f_on_minimally_updated_metadata
    return decorator
class PandasDataframe(object, metaclass=LoggerMetaClass):
"""
An abstract class that represents the parent class for any pandas storage format dataframe class.
This class provides interfaces to run operations on dataframe partitions.
Parameters
----------
partitions : np.ndarray
A 2D NumPy array of partitions.
index : sequence
The index for the dataframe. Converted to a ``pandas.Index``.
columns : sequence
The columns object for the dataframe. Converted to a ``pandas.Index``.
row_lengths : list, optional
The length of each partition in the rows. The "height" of
each of the block partitions. Is computed if not provided.
column_widths : list, optional
The width of each partition in the columns. The "width" of
each of the block partitions. Is computed if not provided.
dtypes : pandas.Series, optional
The data types for the dataframe columns.
"""
_partition_mgr_cls = None
_query_compiler_cls = PandasQueryCompiler
# These properties flag whether or not we are deferring the metadata synchronization
_deferred_index = False
_deferred_column = False
@property
def __constructor__(self):
"""
Create a new instance of this object.
Returns
-------
PandasDataframe
"""
return type(self)
    def __init__(
        self,
        partitions,
        index,
        columns,
        row_lengths=None,
        column_widths=None,
        dtypes=None,
    ):
        """Build the dataframe from partitions and (optionally cached) metadata; see the class docstring for parameter descriptions."""
        self._partitions = partitions
        # Labels are normalized to pandas.Index objects up front.
        self._index_cache = ensure_index(index)
        self._columns_cache = ensure_index(columns)
        if row_lengths is not None and len(self.index) > 0:
            # An empty frame can have 0 rows but a nonempty index. If the frame
            # does have rows, the number of rows must equal the size of the
            # index.
            num_rows = sum(row_lengths)
            if num_rows > 0:
                ErrorMessage.catch_bugs_and_request_email(
                    num_rows != len(self._index_cache),
                    "Row lengths: {} != {}".format(num_rows, len(self._index_cache)),
                )
            ErrorMessage.catch_bugs_and_request_email(
                any(val < 0 for val in row_lengths),
                "Row lengths cannot be negative: {}".format(row_lengths),
            )
        self._row_lengths_cache = row_lengths
        if column_widths is not None and len(self.columns) > 0:
            # An empty frame can have 0 column but a nonempty column index. If
            # the frame does have columns, the number of columns must equal the
            # size of the columns.
            num_columns = sum(column_widths)
            if num_columns > 0:
                ErrorMessage.catch_bugs_and_request_email(
                    num_columns != len(self._columns_cache),
                    "Column widths: {} != {}".format(
                        num_columns, len(self._columns_cache)
                    ),
                )
            ErrorMessage.catch_bugs_and_request_email(
                any(val < 0 for val in column_widths),
                "Column widths cannot be negative: {}".format(column_widths),
            )
        self._column_widths_cache = column_widths
        self._dtypes = dtypes
        # Drop zero-length partitions so the cached lengths stay consistent.
        self._filter_empties()
@property
def _row_lengths(self):
"""
Compute the row partitions lengths if they are not cached.
Returns
-------
list
A list of row partitions lengths.
"""
if self._row_lengths_cache is None:
if len(self._partitions.T) > 0:
self._row_lengths_cache = [
obj.length() for obj in self._partitions.T[0]
]
else:
self._row_lengths_cache = []
return self._row_lengths_cache
@property
def _column_widths(self):
"""
Compute the column partitions widths if they are not cached.
Returns
-------
list
A list of column partitions widths.
"""
if self._column_widths_cache is None:
if len(self._partitions) > 0:
self._column_widths_cache = [obj.width() for obj in self._partitions[0]]
else:
self._column_widths_cache = []
return self._column_widths_cache
@property
def _axes_lengths(self):
"""
Get a pair of row partitions lengths and column partitions widths.
Returns
-------
list
The pair of row partitions lengths and column partitions widths.
"""
return [self._row_lengths, self._column_widths]
@property
def dtypes(self):
"""
Compute the data types if they are not cached.
Returns
-------
pandas.Series
A pandas Series containing the data types for this dataframe.
"""
if self._dtypes is None:
self._dtypes = self._compute_dtypes()
return self._dtypes
def _compute_dtypes(self):
"""
Compute the data types via TreeReduce pattern.
Returns
-------
pandas.Series
A pandas Series containing the data types for this dataframe.
"""
def dtype_builder(df):
return df.apply(lambda col: find_common_type(col.values), axis=0)
map_func = self._build_treereduce_func(0, lambda df: df.dtypes)
reduce_func = self._build_treereduce_func(0, dtype_builder)
# For now we will use a pandas Series for the dtypes.
if len(self.columns) > 0:
dtypes = self.tree_reduce(0, map_func, reduce_func).to_pandas().iloc[0]
else:
dtypes = pandas.Series([])
# reset name to None because we use "__reduced__" internally
dtypes.name = None
return dtypes
_index_cache = None
_columns_cache = None
def _validate_set_axis(self, new_labels, old_labels):
"""
Validate the possibility of replacement of old labels with the new labels.
Parameters
----------
new_labels : list-like
The labels to replace with.
old_labels : list-like
The labels to replace.
Returns
-------
list-like
The validated labels.
"""
new_labels = ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
f"Length mismatch: Expected axis has {old_len} elements, "
+ f"new values have {new_len} elements"
)
return new_labels
def _get_index(self):
"""
Get the index from the cache object.
Returns
-------
pandas.Index
An index object containing the row labels.
"""
return self._index_cache
def _get_columns(self):
"""
Get the columns from the cache object.
Returns
-------
pandas.Index
An index object containing the column labels.
"""
return self._columns_cache
def _set_index(self, new_index):
"""
Replace the current row labels with new labels.
Parameters
----------
new_index : list-like
The new row labels.
"""
if self._index_cache is None:
self._index_cache = ensure_index(new_index)
else:
new_index = self._validate_set_axis(new_index, self._index_cache)
self._index_cache = new_index
self.synchronize_labels(axis=0)
def _set_columns(self, new_columns):
"""
Replace the current column labels with new labels.
Parameters
----------
new_columns : list-like
The new column labels.
"""
if self._columns_cache is None:
self._columns_cache = ensure_index(new_columns)
else:
new_columns = self._validate_set_axis(new_columns, self._columns_cache)
self._columns_cache = new_columns
if self._dtypes is not None:
self._dtypes.index = new_columns
self.synchronize_labels(axis=1)
columns = property(_get_columns, _set_columns)
index = property(_get_index, _set_index)
@property
def axes(self):
"""
Get index and columns that can be accessed with an `axis` integer.
Returns
-------
list
List with two values: index and columns.
"""
return [self.index, self.columns]
def _compute_axis_labels(self, axis: int, partitions=None):
"""
Compute the labels for specific `axis`.
Parameters
----------
axis : int
Axis to compute labels along.
partitions : np.ndarray, optional
A 2D NumPy array of partitions from which labels will be grabbed.
If not specified, partitions will be taken from `self._partitions`.
Returns
-------
pandas.Index
Labels for the specified `axis`.
"""
if partitions is None:
partitions = self._partitions
return self._partition_mgr_cls.get_indices(
axis, partitions, lambda df: df.axes[axis]
)
def _filter_empties(self):
"""Remove empty partitions from `self._partitions` to avoid triggering excess computation."""
if len(self.axes[0]) == 0 or len(self.axes[1]) == 0:
# This is the case for an empty frame. We don't want to completely remove
# all metadata and partitions so for the moment, we won't prune if the frame
# is empty.
# TODO: Handle empty dataframes better
return
self._partitions = np.array(
[
[
self._partitions[i][j]
for j in range(len(self._partitions[i]))
if j < len(self._column_widths) and self._column_widths[j] != 0
]
for i in range(len(self._partitions))
if i < len(self._row_lengths) and self._row_lengths[i] != 0
]
)
self._column_widths_cache = [w for w in self._column_widths if w != 0]
self._row_lengths_cache = [r for r in self._row_lengths if r != 0]
def synchronize_labels(self, axis=None):
"""
Set the deferred axes variables for the ``PandasDataframe``.
Parameters
----------
axis : int, default: None
The deferred axis.
0 for the index, 1 for the columns.
"""
if axis is None:
self._deferred_index = True
self._deferred_column = True
elif axis == 0:
self._deferred_index = True
else:
self._deferred_column = True
    def _propagate_index_objs(self, axis=None):
        """
        Synchronize labels by applying the index object for specific `axis` to the `self._partitions` lazily.
        Adds `set_axis` function to call-queue of each partition from `self._partitions`
        to apply new axis.
        Parameters
        ----------
        axis : int, default: None
            The axis to apply to. If it's None applies to both axes.
        """
        self._filter_empties()
        # Cumulative partition sizes give the boundaries for slicing the
        # global labels into the chunk each block partition should carry.
        if axis is None or axis == 0:
            cum_row_lengths = np.cumsum([0] + self._row_lengths)
        if axis is None or axis == 1:
            cum_col_widths = np.cumsum([0] + self._column_widths)
        if axis is None:
            # Queue a relabel of both axes on every block.
            def apply_idx_objs(df, idx, cols):
                return df.set_axis(idx, axis="index", inplace=False).set_axis(
                    cols, axis="columns", inplace=False
                )
            self._partitions = np.array(
                [
                    [
                        self._partitions[i][j].add_to_apply_calls(
                            apply_idx_objs,
                            idx=self.index[
                                slice(cum_row_lengths[i], cum_row_lengths[i + 1])
                            ],
                            cols=self.columns[
                                slice(cum_col_widths[j], cum_col_widths[j + 1])
                            ],
                        )
                        for j in range(len(self._partitions[i]))
                    ]
                    for i in range(len(self._partitions))
                ]
            )
            self._deferred_index = False
            self._deferred_column = False
        elif axis == 0:
            # Queue a relabel of the row axis only.
            def apply_idx_objs(df, idx):
                return df.set_axis(idx, axis="index", inplace=False)
            self._partitions = np.array(
                [
                    [
                        self._partitions[i][j].add_to_apply_calls(
                            apply_idx_objs,
                            idx=self.index[
                                slice(cum_row_lengths[i], cum_row_lengths[i + 1])
                            ],
                        )
                        for j in range(len(self._partitions[i]))
                    ]
                    for i in range(len(self._partitions))
                ]
            )
            self._deferred_index = False
        elif axis == 1:
            # Queue a relabel of the column axis only.
            def apply_idx_objs(df, cols):
                return df.set_axis(cols, axis="columns", inplace=False)
            self._partitions = np.array(
                [
                    [
                        self._partitions[i][j].add_to_apply_calls(
                            apply_idx_objs,
                            cols=self.columns[
                                slice(cum_col_widths[j], cum_col_widths[j + 1])
                            ],
                        )
                        for j in range(len(self._partitions[i]))
                    ]
                    for i in range(len(self._partitions))
                ]
            )
            self._deferred_column = False
        else:
            # Any other axis value is a bug in the caller.
            ErrorMessage.catch_bugs_and_request_email(
                axis is not None and axis not in [0, 1]
            )
    @lazy_metadata_decorator(apply_axis=None)
    def mask(
        self,
        row_labels: Optional[List[Hashable]] = None,
        row_positions: Optional[List[int]] = None,
        col_labels: Optional[List[Hashable]] = None,
        col_positions: Optional[List[int]] = None,
    ) -> "PandasDataframe":
        """
        Lazily select columns or rows from given indices.
        Parameters
        ----------
        row_labels : list of hashable, optional
            The row labels to extract.
        row_positions : list-like of ints, optional
            The row positions to extract.
        col_labels : list of hashable, optional
            The column labels to extract.
        col_positions : list-like of ints, optional
            The column positions to extract.
        Returns
        -------
        PandasDataframe
            A new PandasDataframe from the mask provided.
        Notes
        -----
        If both `row_labels` and `row_positions` are provided, a ValueError is raised.
        The same rule applies for `col_labels` and `col_positions`.
        """
        if check_both_not_none(row_labels, row_positions):
            raise ValueError(
                "Both row_labels and row_positions were provided - please provide only one of row_labels and row_positions."
            )
        if check_both_not_none(col_labels, col_positions):
            raise ValueError(
                "Both col_labels and col_positions were provided - please provide only one of col_labels and col_positions."
            )
        # Normalize range-like positional indexers for both axes.
        indexers = []
        for axis, indexer in enumerate((row_positions, col_positions)):
            if is_range_like(indexer):
                if indexer.step == 1 and len(indexer) == len(self.axes[axis]):
                    # By this function semantics, `None` indexer is a full-axis access
                    indexer = None
                elif indexer is not None and not isinstance(indexer, pandas.RangeIndex):
                    # Pure python's range is not fully compatible with a list of ints,
                    # converting it to ``pandas.RangeIndex``` that is compatible.
                    indexer = pandas.RangeIndex(
                        indexer.start, indexer.stop, indexer.step
                    )
            else:
                ErrorMessage.catch_bugs_and_request_email(
                    failure_condition=not (indexer is None or is_list_like(indexer)),
                    extra_log=f"Mask takes only list-like numeric indexers, received: {type(indexer)}",
                )
            indexers.append(indexer)
        row_positions, col_positions = indexers
        if (
            col_labels is None
            and col_positions is None
            and row_labels is None
            and row_positions is None
        ):
            # No indexer on either axis: a full copy.
            return self.copy()
        # Get numpy array of positions of values from `row_labels`
        if row_labels is not None:
            row_positions = self.index.get_indexer_for(row_labels)
        if row_positions is not None:
            sorted_row_positions = (
                row_positions
                if (
                    (is_range_like(row_positions) and row_positions.step > 0)
                    # `np.sort` of empty list returns an array with `float` dtype,
                    # which doesn't work well as an indexer
                    or len(row_positions) == 0
                )
                else np.sort(row_positions)
            )
            # Get dict of row_parts as {row_index: row_internal_indices}
            # TODO: Rename `row_partitions_list`->`row_partitions_dict`
            row_partitions_list = self._get_dict_of_block_index(
                0, sorted_row_positions, are_indices_sorted=True
            )
            new_row_lengths = [
                len(
                    # Row lengths for slice are calculated as the length of the slice
                    # on the partition. Often this will be the same length as the current
                    # length, but sometimes it is different, thus the extra calculation.
                    range(*part_indexer.indices(self._row_lengths[part_idx]))
                    if isinstance(part_indexer, slice)
                    else part_indexer
                )
                for part_idx, part_indexer in row_partitions_list.items()
            ]
            new_index = self.index[
                # pandas Index is more likely to preserve its metadata if the indexer is slice
                slice(row_positions.start, row_positions.stop, row_positions.step)
                # TODO: Fast range processing of non-positive-step ranges is not yet supported
                if is_range_like(row_positions) and row_positions.step > 0
                else sorted_row_positions
            ]
        else:
            # No row selection: keep every row partition in full.
            row_partitions_list = {
                i: slice(None) for i in range(len(self._row_lengths))
            }
            new_row_lengths = self._row_lengths
            new_index = self.index
        # Get numpy array of positions of values from `col_labels`
        if col_labels is not None:
            col_positions = self.columns.get_indexer_for(col_labels)
        if col_positions is not None:
            sorted_col_positions = (
                col_positions
                if (
                    (is_range_like(col_positions) and col_positions.step > 0)
                    # `np.sort` of empty list returns an array with `float` dtype,
                    # which doesn't work well as an indexer
                    or len(col_positions) == 0
                )
                else np.sort(col_positions)
            )
            # Get dict of col_parts as {col_index: col_internal_indices}
            col_partitions_list = self._get_dict_of_block_index(
                1, sorted_col_positions, are_indices_sorted=True
            )
            new_col_widths = [
                len(
                    # Column widths for slice are calculated as the length of the slice
                    # on the partition. Often this will be the same length as the current
                    # length, but sometimes it is different, thus the extra calculation.
                    range(*part_indexer.indices(self._column_widths[part_idx]))
                    if isinstance(part_indexer, slice)
                    else part_indexer
                )
                for part_idx, part_indexer in col_partitions_list.items()
            ]
            # Use the slice to calculate the new columns
            # TODO: Support fast processing of negative-step ranges
            if is_range_like(col_positions) and col_positions.step > 0:
                # pandas Index is more likely to preserve its metadata if the indexer is slice
                monotonic_col_idx = slice(
                    col_positions.start, col_positions.stop, col_positions.step
                )
            else:
                monotonic_col_idx = sorted_col_positions
            new_columns = self.columns[monotonic_col_idx]
            ErrorMessage.catch_bugs_and_request_email(
                failure_condition=sum(new_col_widths) != len(new_columns),
                extra_log=f"{sum(new_col_widths)} != {len(new_columns)}.\n{col_positions}\n{self._column_widths}\n{col_partitions_list}",
            )
            if self._dtypes is not None:
                new_dtypes = self.dtypes.iloc[monotonic_col_idx]
            else:
                new_dtypes = None
        else:
            # No column selection: keep every column partition in full.
            col_partitions_list = {
                i: slice(None) for i in range(len(self._column_widths))
            }
            new_col_widths = self._column_widths
            new_columns = self.columns
            if self._dtypes is not None:
                new_dtypes = self.dtypes
            else:
                new_dtypes = None
        # Apply the per-partition internal indexers; blocks whose internal
        # indexer is an empty list contribute nothing and are skipped.
        new_partitions = np.array(
            [
                [
                    self._partitions[row_idx][col_idx].mask(
                        row_internal_indices, col_internal_indices
                    )
                    for col_idx, col_internal_indices in col_partitions_list.items()
                    if isinstance(col_internal_indices, slice)
                    or len(col_internal_indices) > 0
                ]
                for row_idx, row_internal_indices in row_partitions_list.items()
                if isinstance(row_internal_indices, slice)
                or len(row_internal_indices) > 0
            ]
        )
        intermediate = self.__constructor__(
            new_partitions,
            new_index,
            new_columns,
            new_row_lengths,
            new_col_widths,
            new_dtypes,
        )
        # Check if monotonically increasing, return if it is. Fast track code path for
        # common case to keep it fast.
        if (
            row_positions is None
            # Fast range processing of non-positive-step ranges is not yet supported
            or (is_range_like(row_positions) and row_positions.step > 0)
            or len(row_positions) == 1
            or np.all(row_positions[1:] >= row_positions[:-1])
        ) and (
            col_positions is None
            # Fast range processing of non-positive-step ranges is not yet supported
            or (is_range_like(col_positions) and col_positions.step > 0)
            or len(col_positions) == 1
            or np.all(col_positions[1:] >= col_positions[:-1])
        ):
            return intermediate
        # The new labels are often smaller than the old labels, so we can't reuse the
        # original order values because those were mapped to the original data. We have
        # to reorder here based on the expected order from within the data.
        # We create a dictionary mapping the position of the numeric index with respect
        # to all others, then recreate that order by mapping the new order values from
        # the old. This information is sent to `_reorder_labels`.
        if row_positions is not None:
            row_order_mapping = dict(
                zip(sorted_row_positions, range(len(row_positions)))
            )
            new_row_order = [row_order_mapping[idx] for idx in row_positions]
        else:
            new_row_order = None
        if col_positions is not None:
            col_order_mapping = dict(
                zip(sorted_col_positions, range(len(col_positions)))
            )
            new_col_order = [col_order_mapping[idx] for idx in col_positions]
        else:
            new_col_order = None
        return intermediate._reorder_labels(
            row_positions=new_row_order, col_positions=new_col_order
        )
    @lazy_metadata_decorator(apply_axis="rows")
    def from_labels(self) -> "PandasDataframe":
        """
        Convert the row labels to a column of data, inserted at the first position.
        Gives result by similar way as `pandas.DataFrame.reset_index`. Each level
        of `self.index` will be added as separate column of data.
        Returns
        -------
        PandasDataframe
            A PandasDataframe with new columns from index labels.
        """
        new_row_labels = pandas.RangeIndex(len(self.index))
        # Mirror pandas' reset_index naming: unnamed levels of a MultiIndex
        # become "level_{i}"; a single unnamed level becomes "index" unless an
        # "index" column already exists, then "level_0".
        if self.index.nlevels > 1:
            level_names = [
                self.index.names[i]
                if self.index.names[i] is not None
                else "level_{}".format(i)
                for i in range(self.index.nlevels)
            ]
        else:
            level_names = [
                self.index.names[0]
                if self.index.names[0] is not None
                else "index"
                if "index" not in self.columns
                else "level_{}".format(0)
            ]
        # We will also use the `new_column_names` in the calculation of the internal metadata, so this is a
        # lightweight way of ensuring the metadata matches.
        if self.columns.nlevels > 1:
            # Column labels are different for multilevel index.
            new_column_names = pandas.MultiIndex.from_tuples(
                # Set level names on the 1st columns level and fill up empty level names with empty string.
                # Expand tuples in level names. This is how reset_index works when col_level col_fill are not specified.
                [
                    tuple(
                        list(level) + [""] * (self.columns.nlevels - len(level))
                        if isinstance(level, tuple)
                        else [level] + [""] * (self.columns.nlevels - 1)
                    )
                    for level in level_names
                ],
                names=self.columns.names,
            )
        else:
            new_column_names = pandas.Index(level_names, tupleize_cols=False)
        new_columns = new_column_names.append(self.columns)
        def from_labels_executor(df, **kwargs):
            # Setting the names here ensures that external and internal metadata always match.
            df.index.names = new_column_names
            # Handling of a case when columns have the same name as one of index levels names.
            # In this case `df.reset_index` provides errors related to columns duplication.
            # This case is possible because columns metadata updating is deferred. To workaround
            # `df.reset_index` error we allow columns duplication in "if" branch via `concat`.
            if any(name_level in df.columns for name_level in df.index.names):
                columns_to_add = df.index.to_frame()
                columns_to_add.reset_index(drop=True, inplace=True)
                df = df.reset_index(drop=True)
                result = pandas.concat([columns_to_add, df], axis=1, copy=False)
            else:
                result = df.reset_index()
            # Put the index back to the original due to GH#4394
            result.index = df.index
            return result
        # Only the partitions in the first column of the grid ([0]) gain the
        # new label columns; all others are kept as-is.
        new_parts = self._partition_mgr_cls.apply_func_to_select_indices(
            0,
            self._partitions,
            from_labels_executor,
            [0],
            keep_remaining=True,
        )
        # The first column partition widens by one column per index level.
        new_column_widths = [
            self.index.nlevels + self._column_widths[0]
        ] + self._column_widths[1:]
        result = self.__constructor__(
            new_parts,
            new_row_labels,
            new_columns,
            row_lengths=self._row_lengths_cache,
            column_widths=new_column_widths,
        )
        # Set flag for propagating deferred row labels across dataframe partitions
        result.synchronize_labels(axis=0)
        return result
def to_labels(self, column_list: List[Hashable]) -> "PandasDataframe":
"""
Move one or more columns into the row labels. Previous labels are dropped.
Parameters
----------
column_list : list of hashable
The list of column names to place as the new row labels.
Returns
-------
PandasDataframe
A new PandasDataframe that has the updated labels.
"""
extracted_columns = self.mask(col_labels=column_list).to_pandas()
if len(column_list) == 1:
new_labels = pandas.Index(extracted_columns.squeeze(axis=1))
else:
new_labels = pandas.MultiIndex.from_frame(extracted_columns)
result = self.mask(col_labels=[i for i in self.columns if i not in column_list])
result.index = new_labels
return result
@lazy_metadata_decorator(apply_axis="both")
def _reorder_labels(self, row_positions=None, col_positions=None):
"""
Reorder the column and or rows in this DataFrame.
Parameters
----------
row_positions : list of int, optional
The ordered list of new row orders such that each position within the list
indicates the new position.
col_positions : list of int, optional
The ordered list of new column orders such that each position within the
list indicates the new position.
Returns
-------
PandasDataframe
A new PandasDataframe with reordered columns and/or rows.
"""
if row_positions is not None:
ordered_rows = self._partition_mgr_cls.map_axis_partitions(
0, self._partitions, lambda df: df.iloc[row_positions]
)
row_idx = self.index[row_positions]
else:
ordered_rows = self._partitions
row_idx = self.index
if col_positions is not None:
ordered_cols = self._partition_mgr_cls.map_axis_partitions(
1, ordered_rows, lambda df: df.iloc[:, col_positions]
)
col_idx = self.columns[col_positions]
else:
ordered_cols = ordered_rows
col_idx = self.columns
return self.__constructor__(ordered_cols, row_idx, col_idx)
@lazy_metadata_decorator(apply_axis=None)
def copy(self):
"""
Copy this object.
Returns
-------
PandasDataframe
A copied version of this object.
"""
return self.__constructor__(
self._partitions,
self.index.copy(),
self.columns.copy(),
self._row_lengths,
self._column_widths,
self._dtypes,
)
@classmethod
def combine_dtypes(cls, list_of_dtypes, column_names):
"""
Describe how data types should be combined when they do not match.
Parameters
----------
list_of_dtypes : list
A list of pandas Series with the data types.
column_names : list
The names of the columns that the data types map to.
Returns
-------
pandas.Series
A pandas Series containing the finalized data types.
"""
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column.
dtypes = (
pandas.concat(list_of_dtypes, axis=1)
.apply(lambda row: find_common_type(row.values), axis=1)
.squeeze(axis=0)
)
dtypes.index = column_names
return dtypes
@lazy_metadata_decorator(apply_axis="both")
def astype(self, col_dtypes):
"""
Convert the columns dtypes to given dtypes.
Parameters
----------
col_dtypes : dictionary of {col: dtype,...}
Where col is the column name and dtype is a NumPy dtype.
Returns
-------
BaseDataFrame
Dataframe with updated dtypes.
"""
columns = col_dtypes.keys()
# Create Series for the updated dtypes
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if (
not isinstance(dtype, type(self.dtypes[column]))
or dtype != self.dtypes[column]
):
# Update the new dtype series to the proper pandas dtype
try:
new_dtype = np.dtype(dtype)
except TypeError:
new_dtype = dtype
if dtype != np.int32 and new_dtype == np.int32:
new_dtypes[column] = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtypes[column] = np.dtype("float64")
# We cannot infer without computing the dtype if
elif isinstance(new_dtype, str) and new_dtype == "category":
new_dtypes = None
break
else:
new_dtypes[column] = new_dtype
def astype_builder(df):
"""Compute new partition frame with dtypes updated."""
return df.astype({k: v for k, v in col_dtypes.items() if k in df})
new_frame = self._partition_mgr_cls.map_partitions(
self._partitions, astype_builder
)
return self.__constructor__(
new_frame,
self.index,
self.columns,
self._row_lengths,
self._column_widths,
new_dtypes,
)
# Metadata modification methods
def add_prefix(self, prefix, axis):
    """
    Add a prefix to the current row or column labels.

    Parameters
    ----------
    prefix : str
        The prefix to add.
    axis : int
        The axis to update.

    Returns
    -------
    PandasDataframe
        A new dataframe with the updated labels.
    """
    prefix_str = str(prefix)

    def prepend(label):
        # Coerce each label to str so non-string labels can be prefixed too.
        return prefix_str + str(label)

    if axis == 0:
        return self.rename(new_row_labels=prepend)
    return self.rename(new_col_labels=prepend)
def add_suffix(self, suffix, axis):
    """
    Add a suffix to the current row or column labels.

    Parameters
    ----------
    suffix : str
        The suffix to add.
    axis : int
        The axis to update.

    Returns
    -------
    PandasDataframe
        A new dataframe with the updated labels.
    """
    suffix_str = str(suffix)

    def append(label):
        # Coerce each label to str so non-string labels can be suffixed too.
        return str(label) + suffix_str

    if axis == 0:
        return self.rename(new_row_labels=append)
    return self.rename(new_col_labels=append)
# END Metadata modification methods
def numeric_columns(self, include_bool=True):
    """
    Return the names of numeric columns in the frame.

    Parameters
    ----------
    include_bool : bool, default: True
        Whether to consider boolean columns as numeric.

    Returns
    -------
    list
        List of column names.
    """
    # `include_bool or (not include_bool and X)` is equivalent to
    # `include_bool or X` (absorption law), so the redundant clause is dropped.
    return [
        col
        for col, dtype in zip(self.columns, self.dtypes)
        if is_numeric_dtype(dtype) and (include_bool or dtype != np.bool_)
    ]
def _get_dict_of_block_index(self, axis, indices, are_indices_sorted=False):
    """
    Convert indices to an ordered dict mapping partition (or block) index to internal indices in said partition.

    Parameters
    ----------
    axis : {0, 1}
        The axis along which to get the indices (0 - rows, 1 - columns).
    indices : list of int, slice
        A list of global indices to convert.
    are_indices_sorted : bool, default: False
        Flag indicating whether the `indices` sequence is sorted by ascending or not.
        Note: the internal algorithm requires for the `indices` to be sorted, this
        flag is used for optimization in order to not sort already sorted data.
        Be careful when passing ``True`` for this flag, if the data appears to be unsorted
        with the flag set to ``True`` this would lead to undefined behavior.

    Returns
    -------
    OrderedDict
        A mapping from partition index to list of internal indices which correspond to `indices` in each
        partition.
    """
    # TODO: Support handling of slices with specified 'step'. For now, converting them into a range
    if isinstance(indices, slice) and (
        indices.step is not None and indices.step != 1
    ):
        indices = range(*indices.indices(len(self.axes[axis])))
    # Fasttrack slices
    if isinstance(indices, slice) or (is_range_like(indices) and indices.step == 1):
        # Converting range-like indexer to slice
        indices = slice(indices.start, indices.stop, indices.step)
        if is_full_grab_slice(indices, sequence_len=len(self.axes[axis])):
            # Whole-axis selection: every partition along `axis` is taken in full.
            return OrderedDict(
                zip(
                    range(self._partitions.shape[axis]),
                    [slice(None)] * self._partitions.shape[axis],
                )
            )
        # Empty selection case
        if indices.start == indices.stop and indices.start is not None:
            return OrderedDict()
        if indices.start is None or indices.start == 0:
            # Prefix slice: recurse to locate the partition holding `stop`,
            # then take every partition before it in full.
            last_part, last_idx = list(
                self._get_dict_of_block_index(axis, [indices.stop]).items()
            )[0]
            dict_of_slices = OrderedDict(
                zip(range(last_part), [slice(None)] * last_part)
            )
            dict_of_slices.update({last_part: slice(last_idx[0])})
            return dict_of_slices
        elif indices.stop is None or indices.stop >= len(self.axes[axis]):
            # Suffix slice: recurse to locate the partition holding `start`,
            # then take every partition after it in full.
            first_part, first_idx = list(
                self._get_dict_of_block_index(axis, [indices.start]).items()
            )[0]
            dict_of_slices = OrderedDict({first_part: slice(first_idx[0], None)})
            num_partitions = np.size(self._partitions, axis=axis)
            part_list = range(first_part + 1, num_partitions)
            dict_of_slices.update(
                OrderedDict(zip(part_list, [slice(None)] * len(part_list)))
            )
            return dict_of_slices
        else:
            # Interior slice: locate the partitions holding both bounds and
            # stitch together head slice / full middle partitions / tail slice.
            first_part, first_idx = list(
                self._get_dict_of_block_index(axis, [indices.start]).items()
            )[0]
            last_part, last_idx = list(
                self._get_dict_of_block_index(axis, [indices.stop]).items()
            )[0]
            if first_part == last_part:
                return OrderedDict({first_part: slice(first_idx[0], last_idx[0])})
            else:
                if last_part - first_part == 1:
                    return OrderedDict(
                        # FIXME: this dictionary creation feels wrong - it might not maintain the order
                        {
                            first_part: slice(first_idx[0], None),
                            last_part: slice(None, last_idx[0]),
                        }
                    )
                else:
                    dict_of_slices = OrderedDict(
                        {first_part: slice(first_idx[0], None)}
                    )
                    part_list = range(first_part + 1, last_part)
                    dict_of_slices.update(
                        OrderedDict(zip(part_list, [slice(None)] * len(part_list)))
                    )
                    dict_of_slices.update({last_part: slice(None, last_idx[0])})
                    return dict_of_slices
    if isinstance(indices, list):
        # Converting python list to numpy for faster processing
        indices = np.array(indices, dtype=np.int64)
    negative_mask = np.less(indices, 0)
    has_negative = np.any(negative_mask)
    if has_negative:
        # We're going to modify 'indices' inplace in a numpy way, so doing a copy/converting indices to numpy.
        indices = (
            indices.copy()
            if isinstance(indices, np.ndarray)
            else np.array(indices, dtype=np.int64)
        )
        # Wrap negative indices around the axis length.
        indices[negative_mask] = indices[negative_mask] % len(self.axes[axis])
    # If the `indices` array was modified because of the negative indices conversion
    # then the original order was broken and so we have to sort anyway:
    if has_negative or not are_indices_sorted:
        indices = np.sort(indices)
    if axis == 0:
        bins = np.array(self._row_lengths)
    else:
        bins = np.array(self._column_widths)
    # INT_MAX to make sure we don't try to compute on partitions that don't exist.
    cumulative = np.append(bins[:-1].cumsum(), np.iinfo(bins.dtype).max)

    def internal(block_idx, global_index):
        """Transform global index to internal one for given block (identified by its index)."""
        return (
            global_index
            if not block_idx
            else np.subtract(
                global_index, cumulative[min(block_idx, len(cumulative) - 1) - 1]
            )
        )

    # Bucket each sorted global index into the partition it belongs to.
    partition_ids = np.digitize(indices, cumulative)
    count_for_each_partition = np.array(
        [(partition_ids == i).sum() for i in range(len(cumulative))]
    ).cumsum()
    # Compute the internal indices and pair those with the partition index.
    # If the first partition has any values we need to return, compute those
    # first to make the list comprehension easier. Otherwise, just append the
    # rest of the values to an empty list.
    if count_for_each_partition[0] > 0:
        first_partition_indices = [
            (0, internal(0, indices[slice(count_for_each_partition[0])]))
        ]
    else:
        first_partition_indices = []
    partition_ids_with_indices = first_partition_indices + [
        (
            i,
            internal(
                i,
                indices[
                    slice(
                        count_for_each_partition[i - 1],
                        count_for_each_partition[i],
                    )
                ],
            ),
        )
        for i in range(1, len(count_for_each_partition))
        if count_for_each_partition[i] > count_for_each_partition[i - 1]
    ]
    return OrderedDict(partition_ids_with_indices)
@staticmethod
def _join_index_objects(axis, indexes, how, sort):
    """
    Join the pair of index objects (columns or rows) by a given strategy.

    Unlike Index.join() in pandas, if `axis` is 1, `sort` is False,
    and `how` is "outer", the result will _not_ be sorted.

    Parameters
    ----------
    axis : {0, 1}
        The axis index object to join (0 - rows, 1 - columns).
    indexes : list(Index)
        The indexes to join on.
    how : {'left', 'right', 'inner', 'outer', None}
        The type of join to make. If `None` then the joined index is
        considered to be the first index in the `indexes` list.
    sort : boolean
        Whether or not to sort the joined index.

    Returns
    -------
    (Index, func)
        Joined index with make_reindexer func.
    """
    assert isinstance(indexes, list)

    def combine(lhs, rhs):
        """Combine two indices, honoring the unsorted-outer special case."""
        if axis == 1 and how == "outer" and not sort:
            return lhs.union(rhs, sort=False)
        return lhs.join(rhs, how=how, sort=sort)

    indices_match = all(indexes[0].equals(idx) for idx in indexes[1:])
    # Indexers are only required for row-axis joins over differing,
    # non-unique indexes.
    need_indexers = (
        axis == 0
        and not indices_match
        and any(not idx.is_unique for idx in indexes)
    )
    indexers = None

    if how is None or indices_match:
        # Nothing to join: the first index is the answer.
        joined_index = indexes[0].copy()
    elif len(indexes) == 2 and need_indexers:
        # Fast path: pandas can hand back the joined index together with
        # both indexers in a single call.
        indexers = [None, None]
        joined_index, indexers[0], indexers[1] = indexes[0].join(
            indexes[1], how=how, sort=sort, return_indexers=True
        )
    else:
        joined_index = indexes[0]
        # TODO: revisit for performance
        for idx in indexes[1:]:
            joined_index = combine(joined_index, idx)

    if need_indexers and indexers is None:
        indexers = [idx.get_indexer_for(joined_index) for idx in indexes]

    def make_reindexer(do_reindex: bool, frame_idx: int):
        """Create callback that reindexes a dataframe to the joined index."""
        # the order of the frames must match the order of the indexes
        if not do_reindex:
            return lambda df: df
        if need_indexers:
            assert indexers is not None
            return lambda df: df._reindex_with_indexers(
                {0: [joined_index, indexers[frame_idx]]},
                copy=True,
                allow_dups=True,
            )
        return lambda df: df.reindex(joined_index, axis=axis)

    return joined_index, make_reindexer
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _build_treereduce_func(self, axis, func):
    """
    Properly format a TreeReduce result so that the partitioning is correct.

    Parameters
    ----------
    axis : int
        The axis along which to apply the function.
    func : callable
        The function to apply.

    Returns
    -------
    callable
        A function to be shipped to the partitions to be executed.

    Notes
    -----
    This should be used for any TreeReduce style operation that results in a
    reduced data dimensionality (dataframe -> series).
    """

    def _tree_reduce_func(df, *args, **kwargs):
        """Run `func` and present a pandas.Series result as a pandas.DataFrame."""
        out = func(df, *args, **kwargs)
        if isinstance(out, pandas.Series):
            if axis == 0:
                # For axis=0, the reduce produces one value per column, so we
                # transpose the single-column frame to keep the columns of the
                # result lined up with the columns of the input data. axis=1
                # needs no such fix-up: the Series index already matches the
                # frame index.
                frame = pandas.DataFrame(out).T
                frame.index = ["__reduced__"]
            else:
                frame = pandas.DataFrame(out)
                frame.columns = ["__reduced__"]
            return frame
        # Non-Series results are wrapped as-is.
        return pandas.DataFrame(out)

    return _tree_reduce_func
def _compute_tree_reduce_metadata(self, axis, new_parts):
    """
    Compute the metadata for the result of reduce function.

    Parameters
    ----------
    axis : int
        The axis on which reduce function was applied.
    new_parts : NumPy 2D array
        Partitions with the result of applied function.

    Returns
    -------
    PandasDataframe
        Modin series (1xN frame) containing the reduced data.
    """
    other = axis ^ 1
    # The reduced axis collapses to the single "__reduced__" label; the
    # opposite axis keeps its labels and partition lengths.
    new_axes = [None, None]
    new_axes[axis] = ["__reduced__"]
    new_axes[other] = self.axes[other]
    new_lengths = [None, None]
    new_lengths[axis] = [1]
    new_lengths[other] = self._axes_lengths[other]
    # dtypes are unknown after an arbitrary reduction.
    return self.__constructor__(new_parts, *new_axes, *new_lengths, None)
@lazy_metadata_decorator(apply_axis="both")
def reduce(
    self,
    axis: Union[int, Axis],
    function: Callable,
    dtypes: Optional[str] = None,
) -> "PandasDataframe":
    """
    Perform a user-defined aggregation on the specified axis, where the axis reduces down to a singleton. Requires knowledge of the full axis for the reduction.

    Parameters
    ----------
    axis : int or modin.core.dataframe.base.utils.Axis
        The axis to perform the reduce over.
    function : callable(row|col) -> single value
        The reduce function to apply to each column.
    dtypes : str, optional
        The data types for the result. This is an optimization
        because there are functions that always result in a particular data
        type, and this allows us to avoid (re)computing it.

    Returns
    -------
    PandasDataframe
        Modin series (1xN frame) containing the reduced data.

    Notes
    -----
    The user-defined function must reduce to a single value.
    """
    axis_obj = Axis(axis)
    # Wrap `function` so a Series result is reshaped into a 1xN frame.
    reducer = self._build_treereduce_func(axis_obj.value, function)
    reduced_parts = self._partition_mgr_cls.map_axis_partitions(
        axis_obj.value, self._partitions, reducer
    )
    return self._compute_tree_reduce_metadata(axis_obj.value, reduced_parts)
@lazy_metadata_decorator(apply_axis="opposite", axis_arg=0)
def tree_reduce(
    self,
    axis: Union[int, Axis],
    map_func: Callable,
    reduce_func: Optional[Callable] = None,
    dtypes: Optional[str] = None,
) -> "PandasDataframe":
    """
    Apply function that will reduce the data to a pandas Series.

    Parameters
    ----------
    axis : int or modin.core.dataframe.base.utils.Axis
        The axis to perform the tree reduce over.
    map_func : callable(row|col) -> row|col
        Callable function to map the dataframe.
    reduce_func : callable(row|col) -> single value, optional
        Callable function to reduce the dataframe.
        If none, then apply map_func twice.
    dtypes : str, optional
        The data types for the result. This is an optimization
        because there are functions that always result in a particular data
        type, and this allows us to avoid (re)computing it.

    Returns
    -------
    PandasDataframe
        A new dataframe.
    """
    axis_obj = Axis(axis)
    mapper = self._build_treereduce_func(axis_obj.value, map_func)
    # When no explicit reducer is given, the mapper plays both roles.
    reducer = (
        mapper
        if reduce_func is None
        else self._build_treereduce_func(axis_obj.value, reduce_func)
    )
    mapped = self._partition_mgr_cls.map_partitions(self._partitions, mapper)
    reduced = self._partition_mgr_cls.map_axis_partitions(
        axis_obj.value, mapped, reducer
    )
    return self._compute_tree_reduce_metadata(axis_obj.value, reduced)
@lazy_metadata_decorator(apply_axis=None)
def map(self, func: Callable, dtypes: Optional[str] = None) -> "PandasDataframe":
    """
    Perform a function that maps across the entire dataset.

    Parameters
    ----------
    func : callable(row|col|cell) -> row|col|cell
        The function to apply.
    dtypes : dtypes of the result, optional
        The data types for the result. This is an optimization
        because there are functions that always result in a particular data
        type, and this allows us to avoid (re)computing it.

    Returns
    -------
    PandasDataframe
        A new dataframe.
    """
    mapped_parts = self._partition_mgr_cls.map_partitions(self._partitions, func)
    if dtypes == "copy":
        # The map is dtype-preserving: reuse the cached dtypes.
        result_dtypes = self._dtypes
    elif dtypes is None:
        result_dtypes = None
    else:
        # A single known result dtype is expanded over all columns.
        result_dtypes = pandas.Series(
            [np.dtype(dtypes)] * len(self.columns), index=self.columns
        )
    return self.__constructor__(
        mapped_parts,
        self.axes[0],
        self.axes[1],
        self._row_lengths_cache,
        self._column_widths_cache,
        dtypes=result_dtypes,
    )
def window(
    self,
    axis: Union[int, Axis],
    reduce_fn: Callable,
    window_size: int,
    result_schema: Optional[Dict[Hashable, type]] = None,
) -> "PandasDataframe":
    """
    Apply a sliding window operator that acts as a GROUPBY on each window, and reduces down to a single row (column) per window.

    Parameters
    ----------
    axis : int or modin.core.dataframe.base.utils.Axis
        The axis to slide over.
    reduce_fn : callable(rowgroup|colgroup) -> row|col
        The reduce function to apply over the data.
    window_size : int
        The number of row/columns to pass to the function.
        (The size of the sliding window).
    result_schema : dict, optional
        Mapping from column labels to data types that represents the types of the output dataframe.

    Returns
    -------
    PandasDataframe
        A new PandasDataframe with the reduce function applied over windows of the specified
        axis.

    Notes
    -----
    The user-defined reduce function must reduce each window's column
    (row if axis=1) down to a single value.
    """
    # Abstract operator: not implemented for this backend yet.
    pass
@lazy_metadata_decorator(apply_axis="both")
def fold(self, axis, func):
    """
    Perform a function across an entire axis.

    Parameters
    ----------
    axis : int
        The axis to apply over.
    func : callable
        The function to apply.

    Returns
    -------
    PandasDataframe
        A new dataframe.

    Notes
    -----
    The data shape is not changed (length and width of the table).
    """
    folded_parts = self._partition_mgr_cls.map_axis_partitions(
        axis, self._partitions, func, keep_partitioning=True
    )
    # Shape is preserved, so all existing axis metadata can be reused.
    return self.__constructor__(
        folded_parts,
        self.index,
        self.columns,
        self._row_lengths,
        self._column_widths,
    )
def infer_types(self, columns_list: List[str]) -> "PandasDataframe":
    """
    Determine the compatible type shared by all values in the specified columns, and coerce them to that type.

    Parameters
    ----------
    columns_list : list
        List of column labels to infer and induce types over.

    Returns
    -------
    PandasDataframe
        A new PandasDataframe with the inferred schema.
    """
    # Abstract operator: not implemented for this backend yet.
    pass
def join(
    self,
    axis: Union[int, Axis],
    condition: Callable,
    other: ModinDataframe,
    join_type: Union[str, JoinType],
) -> "PandasDataframe":
    """
    Join this dataframe with the other.

    Parameters
    ----------
    axis : int or modin.core.dataframe.base.utils.Axis
        The axis to perform the join on.
    condition : callable
        Function that determines which rows should be joined. The condition can be a
        simple equality, e.g. "left.col1 == right.col1" or can be arbitrarily complex.
    other : ModinDataframe
        The other data to join with, i.e. the right dataframe.
    join_type : string {"inner", "left", "right", "outer"} or modin.core.dataframe.base.utils.JoinType
        The type of join to perform.

    Returns
    -------
    PandasDataframe
        A new PandasDataframe that is the result of applying the specified join over the two
        dataframes.

    Notes
    -----
    During the join, this dataframe is considered the left, while the other is
    treated as the right.
    Only inner joins, left outer, right outer, and full outer joins are currently supported.
    Support for other join types (e.g. natural join) may be implemented in the future.
    """
    # Abstract operator: not implemented for this backend yet.
    pass
def rename(
    self,
    new_row_labels: Optional[Union[Dict[Hashable, Hashable], Callable]] = None,
    new_col_labels: Optional[Union[Dict[Hashable, Hashable], Callable]] = None,
    level: Optional[Union[int, List[int]]] = None,
) -> "PandasDataframe":
    """
    Replace the row and column labels with the specified new labels.

    Parameters
    ----------
    new_row_labels : dictionary or callable, optional
        Mapping or callable that relates old row labels to new labels.
    new_col_labels : dictionary or callable, optional
        Mapping or callable that relates old col labels to new labels.
    level : int, optional
        Level whose row labels to replace.

    Returns
    -------
    PandasDataframe
        A new PandasDataframe with the new row and column labels.

    Notes
    -----
    If level is not specified, the default behavior is to replace row labels in all levels.
    """
    new_index = self.index.copy()

    def make_label_swapper(label_dict):
        # A dict maps matching labels and passes others through unchanged;
        # a callable is used as-is.
        if isinstance(label_dict, dict):
            return lambda label: label_dict.get(label, label)
        return label_dict

    def swap_labels_levels(index_tuple):
        # Apply the row mapping to every level of a MultiIndex entry.
        if isinstance(new_row_labels, dict):
            return tuple(new_row_labels.get(label, label) for label in index_tuple)
        return tuple(new_row_labels(label) for label in index_tuple)

    if new_row_labels:
        swap_row_labels = make_label_swapper(new_row_labels)
        if isinstance(self.index, pandas.MultiIndex):
            if level is not None:
                # BUGFIX: `MultiIndex.set_levels` returns a new index rather
                # than mutating in place, so its result must be reassigned
                # (previously it was discarded, making this path a no-op).
                # `level` is also passed by keyword since it is keyword-only
                # in modern pandas.
                new_index = new_index.set_levels(
                    new_index.levels[level].map(swap_row_labels), level=level
                )
            else:
                new_index = new_index.map(swap_labels_levels)
        else:
            new_index = new_index.map(swap_row_labels)
    new_cols = self.columns.copy()
    if new_col_labels:
        new_cols = new_cols.map(make_label_swapper(new_col_labels))

    def map_fn(df):
        # Each partition renames its own labels with the original arguments.
        return df.rename(index=new_row_labels, columns=new_col_labels, level=level)

    new_parts = self._partition_mgr_cls.map_partitions(self._partitions, map_fn)
    # Renaming labels cannot change shape or dtypes, so caches are reused.
    return self.__constructor__(
        new_parts,
        new_index,
        new_cols,
        self._row_lengths,
        self._column_widths,
        self._dtypes,
    )
def sort_by(
    self,
    axis: Union[int, Axis],
    columns: Union[str, List[str]],
    ascending: bool = True,
) -> "PandasDataframe":
    """
    Logically reorder rows (columns if axis=1) lexicographically by the data in a column or set of columns.

    Parameters
    ----------
    axis : int or modin.core.dataframe.base.utils.Axis
        The axis to perform the sort over.
    columns : string or list
        Column label(s) to use to determine lexicographical ordering.
    ascending : boolean, default: True
        Whether to sort in ascending or descending order.

    Returns
    -------
    PandasDataframe
        A new PandasDataframe sorted into lexicographical order by the specified column(s).
    """
    # Abstract operator: not implemented for this backend yet.
    pass
@lazy_metadata_decorator(apply_axis="both")
def filter(self, axis: Union[Axis, int], condition: Callable) -> "PandasDataframe":
    """
    Filter data based on the function provided along an entire axis.

    Parameters
    ----------
    axis : int or modin.core.dataframe.base.utils.Axis
        The axis to filter over.
    condition : callable(row|col) -> bool
        The function to use for the filter. This function should filter the
        data itself.

    Returns
    -------
    PandasDataframe
        A new filtered dataframe.
    """
    axis = Axis(axis)
    assert axis in (
        Axis.ROW_WISE,
        Axis.COL_WISE,
    ), "Axis argument to filter operator must be 0 (rows) or 1 (columns)"
    filtered_parts = self._partition_mgr_cls.map_axis_partitions(
        axis.value, self._partitions, condition, keep_partitioning=True
    )
    kept, dropped = axis.value, axis.value ^ 1
    new_axes = [None, None]
    new_lengths = [None, None]
    # The filtered axis keeps its labels/lengths; the opposite axis must be
    # recomputed from the resulting partitions.
    new_axes[kept] = self.axes[kept]
    new_axes[dropped] = self._compute_axis_labels(dropped, filtered_parts)
    new_lengths[kept] = self._axes_lengths[kept]
    new_lengths[dropped] = None  # We do not know what the resulting widths will be
    return self.__constructor__(
        filtered_parts,
        *new_axes,
        *new_lengths,
        self.dtypes if axis == 0 else None,
    )
def filter_by_types(self, types: List[Hashable]) -> "PandasDataframe":
    """
    Allow the user to specify a type or set of types by which to filter the columns.

    Parameters
    ----------
    types : list
        The types to filter columns by.

    Returns
    -------
    PandasDataframe
        A new PandasDataframe from the filter provided.
    """
    # Keep only the positions of columns whose dtype is in the requested set.
    keep_positions = [
        position
        for position, col_dtype in enumerate(self.dtypes)
        if col_dtype in types
    ]
    return self.mask(col_positions=keep_positions)
@lazy_metadata_decorator(apply_axis="both")
def explode(self, axis: Union[int, Axis], func: Callable) -> "PandasDataframe":
    """
    Explode list-like entries along an entire axis.

    Parameters
    ----------
    axis : int or modin.core.dataframe.base.utils.Axis
        The axis specifying how to explode. If axis=1, explode according
        to columns.
    func : callable
        The function to use to explode a single element.

    Returns
    -------
    PandasFrame
        A new filtered dataframe.
    """
    axis = Axis(axis)
    exploded_parts = self._partition_mgr_cls.map_axis_partitions(
        axis.value, self._partitions, func, keep_partitioning=True
    )
    # The exploded axis keeps its labels; the opposite axis may have grown
    # and must be recomputed from the resulting partitions.
    if axis == Axis.COL_WISE:
        new_index = self._compute_axis_labels(0, exploded_parts)
        new_columns = self.columns
    else:
        new_index = self.index
        new_columns = self._compute_axis_labels(1, exploded_parts)
    return self.__constructor__(exploded_parts, new_index, new_columns)
@lazy_metadata_decorator(apply_axis="both")
def apply_full_axis(
    self,
    axis,
    func,
    new_index=None,
    new_columns=None,
    dtypes=None,
):
    """
    Perform a function across an entire axis.

    Parameters
    ----------
    axis : {0, 1}
        The axis to apply over (0 - rows, 1 - columns).
    func : callable
        The function to apply.
    new_index : list-like, optional
        The index of the result. We may know this in advance,
        and if not provided it must be computed.
    new_columns : list-like, optional
        The columns of the result. We may know this in
        advance, and if not provided it must be computed.
    dtypes : list-like, optional
        The data types of the result. This is an optimization
        because there are functions that always result in a particular data
        type, and allows us to avoid (re)computing it.

    Returns
    -------
    PandasDataframe
        A new dataframe.

    Notes
    -----
    The data shape may change as a result of the function.
    """
    # Delegate to the broadcast variant with no frame to broadcast.
    return self.broadcast_apply_full_axis(
        axis=axis,
        func=func,
        other=None,
        new_index=new_index,
        new_columns=new_columns,
        dtypes=dtypes,
    )
@lazy_metadata_decorator(apply_axis="both")
def apply_full_axis_select_indices(
    self,
    axis,
    func,
    apply_indices=None,
    numeric_indices=None,
    new_index=None,
    new_columns=None,
    keep_remaining=False,
):
    """
    Apply a function across an entire axis for a subset of the data.

    Parameters
    ----------
    axis : int
        The axis to apply over.
    func : callable
        The function to apply.
    apply_indices : list-like, default: None
        The labels to apply over.
    numeric_indices : list-like, default: None
        The indices to apply over.
    new_index : list-like, optional
        The index of the result. We may know this in advance,
        and if not provided it must be computed.
    new_columns : list-like, optional
        The columns of the result. We may know this in
        advance, and if not provided it must be computed.
    keep_remaining : boolean, default: False
        Whether or not to drop the data that is not computed over.

    Returns
    -------
    PandasDataframe
        A new dataframe.
    """
    assert apply_indices is not None or numeric_indices is not None
    # Translate labels into positional indices when labels were given.
    if apply_indices is not None:
        labels = self.index if axis else self.columns
        numeric_indices = labels.get_indexer_for(apply_indices)
    # Partition lookup happens along the opposite axis of the apply.
    dict_indices = self._get_dict_of_block_index(axis ^ 1, numeric_indices)
    new_partitions = (
        self._partition_mgr_cls.apply_func_to_select_indices_along_full_axis(
            axis,
            self._partitions,
            func,
            dict_indices,
            keep_remaining=keep_remaining,
        )
    )
    # TODO Infer columns and index from `keep_remaining` and `apply_indices`
    if new_index is None:
        new_index = self.index if axis == 1 else None
    if new_columns is None:
        new_columns = self.columns if axis == 0 else None
    return self.__constructor__(new_partitions, new_index, new_columns, None, None)
@lazy_metadata_decorator(apply_axis="both")
def apply_select_indices(
    self,
    axis,
    func,
    apply_indices=None,
    row_labels=None,
    col_labels=None,
    new_index=None,
    new_columns=None,
    keep_remaining=False,
    item_to_distribute=no_default,
):
    """
    Apply a function for a subset of the data.

    Parameters
    ----------
    axis : {0, 1}
        The axis to apply over.
    func : callable
        The function to apply.
    apply_indices : list-like, default: None
        The labels to apply over. Must be given if axis is provided.
    row_labels : list-like, default: None
        The row labels to apply over. Must be provided with
        `col_labels` to apply over both axes.
    col_labels : list-like, default: None
        The column labels to apply over. Must be provided
        with `row_labels` to apply over both axes.
    new_index : list-like, optional
        The index of the result. We may know this in advance,
        and if not provided it must be computed.
    new_columns : list-like, optional
        The columns of the result. We may know this in
        advance, and if not provided it must be computed.
    keep_remaining : boolean, default: False
        Whether or not to drop the data that is not computed over.
    item_to_distribute : np.ndarray or scalar, default: no_default
        The item to split up so it can be applied over both axes.

    Returns
    -------
    PandasDataframe
        A new dataframe.
    """
    # TODO Infer columns and index from `keep_remaining` and `apply_indices`
    if new_index is None:
        new_index = self.index if axis == 1 else None
    if new_columns is None:
        new_columns = self.columns if axis == 0 else None
    if axis is not None:
        # Single-axis path: apply over `apply_indices` along `axis`.
        assert apply_indices is not None
        # Convert indices to numeric indices
        old_index = self.index if axis else self.columns
        numeric_indices = old_index.get_indexer_for(apply_indices)
        # Get indices being applied to (opposite of indices being applied over)
        dict_indices = self._get_dict_of_block_index(axis ^ 1, numeric_indices)
        new_partitions = self._partition_mgr_cls.apply_func_to_select_indices(
            axis,
            self._partitions,
            func,
            dict_indices,
            keep_remaining=keep_remaining,
        )
        # Length objects for new object creation. This is shorter than if..else
        # This object determines the lengths and widths based on the given
        # parameters and builds a dictionary used in the constructor below. 0 gives
        # the row lengths and 1 gives the column widths. Since the dimension of
        # `axis` given may have changed, we currently just recompute it.
        # TODO Determine lengths from current lengths if `keep_remaining=False`
        lengths_objs = {
            axis: [len(apply_indices)]
            if not keep_remaining
            else [self._row_lengths, self._column_widths][axis],
            axis ^ 1: [self._row_lengths, self._column_widths][axis ^ 1],
        }
        return self.__constructor__(
            new_partitions, new_index, new_columns, lengths_objs[0], lengths_objs[1]
        )
    else:
        # We are applying over both axes here, so make sure we have all the right
        # variables set.
        assert row_labels is not None and col_labels is not None
        assert keep_remaining
        assert item_to_distribute is not no_default
        row_partitions_list = self._get_dict_of_block_index(0, row_labels).items()
        col_partitions_list = self._get_dict_of_block_index(1, col_labels).items()
        new_partitions = self._partition_mgr_cls.apply_func_to_indices_both_axis(
            self._partitions,
            func,
            row_partitions_list,
            col_partitions_list,
            item_to_distribute,
            # Passing caches instead of values in order to not trigger shapes recomputation
            # if they are not used inside this function.
            self._row_lengths_cache,
            self._column_widths_cache,
        )
        # Shape is unchanged on this path (keep_remaining is asserted above),
        # so cached lengths can be forwarded as-is.
        return self.__constructor__(
            new_partitions,
            new_index,
            new_columns,
            self._row_lengths_cache,
            self._column_widths_cache,
        )
@lazy_metadata_decorator(apply_axis="both")
def broadcast_apply(
    self, axis, func, other, join_type="left", preserve_labels=True, dtypes=None
):
    """
    Broadcast axis partitions of `other` to partitions of `self` and apply a function.

    Parameters
    ----------
    axis : {0, 1}
        Axis to broadcast over.
    func : callable
        Function to apply.
    other : PandasDataframe
        Modin DataFrame to broadcast.
    join_type : str, default: "left"
        Type of join to apply.
    preserve_labels : bool, default: True
        Whether keep labels from `self` Modin DataFrame or not.
    dtypes : "copy" or None, default: None
        Whether keep old dtypes or infer new dtypes from data.

    Returns
    -------
    PandasDataframe
        New Modin DataFrame.
    """
    # Sorting during copartition is only needed when the axes differ.
    need_sort = not self.axes[axis].equals(other.axes[axis])
    (
        left_parts,
        right_parts,
        joined_index,
        partition_sizes_along_axis,
    ) = self._copartition(axis, other, join_type, sort=need_sort)
    # `_copartition` returns a list of right frames; we broadcast one.
    applied_parts = self._partition_mgr_cls.broadcast_apply(
        axis, func, left_parts, right_parts[0]
    )
    if dtypes == "copy":
        dtypes = self._dtypes
    new_index, new_columns = self.index, self.columns
    # Pass shape caches instead of values in order to not trigger shape
    # computation.
    new_row_lengths = self._row_lengths_cache
    new_column_widths = self._column_widths_cache
    if not preserve_labels:
        # Adopt the joined labels/partition sizes along the broadcast axis.
        if axis == 1:
            new_columns = joined_index
            new_column_widths = partition_sizes_along_axis
        else:
            new_index = joined_index
            new_row_lengths = partition_sizes_along_axis
    return self.__constructor__(
        applied_parts,
        new_index,
        new_columns,
        new_row_lengths,
        new_column_widths,
        dtypes=dtypes,
    )
def _prepare_frame_to_broadcast(self, axis, indices, broadcast_all):
    """
    Compute the indices to broadcast `self` considering `indices`.

    Parameters
    ----------
    axis : {0, 1}
        Axis to broadcast along.
    indices : dict
        Dict of indices and internal indices of partitions where `self` must
        be broadcasted.
    broadcast_all : bool
        Whether broadcast the whole axis of `self` frame or just a subset of it.

    Returns
    -------
    dict
        Dictionary with indices of partitions to broadcast.

    Notes
    -----
    New dictionary of indices of `self` partitions represents that
    you want to broadcast `self` at specified another partition named `other`. For example,
    Dictionary {key: {key1: [0, 1], key2: [5]}} means, that in `other`[key] you want to
    broadcast [self[key1], self[key2]] partitions and internal indices for `self` must be [[0, 1], [5]]
    """
    if broadcast_all:
        # Every target partition receives the full set of partitions of
        # `self` along the broadcast axis.
        sizes = self._row_lengths if axis else self._column_widths
        return {key: dict(enumerate(sizes)) for key in indices.keys()}
    offset = 0
    result_dict = {}
    for part_num, internal in indices.items():
        # Each target partition receives the run of `self` positions that
        # lines up with its internal indices.
        span = np.arange(offset, offset + len(internal))
        result_dict[part_num] = self._get_dict_of_block_index(axis ^ 1, span)
        offset += len(internal)
    return result_dict
@lazy_metadata_decorator(apply_axis="both")
def broadcast_apply_select_indices(
    self,
    axis,
    func,
    other,
    apply_indices=None,
    numeric_indices=None,
    keep_remaining=False,
    broadcast_all=True,
    new_index=None,
    new_columns=None,
):
    """
    Apply a function to select indices at specified axis and broadcast partitions of `other` Modin DataFrame.

    Parameters
    ----------
    axis : {0, 1}
        Axis to apply function along.
    func : callable
        Function to apply.
    other : PandasDataframe
        Partitions of which should be broadcasted.
    apply_indices : list, default: None
        List of labels to apply (if `numeric_indices` are not specified).
    numeric_indices : list, default: None
        Numeric indices to apply (if `apply_indices` are not specified).
    keep_remaining : bool, default: False
        Whether drop the data that is not computed over or not.
    broadcast_all : bool, default: True
        Whether broadcast the whole axis of right frame to every
        partition or just a subset of it.
    new_index : pandas.Index, optional
        Index of the result. We may know this in advance,
        and if not provided it must be computed.
    new_columns : pandas.Index, optional
        Columns of the result. We may know this in advance,
        and if not provided it must be computed.

    Returns
    -------
    PandasDataframe
        New Modin DataFrame.
    """
    assert (
        apply_indices is not None or numeric_indices is not None
    ), "Indices to apply must be specified!"
    if other is None:
        # Nothing to broadcast: degrade to a plain selective apply.
        if apply_indices is None:
            apply_indices = self.axes[axis][numeric_indices]
        return self.apply_select_indices(
            axis=axis,
            func=func,
            apply_indices=apply_indices,
            keep_remaining=keep_remaining,
            new_index=new_index,
            new_columns=new_columns,
        )
    if numeric_indices is None:
        # Convert label-based `apply_indices` into positional indices.
        old_index = self.index if axis else self.columns
        numeric_indices = old_index.get_indexer_for(apply_indices)
    # Map positional indices to block/internal indices on the opposite axis.
    dict_indices = self._get_dict_of_block_index(axis ^ 1, numeric_indices)
    broadcasted_dict = other._prepare_frame_to_broadcast(
        axis, dict_indices, broadcast_all=broadcast_all
    )
    new_partitions = self._partition_mgr_cls.broadcast_apply_select_indices(
        axis,
        func,
        self._partitions,
        other._partitions,
        dict_indices,
        broadcasted_dict,
        keep_remaining,
    )
    # Use caller-provided axis labels when known, otherwise compute them
    # from the resulting partitions.
    new_axes = [
        self._compute_axis_labels(i, new_partitions)
        if new_axis is None
        else new_axis
        for i, new_axis in enumerate([new_index, new_columns])
    ]
    return self.__constructor__(new_partitions, *new_axes)
@lazy_metadata_decorator(apply_axis="both")
def broadcast_apply_full_axis(
    self,
    axis,
    func,
    other,
    new_index=None,
    new_columns=None,
    apply_indices=None,
    enumerate_partitions=False,
    dtypes=None,
):
    """
    Broadcast partitions of `other` Modin DataFrame and apply a function along full axis.

    Parameters
    ----------
    axis : {0, 1}
        Axis to apply over (0 - rows, 1 - columns).
    func : callable
        Function to apply.
    other : PandasDataframe or list
        Modin DataFrame(s) to broadcast.
    new_index : list-like, optional
        Index of the result. We may know this in advance,
        and if not provided it must be computed.
    new_columns : list-like, optional
        Columns of the result. We may know this in
        advance, and if not provided it must be computed.
    apply_indices : list-like, default: None
        Indices of `axis ^ 1` to apply function over.
    enumerate_partitions : bool, default: False
        Whether pass partition index into applied `func` or not.
        Note that `func` must be able to obtain `partition_idx` kwarg.
    dtypes : list-like, default: None
        Data types of the result. This is an optimization
        because there are functions that always result in a particular data
        type, and allows us to avoid (re)computing it.
        The sentinel string "copy" means "reuse self's dtypes".

    Returns
    -------
    PandasDataframe
        New Modin DataFrame.
    """
    if other is not None:
        # Normalize `other` to a list of partition arrays (or None if empty).
        if not isinstance(other, list):
            other = [other]
        other = [o._partitions for o in other] if len(other) else None
    if apply_indices is not None:
        # Convert labels into partition-block keys on the opposite axis.
        numeric_indices = self.axes[axis ^ 1].get_indexer_for(apply_indices)
        apply_indices = self._get_dict_of_block_index(
            axis ^ 1, numeric_indices
        ).keys()
    new_partitions = self._partition_mgr_cls.broadcast_axis_partitions(
        axis=axis,
        left=self._partitions,
        right=other,
        apply_func=self._build_treereduce_func(axis, func),
        apply_indices=apply_indices,
        enumerate_partitions=enumerate_partitions,
        keep_partitioning=True,
    )
    # Index objects for new object creation. This is shorter than if..else
    new_axes = [
        self._compute_axis_labels(i, new_partitions)
        if new_axis is None
        else new_axis
        for i, new_axis in enumerate([new_index, new_columns])
    ]
    if dtypes == "copy":
        # Sentinel: the operation preserves self's dtypes.
        dtypes = self._dtypes
    elif dtypes is not None:
        # A single scalar dtype for every result column.
        dtypes = pandas.Series(
            [np.dtype(dtypes)] * len(new_axes[1]), index=new_axes[1]
        )
    result = self.__constructor__(
        new_partitions,
        *new_axes,
        None,
        None,
        dtypes,
    )
    # When the caller supplied labels, push them down into the partitions so
    # internal and external indices stay in sync.
    if new_index is not None:
        result.synchronize_labels(axis=0)
    if new_columns is not None:
        result.synchronize_labels(axis=1)
    return result
def _copartition(self, axis, other, how, sort, force_repartition=False):
    """
    Copartition two Modin DataFrames.

    Perform aligning of partitions, index and partition blocks.

    Parameters
    ----------
    axis : {0, 1}
        Axis to copartition along (0 - rows, 1 - columns).
    other : PandasDataframe
        Other Modin DataFrame(s) to copartition against.
    how : str
        How to manage joining the index object ("left", "right", etc.).
    sort : bool
        Whether sort the joined index or not.
    force_repartition : bool, default: False
        Whether force the repartitioning or not. By default,
        this method will skip repartitioning if it is possible. This is because
        reindexing is extremely inefficient. Because this method is used to
        `join` or `append`, it is vital that the internal indices match.

    Returns
    -------
    tuple
        Tuple containing:
            1) 2-d NumPy array of aligned left partitions
            2) list of 2-d NumPy arrays of aligned right partitions
            3) joined index along ``axis``
            4) List with sizes of partitions along axis that partitioning
               was done on. This list will be empty if and only if all
               the frames are empty.
    """
    if isinstance(other, type(self)):
        other = [other]
    self_index = self.axes[axis]
    others_index = [o.axes[axis] for o in other]
    # `make_reindexer` builds, per frame position, the function that reindexes
    # that frame's partitions onto `joined_index`.
    joined_index, make_reindexer = self._join_index_objects(
        axis, [self_index] + others_index, how, sort
    )
    frames = [self] + other
    non_empty_frames_idx = [
        i for i, o in enumerate(frames) if o._partitions.size != 0
    ]
    # If all frames are empty
    if len(non_empty_frames_idx) == 0:
        return (
            self._partitions,
            [o._partitions for o in other],
            joined_index,
            # There are no partition sizes because the resulting dataframe
            # has no partitions.
            [],
        )
    base_frame_idx = non_empty_frames_idx[0]
    other_frames = frames[base_frame_idx + 1 :]
    # Picking first non-empty frame
    base_frame = frames[non_empty_frames_idx[0]]
    base_index = base_frame.axes[axis]
    # define conditions for reindexing and repartitioning `self` frame
    do_reindex_base = not base_index.equals(joined_index)
    do_repartition_base = force_repartition or do_reindex_base
    # Perform repartitioning and reindexing for `base_frame` if needed.
    # Also define length of base and frames. We will need to know the
    # lengths for alignment.
    if do_repartition_base:
        reindexed_base = base_frame._partition_mgr_cls.map_axis_partitions(
            axis,
            base_frame._partitions,
            make_reindexer(do_reindex_base, base_frame_idx),
        )
        # Measure the realized partition sizes along the copartition axis.
        if axis:
            base_lengths = [obj.width() for obj in reindexed_base[0]]
        else:
            base_lengths = [obj.length() for obj in reindexed_base.T[0]]
    else:
        reindexed_base = base_frame._partitions
        # NOTE(review): this reads `self`'s cached sizes, but `base_frame`
        # may be a different frame when `self` is empty — confirm the two
        # always agree on this path.
        base_lengths = self._column_widths if axis else self._row_lengths
    others_lengths = [o._axes_lengths[axis] for o in other_frames]
    # define conditions for reindexing and repartitioning `other` frames
    do_reindex_others = [
        not o.axes[axis].equals(joined_index) for o in other_frames
    ]
    do_repartition_others = [None] * len(other_frames)
    for i in range(len(other_frames)):
        do_repartition_others[i] = (
            force_repartition
            or do_reindex_others[i]
            or others_lengths[i] != base_lengths
        )
    # perform repartitioning and reindexing for `other_frames` if needed
    reindexed_other_list = [None] * len(other_frames)
    for i in range(len(other_frames)):
        if do_repartition_others[i]:
            # indices of others frame start from `base_frame_idx` + 1
            reindexed_other_list[i] = other_frames[
                i
            ]._partition_mgr_cls.map_axis_partitions(
                axis,
                other_frames[i]._partitions,
                make_reindexer(do_repartition_others[i], base_frame_idx + 1 + i),
                lengths=base_lengths,
            )
        else:
            reindexed_other_list[i] = other_frames[i]._partitions
    # Re-assemble in original frame order: empty leading frames, then the
    # (possibly repartitioned) base, then the remaining frames.
    reindexed_frames = (
        [frames[i]._partitions for i in range(base_frame_idx)]
        + [reindexed_base]
        + reindexed_other_list
    )
    return (reindexed_frames[0], reindexed_frames[1:], joined_index, base_lengths)
@lazy_metadata_decorator(apply_axis="both")
def binary_op(self, op, right_frame, join_type="outer"):
    """
    Perform an operation that requires joining with another Modin DataFrame.

    Parameters
    ----------
    op : callable
        Function applied to each pair of co-located partitions after the join.
    right_frame : PandasDataframe
        Modin DataFrame to join with.
    join_type : str, default: "outer"
        Type of join to apply.

    Returns
    -------
    PandasDataframe
        New Modin DataFrame.
    """
    # Align both frames along the row axis so their partitions pair up.
    aligned_left, aligned_right, joined_index, row_lengths = self._copartition(
        0, right_frame, join_type, sort=True
    )
    # `_copartition` returns the right side wrapped in a list; there is
    # exactly one right frame here, so unwrap it.
    aligned_right = aligned_right[0]
    joined_partitions = self._partition_mgr_cls.binary_operation(
        1,
        aligned_left,
        lambda left_df, right_df: op(left_df, right_df),
        aligned_right,
    )
    result_columns = self.columns.join(right_frame.columns, how=join_type)
    # NOTE(review): reuses self's column-width cache — assumes the binary op
    # preserves self's column partitioning; confirm for non-trivial joins.
    return self.__constructor__(
        joined_partitions,
        joined_index,
        result_columns,
        row_lengths,
        column_widths=self._column_widths_cache,
    )
@lazy_metadata_decorator(apply_axis="both")
def concat(
    self,
    axis: Union[int, Axis],
    others: Union["PandasDataframe", List["PandasDataframe"]],
    how,
    sort,
) -> "PandasDataframe":
    """
    Concatenate `self` with one or more other Modin DataFrames.

    Parameters
    ----------
    axis : int or modin.core.dataframe.base.utils.Axis
        Axis to concatenate over.
    others : list
        List of Modin DataFrames to concatenate with.
    how : str
        Type of join to use for the axis.
    sort : bool
        Whether sort the result or not.

    Returns
    -------
    PandasDataframe
        New Modin DataFrame.
    """
    axis = Axis(axis)
    # Fast path for equivalent columns and partitioning: no copartitioning
    # needed when all frames already agree on the opposite axis.
    if (
        axis == Axis.ROW_WISE
        and all(o.columns.equals(self.columns) for o in others)
        and all(o._column_widths == self._column_widths for o in others)
    ):
        joined_index = self.columns
        left_parts = self._partitions
        right_parts = [o._partitions for o in others]
        new_widths = self._column_widths_cache
    elif (
        axis == Axis.COL_WISE
        and all(o.index.equals(self.index) for o in others)
        and all(o._row_lengths == self._row_lengths for o in others)
    ):
        joined_index = self.index
        left_parts = self._partitions
        right_parts = [o._partitions for o in others]
        new_lengths = self._row_lengths_cache
    else:
        # Slow path: align all frames along the opposite axis first.
        (
            left_parts,
            right_parts,
            joined_index,
            partition_sizes_along_axis,
        ) = self._copartition(
            axis.value ^ 1, others, how, sort, force_repartition=False
        )
        if axis == Axis.COL_WISE:
            new_lengths = partition_sizes_along_axis
        else:
            new_widths = partition_sizes_along_axis
    new_partitions = self._partition_mgr_cls.concat(
        axis.value, left_parts, right_parts
    )
    if axis == Axis.ROW_WISE:
        new_index = self.index.append([other.index for other in others])
        new_columns = joined_index
        # TODO: Can optimize by combining if all dtypes are materialized
        new_dtypes = None
        # If we have already cached the length of each row in at least one
        # of the row's partitions, we can build new_lengths for the new
        # frame. Typically, if we know the length for any partition in a
        # row, we know the length for the first partition in the row. So
        # just check the lengths of the first column of partitions.
        new_lengths = []
        if new_partitions.size > 0:
            for part in new_partitions.T[0]:
                if part._length_cache is not None:
                    new_lengths.append(part.length())
                else:
                    # One unknown length invalidates the whole cache.
                    new_lengths = None
                    break
    else:
        new_columns = self.columns.append([other.columns for other in others])
        new_index = joined_index
        if self._dtypes is not None and all(o._dtypes is not None for o in others):
            new_dtypes = self.dtypes.append([o.dtypes for o in others])
        else:
            new_dtypes = None
        # If we have already cached the width of each column in at least one
        # of the column's partitions, we can build new_widths for the new
        # frame. Typically, if we know the width for any partition in a
        # column, we know the width for the first partition in the column.
        # So just check the widths of the first row of partitions.
        new_widths = []
        if new_partitions.size > 0:
            for part in new_partitions[0]:
                if part._width_cache is not None:
                    new_widths.append(part.width())
                else:
                    # One unknown width invalidates the whole cache.
                    new_widths = None
                    break
    return self.__constructor__(
        new_partitions, new_index, new_columns, new_lengths, new_widths, new_dtypes
    )
def groupby(
    self,
    axis: Union[int, Axis],
    by: Union[str, List[str]],
    operator: Callable,
    result_schema: Optional[Dict[Hashable, type]] = None,
) -> "PandasDataframe":
    """
    Generate groups based on values in the input column(s) and perform the specified operation on each.

    Parameters
    ----------
    axis : int or modin.core.dataframe.base.utils.Axis
        The axis to apply the grouping over.
    by : string or list of strings
        One or more column labels to use for grouping.
    operator : callable
        The operation to carry out on each of the groups. The operator is another
        algebraic operator with its own user-defined function parameter, depending
        on the output desired by the user.
    result_schema : dict, optional
        Mapping from column labels to data types that represents the types of the output dataframe.

    Returns
    -------
    PandasDataframe
        A new PandasDataframe containing the groupings specified, with the operator
        applied to each group.

    Notes
    -----
    No communication between groups is allowed in this algebra implementation.

    The number of rows (columns if axis=1) returned by the user-defined function
    passed to the groupby may be at most the number of rows in the group, and
    may be as small as a single row.

    Unlike the pandas API, an intermediate "GROUP BY" object is not present in this
    algebra implementation.
    """
    # Not implemented for this engine: this is an algebra placeholder that
    # currently returns None.
    pass
@lazy_metadata_decorator(apply_axis="opposite", axis_arg=0)
def groupby_reduce(
    self,
    axis,
    by,
    map_func,
    reduce_func,
    new_index=None,
    new_columns=None,
    apply_indices=None,
):
    """
    Groupby another Modin DataFrame dataframe and aggregate the result.

    Parameters
    ----------
    axis : {0, 1}
        Axis to groupby and aggregate over.
    by : PandasDataframe or None
        A Modin DataFrame to group by.
    map_func : callable
        Map component of the aggregation.
    reduce_func : callable
        Reduce component of the aggregation.
    new_index : pandas.Index, optional
        Index of the result. We may know this in advance,
        and if not provided it must be computed.
    new_columns : pandas.Index, optional
        Columns of the result. We may know this in advance,
        and if not provided it must be computed.
    apply_indices : list-like, default: None
        Indices of `axis ^ 1` to apply groupby over.

    Returns
    -------
    PandasDataframe
        New Modin DataFrame.
    """
    # `by` may be None, in which case the grouping keys are expected to be
    # contained in `self` and handled by map/reduce funcs themselves.
    by_parts = by if by is None else by._partitions
    if by is None:
        # Make sure row labels are materialized inside partitions before
        # the map phase reads them.
        self._propagate_index_objs(axis=0)
    if apply_indices is not None:
        # Convert labels on the opposite axis into partition-block keys.
        numeric_indices = self.axes[axis ^ 1].get_indexer_for(apply_indices)
        apply_indices = list(
            self._get_dict_of_block_index(axis ^ 1, numeric_indices).keys()
        )
    new_partitions = self._partition_mgr_cls.groupby_reduce(
        axis, self._partitions, by_parts, map_func, reduce_func, apply_indices
    )
    # Use caller-provided labels when known, otherwise compute from result.
    new_axes = [
        self._compute_axis_labels(i, new_partitions)
        if new_axis is None
        else new_axis
        for i, new_axis in enumerate([new_index, new_columns])
    ]
    return self.__constructor__(new_partitions, *new_axes)
@classmethod
def from_pandas(cls, df):
    """
    Create a Modin DataFrame from a pandas DataFrame.

    Parameters
    ----------
    df : pandas.DataFrame
        A pandas DataFrame.

    Returns
    -------
    PandasDataframe
        New Modin DataFrame.
    """
    # Capture metadata up front so the result mirrors the input frame.
    index, columns, dtypes = df.index, df.columns, df.dtypes
    partitions, row_lengths, column_widths = cls._partition_mgr_cls.from_pandas(
        df, True
    )
    return cls(
        partitions,
        index,
        columns,
        row_lengths,
        column_widths,
        dtypes=dtypes,
    )
@classmethod
def from_arrow(cls, at):
    """
    Create a Modin DataFrame from an Arrow Table.

    Parameters
    ----------
    at : pyarrow.table
        Arrow Table.

    Returns
    -------
    PandasDataframe
        New Modin DataFrame.
    """
    new_frame, new_lengths, new_widths = cls._partition_mgr_cls.from_arrow(
        at, return_dims=True
    )
    # Build Index objects directly via __new__, bypassing the regular
    # constructors; columns are object-dtype labels, rows a fresh RangeIndex.
    new_columns = Index.__new__(Index, data=at.column_names, dtype="O")
    new_index = Index.__new__(RangeIndex, data=range(at.num_rows))
    new_dtypes = pandas.Series(
        [cls._arrow_type_to_dtype(col.type) for col in at.columns],
        index=at.column_names,
    )
    return cls(
        partitions=new_frame,
        index=new_index,
        columns=new_columns,
        row_lengths=new_lengths,
        column_widths=new_widths,
        dtypes=new_dtypes,
    )
@classmethod
def _arrow_type_to_dtype(cls, arrow_type):
"""
Convert an arrow data type to a pandas data type.
Parameters
----------
arrow_type : arrow dtype
Arrow data type to be converted to a pandas data type.
Returns
-------
object
Any dtype compatible with pandas.
"""
import pyarrow
try:
res = arrow_type.to_pandas_dtype()
# Conversion to pandas is not implemented for some arrow types,
# perform manual conversion for them:
except NotImplementedError:
if pyarrow.types.is_time(arrow_type):
res = np.dtype(datetime.time)
else:
raise
if not isinstance(res, (np.dtype, str)):
return np.dtype(res)
return res
@lazy_metadata_decorator(apply_axis="both")
def to_pandas(self):
    """
    Convert this Modin DataFrame to a pandas DataFrame.

    Returns
    -------
    pandas.DataFrame
    """
    df = self._partition_mgr_cls.to_pandas(self._partitions)
    if df.empty:
        # Empty result: rebuild with the externally tracked labels.
        df = pandas.DataFrame(columns=self.columns, index=self.index)
    else:
        # Sanity check that internal (partition-level) and external
        # (frame-level) labels agree; mismatch indicates an internal bug.
        for axis in [0, 1]:
            ErrorMessage.catch_bugs_and_request_email(
                not df.axes[axis].equals(self.axes[axis]),
                f"Internal and external indices on axis {axis} do not match.",
            )
        df.index = self.index
        df.columns = self.columns
    return df
def to_numpy(self, **kwargs):
    """
    Convert this Modin DataFrame to a NumPy array.

    Parameters
    ----------
    **kwargs : dict
        Additional keyword arguments to be passed in `to_numpy`.

    Returns
    -------
    np.ndarray
    """
    # Delegate directly to the partition manager; no metadata is needed.
    return self._partition_mgr_cls.to_numpy(self._partitions, **kwargs)
@lazy_metadata_decorator(apply_axis=None, transpose=True)
def transpose(self):
    """
    Transpose the index and columns of this Modin DataFrame.

    Reflect this Modin DataFrame over its main diagonal
    by writing rows as columns and vice-versa.

    Returns
    -------
    PandasDataframe
        New Modin DataFrame.
    """
    # Transpose each partition, then transpose the partition grid itself.
    new_partitions = self._partition_mgr_cls.lazy_map_partitions(
        self._partitions, lambda df: df.T
    ).T
    if self._dtypes is not None:
        # After transposing, every result column mixes all original column
        # dtypes, so they collapse to one common dtype across the board.
        new_dtypes = pandas.Series(
            np.full(len(self.index), find_common_type(self.dtypes.values)),
            index=self.index,
        )
    else:
        new_dtypes = None
    # Axes and their cached sizes swap roles.
    return self.__constructor__(
        new_partitions,
        self.columns,
        self.index,
        self._column_widths,
        self._row_lengths,
        dtypes=new_dtypes,
    )
def finalize(self):
    """
    Perform all deferred calls on partitions.

    This makes `self` Modin Dataframe independent of a history of queries
    that were used to build it.
    """
    self._partition_mgr_cls.finalize(self._partitions)
def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
    """
    Get a Modin DataFrame that implements the dataframe exchange protocol.

    See more about the protocol in https://data-apis.org/dataframe-protocol/latest/index.html.

    Parameters
    ----------
    nan_as_null : bool, default: False
        A keyword intended for the consumer to tell the producer
        to overwrite null values in the data with ``NaN`` (or ``NaT``).
        This currently has no effect; once support for nullable extension
        dtypes is added, this value should be propagated to columns.
    allow_copy : bool, default: True
        A keyword that defines whether or not the library is allowed
        to make a copy of the data. For example, copying data would be necessary
        if a library supports strided buffers, given that this protocol
        specifies contiguous buffers. Currently, if the flag is set to ``False``
        and a copy is needed, a ``RuntimeError`` will be raised.

    Returns
    -------
    ProtocolDataframe
        A dataframe object following the dataframe protocol specification.
    """
    # Imported locally to avoid a circular import at module load time.
    from modin.core.dataframe.pandas.exchange.dataframe_protocol.dataframe import (
        PandasProtocolDataframe,
    )

    return PandasProtocolDataframe(
        self, nan_as_null=nan_as_null, allow_copy=allow_copy
    )
@classmethod
def from_dataframe(cls, df: "ProtocolDataframe") -> "PandasDataframe":
    """
    Convert a DataFrame implementing the dataframe exchange protocol to a Core Modin Dataframe.

    See more about the protocol in https://data-apis.org/dataframe-protocol/latest/index.html.

    Parameters
    ----------
    df : ProtocolDataframe
        The DataFrame object supporting the dataframe exchange protocol.

    Returns
    -------
    PandasDataframe
        A new Core Modin Dataframe object.
    """
    # Exact-type check (not isinstance): only a frame of precisely this
    # class is returned as-is; subclasses still go through conversion.
    if type(df) == cls:
        return df

    if not hasattr(df, "__dataframe__"):
        raise ValueError(
            "`df` does not support DataFrame exchange protocol, i.e. `__dataframe__` method"
        )

    # Imported locally to avoid a circular import at module load time.
    from modin.core.dataframe.pandas.exchange.dataframe_protocol.from_dataframe import (
        from_dataframe_to_pandas,
    )

    # Conversion materializes through pandas; warn that this defaults to pandas.
    ErrorMessage.default_to_pandas(message="`from_dataframe`")
    pandas_df = from_dataframe_to_pandas(df)
    return cls.from_pandas(pandas_df)
|
#!/usr/bin/env python
import pygame, sys, os, time, random
from pygame.locals import *
# optional: warn when optional pygame subsystems are missing.
# (Parenthesized print is valid in both Python 2 and 3; the bare
# `print '...'` statement form is a SyntaxError under Python 3.)
if not pygame.font:
    print('Warning, no fonts')
if not pygame.mixer:
    print('Warning, no sound')
pygame.init()
def loadImage(filename):
    """Load an image from `filename`, resolved relative to the working directory."""
    # os.path.join with a single argument is effectively a no-op here;
    # kept as-is to preserve behavior.
    return pygame.image.load(os.path.join(filename))

# Shared sprite used for all four paddles and the ball.
paddle_surface = loadImage("paddle.bmp")
class Paddle:
    """A movable on-screen object with position, velocity, and a score.

    The ball reuses this class, since it shares exactly the same state.
    """

    def __init__(self, x=0, y=0):
        self.x = x        # current position, pixels
        self.y = y
        self.xv = 0       # per-frame velocity
        self.yv = 0
        self.score = 0    # points accumulated across rounds

    def setPosition(self, x=0, y=0):
        """Move the object to (x, y)."""
        self.x, self.y = x, y

    def setSurface(self, surf):
        """Attach the surface used to draw this object."""
        self.surface = surf

    def draw(self, screen):
        """Blit this object's surface at its current position."""
        screen.blit(self.surface, (self.x, self.y))
# Game objects: four paddles plus the ball (the ball reuses the Paddle class).
p1 = Paddle()
p2 = Paddle()
p3 = Paddle()
p4 = Paddle()
ball = Paddle()
# players[0] is the ball; indices 1-4 line up with the "player %d" score display.
players = [ball, p1, p2, p3, p4]
width = 760#must divide by 20
height = 760
ballsize = 10
paddlespeed = ballsize
# NOTE(review): this file uses Python 2 print syntax, where `/` is integer
# division (5 here). Under Python 3 this becomes 5.0 and positions turn into
# floats — confirm the intended Python version before migrating.
ballspeed = paddlespeed/2
clock = pygame.time.Clock()
def startgame():
    """Reset paddle/ball positions and serve the ball along a random axis."""
    global p1, p2, p3, p4, ball
    # NOTE(review): p1/p4 and p2/p3 start at identical positions, although
    # collisions() treats p3/p4 as the top/bottom paddles — verify whether
    # they should start on their own walls instead.
    p1.setPosition(0, screen.get_height() - ballsize)
    p2.setPosition(screen.get_width() - ballsize, 0)
    p3.setPosition(screen.get_width() - ballsize, 0)
    p4.setPosition(0, screen.get_height() - ballsize)
    ball.setPosition(screen.get_width()/2, screen.get_height()/2)
    # Serve either horizontally or vertically, in a random direction.
    random.seed()
    r1 = random.randint(0, 1)
    r2 = random.randint(0, 1)
    if r1 == 0:
        ball.yv = 0
        if r2 == 0:
            ball.xv = -ballspeed
        else:
            ball.xv = ballspeed
    else:
        ball.xv = 0
        if r2 == 0:
            ball.yv = -ballspeed
        else:
            ball.yv = ballspeed
# Window and rendering setup.
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Ultimate Pong')
pygame.mouse.set_visible(0)
font = pygame.font.Font(None, 36)
# Pre-rendered banners: PING/PONG alternate on each paddle hit.
t_ping = font.render("PING", 1, (90, 90, 90))
t_pong = font.render("PONG", 1, (90, 90, 90))
lose_render = font.render("LOSE", 1, (90, 90, 90))
# Centered rect shared by all banners.
textpos = t_pong.get_rect(centerx = screen.get_width()/2, centery =screen.get_height()/2)
pong = True
startgame()
# All paddles and the ball share the same sprite.
for player in [ p1, p2, p3, p4 ]:
    player.setSurface(paddle_surface)
ball.setSurface(paddle_surface)
def readinput(events):
    """Handle pygame events: exit on QUIT, update paddle velocities on keys.

    Pressing a key gives the mapped paddle a constant velocity on one axis;
    releasing the same key zeroes that axis again. Unmapped keys are ignored.
    """
    # Key -> (paddle, velocity attribute, velocity while the key is held).
    key_bindings = {
        K_RIGHT: (p1, "xv", paddlespeed),
        K_LEFT: (p1, "xv", -paddlespeed),
        K_DOWN: (p1, "yv", paddlespeed),
        K_UP: (p1, "yv", -paddlespeed),
        K_s: (p2, "yv", paddlespeed),
        K_w: (p2, "yv", -paddlespeed),
        K_d: (p2, "xv", paddlespeed),
        K_a: (p2, "xv", -paddlespeed),
        K_h: (p3, "xv", paddlespeed),
        K_f: (p3, "xv", -paddlespeed),
        K_g: (p3, "yv", paddlespeed),
        K_t: (p3, "yv", -paddlespeed),
        K_l: (p4, "xv", paddlespeed),
        K_j: (p4, "xv", -paddlespeed),
        K_k: (p4, "yv", paddlespeed),
        K_i: (p4, "yv", -paddlespeed),
    }
    for event in events:
        if event.type == QUIT:
            sys.exit()
        if event.type == KEYDOWN:
            binding = key_bindings.get(event.key)
            if binding is not None:
                paddle, attr, velocity = binding
                setattr(paddle, attr, velocity)
        elif event.type == KEYUP:
            binding = key_bindings.get(event.key)
            if binding is not None:
                paddle, attr, _velocity = binding
                setattr(paddle, attr, 0)
def hit():
    """Toggle the PING/PONG banner state on every paddle hit."""
    global pong
    pong = not pong
def collisions():
    """Bounce the ball off whichever paddle it has reached.

    Uses exact equality: all speeds are multiples of `ballsize`, so the ball
    lands exactly on a paddle's edge coordinate when a collision is possible.
    p1 guards the left wall, p2 the right, p3 the top, p4 the bottom.
    """
    if ball.x == p1.x + ballsize:
        # NOTE(review): requires exact y alignment — a paddle wider than one
        # ballsize would never register here; confirm intended behavior.
        if p1.y == ball.y:
            hit()
            if ball.xv != 0:
                # Head-on: reverse horizontal travel.
                ball.xv = -ball.xv
                ball.yv = 0
            else:
                # Ball was moving vertically: deflect it rightwards.
                ball.xv = ballspeed
                ball.yv = 0
    if ball.x == p2.x - ballsize:
        if p2.y == ball.y:
            hit()
            if ball.xv != 0:
                ball.xv = -ball.xv
                ball.yv = 0
            else:
                ball.yv = 0
                ball.xv = -ballspeed
    if ball.y == p3.y + ballsize:
        if p3.x == ball.x:
            hit()
            if ball.yv != 0:
                ball.yv = -ball.yv
                ball.xv = 0
            else:
                ball.yv = ballspeed
                ball.xv = 0
    if ball.y == p4.y - ballsize:
        if p4.x == ball.x:
            hit()
            if ball.yv != 0:
                ball.yv = -ball.yv
                ball.xv = 0
            else:
                ball.xv = 0
                ball.yv = -ballspeed
def gameover():
    """Award a point to the scoring side, then render scores and the LOSE banner."""
    # calculate scores: the ball left the field on exactly one side.
    if ball.x <= -ballsize:
        p2.score +=1
    elif ball.x >= screen.get_width():
        p1.score +=1
    elif ball.y <= -ballsize:
        p4.score +=1
    else:
        # Reached only when ball.y >= screen height (see the caller's check).
        p3.score +=1
    # display scores, one line per player, offset 40px apart.
    tmp=0
    screen.fill((0, 0, 0))
    for i in range(1,5):
        player_score = font.render("player %d: %d" % (i, players[i].score), 1, (100, 100, 100))
        screen.blit(player_score, (screen.get_width()/2 + 40, screen.get_height()/2 + 40 + tmp))
        tmp += 40
    screen.blit(lose_render, textpos)
    pygame.display.flip()
# Main game loop, fixed at 20 frames per second.
while (True):
    clock.tick(20)
    # Ball fully outside the field on any side means the round is over.
    if ball.x <= -ballsize or ball.x >= screen.get_width() or ball.y <=-ballsize or ball.y >=screen.get_height(): #ie game over
        gameover()
        time.sleep(3)
        startgame()
        continue
    else:
        screen.fill((0, 0, 0))
        readinput(pygame.event.get())
        # Banner alternates between PING and PONG on each paddle hit.
        if pong:
            screen.blit(t_pong, textpos)
        else:
            screen.blit(t_ping, textpos)
        # Move and draw every object (the ball is a Paddle too).
        for player in [p1, p2, p3, p4, ball]:
            player.x += player.xv
            player.y += player.yv
            player.draw(screen)
        collisions()
        pygame.display.flip()
|
# General imports
import argparse
# Data imports
from aides_dataset import AidesDataset
# Models imports
from bertopic import BERTopic # BERTopic for topic modeling
from transformers import pipeline # We will load XNLI for zeroshot classification
def get_most_frequent_words(topic, n_words):
    """Build a topic label from the `n_words` most frequent words of a topic.

    Parameters
    ----------
    topic : list of (word, frequency) couples, as returned by BERTopic's
        ``get_topics``.
    n_words : int
        Number of words to keep.

    Returns
    -------
    str
        The selected words joined by ", ", most frequent first.
    """
    # Sort by frequency, highest first. The previous version sorted ascending
    # and took the head of the list, which selected the *least* frequent
    # words despite the "most frequents" intent. `sorted` also avoids
    # mutating the caller's list in place.
    ranked = sorted(topic, key=lambda couple: couple[1], reverse=True)
    return ", ".join(word for word, _ in ranked[:n_words])


if __name__=="__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-bertopic_path", type=str, required=True,
                        help="Path to file containing BERTopic model.")
    parser.add_argument("-n_words_per_topic", type=int, required=False, default=5,
                        help="Number of word to define a topic. Uses the most frequents from the topics.")
    parser.add_argument("-zeroshot_model", type=str, required=False,
                        default="BaptisteDoyen/camembert-base-xnli",
                        help="HuggingFace model name to perform zero-shot classification with.")
    parser.add_argument("-aides_path", type=str, required=True,
                        help="Path to file containing MT aides dataset.")
    parser.add_argument("-results_path", type=str, required=True,
                        help="Path to file to save results to.")
    args = parser.parse_args()

    # Load BERTopic model
    print(f"Loading BERTopic model from {args.bertopic_path}.")
    topic_model = BERTopic.load(args.bertopic_path)
    all_topics = topic_model.get_topics()

    # For each topic extracted by bertopic, build a candidate label out of
    # its most frequent words.
    print(f"Computing {args.n_words_per_topic} most frequent words per topic.")
    all_topics = [
        get_most_frequent_words(topic, args.n_words_per_topic)
        for topic in all_topics.values()
    ]

    # Load 0-shot classifier
    print(f"Loading classifier {args.zeroshot_model}.")
    classifier = pipeline("zero-shot-classification", model=args.zeroshot_model)

    # Load data
    print(f"Loading dataset from {args.aides_path}.")
    aides_dataset = AidesDataset(args.aides_path)

    # Pre-process data: keep only the fields used for classification and
    # strip HTML/escapes while preserving case, punctuation and stopwords.
    print("Pre-processing data")
    aides_dataset.filter_features(["name", "description"])
    aides_dataset.clean_text_features(["description"],
                                      no_html_tags=True,
                                      no_escaped_characters=True,
                                      no_punctuation=False,
                                      no_upper_case=False,
                                      no_stopwords=False)
    docs = [f'Titre : {aide["name"]}\nDescription : {aide["description"]}' for aide in aides_dataset.aides]

    # Classify each aide against the topic labels and log the best match.
    print(f"Classifying aides & writing results in {args.results_path}.")
    with open(args.results_path, "w") as file:
        for doc in docs:
            result = classifier(doc, all_topics)
            # Labels/scores come back sorted by score, best first.
            predicted_topic = result["labels"][0]
            topic_proba = result["scores"][0]
            file.write("--------------------------------------------------------------------------------\n")
            file.write(f"--> Topic: {predicted_topic} (with probability {topic_proba})\n")
            file.write(f"--> Aide text:\n{doc}\n")
|
def isCaseInsensitivePalindrome(inputString):
    """Check whether `inputString` is a palindrome, ignoring letter case.

    Equivalently: can the string become a palindrome by changing the case of
    some (possibly no) letters?
    """
    normalized = inputString.lower()
    reversed_form = "".join(reversed(normalized))
    return normalized == reversed_form
# Demo: True ("aabaa" reads the same reversed), then False ("aabbc" does not).
print(isCaseInsensitivePalindrome("AaBaa"))
print(isCaseInsensitivePalindrome("aabbc"))
# -*- coding: utf-8 -*-
"""
DATS_6501: Capstone
Spring 2020
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import Toolbox as tls
#Import data: load and clean the 2018 PUMS extract via the project Toolbox.
df2018 = tls.dataClean('psam_', 2018)
df2018 = tls.additionalClean(df2018)
# Features only; the last column is assumed to be the target.
X = df2018.iloc[:,:-1]
# First 10 columns are categorical, the rest numerical (per Toolbox layout).
catCols = X.columns[:10]
numCols = X.columns[10:]
# Association heatmap between categorical features (Cramer's V).
cramersV = tls.cramersV_matrix(catCols, X)
plt.figure()
sns.heatmap(cramersV, annot=True, fmt = '.2f', cmap = 'BuGn_r', \
            xticklabels=catCols, yticklabels=catCols)
plt.title('Cramer\'s V Association Matrix')
plt.xticks(rotation = 45)
plt.tight_layout()
#plt.savefig('2018CramersHeatmap.png')
plt.close()
# Correlation heatmap between numerical features.
corrMatrix = tls.corr_matrix(numCols, X)
plt.figure()
sns.heatmap(corrMatrix, annot=True, fmt = '.2f', cmap = 'BuGn_r', \
            xticklabels=numCols, yticklabels=numCols)
plt.title('Correlation Matrix')
plt.xticks(rotation = 45)
plt.tight_layout()
#plt.savefig('2018CorrelationHeatmap.png')
plt.close()
#based on evaluation of violin plots of categorical vs numerical variables, the
# column MV was dropped due to strong relationship with Age
df2018 = df2018.drop(columns='MV')
#Produce the final sampling method and estimator
# (index [0][2] picks the best-ranked pipeline/sampler — see Toolbox).
estimators = tls.getPipeline(df2018)
pipe = estimators[0][2]
samplers = tls.samplingTest(df2018, pipe)
sampling = samplers[0][2]
#Import and clean data from additional years, mirroring the 2018 treatment.
df2012 = tls.dataClean('ss12', 2012)
df2012 = tls.additionalClean(df2012)
df2012 = df2012.drop(columns='MV')
df2007 = tls.dataClean('ss07', 2007)
df2007 = tls.additionalClean(df2007)
df2007 = df2007.drop(columns='MV')
#Fit the final model and generate visuals of the performance metrics
# and feature importances
tls.finalForm(df2018, 2018, pipe, sampling)
tls.finalForm(df2012, 2012, pipe, sampling)
tls.finalForm(df2007, 2007, pipe, sampling)
|
def fib_num(n):
    """Sum the even Fibonacci numbers that do not exceed `n`.

    Also prints the list of even terms found (behavior kept from the
    original implementation).
    """
    evens = []
    previous, current = 1, 1
    while current <= n:
        if current % 2 == 0:
            evens.append(current)
        previous, current = current, previous + current
    print(evens)
    return sum(evens)
import sys

# Fast stdin reader; the recursion limit bump is boilerplate from a
# competitive-programming template (unused by this logic).
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)

# Read one integer and print it clamped to be non-negative.
value = int(input())
print(max(value, 0))
|
from collections import namedtuple
import pandas as pd
import numpy as np
from etfl.io.json import load_json_model
from etfl.optim.config import standard_solver_config, growth_uptake_config
from etfl.optim.variables import GrowthActivation, BinaryActivator, \
mRNAVariable, EnzymeVariable
from time import time
from copy import copy
from etfl.optim.utils import fix_growth, release_growth, \
get_active_growth_bounds, safe_optim
try:
from gurobipy import GRB
except ModuleNotFoundError:
pass
solver = 'optlang-gurobi'
# solver = 'optlang-cplex'
def _va_sim(model):
    """Run a variability pair on the model's current objective.

    Optimizes the objective in the 'max' direction first, then 'min',
    and returns the two solutions as (sol_min, sol_max).
    """
    solutions = {}
    for direction in ('max', 'min'):
        model.objective.direction = direction
        solutions[direction] = safe_optim(model)
    return solutions['min'], solutions['max']
def simulate(available_uptake, model, variables, warm_start=None):
    """Maximize growth at a fixed glucose availability and collect metrics.

    Sets the glucose exchange lower bound to `available_uptake` (a negative
    flux = uptake), maximizes growth, then optionally runs min/max variability
    analysis on each variable name in `variables` with growth fixed.

    Args:
        available_uptake: lower bound placed on EX_glc__D_e (negative value).
        model: an ETFL model object (solver-backed).
        variables: iterable of model variable names to run VA on.
        warm_start: unused here (kept for call-site compatibility).

    Returns:
        pd.Series of metrics; all-NaN metrics if the problem is infeasible.
    """
    # model.solver.problem.reset()
    model.logger.info('available_uptake = {}'.format(available_uptake))
    model.reactions.EX_glc__D_e.lower_bound = available_uptake
    model.growth_reaction.lower_bound = 0
    model.growth_reaction.upper_bound = 10
    model.objective = model.growth_reaction.id
    model.objective.direction = 'max'
    out = safe_optim(model)
    if model.solver.status == 'infeasible':
        # Infeasible at this uptake: return NaNs so the caller's DataFrame
        # keeps one row per uptake value.
        ret = {'obj':np.nan,
               'mu': np.nan,
               'mu_lb':np.nan,
               'mu_ub':np.nan,
               'available_substrate':available_uptake,
               'uptake':np.nan,
               'prot_ratio':np.nan,
               'mrna_ratio':np.nan
               }
        for var in variables:
            ret[var + '_lb'] = np.nan
            ret[var + '_ub'] = np.nan
        print('INFEASIBLE SOLUTION AT q={}'.format(available_uptake))
        return pd.Series(ret)
    # Snapshot the growth solution before VA re-optimizations overwrite it.
    growth_solution = copy(model.solution)
    mu_i, mu_lb, mu_ub = get_active_growth_bounds(model)
    mu = model.growth_reaction.flux
    # release_warm_start(model)
    try:
        # Fast path: models with interpolation variables expose the mass
        # ratios directly.
        prot_ratio = model.interpolation_variable.prot_ggdw.variable.primal
        mrna_ratio = model.interpolation_variable.mrna_ggdw.variable.primal
    except AttributeError:
        # Model without neidhardt data
        # Fallback: sum the individual mRNA / enzyme variable primals.
        prot = []
        mRNA = []
        RNAs = model.get_variables_of_type(mRNAVariable)
        Prots = model.get_variables_of_type(EnzymeVariable)
        for the_var in RNAs:
            mRNA += [model.solution.raw.loc[the_var.name]]
        for the_var in Prots:
            prot += [model.solution.raw.loc[the_var.name]]
        prot_ratio = sum(prot)
        mrna_ratio = sum(mRNA)
    # Sign convention: uptake bounds are negative, so flip them for reporting.
    ret = {'obj':model.solution.objective_value,
           'mu': mu,
           'mu_lb':mu_lb,
           'mu_ub':mu_ub,
           'available_substrate':-1*available_uptake,
           'uptake':-1*growth_solution.fluxes['EX_glc__D_e'],
           'prot_ratio':prot_ratio,
           'mrna_ratio':mrna_ratio
           }
    # Pin growth at its optimum so the VA below explores the optimal face only.
    fix_growth(model, model.solution)
    for var in variables:
        # THIS WILL DO THE VA ON ETHANOL
        model.objective = model.variables.get(var)
        lb, ub = _va_sim(model)
        ret[var + '_lb'] = lb.objective_value#np.nan
        ret[var + '_ub'] = ub.objective_value#np.nan
        print(pd.Series(ret))
    # Undo the growth fixing so the model is reusable for the next uptake.
    release_growth(model)
    # apply_warm_start(model, growth_solution)
    # Add values of other secretions in the ret dictionnary
    for exch in model.reactions:
        ret[exch.id] = growth_solution.fluxes.loc[exch.id]
    return pd.Series(ret)
if __name__ == '__main__':
    # Variable names to run variability analysis on (all disabled by default;
    # simulate() skips VA when this list is empty).
    variables = [
        # 'EZ_rib',
        # 'EZ_rnap',
        # 'EZ_dummy_enzyme',
        # 'MR_dummy_gene',
        # 'DM_ac_e', # acetate exchange
        ]
    # uptake_range = pd.Series(np.arange(-1,-40, -1))
    # uptake_range = pd.Series(np.arange(-1,-30, -1))
    # uptake_range = pd.Series(np.arange(-1,-15, -0.5))
    # uptake_range = pd.Series(np.arange(-15,-20, -1))
    # Glucose availabilities to sweep: a fine grid down to -5, then a coarse
    # grid from -6 to -12.
    # FIX: pd.Series.append was deprecated and removed in pandas 2.0;
    # pd.concat is the supported equivalent and yields the same values.
    uptake_range = pd.concat([pd.Series(np.arange(-1/3, -5, -1/3)),
                              pd.Series(np.arange(-6, -13, -1))])
    # uptake_range = pd.Series(np.arange(-0,-0.25,-0.25))
    model_files = {
        # 'cEFL':'iJO1366_cEFL_2037_enz_128_bins__20201103_191050.json',
        # 'vEFL':'iJO1366_vEFL_2037_enz_128_bins__20201103_190819.json',
        'cETFL':'SlackModel iJO1366_cETFL_2037_enz_128_bins__20201103_190927.json',
        'vETFL':'SlackModel iJO1366_vETFL_2037_enz_128_bins__20201103_190721.json',
    }
    models = {k:load_json_model('models/'+v,solver=solver) for k,v in model_files.items()}
    data = {}
    for name,model in models.items():
        # growth_uptake_config(model)
        model.warm_start = None
        model.logger.info('Simulating ...')
        start = time()
        # One simulate() call per uptake value; the result is a DataFrame with
        # one row per uptake.
        data[name] = uptake_range.apply(simulate, args=[model,variables])
        stop = time()
        print('Elapsed time: {}'.format(stop - start))
        data[name].to_csv('outputs/benchmark_{}.csv'.format(name))
|
# Copyright 2019 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from setuptools import find_packages
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))

# Get the long description from the README file
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
  long_description = f.read()

setup(
    name="bdlb",
    version="0.0.2",
    description="BDL Benchmarks",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/oatml/bdl-benchmarks",
    author="Oxford Applied and Theoretical Machine Learning Group",
    author_email="oatml@googlegroups.com",
    license="Apache-2.0",
    packages=find_packages(),
    install_requires=[
        # Pinned dependencies, currently disabled (installed out-of-band).
        # "numpy==1.18.5",
        # "scipy==1.4.1",
        # "pandas==1.0.4",
        # "matplotlib==3.2.1",
        # "seaborn==0.10.1",
        # "scikit-learn==0.21.3",
        # "kaggle==1.5.6",
        # "opencv-python==4.2.0.34",
        # "tensorflow-gpu==2.0.0-beta0",
        # "tensorflow-probability==0.7.0",
        # "tensorflow-datasets==1.1.0",
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        # FIX: "Intended Audience :: Researchers" and "License :: OSI Approved
        # :: Apache 2.0 License" are not valid trove classifiers and would be
        # rejected on upload to PyPI; replaced with the official names.
        "Intended Audience :: Science/Research",
        "Topic :: Software Development :: Build Tools",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
)
|
import torch
import numpy as np
from tqdm import tqdm
from time import time
import sys
from os.path import join
import lpips
from Hessian.GAN_hessian_compute import hessian_compute
from torchvision.transforms import ToPILImage
from torchvision.utils import make_grid
# Perceptual (LPIPS) distance used as the image-space metric for the Hessian.
ImDist = lpips.LPIPS(net='squeeze').cuda()
use_gpu = True if torch.cuda.is_available() else False
# Load the pretrained progressive GAN for CelebA-HQ 256px from torch hub
# (downloads the weights on first use).
model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub',
                       'PGAN', model_name='celebAHQ-256',
                       pretrained=True, useGPU=use_gpu)
class PGGAN_wrapper:  # nn.Module
    """Thin adapter giving a PGGAN generator a `visualize` interface.

    Maps the generator's raw output range [-1, 1] into displayable
    images in [0, 1] (times an optional scale factor).
    """

    def __init__(self, PGGAN, ):
        self.PGGAN = PGGAN

    def visualize(self, code, scale=1):
        raw = self.PGGAN.forward(code,)  # Matlab version default to 0.7
        images = (raw + 1.0) / 2.0
        return torch.clamp(images, 0, 1) * scale
G = PGGAN_wrapper(model.avgG)
#%%
from argparse import ArgumentParser
parser = ArgumentParser(description='Computing Hessian at different part of the code space in PG GAN')
parser.add_argument('--dataset', type=str, default="rand", help='dataset name `pasu` or `evol`, `text`')
parser.add_argument('--method', type=str, default="BP", help='Method of computing Hessian can be `BP` or `ForwardIter` `BackwardIter` ')
parser.add_argument('--idx_rg', type=int, default=[0, 200], nargs="+", help='range of index of vectors to use')
parser.add_argument('--EPS', type=float, default=1E-4, help='EPS of finite differencing HVP operator, will only be ')
args = parser.parse_args()
# Use the provided [start, end) index range; anything other than exactly two
# values falls back to the full 0-200 sweep.
if len(args.idx_rg) == 2:
    id_str, id_end = args.idx_rg[0], args.idx_rg[1]
else:
    id_str, id_end = 0, 200
    print("doing it all! ")
#%%
# figdir = r"E:\OneDrive - Washington University in St. Louis\Hessian_summary\DCGAN"
savedir = r"/scratch/binxu/GAN_hessian"
# Compare three Hessian estimators (BackwardIter, ForwardIter, BP) at random
# latent codes and archive eigen-decompositions + raw Hessians per trial.
for triali in tqdm(range(id_str, id_end)):
    noise, _ = model.buildNoiseData(1)
    feat = noise.detach().clone().cuda()
    T0 = time()
    eva_BI, evc_BI, H_BI = hessian_compute(G, feat, ImDist, hessian_method="BackwardIter")
    print("%.2f sec" % (time() - T0))  # 13.40 sec
    T0 = time()
    # FIX: the --EPS argument was parsed but never used; the finite-difference
    # step was hard-coded to 1E-4. args.EPS defaults to 1E-4, so default
    # behaviour is unchanged.
    eva_FI, evc_FI, H_FI = hessian_compute(G, feat, ImDist, hessian_method="ForwardIter", EPS=args.EPS)
    print("%.2f sec" % (time() - T0))  # 6.89 sec
    T0 = time()
    eva_BP, evc_BP, H_BP = hessian_compute(G, feat, ImDist, hessian_method="BP")
    print("%.2f sec" % (time() - T0))  # 12.5 sec
    # Pairwise agreement between the estimators, measured as the correlation
    # of the flattened Hessian matrices.
    print("Correlation of Flattened Hessian matrix BP vs BackwardIter %.3f" % np.corrcoef(H_BP.flatten(), H_BI.flatten())[0, 1])
    print("Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f" %
          np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1])
    print("Correlation of Flattened Hessian matrix ForwardIter vs BackwardIter %.3f"%
          np.corrcoef(H_FI.flatten(), H_BI.flatten())[0, 1])
    np.savez(join(savedir, "Hessian_cmp_%d.npz" % triali), eva_BI=eva_BI, evc_BI=evc_BI, H_BI=H_BI,
             eva_FI=eva_FI, evc_FI=evc_FI, H_FI=H_FI,
             eva_BP=eva_BP, evc_BP=evc_BP, H_BP=H_BP, feat=feat.detach().cpu().numpy())
    print("Save finished")
|
"""Script to get the air quality based on the user's current location"""
import sys
import requests as req
if __name__ == "__main__":
    # Usage: python script.py <WAQI_API_TOKEN>
    if len(sys.argv) > 1:
        url = "https://api.waqi.info/feed/here/?token=" + sys.argv[1]
        # SECURITY NOTE(review): verify=False disables TLS certificate
        # verification; kept to preserve behaviour, but should be confirmed
        # and ideally removed.
        response = req.get(url, verify=False)
        # FIX: parse the JSON body once instead of re-parsing it on every
        # field access (the original called response.json() five times).
        payload = response.json()
        if payload['status'] == "ok":
            print("The air quality is " + str(payload['data']['aqi']))
            print("The data was fetched from " +
                  payload['data']['city']['name'])
            # 'dominentpol' is the WAQI API's own (misspelled) field name.
            print("The pollutant measured was " +
                  str(payload['data']['dominentpol']))
        elif payload['status'] == "error":
            print("The server returned an error. The message is " +
                  payload['data'])
    else:
        print("Cannot fetch AQI without token")
|
import timeit
# (module, constructor) pairs to benchmark; pyblake2 is the third-party
# BLAKE2 backport, the rest come from the stdlib hashlib.
HASHES = [
    ("pyblake2", "blake2b"),
    ("pyblake2", "blake2s"),
    ("hashlib", "md5"),
    ("hashlib", "sha1"),
    ("hashlib", "sha256"),
    ("hashlib", "sha512"),
]
# Input sizes in bytes (2047/2048 probe a block-size boundary).
SIZES = [64, 128, 1024, 2047, 2048, 1000000]
# timeit setup: bind the constructor as `hasher` and build the input buffer.
SETUP_CODE = """
from {mod} import {fn} as hasher
data = b'x'*{size}
"""
# timeit statement: one full hash of `data` per iteration.
BENCH_CODE = """
h = hasher()
h.update(data)
h.digest()
"""
def measure(mod, fn, size):
    """Benchmark one hash constructor and return its throughput in MB/s.

    Times `number=10` hashes of a `size`-byte buffer, repeated 5 times,
    and converts the best (fastest) repeat into megabytes per second.
    """
    iterations = 10
    setup = SETUP_CODE.format(mod=mod, fn=fn, size=size)
    timings = timeit.repeat(BENCH_CODE, setup, number=iterations, repeat=5)
    fastest = min(timings)
    megabytes = iterations * size / 1024.0 / 1024.0
    return megabytes / fastest
def main():
    """Print a throughput table: every hash in HASHES at every size in SIZES."""
    for size in SIZES:
        print("{0} bytes\n".format(size))
        for module_name, hash_name in HASHES:
            throughput = measure(module_name, hash_name, size)
            print("  {0}.{1} \t {2:3.0f} MB/s".format(module_name, hash_name, throughput))
        print("")


if __name__ == "__main__":
    main()
|
import os
import sys
import subprocess

# Forward all CLI arguments to the BERT training binary.
# FIX: the original interpolated argv into an os.system() shell string, which
# allows shell injection via the arguments; subprocess.run with an argument
# list (shell=False) executes the binary directly.
args = ["/workspace/onnxruntime_training_bert"] + sys.argv[1:]
print(" ".join(args))
subprocess.run(args)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.