repo_name (string, 6-97 chars) | path (string, 3-341 chars) | text (string, 8-1.02M chars)
|---|---|---|
jack-florey/pong
|
app/games/__init__.py
|
from app.games.views import games # noqa
|
jack-florey/pong
|
app/games/forms.py
|
from flask_wtf import FlaskForm
from wtforms import ValidationError
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.fields import (
PasswordField,
StringField,
SubmitField,
FieldList,
FormField
)
from wtforms.fields.html5 import EmailField
from wtforms.validators import (
Email,
EqualTo,
InputRequired,
Length,
)
from app import db
from app.models import Role, User, Player
class PlayerForm(FlaskForm):
name = StringField('Name')
class SetupGameForm(FlaskForm):
name = StringField(
'Name', validators=[InputRequired(),
Length(1, 64)])
players = FieldList(FormField(PlayerForm), min_entries=10)
submit = SubmitField('Start Game')
def validate_name(self, field):
if Player.query.filter_by(name=field.data).first():
raise ValidationError('Name already registered, please pick another!')
|
jack-florey/pong
|
app/games/views.py
|
from flask import (
Blueprint,
abort,
flash,
redirect,
render_template,
request,
url_for,
)
from flask_login import current_user, login_required
from flask_rq import get_queue
import sys
from app import db
from app.games.forms import (
SetupGameForm,
)
from app.decorators import admin_required
from app.models import Game, GameState
from flask import Flask
games = Blueprint('games', __name__)
app = Flask(__name__)
@games.route('/')
@admin_required
def index():
"""Admin dashboard page."""
return render_template('admin/index.html')
@games.route('/create')
@admin_required
def create_game():
#Create the model
g = Game(name="%s's Game" % current_user.first_name)
db.session.add(g)
db.session.commit()
game_id = g.id
#forward the user
return redirect(url_for('games.setup_game', game_id=game_id))
@games.route('/<int:game_id>/play')
def play_game(game_id):
game = Game.query.filter_by(id=game_id).first()
gs = game.game_object
return render_template('games/play_game.html', game=gs)
@games.route('/<int:game_id>/actions/cup_hit', methods=['POST'])
def cup_hit(game_id):
game = Game.query.filter_by(id=game_id).first()
gs = game.game_object
gs.cup_hit(request.form['target_id'])
game.update_game_state(gs)
db.session.add(game)
db.session.commit()
return "hit"
@games.route('/<int:game_id>/actions/cycle_rack', methods=['POST'])
def cycle_rack(game_id):
game = Game.query.filter_by(id=game_id).first()
gs = game.game_object
gs.cycle_rack(request.form['target_id'])
game.update_game_state(gs)
db.session.add(game)
db.session.commit()
return "cycled"
@games.route('/<int:game_id>/setup', methods=['GET', 'POST'])
@login_required
@admin_required
def setup_game(game_id):
game = Game.query.filter_by(id=game_id).first()
if game is None:
abort(404)
if game.game_object and not game.game_object.is_in_setup():
return redirect(url_for('games.play_game', game_id=game.id))
form = SetupGameForm(obj = game)
if form.validate_on_submit():
game.name = form.name.data
gs = GameState()
for player in form.players.entries:
player_name = player.data['name']
if player_name:
gs.add_player(player_name)
game.game_object = gs
gs.start_game()
db.session.add(game)
db.session.commit()
return redirect(url_for('games.play_game', game_id=game.id))
return render_template('games/setup_game.html', game=game, form=form)
|
jack-florey/pong
|
app/models/game_models.py
|
from .. import db
from sqlalchemy import types
import json
from json import JSONEncoder
import jsonpickle
import random
import uuid
from functools import reduce
import datetime
class JsonType(types.TypeDecorator):
impl = types.Unicode
def process_bind_param(self, value, dialect):
if value :
return unicode(jsonpickle.encode(value))
else:
return None
def process_result_value(self, value, dialect):
if value:
return jsonpickle.decode(value)
else:
return None
class Game(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
time_started = db.Column(db.DateTime,default=datetime.datetime.utcnow)
game_object = db.Column(JsonType())
previous_game_object = db.Column(JsonType())
def update_game_state(self, new_state):
fresh = jsonpickle.decode(unicode(jsonpickle.encode(new_state)))
self.previous_game_object = self.game_object
self.game_object = fresh
    def is_setup(self):
        if not self.game_object:
            return True
        return self.game_object.is_in_setup()
class Player(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
wins = db.Column(db.Integer,default=0)
kills = db.Column(db.Integer,default=0)
deaths = db.Column(db.Integer,default=0)
AUTORUN_ON = "on"
AUTORUN_OFF = "off"
MODE_SETUP = "setup"
MODE_PLAYING = "playing"
MODE_COMPLETE = "complete"
MODE_ARCHIVED = "archived"
class GameState(object):
def __init__(self):
self.players = {}
self.board = []
self.eliminated = []
self.current_index = None
self.mode = MODE_SETUP
self.date_finished = None
self.num_cups = 6
    def init_players(self, players):
        for p in players:
            self.add_player(p)
def add_player(self, player_name, id=None):
if self.mode == MODE_SETUP:
ps = PlayerState(player_name, self.num_cups, id)
self.board.append(ps.id)
self.players[ps.id] = ps
return ps
else:
raise Exception("Game is already in progress!!")
def remove_player(self, uuid):
if self.mode == MODE_SETUP:
self.players.pop(uuid)
else:
raise Exception("Game is already in progress!!")
def start_game(self, shuffle=True):
self.mode = MODE_PLAYING
if len(self.players) == 0:
raise Exception("No Players registered!")
if shuffle:
random.shuffle(self.board)
self.current_index = self.board[0]
def current_player(self):
        print(self.players)
return self.players[self.current_index]
def cup_hit(self, uuid):
p = self.players[uuid]
if not p.hit():
raise Exception("No cups to hit!")
if not p.alive():
self.kill(uuid)
    def player(self, name):
        for p in self.players.values():
            if p.name == name:
                return p
        raise Exception("No player %s" % name)
def kill(self, uuid):
if self.players[uuid].alive():
raise Exception("can't kill living player")
killer = self.get_previous_player(uuid)
killed = self.players[uuid]
self.board.remove(uuid)
killed.knocked_out_by = killer.id
self.eliminated.append(uuid)
killer.add_cup()
if len(self.board) == 1:
self.trigger_end_game()
    def trigger_end_game(self):
        self.date_finished = datetime.datetime.utcnow()
        self.mode = MODE_COMPLETE
def is_playing(self):
return self.mode == MODE_PLAYING
def is_complete(self):
return self.mode == MODE_COMPLETE or self.mode == MODE_ARCHIVED
def is_archived(self):
return self.mode == MODE_ARCHIVED
def is_in_setup(self):
return self.mode == MODE_SETUP
def index(self, uuid):
try:
return self.board.index(uuid)
except:
raise Exception("Player %s isn't active" % self.players[uuid])
    def get_previous_player(self, uuid):
        return self.players[self.board[(self.index(uuid) - 1) % len(self.board)]]
def get_kills(self, player_id):
return sum(1 for p in self.players.values() if p.knocked_out_by == player_id)
    def __str__(self):
        return "Mode: %s \r\nPlaying: %s \r\nKnocked Out: %s" % (
            self.mode,
            ", ".join([str(self.players[j]) for j in self.board]),
            ", ".join([str(self.players[j]) for j in self.eliminated]))
def cycle_rack(self, player_id):
p = self.players[player_id]
p.cycle_rack()
STATUS_ELIMINATED = "eliminated"
STATUS_ALIVE = "alive"
class PlayerState(object):
def __init__(self, name, num_cups, id=None):
if not id:
id = uuid.uuid4()
self.id = str(id)
self.name = name
self.max_cups = num_cups
self.cups = num_cups
self.status = STATUS_ALIVE
self.knocked_out_by = None
self.fifth_cup_missing = 1
def alive(self):
return self.cups > 0
def cycle_rack(self):
self.fifth_cup_missing = ((self.missing_fifth()) % 6) + 1
def hit(self):
if self.cups > 0:
self.cups = self.cups - 1
return True
return False
def add_cup(self):
if self.cups != self.max_cups:
self.cups = self.cups + 1
def __str__(self):
return "%s{%s}: Cups [%d / %d]" % (self.name, self.id[:8], self.cups, self.max_cups)
def __repr__(self):
return "Player %s{%s}" % (self.name, self.id)
def missing_fifth(self):
if hasattr(self, 'fifth_cup_missing'):
return self.fifth_cup_missing
return 1
def rack_image(self):
if self.cups != 5:
return self.cups
return "%d_%d" % (self.cups, self.missing_fifth())
|
jack-florey/pong
|
tests/test_game_state.py
|
import unittest
from app import create_app, db
from flask import current_app
from app.models import *
class GameStateTestCase(unittest.TestCase):
def setUp(self):
self.game = GameState()
self.clayton = self.game.add_player("Clayton")
self.ariel = self.game.add_player("Ariel")
self.jimbo = self.game.add_player("Jimbo")
self.arcturus = self.game.add_player("Arcturus")
self.game.start_game(shuffle=False)
def tearDown(self):
self.game = None
def testBasicGameFlows(self):
g = self.game
self.assertEquals(self.arcturus, g.get_previous_player(self.clayton.id))
self.assertEquals(self.clayton, g.get_previous_player(self.ariel.id))
g.cup_hit(self.arcturus.id)
self.assertEquals(5, self.arcturus.cups)
for i in range(0,6):
g.cup_hit(self.clayton.id)
self.assertEquals(3, len(g.board))
self.assertEquals(1, len(g.eliminated))
self.assertEquals(self.arcturus, g.get_previous_player(self.ariel.id))
self.assertEquals(self.ariel, g.get_previous_player(self.jimbo.id))
self.assertEquals(self.arcturus.id, self.clayton.knocked_out_by)
self.assertEquals(6, self.arcturus.cups)
|
jack-florey/pong
|
app/main/views.py
|
from flask import Blueprint, render_template
from flask_login import current_user
from app.models import EditableHTML, Game, Permission
main = Blueprint('main', __name__)
from flask_moment import Moment
@main.route('/')
def index():
games = Game.query.all()
can_create = current_user.can(Permission.GAME_ADMIN)
return render_template('main/index.html', games=games, can_create=can_create)
@main.route('/about')
def about():
editable_html_obj = EditableHTML.get_editable_html('about')
return render_template(
'main/about.html', editable_html_obj=editable_html_obj)
|
jack-florey/pong
|
tests/test_game_model.py
|
import time
import unittest
from app import create_app, db
from app.models import *
from datetime import datetime
class GameModelTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_phases(self):
g = Game(name="test")
db.session.add(g)
db.session.commit()
game = GameState()
g.game_object = game
db.session.add(g)
db.session.commit()
game.add_player("Clayton")
game.add_player("Ariel")
game.add_player("Jimbo")
game.add_player("Arcturus")
game.start_game(shuffle=False)
g.update_game_state(game)
db.session.add(g)
db.session.commit()
|
jack-florey/pong
|
tests/test_serialization.py
|
import unittest
from app import create_app, db
from flask import current_app
from app.models import *
import json
import jsonpickle
from json import JSONEncoder
class GameSerializationTestCase(unittest.TestCase):
def setUp(self):
self.game = GameState()
self.clayton = self.game.add_player("Clayton")
self.ariel = self.game.add_player("Ariel")
self.jimbo = self.game.add_player("Jimbo")
self.arcturus = self.game.add_player("Arcturus")
self.game.start_game(shuffle=False)
def tearDown(self):
self.game = None
def testPlayerSerialization(self):
g = self.game
for i in range(0, 6):
g.cup_hit(self.jimbo.id)
g.cup_hit(self.ariel.id)
pickled = jsonpickle.encode(self.game)
thawed = jsonpickle.decode(pickled)
self.assertEquals(self.ariel.id,thawed.get_previous_player(self.arcturus.id).id)
self.assertEquals(thawed.players[self.ariel.id].cups, 5)
|
GR-Tang/AI-0403-Team-3
|
docker/deploy.py
|
# importing necessary libraries
import uvicorn
from fastapi import FastAPI, File, UploadFile
from starlette.responses import RedirectResponse
from PIL import Image
from io import BytesIO
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.imagenet_utils import decode_predictions
from tensorflow.keras.models import load_model
import os
# setting parameters
app = FastAPI()
input_shape = (224, 224)
# loading saved model
save_folder = "../models"
model_name = "effnet1"
model = load_model(f"{save_folder}/{model_name}")
def read_image(file):
image = Image.open(BytesIO(file))
return image
# defining image preprocess
def preprocess(image: Image.Image):
image = image.resize(input_shape)
image = np.asfarray(image)
#image = image / 255
image = np.expand_dims(image, 0)
return image
# loading the image for inference
def predict(image: np.ndarray):
pred = model.predict(image)
if pred[0][0] > pred[0][1]:
infer = "With Mask"
else:
infer = "Without Mask"
return infer
# Restful API for the inference
@app.get("/", include_in_schema=False)
async def index():
return RedirectResponse(url="/docs")
@app.post("/predict/image")
async def predict_image(file: UploadFile = File(...)):
image = read_image(await file.read())
image = preprocess(image)
infer = predict(image)
print(infer)
return infer
# on starting the service
if __name__ == "__main__":
port = int(os.environ.get("PORT", 8000))
# change host to "0.0.0.0" before building image to deploy on heroku
# or "127.0.0.1" for local
uvicorn.run(app, port = port, host = "0.0.0.0")
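
# Usage sketch (added for illustration, not part of the original file): calling the
# /predict/image endpoint from a client once the service is running on port 8000.
# The image path is a placeholder.
#
#   import requests
#   with open("sample.jpg", "rb") as f:
#       resp = requests.post(
#           "http://localhost:8000/predict/image",
#           files={"file": ("sample.jpg", f, "image/jpeg")},
#       )
#   print(resp.json())   # "With Mask" or "Without Mask"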
|
GR-Tang/AI-0403-Team-3
|
snippets/server.py
|
import uvicorn
from fastapi import FastAPI, File, UploadFile
from starlette.responses import RedirectResponse
from PIL import Image
from io import BytesIO
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.imagenet_utils import decode_predictions
app = FastAPI()
input_shape = (224, 224)
# Model is placeholder
def load_model():
    # include_top=True with ImageNet weights expects a 224x224x3 input
    model = tf.keras.applications.EfficientNetB0(input_shape=input_shape + (3,), weights="imagenet")
    return model
model = load_model()
def read_image(file):
image = Image.open(BytesIO(file))
return image
# Image preprocessing method is placeholder too
def preprocess(image: Image.Image):
image = image.resize(input_shape)
image = np.asfarray(image)
#image = image / 255
image = np.expand_dims(image, 0)
return image
# Prediction also placeholder
def predict(image: np.ndarray):
predictions = model.predict(image)
predictions = decode_predictions(predictions)[0][0][1]
return predictions
# Actual API for the inference
@app.get("/", include_in_schema=False)
async def index():
return RedirectResponse(url="/docs")
@app.post("/predict/image")
async def predict_image(file: UploadFile = File(...)):
image = read_image(await file.read())
image = preprocess(image)
predictions = predict(image)
print(predictions)
return predictions
if __name__ == "__main__":
uvicorn.run(app, port = 8000, host = "127.0.0.1")
|
GR-Tang/AI-0403-Team-3
|
snippets/deploylocal.py
|
import uvicorn
from fastapi import FastAPI, File, UploadFile
from starlette.responses import RedirectResponse
from PIL import Image
from io import BytesIO
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.imagenet_utils import decode_predictions
from tensorflow.keras.models import load_model
app = FastAPI()
input_shape = (224, 224)
# Model is placeholder
save_folder = "../data/saved"
model_name = "effnet1"
model = load_model(f"{save_folder}/{model_name}")
def read_image(file):
image = Image.open(BytesIO(file))
return image
# Image preprocessing method is placeholder too
def preprocess(image: Image.Image):
image = image.resize(input_shape)
image = np.asfarray(image)
#image = image / 255
image = np.expand_dims(image, 0)
return image
# Prediction also placeholder
def predict(image: np.ndarray):
pred = model.predict(image)
if pred[0][0] > pred[0][1]:
infer = "With Mask"
else:
infer = "Without Mask"
return infer
# Actual API for the inference
@app.get("/", include_in_schema=False)
async def index():
return RedirectResponse(url="/docs")
@app.post("/predict/image")
async def predict_image(file: UploadFile = File(...)):
image = read_image(await file.read())
image = preprocess(image)
infer = predict(image)
print(infer)
return infer
if __name__ == "__main__":
uvicorn.run(app, port = 8000, host = "127.0.0.1")
|
raghavpatnecha/SmartMirror
|
pySHSpeex/setup.py
|
##############################################################################
# Copyright 2015 SoundHound, Incorporated. All rights reserved.
##############################################################################
from distutils.core import setup, Extension
SPEEX_SRC_DIR = "soundhound-speex"
SOURCES="cb_search.c exc_10_32_table.c exc_8_128_table.c filters.c gain_table.c hexc_table.c high_lsp_tables.c lsp.c ltp.c speex.c stereo.c vbr.c vq.c bits.c exc_10_16_table.c exc_20_32_table.c exc_5_256_table.c exc_5_64_table.c gain_table_lbr.c hexc_10_32_table.c lpc.c lsp_tables_nb.c modes.c modes_wb.c nb_celp.c quant_lsp.c sb_celp.c speex_callbacks.c speex_header.c window.c soundhound.c"
SOURCES = [ SPEEX_SRC_DIR + "/src/%s" % x for x in SOURCES.split() ]
module1 = Extension('pySHSpeex',
sources = [ 'pySHSpeexmodule.c' ] + SOURCES,
include_dirs = [ SPEEX_SRC_DIR + '/include' ],
define_macros = [ ('FIXED_POINT', '1') ] )
setup(name = 'SHSpeex',
version = '1.1',
description = 'SoundHound speex encoder',
ext_modules = [module1])
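
# Build sketch (added for illustration, not part of the original file): the extension
# is typically compiled in place with distutils, e.g.
#
#   python setup.py build_ext --inplace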
|
raghavpatnecha/SmartMirror
|
houndify.py
|
##############################################################################
# Copyright 2017 SoundHound, Incorporated. All rights reserved.
##############################################################################
import base64
import hashlib
import hmac
import httplib
import json
import threading
import time
import uuid
import urllib
import struct
try:
import pySHSpeex
except ImportError:
pass
HOUND_SERVER = "api.houndify.com"
TEXT_ENDPOINT = "/v1/text"
VOICE_ENDPOINT = "/v1/audio"
VERSION = '1.0.0'
class _BaseHoundClient(object):
def __init__(self, clientID, clientKey, userID, hostname, proxyHost, proxyPort, proxyHeaders):
self.clientID = clientID
self.clientKey = base64.urlsafe_b64decode(clientKey)
self.userID = userID
self.hostname = hostname
self.proxyHost = proxyHost
self.proxyPort = proxyPort
self.proxyHeaders = proxyHeaders
self.HoundRequestInfo = {
'ClientID': clientID,
'UserID': userID,
'SDK': 'python2.7',
'SDKVersion': VERSION
}
def setHoundRequestInfo(self, key, value):
"""
There are various fields in the HoundRequestInfo object that can
be set to help the server provide the best experience for the client.
Refer to the Houndify documentation to see what fields are available
and set them through this method before starting a request
"""
self.HoundRequestInfo[key] = value
def removeHoundRequestInfo(self, key):
"""
Remove request info field through this method before starting a request
"""
self.HoundRequestInfo.pop(key, None)
def setLocation(self, latitude, longitude):
"""
Many domains make use of the client location information to provide
relevant results. This method can be called to provide this information
to the server before starting the request.
latitude and longitude are floats (not string)
"""
self.HoundRequestInfo['Latitude'] = latitude
self.HoundRequestInfo['Longitude'] = longitude
self.HoundRequestInfo['PositionTime'] = int(time.time())
def setConversationState(self, conversation_state):
self.HoundRequestInfo["ConversationState"] = conversation_state
if "ConversationStateTime" in conversation_state:
self.HoundRequestInfo["ConversationStateTime"] = conversation_state["ConversationStateTime"]
def _generateHeaders(self, requestInfo):
requestID = str(uuid.uuid4())
if 'RequestID' in requestInfo:
requestID = requestInfo['RequestID']
timestamp = str(int(time.time()))
if 'TimeStamp' in requestInfo:
timestamp = str(requestInfo['TimeStamp'])
HoundRequestAuth = self.userID + ";" + requestID
h = hmac.new(self.clientKey, (HoundRequestAuth + timestamp).encode('utf-8'), hashlib.sha256)
signature = base64.urlsafe_b64encode(h.digest()).decode('utf-8')
HoundClientAuth = self.clientID + ";" + timestamp + ";" + signature
headers = {
'Hound-Request-Info': json.dumps(requestInfo),
'Hound-Request-Authentication': HoundRequestAuth,
'Hound-Client-Authentication': HoundClientAuth
}
if 'InputLanguageEnglishName' in requestInfo:
headers["Hound-Input-Language-English-Name"] = requestInfo["InputLanguageEnglishName"]
if 'InputLanguageIETFTag' in requestInfo:
headers["Hound-Input-Language-IETF-Tag"] = requestInfo["InputLanguageIETFTag"]
return headers
class TextHoundClient(_BaseHoundClient):
"""
TextHoundClient is used for making text queries for Hound
"""
def __init__(self, clientID, clientKey, userID, requestInfo = dict(), hostname = HOUND_SERVER, proxyHost = None, proxyPort = None, proxyHeaders = None):
_BaseHoundClient.__init__(self, clientID, clientKey, userID, hostname, proxyHost, proxyPort, proxyHeaders)
self.HoundRequestInfo.update(requestInfo)
def query(self, query):
"""
Make a text query to Hound.
query is the string of the query
"""
headers = self._generateHeaders(self.HoundRequestInfo)
if self.proxyHost:
conn = httplib.HTTPSConnection(self.proxyHost, self.proxyPort)
conn.set_tunnel(self.hostname, headers = self.proxyHeaders)
else:
conn = httplib.HTTPSConnection(self.hostname)
conn.request('GET', TEXT_ENDPOINT + '?query=' + urllib.quote(query), headers = headers)
resp = conn.getresponse()
raw_response = resp.read()
try:
parsedMsg = json.loads(raw_response)
return parsedMsg
except:
return { "Error": raw_response }
class HoundListener(object):
"""
HoundListener is an abstract base class that defines the callbacks
that can be received while streaming speech to the server
"""
def onPartialTranscript(self, transcript):
"""
onPartialTranscript is fired when the server has sent a partial transcript
in live transcription mode. 'transcript' is a string with the partial transcript
"""
pass
def onFinalResponse(self, response):
"""
onFinalResponse is fired when the server has completed processing the query
and has a response. 'response' is the JSON object (as a Python dict) which
the server sends back.
"""
pass
def onError(self, err):
"""
onError is fired if there is an error interacting with the server. It contains
the parsed JSON from the server.
"""
pass
class StreamingHoundClient(_BaseHoundClient):
"""
StreamingHoundClient is used to send streaming audio to the Hound
server and receive live transcriptions back
"""
def __init__(self, clientID, clientKey, userID, requestInfo = dict(), hostname = HOUND_SERVER, sampleRate = 16000, useSpeex = False, proxyHost = None, proxyPort = None, proxyHeaders = None):
"""
clientID and clientKey are "Client ID" and "Client Key"
from the Houndify.com web site.
"""
_BaseHoundClient.__init__(self, clientID, clientKey, userID, hostname, proxyHost, proxyPort, proxyHeaders)
self.sampleRate = sampleRate
self.useSpeex = useSpeex
self.HoundRequestInfo['PartialTranscriptsDesired'] = True
self.HoundRequestInfo.update(requestInfo)
def setSampleRate(self, sampleRate):
"""
Override the default sample rate of 16 khz for audio.
NOTE that only 8 khz and 16 khz are supported
"""
if sampleRate == 8000 or sampleRate == 16000:
self.sampleRate = sampleRate
else:
raise Exception("Unsupported sample rate")
def start(self, listener=HoundListener()):
"""
This method is used to make the actual connection to the server and prepare
for audio streaming.
listener is a HoundListener (or derived class) object
"""
self.audioFinished = False
self.lastResult = None
self.buffer = ''
if self.proxyHost:
self.conn = httplib.HTTPSConnection(self.proxyHost, self.proxyPort)
self.conn.set_tunnel(self.hostname, headers = self.proxyHeaders)
else:
self.conn = httplib.HTTPSConnection(self.hostname)
self.conn.putrequest('POST', VOICE_ENDPOINT)
headers = self._generateHeaders(self.HoundRequestInfo)
headers['Transfer-Encoding'] = 'chunked';
for header in headers:
self.conn.putheader(header, headers[header])
self.conn.endheaders()
self.callbackTID = threading.Thread(target = self._callback, args = (listener,))
self.callbackTID.start()
audio_header = self._wavHeader(self.sampleRate)
if self.useSpeex:
audio_header = pySHSpeex.Init(self.sampleRate == 8000)
self._send(audio_header)
def fill(self, data):
"""
After successfully connecting to the server with start(), pump PCM samples
through this method.
data is 16-bit, 8 KHz/16 KHz little-endian PCM samples.
Returns True if the server detected the end of audio and is processing the data
or False if the server is still accepting audio
"""
# buffer gets flushed on next call to start()
if self.audioFinished:
return True
self.buffer += data
# 20ms 16-bit audio frame = (2 * 0.02 * sampleRate) bytes
frame_size = int(2 * 0.02 * self.sampleRate)
while len(self.buffer) > frame_size:
frame = self.buffer[:frame_size]
if self.useSpeex:
frame = pySHSpeex.EncodeFrame(frame)
self._send(frame)
self.buffer = self.buffer[frame_size:]
return False
def finish(self):
"""
Once fill returns True, call finish() to finalize the transaction. finish will
wait for all the data to be received from the server.
After finish() is called, you can start another request with start() but each
start() call should have a corresponding finish() to wait for the threads
"""
self._send('')
self.callbackTID.join()
return self.lastResult
def _callback(self, listener):
read_headers = True
headers = ''
body = ''
for line in self._readline(self.conn.sock):
if read_headers:
headers += line
if headers.endswith('\r\n\r\n'):
read_headers = False
continue
body += line
parsedMsg = None
try:
parsedMsg = json.loads(line)
except:
continue
if type(parsedMsg) is not dict:
continue
if "Status" in parsedMsg and parsedMsg["Status"] == "Error":
self.lastResult = parsedMsg
listener.onError(parsedMsg)
self.audioFinished = True
return
if "Format" in parsedMsg:
if parsedMsg["Format"] == "SoundHoundVoiceSearchParialTranscript" or parsedMsg["Format"] == "HoundVoiceQueryPartialTranscript":
## also check SafeToStopAudio
listener.onPartialTranscript(parsedMsg["PartialTranscript"])
if "SafeToStopAudio" in parsedMsg and parsedMsg["SafeToStopAudio"]:
## Because of the GIL, simple flag assignment like this is atomic
self.audioFinished = True
if parsedMsg["Format"] == "SoundHoundVoiceSearchResult" or parsedMsg["Format"] == "HoundQueryResult":
self.lastResult = parsedMsg
listener.onFinalResponse(parsedMsg)
return
self.lastResult = { "Error": body }
listener.onError({ "Error": body })
self.audioFinished = True
def _wavHeader(self, sampleRate=16000):
genHeader = "RIFF"
genHeader += struct.pack('<L', 36) #ChunkSize - dummy
genHeader += "WAVE"
genHeader += "fmt "
genHeader += struct.pack('<L', 16) #Subchunk1Size
genHeader += struct.pack('<H', 1) #AudioFormat - PCM
genHeader += struct.pack('<H', 1) #NumChannels
genHeader += struct.pack('<L', sampleRate) #SampleRate
        genHeader += struct.pack('<L', 2 * sampleRate) #ByteRate = SampleRate * BlockAlign
genHeader += struct.pack('<H', 2) #BlockAlign
genHeader += struct.pack('<H', 16) #BitsPerSample
genHeader += "data"
genHeader += struct.pack('<L', 0) #Subchunk2Size - dummy
return genHeader
def _send(self, msg):
if self.conn:
chunkSize = "%x\r\n" % len(msg)
try:
self.conn.send(chunkSize)
self.conn.send(msg + '\r\n')
except:
self.conn.close()
self.conn = None
def _readline(self, socket):
_buffer = ''
while True:
more = socket.recv(4096)
if not more: break
_buffer += more
while True:
split_buffer = _buffer.split("\r\n", 1)
if len(split_buffer) == 1: break
_buffer = split_buffer[1]
yield split_buffer[0] + "\r\n"
if _buffer: yield _buffer
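
# Usage sketch (added for illustration, not part of the original SDK file): a minimal
# text query with TextHoundClient. The client ID, client key and user ID are placeholders.
#
#   client = TextHoundClient("YOUR_CLIENT_ID", "YOUR_CLIENT_KEY", "test_user")
#   client.setLocation(37.388309, -121.973968)
#   response = client.query("What is the weather like today?")
#   print(response)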
|
antipodos/slack-metabot
|
app.py
|
from flask import Flask, request, abort, jsonify, render_template
from flask_bootstrap import Bootstrap
from slackbot import create_events_adapter, \
inform_about_new_channel, \
inform_about_random_channel, \
inform_responseurl_about_random_channel, \
inform_responseurl_about_slackstats
from rq import Queue
from worker import conn
app = Flask(__name__)
Bootstrap(app)
redis_queue = Queue(connection=conn)
slack_events_adapter = create_events_adapter(app=app)
redis_reported_channels_key = "reported_channels"
@app.route("/", methods=["GET"])
def web_home():
return render_template("main.html")
@slack_events_adapter.on("channel_created")
def slack_events_channel_created(data):
if not request.json:
abort(400)
redis_queue.enqueue(worker_inform_about_new_channel, data["event"]["channel"]["id"])
return jsonify(ok=True)
def worker_inform_about_new_channel(channel_id):
if not channel_got_reported(channel_id):
add_channel_to_reported_channels(channel_id)
inform_about_new_channel(channel_id)
def add_channel_to_reported_channels(channel_id):
conn.sadd(redis_reported_channels_key, channel_id)
def channel_got_reported(channel_id):
return conn.sismember(redis_reported_channels_key, channel_id)
@slack_events_adapter.on("app_mention")
def slack_events_app_mention(data):
message = data["event"]
channel = message["channel"]
redis_queue.enqueue(inform_about_random_channel,
channel,
"I've been summoned? There, I picked a random channel for you:")
return jsonify(ok=True)
@app.route("/commands/randomchannel", methods=["POST"])
def slack_command_endpoint_random_channel():
try:
ts = request.headers.get('X-Slack-Request-Timestamp')
sig = request.headers.get('X-Slack-Signature')
request.data = request.get_data()
result = slack_events_adapter.server.verify_signature(ts, sig)
except:
result = False
if not result:
abort(401)
redis_queue.enqueue(inform_responseurl_about_random_channel,
request.form['response_url'],
"There, I picked a random channel for you:")
return '', 200
@app.route("/commands/slackstats", methods=["POST"])
def slack_command_endpoint_slackstats():
try:
ts = request.headers.get('X-Slack-Request-Timestamp')
sig = request.headers.get('X-Slack-Signature')
request.data = request.get_data()
result = slack_events_adapter.server.verify_signature(ts, sig)
except:
result = False
if not result:
abort(401)
redis_queue.enqueue(inform_responseurl_about_slackstats,
request.form['response_url'],
"Metabot informs")
return '', 200
if __name__ == '__main__':
app.run()
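
# Run sketch (added for illustration, not part of the original file): the app relies on
# both the Flask web process and an RQ worker consuming the same Redis queue. The
# commands below are assumptions based on the common rq/worker.py layout:
#
#   python app.py       # web process
#   python worker.py    # background worker, in a second shell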
|
antipodos/slack-metabot
|
slackbot.py
|
import os
from slack import WebClient
import requests
from slackeventsapi import SlackEventAdapter
import random
token = os.environ["SLACK_API_TOKEN"]
signing_secret = os.environ["SLACK_SIGNING_SECRET"]
metachannel_id = os.environ["SLACK_METACHANNEL_ID"]
footer_message = "<https://metabot.object.farm|a Meta Bot service>"
slack_client = WebClient(token=token)
call_limit = 100
def create_events_adapter(app):
"""
Creates an event adapter to be used to decorate route methods in flask
:param app: flask app
:return: new slack event adapter
"""
return SlackEventAdapter(signing_secret, "/events", app)
def who_am_i():
"""
Identity of slack bot
:return: bot user object (json)
"""
return slack_client.auth_test()
def all_my_channels():
"""
Compiles list of all channels the bot is member of
:return: list of channel objects (json)
"""
bot_id = who_am_i()["user_id"]
bot_channels = list()
for channel in all_channels():
members = channel_members(channel["id"])
if bot_id in members:
bot_channels.append(channel)
return bot_channels
def inform_about_new_channel(channel_id):
"""
Informs all channels the bot is member of about a newly created channel
:param channel_id: id of newly created channel
"""
channel = slack_client.conversations_info(channel=channel_id)["channel"]
#post_message_to_my_channels(format_channel_info(channel, "A new channel got created!"))
#performance fix
post_message_to_channel(metachannel_id, message=format_channel_info(channel, "A new channel got created!"))
def inform_about_random_channel(channel_to_inform, message):
"""
Post to channel about a random channel
:param channel_to_inform: channel to post to
:param message: pre text message
"""
post_message_to_channel(channel_to_inform,
format_channel_info(
pick_random_channel(),
message)
)
def inform_responseurl_about_random_channel(response_url, message):
channel = pick_random_channel()
requests.post(response_url, json=format_channel_info(channel, message))
def inform_responseurl_about_slackstats(response_url, message):
number_of_channels = len(all_channels())
requests.post(response_url, json=format_slack_message(message,
"Slack Stats",
"Number of Channels: {}".format(number_of_channels),
footer_message))
def post_message_to_channel(channel_id, message):
"""
Posts attachments to channel
:param channel_id: channel to post to
    :param message: attachment message
"""
slack_client.chat_postMessage(channel=channel_id, attachments=message["attachments"])
def post_message_to_my_channels(message):
"""
Post a message in all channels the bot is member of
:param message: message to post
"""
for channel in all_my_channels():
post_message_to_channel(channel_id=channel["id"], message=message)
def all_channels():
"""
Compiles list of all available public channels
:return: list of channels
"""
return paginated_api_call(slack_client.conversations_list,
"channels",
exclude_archived=1,
types="public_channel"
)
def channel_members(channel_id):
return paginated_api_call(slack_client.conversations_members,
"members",
channel=channel_id,
)
def pick_random_channel():
"""
Picks a random channel out of all available public channels
:return: one randomly picked channel
"""
return random.choice(all_channels())
def paginated_api_call(api_method, response_objects_name, **kwargs):
"""
Calls api method and cycles through all pages to get all objects
    :param api_method: api method to call
:param response_objects_name: name of collection in response json
:param kwargs: url params to pass to call, additionally to limit and cursor which will be added automatically
"""
ret = list()
cursor = None
while cursor != "":
if cursor is not None:
r = api_method(limit=call_limit, cursor=cursor, **kwargs)
else:
r = api_method(limit=call_limit, **kwargs)
response_objects = r.get(response_objects_name)
if response_objects is not None:
ret.extend(response_objects)
metadata = r.get("response_metadata")
if metadata is not None:
cursor = metadata["next_cursor"]
else:
cursor = ""
return ret
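
# Usage sketch (added for illustration, not part of the original module): paginated_api_call
# works with any cursor-paginated Slack Web API method, e.g. listing every workspace user:
#
#   users = paginated_api_call(slack_client.users_list, "members")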
def format_slack_message(pretext, title, text, footer):
msg = {
"attachments": [
{
"pretext": pretext,
"title": title,
"fallback": title,
"color": "#2eb886",
"text": text,
"mrkdwn_in": [
"text",
"pretext"
],
"footer": footer
}
]
}
return msg
def format_channel_info(channel, pretext):
purpose = "\n_{}_".format(channel["purpose"]["value"]) if channel["purpose"]["value"] != "" else ""
return format_slack_message(pretext,
"<#{}|{}>".format(channel["id"], channel["name"]),
"Created by <@{}>{}".format(channel["creator"], purpose),
footer_message)
|
nimaema/Mask_RCNN
|
samples/fiber/fiber.py
|
if __name__ == '__main__':
import matplotlib
# Agg backend runs without a display
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import sys
import json
import datetime
import numpy as np
from numpy.lib.type_check import imag
from skimage import io
from imgaug import augmenters as iaa
from skimage.color import rgba2rgb
from skimage.color import rgb2gray
import tensorflow as tf
import random
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
from mrcnn import model as modellib
from mrcnn import visualize
# COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# # import fiber
# # Path to trained weights file
# COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
# Results directory
# Save submission files here
RESULTS_DIR = os.path.join(ROOT_DIR, "results/fiber")
# The dataset doesn't have a standard train/val split, so I picked
# a variety of images to serve as a validation set.
VAL_IMAGE_IDS = [str(i) for i in range(1,8)]
# print(VAL_IMAGE_IDS)
############################################################
# Configurations
############################################################
class FiberConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "fiber"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # Background + fiber
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.90
STEPS_PER_EPOCH = (41 - len(VAL_IMAGE_IDS)) // IMAGES_PER_GPU
VALIDATION_STEPS = max(1, len(VAL_IMAGE_IDS) // IMAGES_PER_GPU)
class FiberInferenceConfig(FiberConfig):
# Set batch size to 1 to run one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
    # Resize images to squares for inference
IMAGE_RESIZE_MODE = "square"
    # Non-max suppression threshold to filter RPN proposals.
    # You can increase this during training to generate more proposals.
RPN_NMS_THRESHOLD = 0.7
USE_MINI_MASK = False
############################################################
# Dataset
############################################################
class FiberDataset(utils.Dataset):
def load_fiber(self, dataset_dir, subset):
"""Load a subset of the nuclei dataset.
dataset_dir: Root directory of the dataset
subset: Subset to load. Either the name of the sub-directory,
        such as stage1_train, stage1_test, ...etc., or one of:
* train: stage1_train excluding validation images
* val: validation images from VAL_IMAGE_IDS
"""
# Add classes. We have one class.
        # Naming the dataset fiber, and the class fiber
self.add_class("fiber", 1, "fiber")
# Which subset?
# "val": use hard-coded list above
# "train": use data from stage1_train minus the hard-coded list above
# else: use the data from the specified sub-directory
subset_dir = "5000"
dataset_dir = os.path.join(dataset_dir, subset_dir)
if subset == "val":
image_ids = VAL_IMAGE_IDS
else:
# Get image ids from directory names
image_ids = next(os.walk(dataset_dir))[1]
if subset == "train":
image_ids = list(set(image_ids) - set(VAL_IMAGE_IDS))
for image_id in image_ids:
self.add_image(
"fiber",
image_id=image_id,
path=os.path.join(dataset_dir, image_id, "image/gt_{}.png".format(image_id)))
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
dataset_dir = os.getcwd()
info = self.image_info[image_id]
# Get mask directory from image path
mask_dir = os.path.join(os.path.dirname(
os.path.dirname(info['path'])), "mask")
# Read mask files from .png image
mask = []
for f in next(os.walk(mask_dir))[2]:
if f.endswith(".png"):
m = io.imread(os.path.join(
mask_dir, f)).astype(np.bool)
m = rgb2gray(rgba2rgb(m))#.astype(np.bool)
mask.append(m)
mask = np.stack(mask, axis=-1)
# Return mask, and array of class IDs of each instance. Since we have
# one class ID, we return an array of ones
return mask, np.ones([mask.shape[-1]], dtype=np.int32)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "fiber":
return info["id"]
else:
super(self.__class__, self).image_reference(image_id)
DATASET_DIR = os.getcwd()
#
# # Dataset directory
# # DATASET_DIR = os.path.join(ROOT_DIR, "datasets/nucleus")
#
# Inference Configuration
config = FiberInferenceConfig()
config.display()
dataset_train = FiberDataset()
dataset_train.load_fiber(DATASET_DIR, subset="train")
dataset_train.prepare()
# Validation dataset
dataset_val = FiberDataset()
dataset_val.load_fiber(DATASET_DIR, subset="val")
dataset_val.prepare()
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=ROOT_DIR)
# model.load_weights(COCO_WEIGHTS_PATH, by_name=True, exclude=[
# "mrcnn_class_logits", "mrcnn_bbox_fc",
# "mrcnn_bbox", "mrcnn_mask"])
# tf.test.gpu_device_name()
print("Train network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE/10,
epochs=25,
layers='all')
def rle_encode(mask):
"""Encodes a mask in Run Length Encoding (RLE).
Returns a string of space-separated values.
"""
assert mask.ndim == 2, "Mask must be of shape [Height, Width]"
# Flatten it column wise
m = mask.T.flatten()
# Compute gradient. Equals 1 or -1 at transition points
g = np.diff(np.concatenate([[0], m, [0]]), n=1)
    # 1-based indices of transition points (where gradient != 0)
rle = np.where(g != 0)[0].reshape([-1, 2]) + 1
    # Convert second index in each pair to length
rle[:, 1] = rle[:, 1] - rle[:, 0]
return " ".join(map(str, rle.flatten()))
def rle_decode(rle, shape):
"""Decodes an RLE encoded list of space separated
numbers and returns a binary mask."""
rle = list(map(int, rle.split()))
rle = np.array(rle, dtype=np.int32).reshape([-1, 2])
rle[:, 1] += rle[:, 0]
rle -= 1
mask = np.zeros([shape[0] * shape[1]], np.bool)
for s, e in rle:
assert 0 <= s < mask.shape[0]
assert 1 <= e <= mask.shape[0], "shape: {} s {} e {}".format(shape, s, e)
mask[s:e] = 1
# Reshape and transpose
mask = mask.reshape([shape[1], shape[0]]).T
return mask
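
# Worked example (added for illustration, not part of the original script): a short run
# of foreground pixels round-trips through rle_encode/rle_decode above.
#
#   m = np.zeros((1, 10), dtype=bool)
#   m[0, 3:7] = True                      # pixels 4..7 (1-based) are set
#   rle = rle_encode(m)                   # -> "4 4"  (start, run length)
#   restored = rle_decode(rle, m.shape)
#   assert (restored == m).all()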
def mask_to_rle(image_id, mask, scores):
"Encodes instance masks to submission format."
assert mask.ndim == 3, "Mask must be [H, W, count]"
# If mask is empty, return line with image ID only
if mask.shape[-1] == 0:
return "{},".format(image_id)
# Remove mask overlaps
# Multiply each instance mask by its score order
# then take the maximum across the last dimension
order = np.argsort(scores)[::-1] + 1 # 1-based descending
mask = np.max(mask * np.reshape(order, [1, 1, -1]), -1)
# Loop over instance masks
lines = []
for o in order:
m = np.where(mask == o, 1, 0)
# Skip if empty
if m.sum() == 0.0:
continue
rle = rle_encode(m)
lines.append("{}, {}".format(image_id, rle))
return "\n".join(lines)
############################################################
# Detection
############################################################
def detect(model, dataset_dir, subset):
"""Run detection on images in the given directory."""
print("Running on {}".format(dataset_dir))
# Create directory
if not os.path.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
submit_dir = "submit_{:%Y%m%dT%H%M%S}".format(datetime.datetime.now())
submit_dir = os.path.join(RESULTS_DIR, submit_dir)
os.makedirs(submit_dir)
# Read dataset
dataset = FiberDataset()
dataset.load_fiber(dataset_dir, subset)
dataset.prepare()
    # Loop over images
submission = []
for image_id in dataset.image_ids:
# Load image and run detection
image = dataset.load_image(image_id)
# Detect objects
r = model.detect([image], verbose=0)[0]
# Encode image to RLE. Returns a string of multiple lines
source_id = dataset.image_info[image_id]["id"]
rle = mask_to_rle(source_id, r["masks"], r["scores"])
submission.append(rle)
# Save image with masks
visualize.display_instances(
image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names,
show_bbox=True, show_mask=True,
title="Predictions")
plt.savefig("{}/{}.png".format(submit_dir, dataset.image_info[image_id]["id"]))
# Save to csv file
submission = "ImageId,EncodedPixels\n" + "\n".join(submission)
file_path = os.path.join(submit_dir, "submit.csv")
with open(file_path, "w") as f:
f.write(submission)
print("Saved to ", submit_dir)
config = FiberInferenceConfig()
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=ROOT_DIR)
weights_path = model.find_last()
model.load_weights(weights_path, by_name=True)
subset = "val"
detect(model, DATASET_DIR, subset)
|
pannoi/cdn-dns-controller
|
e2e/steps/list_hosted_zones_step_implementation.py
|
from behave import given, when, then, step
import requests
api_endpoints = {}
request_headers = {}
response_codes = {}
response_texts = {}
request_bodies = {}
api_url = None
@given(u'I set api url to list zones "{endpoint}"')
def step_impl(context, endpoint):
global api_url
api_url = endpoint
@given(u'I set GET posts api endpoint to list zones "{after_slash_url}"')
def step_impl(context, after_slash_url):
api_endpoints['GET_URL'] = api_url + after_slash_url
print('url: ' + api_endpoints['GET_URL'])
@when(u'I set HEADER param request content to list zones type as "{header_content_type}"')
def step_impl(context, header_content_type):
response = requests.get(url=api_endpoints['GET_URL'], headers=request_headers)
response_texts['GET'] = response.text
response_codes['GET'] = response.status_code
@then(u'I receive valid HTTP response code "{response_code}" for "{request_name}" to list zones')
def step_impl(context, response_code, request_name):
print('Get rep code for '+request_name+':'+ str(response_codes[request_name]))
assert response_codes[request_name] == int(response_code)
@then(u'Response BODY "{request_name}" is non-empty to list zones')
def step_impl(context, request_name):
print('request_name: '+request_name)
print(response_texts)
assert response_texts[request_name] is not None
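
# Run sketch (added for illustration, not part of the original file): these step
# implementations are picked up by behave together with the matching .feature files,
# e.g. (path assumed from the repo layout):
#
#   behave e2e/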
|
pannoi/cdn-dns-controller
|
src/acm.py
|
import boto3
import time
from src.environment import Environment
from src.route53 import Route53
class ACM():
""" Class to interact with AWS ACM to manage certificates. """
def __init__(self):
""" Class constructor. """
self.client = boto3.client(
'acm',
aws_access_key_id = Environment.aws_access_key,
aws_secret_access_key = Environment.aws_secret_key,
region_name = Environment.aws_region
)
def list_certificates(self):
""" Lists all certificates in ACM. """
return self.client.list_certificates(
MaxItems = 123
)
def get_certificate(self, certificate_arn):
"""
Lists certificate information by certificate ARN.
:param certificate_arn: unique certificate_arn provided by amazon
"""
return self.client.get_certificate(
CertificateArn = certificate_arn
)
def request_certificate(self, domain_name):
"""
Requests certificate from ACM.
:param domain_name: domain name for certificate signing
"""
response = self.client.request_certificate(
DomainName=domain_name,
ValidationMethod='DNS',
)
return response.get('CertificateArn')
def delete_certificate(self, certificate_arn):
"""
Deletes certificate from ACM by certificate ARN.
:param certificate_arn: unique certificate_arn provided by amazon
"""
return self.client.delete_certificate(
CertificateArn=certificate_arn
)
def get_domain_validation_records(self, certificate_arn):
"""
        When a certificate is created it needs to be verified by a DNS record.
        This method returns the validation record which needs to be set in Route53.
:param certificate_arn: unique certificate_arn provided by amazon
"""
certificate_metadata = self.client.describe_certificate(
CertificateArn=certificate_arn
)
return certificate_metadata.get('Certificate', {}).get('DomainValidationOptions', [])
def get_resource_record_data(self, r):
"""
Parsing function for record_set dict.
:param r: record_set dictionary from method -> get_domain_validation_records()
"""
return (r.get('Type'), r.get('Name'), r.get('Value'))
def wait_for_certificate_validation(self, certificate_arn, sleep_time=5, timeout=300):
"""
Method to wait until certificate will be valid with timeout.
:param certificate_arn : unique certificate_arn provided by amazon
:param sleep_time : default 5 sec, checks certificate status every 5 seconds
:param timeout : maximum time to try certificate verification
"""
status = self.client.describe_certificate(CertificateArn=certificate_arn)['Certificate']['Status']
elapsed_time = 0
while status == 'PENDING_VALIDATION':
if elapsed_time > timeout:
raise Exception('Timeout ({}s) reached for certificate validation'.format(timeout))
time.sleep(sleep_time)
status = self.client.describe_certificate(CertificateArn=certificate_arn)['Certificate']['Status']
            elapsed_time += sleep_time
def create_dns_record(self, record, zone_id, comment="Created by cdn-dns-controller"):
"""
Function creates dns record to validate acm certificate.
:param record : record set which needs to be added to Route53
:param zone_id : hosted zone id where record should be created
"""
route53 = Route53()
record_type, record_name, record_value = self.get_resource_record_data(
record[0]['ResourceRecord']
)
return route53.change_resource_record_set(
zone_id=zone_id,
comment=comment,
action='UPSERT',
name=record_name,
type=record_type,
ttl=300,
target=record_value
)
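
# Usage sketch (added for illustration, not part of the original module): the request ->
# validate -> wait flow, mirroring how cloudfront.py drives this class. The domain and
# zone id are placeholders.
#
#   acm = ACM()
#   arn = acm.request_certificate("app.example.com")
#   records = acm.get_domain_validation_records(arn)
#   acm.create_dns_record(record=records, zone_id="Z0000000000000")
#   acm.wait_for_certificate_validation(certificate_arn=arn)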
|
pannoi/cdn-dns-controller
|
src/cloudfront.py
|
import boto3
from jinja2 import Environment as jEnv
from jinja2 import Template, FileSystemLoader
import time
from src.environment import Environment
from src.acm import ACM
from src.helpers import Helper
from src.route53 import Route53
class CloudFront():
""" Class describes CloudFront interface. """
def __init__(self):
""" Class constructor. """
self.client = boto3.client(
'cloudfront',
aws_access_key_id = Environment.aws_access_key,
aws_secret_access_key = Environment.aws_secret_key,
region_name = Environment.aws_region
)
    def list_distirbutions(self):
        """ Lists information about all CDN distributions. """
        return self.client.list_distributions()
def get_distribution(self, distribution_id):
"""
        Lists information about a specific distribution by id.
:param distribution_id: Id of CDN distribution
"""
return self.client.get_distribution(
Id=distribution_id
)
    def delete_distribution(self, distribution_id):
        """
        Deletes CDN distribution and its ACM certificate.
        :param distribution_id: Id of CDN distribution
        """
        acm = ACM()
        distribution = self.client.get_distribution(Id=distribution_id)
        certificate_arn = distribution['Distribution']['DistributionConfig']['ViewerCertificate']['ACMCertificateArn']
        result = self.client.delete_distribution(
            Id=distribution_id,
            IfMatch=distribution['ETag']
        )
        self.wait_for_distribution_deletion(distribution_id=distribution_id)
        acm.delete_certificate(certificate_arn=certificate_arn)
        return result
def wait_for_distribution_deletion(self, distribution_id, sleep_time=5, timeout=600):
"""
Function waits until distribution will be disabled to delete it and remove certificate.
:param distribution_id : Id of CDN distribution
        :param sleep_time : default 5 sec, checks distribution status every 5 seconds
        :param timeout : maximum time to wait for the distribution
        """
        status = self.get_distribution(distribution_id)['Distribution']['Status']
        elapsed_time = 0
        while status == 'InProgress':
            if elapsed_time > timeout:
                raise Exception('Timeout ({}s) reached for CDN distribution deletion'.format(timeout))
            time.sleep(sleep_time)
            status = self.get_distribution(distribution_id)['Distribution']['Status']
            elapsed_time += sleep_time
def create_distribution(self, comment, origin_id, domain_name, hosted_zone, endpoint):
"""
Function creates new CDN distribution.
:param comment : Comment to new distribution
:param origin_id : Origin_id of new distribution
:param domain_name : Domain Name which will be assigned to new distribution
        :param hosted_zone : Hosted zone where the CDN record should be created
        :param endpoint : Endpoint that should be mapped to the CDN, usually an ELB
"""
acm = ACM()
helpers = Helper()
certificate = acm.request_certificate(domain_name=domain_name)
time.sleep(5)
record = acm.get_domain_validation_records(certificate_arn=certificate)
acm.create_dns_record(record=record, zone_id=hosted_zone)
acm.wait_for_certificate_validation(certificate_arn=certificate)
caller_reference = helpers.get_random_string(13)
file_loader = FileSystemLoader('templates')
env = jEnv(loader=file_loader)
env.trim_blocks = True
template = env.get_template('cdn_distribution_default.j2')
output = template.render(
caller_reference=caller_reference,
comment=comment,
origin_id=origin_id,
domain_name=domain_name,
endpoint=endpoint,
certificate=certificate
)
new_cdn = self.client.create_distribution(
DistributionConfig=output
)
        # Create record to route traffic via CDN
route53 = Route53()
route53.change_resource_record_alias(
zone_id=hosted_zone,
comment=comment,
action="UPSERT",
type='A',
hosted_zone=Environment.cdn_hosted_zone,
dns_name=new_cdn['Distribution']['DomainName'],
name=domain_name
)
return new_cdn
|
pannoi/cdn-dns-controller
|
tests/test_environment.py
|
import os
# mock .env
os.environ['AWS_ACCESS_KEY'] = "SUPER_SECRET_ID"
os.environ['AWS_SECRET_KEY'] = "SUPER_SECRET_KEY"
os.environ['AWS_REGION'] = "us-east-1"
os.environ['ROUTE53_DELEGATION_SET'] = "SOME_DELEGATION_SET"
os.environ['CDN_HOSTED_ZONE_ID'] = "CDN_HOSTED_ZONE_ID"
def test_aws_access_key():
aws_access_key = os.getenv('AWS_ACCESS_KEY', default=None)
if aws_access_key is None:
raise OSError("AWS_ACCESS_KEY is not set")
def test_aws_secret_key():
aws_secret_key = os.getenv('AWS_SECRET_KEY', default=None)
if aws_secret_key is None:
raise OSError("AWS_SECRET_KEY is not set")
def test_aws_region():
aws_region = os.getenv('AWS_REGION', default=None)
if aws_region is None:
raise OSError("AWS_REGION is not set")
def test_route53_delegation_set():
route53_delegation_set = os.getenv('ROUTE53_DELEGATION_SET', default=None)
if route53_delegation_set is None:
raise OSError("ROUTE53_DELEGATION_SET is not set")
def test_cdn_hosted_zone():
cdn_hosted_zone = os.getenv('CDN_HOSTED_ZONE_ID', default=None)
if cdn_hosted_zone is None:
raise OSError("CDN_HOSTED_ZONE_ID is not set")
|
pannoi/cdn-dns-controller
|
src/environment.py
|
import os
from dataclasses import dataclass
@dataclass(frozen=True)
class Environment:
""" Class describes .env variables. """
aws_access_key: str = os.getenv('AWS_ACCESS_KEY')
aws_secret_key: str = os.getenv('AWS_SECRET_KEY')
aws_region: str = os.getenv('AWS_REGION')
route53_delegation_set: str = os.getenv('ROUTE53_DELEGATION_SET')
cdn_hosted_zone: str = os.getenv('CDN_HOSTED_ZONE_ID')
|
pannoi/cdn-dns-controller
|
tests/test_jinja_templates.py
|
import jinja2
import pytest
def prerender(filename, context):
path = 'templates'
return jinja2.Environment(
loader=jinja2.FileSystemLoader(path)
).get_template(filename).render(context)
@pytest.mark.parametrize('comment, action, name, type, ttl, target', [
('some_comment', 'UPSERT', 'super_name', 'A', '300', '8.8.8.8'),
('cool_comment', 'UPSERT', 'ultra_name', 'CNAME', '60', 'google.com')
])
def test_record_set_template(comment, action, name, type, ttl, target):
filename = 'resource_record_set.j2'
context = {
'comment': comment,
'action': action,
'name': name,
'type': type,
'ttl': ttl,
'target': target
}
rendered = prerender(filename, context)
for val in context.values():
assert val in rendered
@pytest.mark.parametrize('comment, action, evaluate_health, dns_name, hosted_zone, name, type',[
('test_alias', 'UPSERT', 'False', 'example.com', 'ID123', 'example.com', 'A'),
])
def test_record_alias_template(comment, action, evaluate_health, dns_name, hosted_zone, name, type):
filename = 'resource_record_alias.j2'
context = {
'comment': comment,
'action': action,
'evaluate_health': evaluate_health,
'dns_name': dns_name,
'hosted_zone': hosted_zone,
'name': name,
'type': type
}
rendered = prerender(filename, context)
for val in context.values():
assert val in rendered
@pytest.mark.parametrize('cal_ref, comment, origin_id, domain_name, endpoint, certificate', [
('123321', 'comment', 'example.com', 'example.com', 'google.com', 'arn://test.amazonaws.com')
])
def test_cdn_default_template(cal_ref, comment, origin_id, domain_name, endpoint, certificate):
filename = 'cdn_distribution_default.j2'
context = {
'caller_reference': cal_ref,
'comment': comment,
'origin_id': origin_id,
'domain_name': domain_name,
'endpoint': endpoint,
'certificate': certificate
}
rendered = prerender(filename, context)
for val in context.values():
assert val in rendered
|
pannoi/cdn-dns-controller
|
src/route53.py
|
import boto3
from jinja2 import Environment as jEnv
from jinja2 import Template, FileSystemLoader
from src.environment import Environment
from src.helpers import Helper
class Route53():
""" Class describes Route53 interface. """
def __init__(self):
""" Class constructor. """
self.client = boto3.client(
'route53',
aws_access_key_id = Environment.aws_access_key,
aws_secret_access_key = Environment.aws_secret_key
)
def list_hosted_zones(self):
""" Function lists all hosted zones in Route53. """
return self.client.list_hosted_zones()
def get_hosted_zone(self, zone_id):
"""
        Function returns the hosted zone information.
:param zone_id: Id of hosted zone to GET record sets.
"""
return self.client.get_hosted_zone(
Id=zone_id
)
def create_hosted_zone(self, domain_name, comment="", is_private=False):
"""
Function creates new hosted zone under Route53 domain
:param domain_name: Domain name = hosted zone name
"""
helpers = Helper()
return self.client.create_hosted_zone(
Name = domain_name,
CallerReference = helpers.get_random_string(13),
HostedZoneConfig={
'Comment': comment,
'PrivateZone': is_private
},
DelegationSetId = Environment.route53_delegation_set
)
    def change_resource_record_set(self, zone_id, comment, action, name, type, ttl, target):
        """
        Function is needed to CREATE/MODIFY/DELETE resource record sets under a Route53 hosted zone.
:param zone_id : Target hosted zone id where record will be changed
:param comment : Comment for resource record set
:param action : CREATE/UPSERT/DELETE
:param name : Name of resource record set which will changed
:param type : 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'
:param ttl : TTL
:param target : Endpoint to map record
"""
file_loader = FileSystemLoader('templates')
env = jEnv(loader=file_loader)
env.trim_blocks = True
template = env.get_template('resource_record_set.j2')
output = template.render(
comment=comment,
action=action,
name=name,
type=type,
ttl=ttl,
target=target
)
return self.client.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch=output
)
def change_resource_record_alias(self, zone_id, comment, action, type, hosted_zone, dns_name, name):
"""
        Function is used to CREATE/MODIFY/DELETE record aliases under a Route53 hosted zone.
        :param zone_id     : Target hosted zone id where the alias will be changed
        :param comment     : Comment for the alias record
        :param action      : CREATE/UPSERT/DELETE
        :param type        : 'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'
        :param hosted_zone : Hosted zone for the alias -> where the other resource is located, e.g. a CDN
        :param dns_name    : DNS name used for the alias creation
        :param name        : Alias record name which will be created
"""
file_loader = FileSystemLoader('templates')
env = jEnv(loader=file_loader)
env.trim_blocks = True
template = env.get_template('resource_record_alias.j2')
output = template.render(
comment=comment,
action=action,
evaluate_health='False',
dns_name=dns_name,
hosted_zone=hosted_zone,
name=name,
type=type
)
return self.client.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch=output
)
def delete_hosted_zone(self, zone_id, force=False):
"""
        Function deletes the specified hosted zone under the Route53 domain.
:param zone_id : Hosted zone which should be deleted
:param force : Recursively delete all records in hosted zone
"""
if force:
self.recurse_record_deletion(zone_id=zone_id)
return self.client.delete_hosted_zone(
Id=zone_id
)
def get_records(self, zone_id):
"""
Lists all records under specific hosted_zone.
:param zone_id : Id of specific hosted zone
"""
return self.client.list_resource_record_sets(
HostedZoneId=zone_id
)
def recurse_record_deletion(self, zone_id):
"""
        Recursively deletes all records in a hosted zone.
:param zone_id : Id of zone where records should be deleted
"""
records = self.get_records(zone_id=zone_id)
records = records['ResourceRecordSets']
deletion_comment = "Deleted by cdn-dns-controller"
for r in records:
if 'AliasTarget' in r:
self.change_resource_record_alias(
zone_id=zone_id,
comment=deletion_comment,
action="DELETE",
hosted_zone=r['AliasTarget']['HostedZoneId'],
dns_name=r['AliasTarget']['DNSName'],
name=r['Name'],
type=r['Type']
)
else:
if r['Type'] != 'SOA' and r['Type'] != 'NS':
self.change_resource_record_set(
zone_id=zone_id,
comment=deletion_comment,
action="DELETE",
name=r['Name'],
type=r['Type'],
ttl=r['TTL'],
target=r['ResourceRecords'][0]['Value']
)
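# Illustrative usage sketch (assumes AWS credentials and ROUTE53_DELEGATION_SET are configured via
# src.environment.Environment; domain and record values below are placeholders):
#
#   r53 = Route53()
#   zone = r53.create_hosted_zone(domain_name='example.org', comment='demo zone')
#   zone_id = zone['HostedZone']['Id']
#   r53.change_resource_record_set(zone_id=zone_id, comment='demo record', action='UPSERT',
#                                  name='app.example.org', type='A', ttl='300', target='192.0.2.10')
#   r53.delete_hosted_zone(zone_id, force=True)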
|
pannoi/cdn-dns-controller
|
main.py
|
<reponame>pannoi/cdn-dns-controller
from flask import Flask, jsonify
from flask import make_response
from flask import request
import logging
from src.route53 import Route53
from src.cloudfront import CloudFront
app = Flask(__name__)
# Route53 routes
@app.route('/zones/', methods=['GET'])
def list_hosted_zones():
""" Function lists all hosted zones in Route53. """
route53 = Route53()
return route53.list_hosted_zones()
@app.route('/zones/<string:zone_id>', methods=['GET'])
def get_hosted_zone(zone_id):
"""
    Function returns the hosted zone information.
:param zone_id: Id of hosted zone to GET record sets.
"""
route53 = Route53()
return route53.get_hosted_zone(zone_id=zone_id)
@app.route('/zones/', methods=['POST'])
def create_hosted_zone():
""" Function creates new hosted zone under Route53 domain. """
route53 = Route53()
data = request.get_json()
hz_name = data['Name']
    comment = data.get('Comment', "")
    is_private = data.get('Private', False)
return route53.create_hosted_zone(domain_name=hz_name, comment=comment, is_private=is_private)
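# Example request body for POST /zones/ (placeholder values):
#   {"Name": "example.org", "Comment": "demo zone", "Private": false}
# Comment and Private may be omitted.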
@app.route('/zones/<string:zone_id>', methods=['POST'])
def change_resource_record(zone_id):
    """
    Function changes a resource record set in the specified hosted zone.
    :param zone_id: Id of the targeted hosted zone
    """
    route53 = Route53()
    data = request.get_json()
    if data['RecordType'] == 'Alias':
return route53.change_resource_record_alias(
zone_id=zone_id,
comment=data['Comment'],
action=data['Action'],
type=data['Type'],
hosted_zone=data['HostedZone'],
dns_name=data['DnsName'],
name=data['Name']
)
elif data['RecordType'] == 'Set':
return route53.change_resource_record_set(
zone_id=zone_id,
comment=data['Comment'],
action=data['Action'],
name=data['Name'],
type=data['Type'],
ttl=data['TTL'],
target=data['Target']
)
else:
return make_response(jsonify({'error': 'Bad Request: RecordType not found, should be "Set" or "Alias"'}), 400)
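# Example request bodies for POST /zones/<zone_id> (placeholder values):
#   Set record:
#     {"RecordType": "Set", "Comment": "demo", "Action": "UPSERT", "Name": "app.example.org",
#      "Type": "A", "TTL": "300", "Target": "192.0.2.10"}
#   Alias record:
#     {"RecordType": "Alias", "Comment": "demo", "Action": "UPSERT", "Type": "A",
#      "HostedZone": "Z2FDTNDATAQYW2", "DnsName": "d111111abcdef8.cloudfront.net",
#      "Name": "cdn.example.org"}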
@app.route('/zones/<string:zone_id>', methods=['DELETE'])
def delete_zone(zone_id):
""" Deletes hosted zone. """
route53 = Route53()
    data = request.get_json(silent=True) or {}
force = False
if 'force' in data:
force = True
return route53.delete_hosted_zone(zone_id, force=force)
# CloudFront routes
@app.route('/distributions/', methods=['GET'])
def list_distributions():
    """ Lists information about all CDN distributions. """
cloudfront = CloudFront()
return cloudfront.list_distirbutions()
@app.route('/distributions/<string:distribution_id>', methods=['GET'])
def get_distribution(distribution_id):
"""
    Lists information about a specific distribution by id.
:param distribution_id: Id of CDN distribution
"""
cloudfront = CloudFront()
return cloudfront.get_distribution(distribution_id=distribution_id)
@app.route('/distributions/', methods=['POST'])
def create_distribution():
""" Creates new CDN distribution. """
cloudfront = CloudFront()
data = request.get_json()
return cloudfront.create_distribution(
comment=data['Comment'],
origin_id=data['OriginId'],
domain_name=data['DomainName'],
hosted_zone=data['HostedZone'],
endpoint=data['Endpoint']
)
@app.route('/distributions/<string:distribution_id>', methods=['DELETE'])
def delete_distribution(distribution_id):
"""
Deletes CDN distribution
:param distribution_id: Id of CDN distribution
"""
cloudfront = CloudFront()
return cloudfront.delete_distribution(distribution_id=distribution_id)
@app.errorhandler(404)
def not_found(error):
""" If route is not defined on backend -> return 404. """
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == "__main__":
    app.logger.info('cdn-dns-controller is ready to use')
    app.run(host='0.0.0.0')
|
japinol7/api_examples
|
api_01_ex/app/tests/test_app.py
|
<filename>api_01_ex/app/tests/test_app.py
import json
import pytest
from app.app import animes, ANIME_NOT_FOUND_MSG_ERROR
ANIMES_MAX_ID_BEFORE_TESTS = 63
ANIME_NOT_EXISTING_ID = 111111
def test_list_anime_ids_response(app_client_get_root):
response = app_client_get_root
assert response.status_code == 200
def test_list_anime_ids_count(app_client_get_root):
response = app_client_get_root
json_resp = response.json()
animes_count = len(animes)
assert len(json_resp) == animes_count
def test_list_anime_ids_expected(app_client_get_root):
response = app_client_get_root
json_resp = response.json()
expected = [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
39, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63]
assert json_resp == expected
def test_list_animes_response(app_client_get_all):
response = app_client_get_all
assert response.status_code == 200
def test_list_animes_count(app_client_get_all):
response = app_client_get_all
json_resp = response.json()
animes_count = len(animes)
assert len(json_resp) == animes_count
def test_list_animes_first_anime(app_client_get_all):
response = app_client_get_all
json_resp = response.json()
expected = {'id': 12,
'title': 'Detective Conan',
'year': 1996,
'episodes': 0,
'status': 'CURRENTLY',
'type': 'TV',
'animeSeason': {'season': 'WINTER', 'year': 1996},
'picture': 'https://cdn.myanimelist.net/images/anime/7/75199.jpg',
'sources': ['https://anidb.net/anime/266', 'https://anilist.co/anime/235', 'https://kitsu.io/anime/210', 'https://myanimelist.net/anime/235', 'https://notify.moe/anime/lWnhcKiig']
}
assert json_resp[0] == expected
def test_list_animes_last_anime(app_client_get_all, app_client_get_last_expected):
response = app_client_get_all
json_resp = response.json()
assert json_resp[-1] == app_client_get_last_expected
def test_create_anime_count(animes_count_before_tests, app_client_post):
response = app_client_post
assert response.status_code == 201
assert len(animes) == animes_count_before_tests + 1
def test_get_anime(app_client_get, app_client_get_last_expected):
response = app_client_get(ANIMES_MAX_ID_BEFORE_TESTS)
assert response.status_code == 200
assert response.json() == app_client_get_last_expected
def test_get_anime_not_found(app_client_get):
response = app_client_get(ANIME_NOT_EXISTING_ID)
assert response.status_code == 404
assert response.json() == {'error': ANIME_NOT_FOUND_MSG_ERROR}
def test_create_anime_expected(app_client_post, client_test):
response = app_client_post
expected = {
'id': ANIMES_MAX_ID_BEFORE_TESTS + 1,
'title': 'Fake Detective Conan',
'year': 1995,
'episodes': 12,
'status': 'CURRENTLY',
'type': 'TV',
'animeSeason': {'season': 'fake_season', 'year': 1995},
'picture': 'fake_picture',
'sources': ['fake_source_01', 'fake_source_02']
}
assert response.json() == expected
response = client_test.get(f'/{ANIMES_MAX_ID_BEFORE_TESTS + 1}/')
assert response.json() == expected
def test_create_anime_expected_second(app_client_post_second, client_test):
response = app_client_post_second
expected = {
'id': ANIMES_MAX_ID_BEFORE_TESTS + 2,
'title': 'Fake 2 Detective Conan',
'year': 1999,
'episodes': 24,
'status': 'CURRENTLY',
'type': 'TV',
'animeSeason': {'season': 'fake_season', 'year': 1999},
'picture': 'fake_2_picture',
'sources': ['fake_2_source_01', 'fake_2_source_02']
}
assert response.json() == expected
response = client_test.get(f'/{ANIMES_MAX_ID_BEFORE_TESTS + 2}/')
assert response.json() == expected
def test_update_anime(client_test):
anime_update = {
'title': 'Changed Detective Conan',
'year': 2020,
'episodes': 1024,
'status': 'CURRENTLY',
'type': 'TV',
'animeSeason': {'season': 'updated_season', 'year': 1024},
'picture': 'updated_2_picture',
'sources': ['updated_2_source_01', 'updated_2_source_02']
}
response = client_test.put(f'/{ANIMES_MAX_ID_BEFORE_TESTS}/', json=json.loads(json.dumps(anime_update)))
expected = anime_update.copy()
expected.update({
'id': ANIMES_MAX_ID_BEFORE_TESTS,
})
assert response.json() == expected
response = client_test.get(f'/{ANIMES_MAX_ID_BEFORE_TESTS}/')
assert response.json() == expected
def test_update_anime_not_found(client_test):
anime_update = {
'title': 'Changed Detective Conan',
'year': 2020,
'episodes': 1080,
'status': 'CURRENTLY',
'type': 'TV',
'animeSeason': {'season': 'updated_season', 'year': 1024},
'picture': 'updated_2_picture',
'sources': ['updated_2_source_01', 'updated_2_source_02']
}
response = client_test.put(f'/{ANIME_NOT_EXISTING_ID}/', json=json.loads(json.dumps(anime_update)))
assert response.status_code == 404
assert response.json() == {'error': ANIME_NOT_FOUND_MSG_ERROR}
def test_update_anime_validation(client_test):
anime_update = {
'title': 'Changed Detective Conan',
'year': 1520,
'episodes': "1024",
'status': 'CURRENTLY',
'type': 'TV',
'animeSeason': [{'season': 'updated_season', 'year': 1024}],
'picture': 20,
'sources': 'updated_2_source_01'
}
response = client_test.put(f'/{ANIMES_MAX_ID_BEFORE_TESTS}/', json=json.loads(json.dumps(anime_update)))
expected = {
'animeSeason': 'Must be an object.',
'picture': 'Must be a string.',
'sources': 'Must be an array.',
'year': 'Must be greater than or equal to 1900.'
}
assert response.json() == expected
@pytest.mark.parametrize('anime_id', [20, 63])
def test_delete_anime(anime_id, client_test):
animes_count_before = len(animes)
response = client_test.delete(f'/{anime_id}/')
assert response.status_code == 204
response = client_test.get(f'/{anime_id}/')
assert response.status_code == 404
assert len(animes) == animes_count_before - 1
|
japinol7/api_examples
|
api_01_ex/app/app.py
|
<filename>api_01_ex/app/app.py
import json
import logging
import os
from apistar import App, Route, types, validators
from apistar.http import JSONResponse
logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ANIME_TITLE_WANTED = (
'detective conan',
)
ANIME_TITLE_NOT_WANTED = (
)
ANIME_TYPE_EXCLUDED_OLD = (
'Movie',
'Special',
'OVA',
'ONA',
)
ANIME_TYPE_EXCLUDED = (
'Special',
)
ANIME_COLUMNS_WANTED = (
'id',
'title',
'year',
'type',
'animeSeason',
'episodes',
'picture',
'sources',
'status'
)
def is_a_wanted_anime(anime, allow_missing_year=False):
if not allow_missing_year and not anime['year']:
return False
if anime['type'] in ANIME_TYPE_EXCLUDED:
return False
for item in ANIME_TITLE_NOT_WANTED:
if item in anime['title'].lower():
return False
for item in ANIME_TITLE_WANTED:
if item in anime['title'].lower():
return True
return False
def get_wanted_columns(anime):
return {k: v for k, v in anime.items() if k in ANIME_COLUMNS_WANTED}
def load_anime_data():
file_name = os.path.join('res', 'data', 'anime-offline-database.json')
animes = {}
with open(file_name, encoding='UTF8') as fin:
data = json.load(fin)
for idx, row in enumerate(data['data']):
row.update({
'id': idx,
'year': row['animeSeason'].get('year', 0) or 0,
})
if not is_a_wanted_anime(row):
continue
row = get_wanted_columns(row)
animes[idx] = row
return animes
animes = load_anime_data()
ANIME_VALID_STATUS = set([anime["status"] for anime in animes.values()])
ANIME_NOT_FOUND_MSG_ERROR = 'Anime not found'
ANIME_NOT_FOUND_LOG_ERROR = "Anime not found. Anime id: %s"
class Anime(types.Type):
id = validators.Integer(allow_null=True)
title = validators.String(max_length=2500)
year = validators.Integer(minimum=1900, maximum=2050)
episodes = validators.Integer(maximum=9999)
status = validators.String(enum=list(ANIME_VALID_STATUS))
type = validators.String(allow_null=True)
animeSeason = validators.Object(allow_null=True)
picture = validators.String(allow_null=True)
sources = validators.Array(allow_null=True)
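# Illustrative payload accepted by the Anime type above (all values are made up; status must be
# one of the values collected in ANIME_VALID_STATUS):
#   {"title": "Some Anime", "year": 2001, "episodes": 26, "status": "CURRENTLY", "type": "TV",
#    "animeSeason": {"season": "WINTER", "year": 2001}, "picture": "https://example.org/pic.jpg",
#    "sources": ["https://example.org/anime/1"]}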
def list_anime_ids():
logger.info(f'Get a list of all anime ids.')
return [anime for anime in animes]
def list_animes():
logger.info(f'Get all animes.')
return [Anime(anime[1]) for anime in sorted(animes.items())]
def get_anime(anime_id):
logger.info(f'Get anime with id: {anime_id}')
try:
anime_id = int(anime_id)
except ValueError:
logger.error(f'Value error: Anime id is not an integer: {anime_id}')
anime = animes.get(anime_id)
if not anime:
logger.info(ANIME_NOT_FOUND_LOG_ERROR % anime_id)
error = {'error': ANIME_NOT_FOUND_MSG_ERROR}
return JSONResponse(error, status_code=404)
return JSONResponse(Anime(anime), status_code=200)
def update_anime(anime_id, anime: Anime):
logger.info(f'Update anime with id: {anime_id}')
anime_id = int(anime_id)
if not animes.get(anime_id):
logger.info("Update Error. " + ANIME_NOT_FOUND_LOG_ERROR % anime_id)
error = {'error': ANIME_NOT_FOUND_MSG_ERROR}
return JSONResponse(error, status_code=404)
anime.id = anime_id
animes[anime_id] = dict(anime)
return JSONResponse(Anime(anime), status_code=200)
def create_anime(anime: Anime):
logger.info(f'Create anime with title: {anime.title}')
anime_id = max(animes.keys()) + 1
anime.id = anime_id
animes[anime_id] = dict(anime)
return JSONResponse(Anime(anime), status_code=201)
def delete_anime(anime_id):
logger.info(f'Delete anime with id: {anime_id}')
anime_id = int(anime_id)
if not animes.get(anime_id):
logger.info("Delete Error. " + ANIME_NOT_FOUND_LOG_ERROR % anime_id)
error = {'error': ANIME_NOT_FOUND_MSG_ERROR}
return JSONResponse(error, status_code=404)
del animes[anime_id]
return JSONResponse({}, status_code=204)
routes = [
Route('/', method='GET', handler=list_anime_ids),
Route('/all', method='GET', handler=list_animes),
Route('/', method='POST', handler=create_anime),
Route('/{anime_id}/', method='GET', handler=get_anime),
Route('/{anime_id}/', method='PUT', handler=update_anime),
Route('/{anime_id}/', method='DELETE', handler=delete_anime),
]
app = App(routes=routes)
if __name__ == '__main__':
app.serve('127.0.0.1', 5000, debug=True)
|
japinol7/api_examples
|
api_01_ex/app/tests/conftest.py
|
"""Define some fixtures to use in the project."""
import pytest
import json
from apistar import test
from app.app import app, animes
client = test.TestClient(app)
@pytest.fixture(scope='session')
def client_test():
return client
@pytest.fixture(scope='session')
def animes_count_before_tests():
return len(animes)
@pytest.fixture(scope='session')
def app_client_get_root():
response = client.get('/')
return response
@pytest.fixture(scope='session')
def app_client_get_all():
response = client.get('/all')
return response
@pytest.fixture(scope='session')
def app_client_post():
anime_to_add = {
'title': 'Fake Detective Conan',
'year': 1995,
'episodes': 12,
'status': 'CURRENTLY',
'type': 'TV',
'animeSeason': {'season': 'fake_season', 'year': 1995},
'picture': 'fake_picture',
'sources': ['fake_source_01', 'fake_source_02']
}
response = client.post('/', json=json.loads(json.dumps(anime_to_add)))
return response
@pytest.fixture(scope='session')
def app_client_post_second():
anime_to_add = {
'title': 'Fake 2 Detective Conan',
'year': 1999,
'episodes': 24,
'status': 'CURRENTLY',
'type': 'TV',
'animeSeason': {'season': 'fake_season', 'year': 1999},
'picture': 'fake_2_picture',
'sources': ['fake_2_source_01', 'fake_2_source_02']
}
response = client.post('/', json=json.loads(json.dumps(anime_to_add)))
return response
@pytest.fixture(scope='session')
def app_client_get():
def make_app_client_get(anime_id):
response = client.get(f'/{anime_id}/')
return response
return make_app_client_get
@pytest.fixture(scope='session')
def app_client_get_last_expected():
return {'id': 63,
'title': 'Detective Conan Movie 4: Captured In her Eyes',
'year': 2000, 'episodes': 1,
'status': 'UNKNOWN',
'type': 'Movie',
'animeSeason': {'season': 'UNDEFINED', 'year': 2000},
'picture': 'https://anime-planet.com/images/anime/covers/detective-conan-movie-4-captured-in-her-eyes-1489.jpg',
'sources': ['https://anime-planet.com/anime/detective-conan-movie-4-captured-in-her-eyes']}
|
carolmb/political
|
distance.py
|
import xnet
import glob
import math
import matplotlib
import concurrent.futures
import numpy as np
import matplotlib.pyplot as plt
from igraph import *
from collections import defaultdict
from util import get_valid_pp
from util import filter_pp_name
def calculate_dist(filenames):
for filename in filenames:
# print(filename)
net = xnet.xnet2igraph(filename)
weights = net.es['weight']
weights = [math.sqrt(2*(1-w)) for w in weights]
if len(weights) > 0:
net.es['distance'] = weights
xnet.igraph2xnet(net,filename[:-5]+"_dist.xnet")
else:
print('error',filename)
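# Note on the mapping above: distance = sqrt(2*(1-w)) turns the similarity weight w in [0, 1] into
# a distance, e.g. w = 1.0 -> 0.0, w = 0.5 -> 1.0, w = 0.0 -> sqrt(2) ~= 1.41.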
def to_sort(dates,nets):
dates = np.asarray(dates)
nets = np.asarray(nets)
sorted_idxs = np.argsort(dates)
dates = dates[sorted_idxs]
nets = nets[sorted_idxs]
return dates,nets
# Utilities
def get_freqs(summaries,dates):
ys = defaultdict(lambda:defaultdict(lambda:[]))
freq_dict = defaultdict(lambda:[])
for d in dates:
year_summary = summaries[d]
for pp1,summary_pp1 in year_summary.items():
if summary_pp1:
for pp2,(mean,std,f) in summary_pp1.items():
ys[pp1][pp2].append((d,mean,std,f))
freq_dict[pp2].append(f)
freq = [(np.nanmean(fs),pp) for pp,fs in freq_dict.items()]
freq = sorted(freq,reverse=True)
i = 0
f_max = freq[i][0]
while np.isnan(freq[i][0]):
i+= 1
f_max = freq[i][0]
return ys,freq,f_max
def plot_metric(to_plot,interval_colors,color,output_fname,metric_name,is_custom_labels,is_bg):
plt.figure(figsize=(12,3))
xs2 = []
print(output_fname)
for pp1,(means,total_std,fraq,xs) in to_plot.items():
if len(xs) > len(xs2):
xs2 = xs
fraq = max(fraq,0.45)
# elw = max(0.3,2*fraq)
# lw = max(0.3,2*fraq)
# ms = max(0.3,2*fraq)
plt.errorbar(xs,means,total_std,
linestyle='-',label=pp1.upper(),fmt='o',elinewidth=1.5*fraq,
linewidth=2*fraq,markersize=2*fraq,
alpha=max(0.6,fraq),color=color[pp1])
delta = 12
if is_custom_labels:
delta = 1
labels = [str(int(x)) if i%delta == 0 else '' for i,x in enumerate(xs2)]
xpos = np.arange(min(xs2), max(xs2)+1/delta, 1/delta)
plt.xticks(xpos,labels=labels,rotation=35)
if is_bg:
for begin,delta,color in interval_colors:
if begin+delta >= xs2[0] and begin <= xs2[-1]:
plt.axvspan(max(begin,xs2[0]), min(begin+delta,xs2[-1]), facecolor=color, alpha=0.3)
plt.axvline(max(begin,xs2[0]),color='#2e2e2e',linestyle='--',alpha=0.5)
plt.legend(loc='upper right',bbox_to_anchor=(1.05, 1.0))
plt.xlabel('year')
plt.ylabel(metric_name)
plt.savefig(output_fname+'.pdf',format='pdf',bbox_inches="tight")
plt.clf()
# Shortest paths
def calculate_shortest_paths(net,pps):
summary = defaultdict(lambda:defaultdict(lambda:0))
all_paths = []
for pp1 in pps:
sources = net.vs.select(political_party_eq=pp1)
for pp2 in pps:
# print('current pps:',pp1,pp2)
targets = net.vs.select(political_party_eq=pp2)
targets = [v.index for v in targets]
paths = []
# for s in sources:
# for t in targets:
# print(net.shortest_paths_dijkstra(source=s,target=t,weights='distance')[0],end=',')
for s in sources:
path_lens = net.get_shortest_paths(s,to=targets,weights='distance',output="epath")
for p in path_lens:
x = sum(net.es[idx]['distance'] for idx in p)
# print(x,end=',')
if x > 0:
paths.append(x)
all_paths.append(x)
if len(paths) == 0:
summary[pp1][pp2] = (np.nan,np.nan,np.nan)
summary[pp2][pp1] = (np.nan,np.nan,np.nan)
else:
mean = np.mean(paths)
std_dev = np.std(paths)
summary[pp1][pp2] = (mean,std_dev,len(targets))
summary[pp2][pp1] = (mean,std_dev,len(sources))
if pp1 == pp2:
break
all_paths_mean = np.mean(all_paths)
all_paths_std = np.std(all_paths)
return summary,(all_paths_mean,all_paths_std)
def shortest_path_by_pp(freq,pp2_means,f_max):
to_plot = dict()
for f,pp2 in freq:
means_std = pp2_means[pp2]
means_std = np.asarray(means_std)
means = means_std[:,1]
std = means_std[:,2]
xs = means_std[:,0]
fraq = f/f_max
if not np.isnan(means).all():
to_plot[pp2] = (means,std,fraq,xs)
return to_plot
def plot_shortest_paths(dates,nets,valid_pps,interval_colors,color,header,is_custom_labels,is_bg):
summaries = dict()
all_paths_summary = []
for date,net in zip(dates,nets):
summaries[date],all_paths = calculate_shortest_paths(net,valid_pps)
all_paths_summary.append(all_paths)
all_paths_summary = np.asarray(all_paths_summary)
ys,_,_ = get_freqs(summaries,dates)
for pp1,pp2_means in ys.items():
if not pp1 in valid_pps:
continue
freq = []
for pp2,means_std in pp2_means.items():
means_std = np.array(means_std)
freq.append((np.nanmean(means_std[:,3]),pp2))
freq = sorted(freq,reverse=True)
f_max = freq[0][0]
to_plot = shortest_path_by_pp(freq,pp2_means,f_max)
to_plot['all'] = (all_paths_summary[:,0], all_paths_summary[:,1],0.3,dates)
plot_metric(to_plot,interval_colors,color,header+pp1,'average shortest path len',is_custom_labels,is_bg)
def plot_shortest_paths_all_years(dates,nets,valid_pps,interval_colors,color,is_bg):
header = 'shortest_path_'
plot_shortest_paths(dates,nets,valid_pps,interval_colors,color,header,True,is_bg)
def plot_shortest_paths_mandate(dates,nets,year,valid_pps,interval_colors,color,is_bg):
idxs = [idx for idx,date in enumerate(dates) if date < year+4 and date >= year]
current_dates = [dates[idx] for idx in idxs]
current_nets = [nets[idx] for idx in idxs]
header = 'shortest_path_' + str(year) + '_' + str(year+3) + '_'
plot_shortest_paths(current_dates,current_nets,valid_pps,interval_colors,color,header,False,is_bg)
# Isolation/Fragmentation
def fragmentation_to_plot(summaries,dates):
to_plot = dict()
ys,freq,f_max = get_freqs(summaries,dates)
fragmentation = dict()
for f,pp1 in freq:
pp2_means = ys[pp1]
means = np.zeros(len(pp2_means[pp1]))
xs = []
for pp2,means_std in pp2_means.items():
if pp1 == pp2:
means_std = np.array(means_std)
means = means_std[:,1]
std = means_std[:,2]
xs = means_std[:,0]
break
fraq = f/f_max
fraq = max(fraq,0.45)
if np.isnan(fraq) or np.isnan(means).all():
continue
to_plot[pp1] = (means,std,fraq,xs)
return to_plot
def isolation_to_plot(summaries,dates):
to_plot = dict()
ys,freq,f_max = get_freqs(summaries,dates)
# if np.isnan(f_max):
# return None,None
for f,pp1 in freq:
pp2_means = ys[pp1]
# if pp1 == 'psl':
# print(pp2_means)
means = np.zeros(len(pp2_means[pp1]))
total_std = np.zeros(len(pp2_means[pp1]))
total = np.zeros(len(pp2_means[pp1]))
xs = []
for pp2,means_std in pp2_means.items():
if not pp1 == pp2:
means_std = np.array(means_std)
means_std[np.isnan(means_std)]=0
if not np.isnan(means_std).any():
xs = means_std[:,0]
t = means_std[:,3]
std = means_std[:,2]
total += t
means += means_std[:,1]*t
total_std += std*t
means /= total
total_std /= total
fraq = f/f_max
fraq = max(fraq,0.45)
if np.isnan(fraq) or np.isnan(means).all():
continue
to_plot[pp1] = (means,total_std,fraq,xs)
return to_plot
def plot_metric_all_years(dates,nets,metric_to_plot,valid_pps,pps_color,metric_name,is_bg):
summaries = dict()
for d,n in zip(dates,nets):
summaries[d],all_paths = calculate_shortest_paths(n,valid_pps)
output_fname = metric_name + '_' + str(min(dates))+'_'+str(max(dates))
to_plot = metric_to_plot(summaries,dates)
metric = {pp1:(means,total_std) for pp1,(means,total_std,_,_) in to_plot.items()}
plot_metric(to_plot,interval_colors,pps_color,output_fname,metric_name,True,is_bg)
return metric,dates
def plot_metric_mandate(dates,nets,metric_to_plot,year,valid_pps,pps_color,metric_name,is_bg,delta=4):
summaries = dict()
idxs = [idx for idx,date in enumerate(dates) if date < year+delta and date >= year]
current_dates = [dates[idx] for idx in idxs]
current_nets = [nets[idx] for idx in idxs]
for d,n in zip(current_dates,current_nets):
summaries[d],all_paths = calculate_shortest_paths(n,valid_pps)
output_fname = metric_name + '_' + str(int(min(current_dates)))+'_'+str(int(max(current_dates)))
to_plot = metric_to_plot(summaries,current_dates)
metric = {pp1:(means,total_std) for pp1,(means,total_std,_,_) in to_plot.items()}
plot_metric(to_plot,interval_colors,pps_color,output_fname,metric_name,False,is_bg)
return metric,current_dates
if __name__ == '__main__':
##############################################################
# READ INPUT
##############################################################
source_by_year = 'data/1991-2019/by_year/dep_*_obstr_0.8_leidenalg'
source_by_mandate = 'data/1991-2019/mandate/dep_*_0.8'
# Called only once
source = 'data/1991-2019/by_year/dep_*_obstr_0.8_leidenalg'
filenames = glob.glob(source+'.xnet')
calculate_dist(filenames)
filenames_by_year = sorted(glob.glob(source_by_year+'_dist.xnet'))
filenames_by_mandate = sorted(glob.glob(source_by_mandate+'_dist.xnet'))
dates_by_year, dates_by_mandate = [],[]
nets_by_year, nets_by_mandate = [],[]
for filename in filenames_by_year:
net = xnet.xnet2igraph(filename)
net.vs['political_party'] = [filter_pp_name(p) for p in net.vs['political_party']]
nets_by_year.append(net.components().giant())
date = int(filename.split('dep_')[1].split('_')[0])
dates_by_year.append(date)
for filename in filenames_by_mandate:
net = xnet.xnet2igraph(filename)
net.vs['political_party'] = [filter_pp_name(p) for p in net.vs['political_party']]
nets_by_mandate.append(net.components().giant())
        # per year
date = int(filename.split('dep_')[1].split('_')[0])
date += float(filename.split('dep_')[1].split('_')[1])/12
dates_by_mandate.append(date)
dates_by_year,nets_by_year = to_sort(dates_by_year,nets_by_year)
dates_by_mandate,nets_by_mandate = to_sort(dates_by_mandate,nets_by_mandate)
##############################################################
# VALID POLITICAL PARTIES
##############################################################
# valid_pps = list(get_valid_pp(nets_by_year,1990,1,cut_percent=0.06))
# valid_pps = ['psdb', 'pp', 'pmdb', 'pt', 'dem', 'pl', 'ptb', 'psb', 'pr']
# valid_pps = sorted(valid_pps)
# valid_pps = ['psdb', 'pp', 'pmdb', 'pt', 'dem', 'pdt', 'psb', 'psl', 'ptb', 'prb', 'pl']
valid_pps = ['psdb', 'pp', 'pmdb', 'pt', 'dem']#,'psl']
colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] + ['magenta','navy','violet','teal']
pps_color = dict()
for pp,c in zip(valid_pps,colors):
pps_color[pp] = c
pps_color['all'] = 'cyan'
interval_colors = [(1992.95,2.05,pps_color['pmdb']),(1995,8,pps_color['psdb']),
(2003,13.4,pps_color['pt']),(2016.4,0.26,'#757373'),(2016.66,2.34,pps_color['pmdb'])]
# ,
# (2019,1,pps_color['psl'])] # psl
govs = [('FHC',(1995.01,2003)),('Lula',(2003.01,2011)),('Dilma',(2011.01,2016.4)),('Temer',(2016.4,2019)),('Bolsonaro',(2019.1,2020))]
gov_map = {'FHC':'psdb','Lula':'pt','Dilma':'pt','Temer':'pmdb','Bolsonaro':'psl'}
##############################################################
# PLOT SHORTEST PATHS
##############################################################
    # all years
plot_shortest_paths_all_years(dates_by_year,nets_by_year,valid_pps,interval_colors,pps_color,True)
    # by mandate
for year in range(2016,2020,4):
plot_shortest_paths_mandate(dates_by_mandate,nets_by_mandate,year,valid_pps,interval_colors,pps_color,False)
##############################################################
# ISOLATION/FRAGMENTATION
##############################################################
    # Code for data in yearly intervals:
plot_metric_all_years(dates_by_year,nets_by_year,isolation_to_plot,valid_pps,pps_color,'isolation',True)
plot_metric_all_years(dates_by_year,nets_by_year,fragmentation_to_plot,valid_pps,pps_color,'fragmentation',True)
    # Code for data in monthly intervals:
plot_metric_mandate(dates_by_mandate,nets_by_mandate,fragmentation_to_plot,2015,valid_pps,pps_color,'fragmentation',True,5)
plot_metric_mandate(dates_by_mandate,nets_by_mandate,isolation_to_plot,2015,valid_pps,pps_color,'isolation',True,5)
##############################################################
# ZOOM 2015 - 2020
##############################################################
total_frag = defaultdict(lambda:[])
total_xs = []
for year in range(2015,2020,4):
frag,xs = plot_metric_mandate(dates_by_mandate,nets_by_mandate,fragmentation_to_plot,year,valid_pps,pps_color,'fragmentation',True)
for k,v in frag.items():
total_frag[k].append(v)
total_xs.append(xs)
total_isol = defaultdict(lambda:[])
total_xs = []
for year in range(2015,2020,4):
isol,xs = plot_metric_mandate(dates_by_mandate,nets_by_mandate,isolation_to_plot,year,valid_pps,pps_color,'isolation',True)
for k,v in isol.items():
total_isol[k].append(v)
total_xs.append(xs)
|
carolmb/political
|
data_metrics.py
|
import xnet
import glob
import numpy as np
import matplotlib.pyplot as plt
from igraph import *
from util import get_valid_pp
from util import filter_pp_name
from collections import defaultdict
from sklearn.metrics import normalized_mutual_info_score
np.set_printoptions(suppress=True,formatter={'float_kind':'{:f}'.format})
header = 'imgs/'
def read_nets_by_years(path):
filenames = glob.glob(path)
filenames = sorted(filenames)
dates = []
nets = []
for filename in filenames:
net = xnet.xnet2igraph(filename)
net.vs['political_party'] = [filter_pp_name(p) for p in net.vs['political_party']]
nets.append(net.components().giant())
base = filename.split('dep')[1].split('_')
# date = float(filename.split('dep')[1].split('_')[0])
date = float(filename.split('_')[2].split('.')[0])
dates.append(date)
dates = np.asarray(dates)
nets = np.asarray(nets)
sorted_idxs = np.argsort(dates)
dates = dates[sorted_idxs]
nets = nets[sorted_idxs]
return dates,nets
def plot(xs,ys,x_name,y_name,filename,dim=(12,2)):
plt.figure(figsize=dim)
plt.plot(xs,ys,'o',ls='-')
labels = [str(x) if float(x)%5 == 0 else '' for x in xs]
plt.xticks(np.arange(min(xs), max(xs)+1, 1.0),labels=labels)
# plt.legend(loc='upper right')
plt.xlabel(x_name)
plt.ylabel(y_name)
plt.savefig(header+filename+'.pdf',format='pdf',bbox_inches="tight")
plt.close()
def degree(dates,nets):
degrees = [mean(net.degree()) for net in nets]
plot(dates,degrees,'year','mean degree','mean_degree')
def dep_by_pp(dates,nets):
ys = defaultdict(lambda:[])
for date,net in zip(dates,nets):
unique,count = np.unique(net.vs['political_party'],return_counts=True)
for u,c in zip(unique,count):
ys[u].append((date,c))
total = [(sum([v for d,v in d_v]),k) for k,d_v in ys.items()]
total = sorted(total,reverse=True)
ys_sorted = []
labels_sorted = []
others = np.zeros(len(dates))
for t,k in total:
current_ys = ys[k]
current_dates = [d for d,c in current_ys]
real_y = []
for date in dates:
if not date in current_dates:
real_y.append(0)
else:
real_y.append(current_ys[0][1])
current_ys = current_ys[1:]
if t <= 288:
others += real_y
continue
labels_sorted.append(k)
ys_sorted.append(real_y)
ys_sorted = [others] + ys_sorted
labels_sorted = ['others'] + labels_sorted
ys_sorted = np.asarray(ys_sorted)
ys_sorted = np.cumsum(ys_sorted, axis=0)
fig = plt.figure(figsize=(12,3))
ax1 = fig.add_subplot(111)
for label,pp_ys in (zip(reversed(labels_sorted),reversed(ys_sorted))):
ax1.fill_between(dates, pp_ys, label=label.upper(),alpha=1)
plt.legend(loc='upper right',bbox_to_anchor=(1.1, 1.0))
plt.xlabel('year')
plt.ylabel('cumulative number of deputies')
plt.savefig(header+'cumulative_number_of_dep.pdf',format='pdf',bbox_inches="tight")
plt.close()
def modularity(dates,nets,param1,param2):
mods_pps = []
mods_comm = []
for net in nets:
pps = list(set(net.vs['political_party']))
param_int = [pps.index(p) for p in net.vs['political_party']]
vc = VertexClustering(net,param_int,params={'weight':net.es['weight']})
mods_pps.append(vc.modularity)
pps = list(set(net.vs['community']))
param_int = [pps.index(p) for p in net.vs['community']]
vc = VertexClustering(net,param_int,params={'weight':net.es['weight']})
mods_comm.append(vc.modularity)
plt.figure(figsize=(12,3))
plt.plot(dates,mods_pps,'o',ls='-',label='political party')
plt.plot(dates,mods_comm,'o',ls='-',label='community')
plt.xticks(dates,rotation=45)
plt.xlabel('year')
plt.ylabel('modularity')
plt.legend(loc='upper right')
plt.savefig(header+'modularity.pdf',bbox_inches="tight")
plt.close()
def shortest_path_mean(datas,nets):
means = []
for net in nets:
vcount = net.vcount()
dists = []
for v in net.vs:
            path_lens = net.get_shortest_paths(v,to=net.vs,weights='distance',output='epath')
for p in path_lens:
x = sum(net.es[idx]['distance'] for idx in p)
if x > 0:
dists.append(x)
m = mean(dists)
means.append(m)
dates = [int(d) for d in datas]
plot(dates,means,'year','shortest paths mean','shortest_paths_mean')
def clustering_coefficient(datas,nets):
clus_coefs = []
for net in nets:
clus = net.transitivity_local_undirected(weights=net.es['weight'])
clus = np.nanmean(clus)
clus_coefs.append(clus)
dates = [int(d) for d in datas]
plot(datas,clus_coefs,'year','clustering coefficient','clustering_coef')
def norm_mutual_info(dates,nets):
mutual_infos = []
for net in nets:
comms = net.vs['community']
pps = net.vs['political_party']
        mutual_info = normalized_mutual_info_score(comms,pps,average_method='geometric')
mutual_infos.append(mutual_info)
dates2 = [int(d) for d in dates]
plot(dates2,mutual_infos,'year','normalized mutual information','nmi')
def div_by_param(dates,nets,param):
divs = []
totals = []
for date,net in zip(dates,nets):
pps = net.vs[param]
unique,count = np.unique(pps,return_counts=True)
total = sum(count)
probs = count/total
entropy = -sum(np.log(probs)*probs)
div = np.exp(entropy)
divs.append(div)
totals.append(len(unique))
return divs,totals
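# Worked example for div_by_param: counts [50, 50] give probabilities [0.5, 0.5], entropy ln(2)
# ~= 0.693 and diversity exp(entropy) = 2, i.e. two equally represented groups; uneven counts
# give an effective number of groups below the raw total.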
def div(dates,nets):
pps_div,pps_total = div_by_param(dates,nets,'political_party')
comm_div,comm_total = div_by_param(dates,nets,'community')
plt.figure(figsize=(12,2))
plt.plot(dates,pps_div,label='political party diversity')
plt.plot(dates,comm_div,label='community diversity')
plt.plot(dates,pps_total,label='total of political parties')
plt.plot(dates,comm_total,label='total of communities')
plt.legend(loc='upper right',bbox_to_anchor=(1.2, 1.0))
labels = [str(int(x)) if float(x)%5 == 0 else '' for x in dates]
plt.xticks(np.arange(min(dates), max(dates)+1, 1.0),labels=labels)
plt.xlabel('year')
plt.savefig(header+'divs.pdf',format='pdf',bbox_inches="tight")
plt.close()
if __name__ == '__main__':
dates,nets = read_nets_by_years('data/1991-2019/by_year/dep*_0.8_leidenalg_dist.xnet')
degree(dates,nets)
modularity(dates,nets,'community','political_party')
shortest_path_mean(dates,nets)
dep_by_pp(dates,nets)
clustering_coefficient(dates,nets)
norm_mutual_info(dates,nets)
div(dates,nets)
|
carolmb/political
|
json_to_json.py
|
<reponame>carolmb/political
import json
import zipfile
import unidecode
input_file_zip = "data/deputadosData_1991-2019.zip"
input_file = "data/deputadosData_1991-2019.json"
output_file = "data/deputadosv2.json"
with zipfile.ZipFile(input_file_zip, 'r') as zip_ref:
zip_ref.extractall('data/')
file = open(input_file, 'r').read()
data = json.loads(file)
def name_filtering(name):
name = name.lower()
name = name.strip()
name = unidecode.unidecode(name)
return name
def get_mpv(nome, info, p):
o = dict()
o['nome'] = nome
o['data_apresentacao'] = info['DataApresentacao']
o['tema'] = info['tema']
o['partido_autor'] = info['partidoAutor']
o['objetivo'] = p['@ObjVotacao']
o['data'] = p['@Data']
o['votos'] = []
deputies = p['votos']['Deputado']
names = set()
for d in deputies:
dep = dict()
dep_name = name_filtering(d['@Nome'])
if dep_name in names:
continue
names.add(dep_name)
dep['nome'] = dep_name
dep['uf'] = d['@UF'].strip()
dep['voto'] = d['@Voto'].strip()
dep['partido'] = d['@Partido'].strip().lower()
dep['id_deputado'] = d['@ideCadastro'].strip()
o['votos'].append(dep)
o['resumo'] = p['@Resumo']
o['id'] = p['@codSessao']
return o
output = dict()
output['proposicoes'] = []
output_set = set()
for year, prop in data.items():
for nome, info in prop.items():
if not 'VOTES' in info:
continue
polls = info['VOTES']['Votacoes']['Votacao']
if type(polls) == type([]):
for p in polls:
o = get_mpv(nome, info, p)
if not str(o) in output_set:
output['proposicoes'].append(o)
output_set.add(str(o))
else:
o = get_mpv(nome, info, polls)
if not str(o) in output_set:
output['proposicoes'].append(o)
output_set.add(str(o))
output = json.dumps(output, indent=4, sort_keys=True)
file = open(output_file, 'w')
file.write(output)
|
carolmb/political
|
deputies_parser.py
|
<filename>deputies_parser.py<gh_stars>1-10
import os
import ast
import xnet
import json
import pickle
import zipfile
import concurrent.futures
import numpy as np
import cairocffi as cairo
import matplotlib.pyplot as plt
from igraph import Graph
from collections import defaultdict, namedtuple
Deputy = namedtuple('Deputy', ['name', 'political_party'])
def get_votes(common_votes, prop):
votes = prop['votos']
    # to avoid duplicate votes
votes = [str(v) for v in votes]
votes = list(set(votes))
votes = [dict(ast.literal_eval(v)) for v in votes]
    yes = []  # deputies who voted yes
    no = []   # deputies who voted no
abst = []
obst = []
for v in votes:
d = Deputy(v['nome'],v['partido'])
if v['voto'] == 'Sim':
yes.append(d)
elif v['voto'] == 'Não':
no.append(d)
elif v['voto'] == 'Abstenção':
abst.append(d)
elif v['voto'] == 'Obstrução':
obst.append(d)
all_votes = yes + no + abst + obst
for d0 in all_votes:
for d1 in all_votes:
if d0 == d1:
break
if (d0 in yes and d1 in yes) or (d0 in no and d1 in no) or (d0 in abst and d1 in abst) or (d0 in obst and d1 in obst):
common_votes[frozenset([d0,d1])] += 1
else:
common_votes[frozenset([d0,d1])] -= 1
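# In short: each pair of deputies gains +1 for a proposition on which they cast the same vote
# (yes/no/abstention/obstruction) and -1 otherwise, so common_votes holds a signed agreement
# count that generate_graph later normalizes by the number of propositions.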
def set_info(v0, names, political_parties):
if not v0.name in names:
names.append(v0.name)
political_parties.append(v0.political_party)
def generate_graph(common_votes, w_total):
names = []
political_parties = []
edges = []
weights = []
for e,w in common_votes.items():
v0,v1 = tuple(e)
set_info(v0, names, political_parties)
set_info(v1, names, political_parties)
        if w > 0:  # only create the edge if the number of common votes is positive
v0_id = names.index(v0.name)
v1_id = names.index(v1.name)
edges.append((v0_id,v1_id))
weights.append(w/w_total)
g = Graph()
g.add_vertices(len(names))
g.add_edges(edges)
g.es['weight'] = weights
g.vs['name'] = names
g.vs['political_party'] = political_parties
return g
def generate_graph_year_3months(search,year,month,transition_years):
common_votes = defaultdict(lambda:0)
n_props = 0
props = search[year] + search[year + 1]
for prop in props:
_,mm,yyyy = tuple(prop['data'].split('/'))
mm = int(mm)
yyyy = int(yyyy)
if yyyy == year and (mm == month or mm == month + 1 or mm == month + 2):
# print(mm,yyyy,end=', ',sep='/')
get_votes(common_votes,prop)
n_props +=1
elif not year in transition_years:
if month + 2 > 12 and mm == (month + 2)%12 and yyyy == (year + 1):
# print(mm,yyyy,end=', ',sep='/')
get_votes(common_votes,prop)
n_props +=1
elif month + 1 > 12 and mm == (month + 1)%12 and yyyy == (year + 1):
# print(mm,yyyy,end=', ',sep='/')
get_votes(common_votes,prop)
n_props +=1
if n_props > 6:
g = generate_graph(common_votes, n_props)
return g
else:
print('Problem in year',year,'and month',month)
print('Number of propositions',n_props)
return None
def get_nets_by_year(search,years,output_dir):
for year in years:
common_votes = defaultdict(lambda:0)
props = search[year]
n_props = len(props)
for prop in props:
get_votes(common_votes,prop)
if n_props > 6:
g = generate_graph(common_votes, n_props)
xnet.igraph2xnet(g, output_dir+'dep_'+str(year)+'_obstr.xnet')
else:
print('Problem in year',year)
print('Number of propositions',n_props)
def get_nets_by_3months(search,year,output_dir):
    for month in range(1,13):  # 12 months
print("Current year:", year,"Current month:",month)
g = generate_graph_year_3months(search,year,month,transition_years)
if g:
xnet.igraph2xnet(g, output_dir+'dep_'+str(year)+'_'+str(month)+'.xnet')
print()
if __name__ == '__main__':
with zipfile.ZipFile('data/deputadosv2.zip', 'r') as zip_ref:
zip_ref.extractall('data/')
input_file = "data/deputadosv2.json"
file = open(input_file, 'r').read()
propositions = json.loads(file)['proposicoes']
print("Total propositions:", len(propositions))
search = defaultdict(lambda:[])
for prop in propositions:
year = prop['data'].split('/')[2]
year = int(year)
search[year].append(prop)
output_dir = 'data/1991-2019/'
    # Graph generation per year:
years = list(range(1991,2020))
get_nets_by_year(search,years,output_dir+'by_year/')
    # Graph generation per mandate (with a 3-month sliding window):
    years = list(range(1997,2020))  # first mandate considered
    transition_years = list(range(1998,2020,4))  # election years (eve of the start of a new mandate)
for year in years:
        get_nets_by_3months(search,year,output_dir+'mandate/')
|
carolmb/political
|
communities.py
|
<gh_stars>1-10
import glob
import xnet
import igraph
import leidenalg # pip install leidenalg
import subprocess
def get_largest_component(g):
components = g.components()
giant = components.giant()
return giant
def identify_communities_leidenalg(net):
giant = get_largest_component(net)
comms = leidenalg.find_partition(giant, leidenalg.ModularityVertexPartition)
comm_list = comms.subgraphs() # communities in current level
print('Number of communities identified:',len(comm_list))
net_copy = net.copy()
net_copy.vs['community'] = "-1"
for idx,comm in enumerate(comm_list):
for v1 in comm.vs:
v2 = net_copy.vs.find(name=v1['name'])
v2['community'] = str(idx+1)
return net_copy
filenames = glob.glob("data/1991-2019/by_year/*.xnet")
filenames = sorted(filenames)
graphs = []
for filename in filenames:
print(filename)
net = xnet.xnet2igraph(filename)
net = identify_communities_leidenalg(net)
output = filename[:-5] + '_leidenalg.xnet'
xnet.igraph2xnet(net,output)
|
carolmb/political
|
backbone.py
|
import glob
import xnet
import numpy as np
from igraph import *
from mpmath import mp # pip install mpmath
from scipy import integrate
mp.dps = 50
# source: https://github.com/aekpalakorn/python-backbone-network/blob/master/backbone.py
def disparity_filter(g):
total_vtx = g.vcount()
g.es['alpha_ij'] = 1
for v in range(total_vtx):
edges = g.incident(v)
k = len(edges)
if k > 1:
sum_w = mp.mpf(sum([g.es[e]['weight'] for e in edges]))
for e in edges:
w = g.es[e]['weight']
p_ij = mp.mpf(w)/sum_w
alpha_ij = 1 - (k-1) * integrate.quad(lambda x: (1-x)**(k-2), 0, p_ij)[0]
g.es[e]['alpha_ij'] = min(alpha_ij,g.es[e]['alpha_ij'])
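# Note: the integral above has the closed form (k-1) * int_0^p (1-x)^(k-2) dx = 1 - (1-p)^(k-1),
# so alpha_ij = (1 - p_ij)**(k-1); e.g. k = 3 edges and p_ij = 0.5 give alpha_ij = 0.25, and a
# smaller alpha_ij marks the edge as more significant for that node.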
def alpha_cut(alpha,g):
g_copy = g.copy()
to_delete = g_copy.es.select(alpha_ij_ge=alpha)
g_copy.delete_edges(to_delete)
return g_copy
def get_largest_component_size(g):
components = g.components()
giant = components.giant()
return giant.vcount()
def get_best_cut(net,preserve_percent,a_min,a_max):
a_min = mp.mpf(a_min)
a_max = mp.mpf(a_max)
error = 0.015
largest_size = get_largest_component_size(net)
min_erro = 1000
a_min_erro = 0.0
def get_current_percent(a):
nonlocal min_erro, a_min_erro, a_min, a_max
cuted_net = alpha_cut(a,net)
# print('number of edges',cuted_net.ecount())
preserved_size = get_largest_component_size(cuted_net)
# print('preserved size',preserved_size)
current_percent = mp.mpf(preserved_size)/mp.mpf(largest_size)
if min_erro > abs(current_percent-preserve_percent):
min_erro = abs(current_percent-preserve_percent)
a_min_erro = a
return cuted_net,current_percent,a
i = 0
a_min_perc = mp.mpf(get_largest_component_size(alpha_cut(a_min,net)))/mp.mpf(largest_size)
a_max_perc = mp.mpf(get_largest_component_size(alpha_cut(a_max,net)))/mp.mpf(largest_size)
a = 0.0
while True:
if i > 100:
cuted_net = alpha_cut(a_min_erro,net)
            print('error: iteration limit reached, using the best alpha found')
print('alpha %.2f; preserved %.2f' % (a_min_erro,min_erro+preserve_percent))
print()
return cuted_net
i += 1
a = (a_min+a_max)/2
cuted_net,current_percent,a = get_current_percent(a)
current_erro = current_percent-preserve_percent
if abs(current_erro) < error:
print('total iterations to find the graph',i)
print('alpha %.2f; preserved %.2f' % (a,current_percent))
print()
return cuted_net
if (a_min_perc-preserve_percent)*(current_percent-preserve_percent) > 0:
a_min = a
a_min_perc = current_percent
else:
a_max = a
a_max_perc = current_percent
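# In short: get_best_cut runs a bisection search on alpha until the giant component of the
# filtered graph keeps roughly preserve_percent of the original vertices (within the 0.015
# tolerance), falling back to the best alpha seen if the iteration limit is reached.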
def apply_backbone(net,a_min,a_max,preserve=0.8):
disparity_filter(net)
best = get_best_cut(net,preserve,a_min,a_max)
return best
if __name__ == '__main__':
filenames = glob.glob('data/1991-2019/mandate/*.xnet')
filenames = sorted(filenames)
print(filenames)
preserve = 0.8
a_min = 0.0001
a_max = 1
for filename in filenames:
print(filename)
net = xnet.xnet2igraph(filename)
if net.ecount() > 0: # 2002_6 problem
net = apply_backbone(net,a_min,a_max,preserve)
output = filename[:-5] + '_' + str(preserve) + '.xnet'
xnet.igraph2xnet(net,output)
|
carolmb/political
|
util.py
|
<reponame>carolmb/political<gh_stars>1-10
import numpy as np
def filter_pp_name(p):
if p == 'pds' or p == 'pdc' or p == 'ppb' or p == 'ppr' or p == 'psd':
p = 'pp'
elif p == 'pfl':
p = 'dem'
elif p == 'mdb':
p = 'pmdb'
elif p == 'pr':
p = 'pl'
# elif p == 'prb' or p == 'pmr':
# p = 'republicanos'
return p
def get_valid_pp_single_net(net,top_n=None,cut_percent=None):
pp = net.vs['political_party']
pp = np.asarray(pp)
unique,count = np.unique(pp,return_counts=True)
idxs = np.argsort(-count)
unique = unique[idxs]
count = count[idxs]
total = sum(count)
valid_pp = []
if cut_percent:
cut = cut_percent*total
for c,u in zip(count,unique):
if c > cut:
valid_pp.append(u)
return valid_pp
elif top_n:
valid_pp = unique[:top_n]
return valid_pp
def get_valid_pp(nets,begin,delta,top_n=3,cut_percent=None):
top3 = set()
filenames = []
i = begin + delta
for net in nets:
valid_pp = get_valid_pp_single_net(net,top_n=top_n,cut_percent=cut_percent)
print(i,valid_pp)
i += delta
top3 |= set(valid_pp)
return top3
|
jakereps/q2-vsearch
|
q2_vsearch/_transformer.py
|
<filename>q2_vsearch/_transformer.py
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import pandas as pd
import numpy as np
import qiime2
from .plugin_setup import plugin
from ._format import UchimeStatsFmt
# many of the numeric fields will contain * if a query is
# not chimeric, so to be safe we make all fields other than
# score strings
_uchime_stats_header = collections.OrderedDict([
('score', np.number),
('feature-id', str),
('A', str),
('B', str),
('T', str),
('idQM', str),
('idQA', str),
('idQB', str),
('idAB', str),
('idQT', str),
('LY', str),
('LN', str),
('LA', str),
('RY', str),
('RN', str),
('RA', str),
('div', str),
('YN', str)])
def _stats_to_df(ff):
df = pd.read_csv(str(ff), sep='\t', index_col='feature-id',
names=_uchime_stats_header.keys(),
dtype=_uchime_stats_header)
return df
@plugin.register_transformer
def _1(ff: UchimeStatsFmt) -> qiime2.Metadata:
return qiime2.Metadata(_stats_to_df(ff))
@plugin.register_transformer
def _2(ff: UchimeStatsFmt) -> pd.DataFrame:
return _stats_to_df(ff)
|
jakereps/q2-vsearch
|
q2_vsearch/_join_pairs.py
|
<reponame>jakereps/q2-vsearch<filename>q2_vsearch/_join_pairs.py
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import yaml
from typing import List
import pandas as pd
from q2_types.per_sample_sequences import (
SingleLanePerSampleSingleEndFastqDirFmt,
SingleLanePerSamplePairedEndFastqDirFmt,
FastqManifestFormat, YamlFormat)
from ._cluster_features import run_command
_jp_defaults = {
'truncqual': None,
'minlen': 1,
'maxns': None,
'allowmergestagger': False,
'minovlen': 10,
'maxdiffs': 10,
'minmergelen': None,
'maxmergelen': None,
'maxee': None,
'qmin': 0,
'qminout': 0,
'qmax': 41,
'qmaxout': 41,
'threads': 1
}
def join_pairs(demultiplexed_seqs: SingleLanePerSamplePairedEndFastqDirFmt,
truncqual: int = _jp_defaults['truncqual'],
minlen: int = _jp_defaults['minlen'],
maxns: int = _jp_defaults['maxns'],
allowmergestagger: bool = _jp_defaults['allowmergestagger'],
minovlen: int = _jp_defaults['minovlen'],
maxdiffs: int = _jp_defaults['maxdiffs'],
minmergelen: int = _jp_defaults['minmergelen'],
maxmergelen: int = _jp_defaults['maxmergelen'],
maxee: float = _jp_defaults['maxee'],
qmin: int = _jp_defaults['qmin'],
qminout: int = _jp_defaults['qminout'],
qmax: int = _jp_defaults['qmax'],
qmaxout: int = _jp_defaults['qmaxout'],
threads: int = _jp_defaults['threads'],
) -> SingleLanePerSampleSingleEndFastqDirFmt:
_, result = _join_pairs_w_command_output(
demultiplexed_seqs, truncqual, minlen, maxns, allowmergestagger,
minovlen, maxdiffs, minmergelen, maxmergelen, maxee, qmin, qminout,
qmax, qmaxout, threads)
return result
def _join_pairs_w_command_output(
demultiplexed_seqs: SingleLanePerSamplePairedEndFastqDirFmt,
truncqual: int = _jp_defaults['truncqual'],
minlen: int = _jp_defaults['minlen'],
maxns: int = _jp_defaults['maxns'],
allowmergestagger: bool = _jp_defaults['allowmergestagger'],
minovlen: int = _jp_defaults['minovlen'],
maxdiffs: int = _jp_defaults['maxdiffs'],
minmergelen: int = _jp_defaults['minmergelen'],
maxmergelen: int = _jp_defaults['maxmergelen'],
maxee: float = _jp_defaults['maxee'],
qmin: int = _jp_defaults['qmin'],
qminout: int = _jp_defaults['qminout'],
qmax: int = _jp_defaults['qmax'],
qmaxout: int = _jp_defaults['qmaxout'],
threads: int = _jp_defaults['threads'],
) -> (List[str], SingleLanePerSampleSingleEndFastqDirFmt):
# this function exists only to simplify unit testing
result = SingleLanePerSampleSingleEndFastqDirFmt()
manifest = pd.read_csv(
os.path.join(str(demultiplexed_seqs),
demultiplexed_seqs.manifest.pathspec),
header=0, comment='#')
manifest.filename = manifest.filename.apply(
lambda x: os.path.join(str(demultiplexed_seqs), x))
phred_offset = yaml.load(open(
os.path.join(str(demultiplexed_seqs),
demultiplexed_seqs.metadata.pathspec)),
Loader=yaml.SafeLoader)['phred-offset']
id_to_fps = manifest.pivot(index='sample-id', columns='direction',
values='filename')
output_manifest = FastqManifestFormat()
output_manifest_fh = output_manifest.open()
output_manifest_fh.write('sample-id,filename,direction\n')
output_manifest_fh.write('# direction is not meaningful in this file '
'as these\n')
output_manifest_fh.write('# data may be derived from forward, reverse, '
'or \n')
output_manifest_fh.write('# joined reads\n')
for i, (sample_id, (fwd_fp, rev_fp)) in enumerate(id_to_fps.iterrows()):
# The barcode id, lane number and read number are not relevant
# here. We might ultimately want to use a dir format other than
# SingleLanePerSampleSingleEndFastqDirFmt which doesn't care
# about this information. Similarly, the direction of the read
# isn't relevant here anymore.
path = result.sequences.path_maker(sample_id=sample_id,
barcode_id=i,
lane_number=1,
read_number=1)
uncompressed_path = str(path).strip('.gz')
cmd = ['vsearch',
'--fastq_mergepairs', fwd_fp,
'--reverse', rev_fp,
'--fastqout', uncompressed_path,
'--fastq_ascii', str(phred_offset),
'--fastq_minlen', str(minlen),
'--fastq_minovlen', str(minovlen),
'--fastq_maxdiffs', str(maxdiffs),
'--fastq_qmin', str(qmin),
'--fastq_qminout', str(qminout),
'--fastq_qmax', str(qmax),
'--fastq_qmaxout', str(qmaxout),
'--minseqlength', '1',
'--fasta_width', '0']
if truncqual is not None:
cmd += ['--fastq_truncqual', str(truncqual)]
if maxns is not None:
cmd += ['--fastq_maxns', str(maxns)]
if minmergelen is not None:
cmd += ['--fastq_minmergelen', str(minmergelen)]
if maxmergelen is not None:
cmd += ['--fastq_maxmergelen', str(maxmergelen)]
if maxee is not None:
cmd += ['--fastq_maxee', str(maxee)]
cmd += ['--threads', str(threads)]
if allowmergestagger:
cmd.append('--fastq_allowmergestagger')
run_command(cmd)
run_command(['gzip', uncompressed_path])
output_manifest_fh.write(
'%s,%s,%s\n' % (sample_id, path.name, 'forward'))
output_manifest_fh.close()
result.manifest.write_data(output_manifest, FastqManifestFormat)
metadata = YamlFormat()
metadata.path.write_text(yaml.dump({'phred-offset': phred_offset}))
result.metadata.write_data(metadata, YamlFormat)
return cmd, result
|
jakereps/q2-vsearch
|
q2_vsearch/_chimera.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import tempfile
import biom
from q2_types.feature_data import DNAFASTAFormat
from ._cluster_features import _fasta_with_sizes, run_command
from ._format import UchimeStatsFmt
_uchime_defaults = {'dn': 1.4,
'mindiffs': 3,
'mindiv': 0.8,
'minh': 0.28,
'xn': 8.0}
def uchime_ref(sequences: DNAFASTAFormat,
table: biom.Table,
reference_sequences: DNAFASTAFormat,
dn: float = _uchime_defaults['dn'],
mindiffs: int = _uchime_defaults['mindiffs'],
mindiv: float = _uchime_defaults['mindiv'],
minh: float = _uchime_defaults['minh'],
xn: float = _uchime_defaults['xn'],
threads: int = 1) \
-> (DNAFASTAFormat, DNAFASTAFormat, UchimeStatsFmt):
cmd, chimeras, nonchimeras, uchime_stats = \
_uchime_ref(sequences, table, reference_sequences, dn, mindiffs,
mindiv, minh, xn, threads)
return chimeras, nonchimeras, uchime_stats
def _uchime_ref(sequences, table, reference_sequences, dn, mindiffs,
mindiv, minh, xn, threads):
# this function only exists to simplify testing
chimeras = DNAFASTAFormat()
nonchimeras = DNAFASTAFormat()
uchime_stats = UchimeStatsFmt()
with tempfile.NamedTemporaryFile() as fasta_with_sizes:
_fasta_with_sizes(str(sequences), fasta_with_sizes.name, table)
cmd = ['vsearch',
'--uchime_ref', fasta_with_sizes.name,
'--uchimeout', str(uchime_stats),
'--nonchimeras', str(nonchimeras),
'--chimeras', str(chimeras),
'--dn', str(dn),
'--mindiffs', str(mindiffs),
'--mindiv', str(mindiv),
'--minh', str(minh),
'--xn', str(xn),
'--db', str(reference_sequences),
'--qmask', 'none', # ensures no lowercase DNA chars
'--xsize',
'--threads', str(threads),
'--minseqlength', '1',
'--fasta_width', '0']
run_command(cmd)
return cmd, chimeras, nonchimeras, uchime_stats
def uchime_denovo(sequences: DNAFASTAFormat,
table: biom.Table,
dn: float = _uchime_defaults['dn'],
mindiffs: int = _uchime_defaults['mindiffs'],
mindiv: float = _uchime_defaults['mindiv'],
minh: float = _uchime_defaults['minh'],
xn: float = _uchime_defaults['xn']) \
-> (DNAFASTAFormat, DNAFASTAFormat, UchimeStatsFmt):
cmd, chimeras, nonchimeras, uchime_stats = \
_uchime_denovo(sequences, table, dn, mindiffs, mindiv, minh, xn)
return chimeras, nonchimeras, uchime_stats
def _uchime_denovo(sequences, table, dn, mindiffs, mindiv, minh, xn):
# this function only exists to simplify testing
chimeras = DNAFASTAFormat()
nonchimeras = DNAFASTAFormat()
uchime_stats = UchimeStatsFmt()
with tempfile.NamedTemporaryFile() as fasta_with_sizes:
_fasta_with_sizes(str(sequences), fasta_with_sizes.name, table)
cmd = ['vsearch',
'--uchime_denovo', fasta_with_sizes.name,
'--uchimeout', str(uchime_stats),
'--nonchimeras', str(nonchimeras),
'--chimeras', str(chimeras),
'--dn', str(dn),
'--mindiffs', str(mindiffs),
'--mindiv', str(mindiv),
'--minh', str(minh),
'--xn', str(xn),
'--qmask', 'none', # ensures no lowercase DNA chars
'--xsize',
'--minseqlength', '1',
'--fasta_width', '0']
run_command(cmd)
return cmd, chimeras, nonchimeras, uchime_stats
|
jakereps/q2-vsearch
|
q2_vsearch/_stats.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import fileinput
import pkg_resources
import subprocess
import pandas as pd
from multiprocessing import Pool, cpu_count
from q2_types.per_sample_sequences import (
CasavaOneEightSingleLanePerSampleDirFmt
)
import q2templates
TEMPLATES = pkg_resources.resource_filename('q2_vsearch', 'assets')
def _get_stats_easy(cmds_packed) -> None:
filelist, cmds = cmds_packed
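    # Launch one vsearch process per command, then stream every input fastq
    # exactly once and tee each line into all of the processes' stdin pipes.
    # This avoids re-reading (and re-decompressing) the per-sample files once
    # for each of the three stats commands.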
processes = []
for cmd in cmds:
processes.append(subprocess.Popen(cmd, stdin=subprocess.PIPE))
with fileinput.input(files=filelist, mode='r',
openhook=fileinput.hook_compressed) as fh:
for line in fh:
for p in processes:
p.stdin.write(line)
for p in processes:
p.stdin.close()
p.wait()
def _build_cmds(output_dir: str, filelist, direction='forward'):
datafiles = {direction: {}}
results = os.path.join(
output_dir, 'fastq_stats_{0}.txt'.format(direction))
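    # the '-' filename makes vsearch read the fastq stream from stdin, which
    # _get_stats_easy feeds by piping in the (possibly gzipped) input files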
stats = ['vsearch', '--quiet', '--fastq_stats', '-', '--log', results]
datafiles[direction]['stats'] = os.path.basename(results)
results = os.path.join(
output_dir, 'fastq_eestats_{0}.txt'.format(direction))
eestats = ['vsearch', '--quiet', '--fastq_eestats',
'-', '--output', results]
datafiles[direction]['eestats'] = os.path.basename(results)
results = os.path.join(
output_dir, 'fastq_eestats2_{0}.txt'.format(direction))
eestats2 = ['vsearch', '--quiet', '--fastq_eestats2',
'-', '--output', results]
datafiles[direction]['eestats2'] = os.path.basename(results)
return (datafiles, [(filelist, [stats, eestats, eestats2])])
def _get_html(output_dir, datafiles):
html = {}
for direction in datafiles:
html[direction] = {}
for stats_type in datafiles[direction]:
filename = datafiles[direction][stats_type]
filename = os.path.join(output_dir, filename)
data_df = pd.read_csv(filename, sep='\t')
html[direction][stats_type] = q2templates.df_to_html(data_df,
index=False)
return html
def _fastq_stats(output_dir: str, sequences, threads) -> None:
# read manifest
manifest = sequences.manifest
# check if paired reads available
try:
paired = manifest['reverse'][0] is not None
except KeyError:
paired = False
# get commands and filelist
datafiles, cmds = _build_cmds(output_dir, manifest['forward'].tolist())
if (paired):
datafiles_rev, cmds_rev = _build_cmds(output_dir,
manifest['reverse'].tolist(),
'reverse')
datafiles.update(datafiles_rev)
cmds.extend(cmds_rev)
# multiprocessing
cpus = cpu_count()
try:
if (cpus < threads):
threads = cpus
except TypeError:
        # threads may be the string 'auto' (validated in plugin_setup.py),
        # which makes the comparison above raise TypeError; fall back to
        # using all available CPU cores
threads = cpus
    if (threads < 4):  # run the three stats commands sequentially
        # unpack each (filelist, [cmd, cmd, cmd]) entry and run one command
        # at a time, so _get_stats_easy only spawns a single additional
        # vsearch process per pass over the input files
        for cmd_packed in cmds:
            filelist, packed_cmds = cmd_packed
            for cmd in packed_cmds:
                _get_stats_easy((filelist, [cmd]))
    else:  # stream each file list once, feeding all three commands at once
        # _get_stats_easy spawns three vsearch processes per file list, so
        # only one (single-end) or two (paired-end) pool workers are needed
jobs = 1
if (paired and threads >= 8): # parallel fwd/rev
jobs = 2
with Pool(processes=jobs) as pool:
pool.map(_get_stats_easy, cmds)
pool.close()
html = _get_html(output_dir, datafiles)
index = os.path.join(TEMPLATES, 'index.html')
eestats_template = os.path.join(TEMPLATES, 'fastq_eestats.html')
eestats2_template = os.path.join(TEMPLATES, 'fastq_eestats2.html')
context = {
'paired': paired,
'datafiles': datafiles,
'html': html,
'tabs': [{'title': 'fastq_stats',
'url': 'index.html'},
{'title': 'fastq_eestats',
'url': 'fastq_eestats.html'},
{'title': 'fastq_eestats2',
'url': 'fastq_eestats2.html'}],
}
templates = [index, eestats_template, eestats2_template]
q2templates.render(templates, output_dir, context=context)
def fastq_stats(output_dir: str,
sequences: CasavaOneEightSingleLanePerSampleDirFmt,
threads: int = 1
) -> None:
_fastq_stats(output_dir, sequences, threads)
|
jakereps/q2-vsearch
|
q2_vsearch/tests/test_cluster_sequences.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import skbio
import biom
import numpy as np
from qiime2.plugin.testing import TestPluginBase
from qiime2.util import redirected_stdio
from q2_types.per_sample_sequences import QIIME1DemuxDirFmt
from q2_vsearch._cluster_sequences import (dereplicate_sequences,
_parse_uc)
class DereplicateSequences(TestPluginBase):
package = 'q2_vsearch.tests'
def test_dereplicate_sequences(self):
input_sequences_fp = self.get_data_path('seqs-1')
input_sequences = QIIME1DemuxDirFmt(input_sequences_fp, 'r')
exp_table = biom.Table(np.array([[2, 1],
[0, 1],
[0, 2]]),
['4574b947a0159c0da35a1f30f989681a1d9f64ef',
'1768cf7fca79f84d651b34d878de2492c6a7b971',
'16a1263bde4f2f99422630d1bb87935c4236d1ba'],
['sample1', 's2'])
with redirected_stdio(stderr=os.devnull):
obs_table, obs_sequences = dereplicate_sequences(
sequences=input_sequences)
# order of identifiers is important for biom.Table equality
obs_table = \
obs_table.sort_order(exp_table.ids(axis='observation'),
axis='observation')
self.assertEqual(obs_table, exp_table)
# sequences are reverse-sorted by abundance in output
obs_seqs = list(skbio.io.read(str(obs_sequences),
constructor=skbio.DNA, format='fasta'))
exp_seqs = [skbio.DNA('AAACGTTACGGTTAACTATACATGCAGAAGACTAATCGG',
metadata={'id': ('4574b947a0159c0da35a1f30f'
'989681a1d9f64ef'),
'description': 'sample1_1'}),
skbio.DNA('ACGTACGTACGTACGTACGTACGTACGTACGTGCATGGTGCGACCG',
metadata={'id': ('16a1263bde4f2f99422630d1bb'
'87935c4236d1ba'),
'description': 's2_42'}),
skbio.DNA('AAACGTTACGGTTAACTATACATGCAGAAGACTA',
metadata={'id': ('1768cf7fca79f84d651b34d878d'
'e2492c6a7b971'),
'description': 's2_2'})]
self.assertEqual(obs_seqs, exp_seqs)
def test_dereplicate_sequences_underscores_in_ids(self):
input_sequences_fp = self.get_data_path('seqs-2')
input_sequences = QIIME1DemuxDirFmt(input_sequences_fp, 'r')
exp_table = biom.Table(np.array([[2, 1],
[0, 1],
[0, 2]]),
['4574b947a0159c0da35a1f30f989681a1d9f64ef',
'1768cf7fca79f84d651b34d878de2492c6a7b971',
'16a1263bde4f2f99422630d1bb87935c4236d1ba'],
['sa_mple1', 's2'])
with redirected_stdio(stderr=os.devnull):
obs_table, obs_sequences = dereplicate_sequences(
sequences=input_sequences)
# order of identifiers is important for biom.Table equality
obs_table = \
obs_table.sort_order(exp_table.ids(axis='observation'),
axis='observation')
self.assertEqual(obs_table, exp_table)
# sequences are reverse-sorted by abundance in output
obs_seqs = list(skbio.io.read(str(obs_sequences),
constructor=skbio.DNA, format='fasta'))
exp_seqs = [skbio.DNA('AAACGTTACGGTTAACTATACATGCAGAAGACTAATCGG',
metadata={'id': ('4574b947a0159c0da35a1f30f'
'989681a1d9f64ef'),
'description': 'sa_mple1_1'}),
skbio.DNA('ACGTACGTACGTACGTACGTACGTACGTACGTGCATGGTGCGACCG',
metadata={'id': ('16a1263bde4f2f99422630d1bb'
'87935c4236d1ba'),
'description': 's2_42'}),
skbio.DNA('AAACGTTACGGTTAACTATACATGCAGAAGACTA',
metadata={'id': ('1768cf7fca79f84d651b34d878d'
'e2492c6a7b971'),
'description': 's2_2'})]
self.assertEqual(obs_seqs, exp_seqs)
def test_dereplicate_sequences_prefix(self):
input_sequences_fp = self.get_data_path('seqs-1')
input_sequences = QIIME1DemuxDirFmt(input_sequences_fp, 'r')
exp_table = biom.Table(np.array([[2, 2],
[2, 0]]),
['4574b947a0159c0da35a1f30f989681a1d9f64ef',
'16a1263bde4f2f99422630d1bb87935c4236d1ba'],
['s2', 'sample1'])
with redirected_stdio(stderr=os.devnull):
obs_table, obs_sequences = dereplicate_sequences(
sequences=input_sequences, derep_prefix=True)
# order of identifiers is important for biom.Table equality
obs_table = \
obs_table.sort_order(exp_table.ids(axis='observation'),
axis='observation')
self.assertEqual(obs_table, exp_table)
# sequences are reverse-sorted by abundance in output
obs_seqs = list(skbio.io.read(str(obs_sequences),
constructor=skbio.DNA, format='fasta'))
exp_seqs = [skbio.DNA('AAACGTTACGGTTAACTATACATGCAGAAGACTAATCGG',
metadata={'id': ('4574b947a0159c0da35a1f30f'
'989681a1d9f64ef'),
'description': 's2_1'}),
skbio.DNA('ACGTACGTACGTACGTACGTACGTACGTACGTGCATGGTGCGACCG',
metadata={'id': ('16a1263bde4f2f99422630d1bb'
'87935c4236d1ba'),
'description': 's2_42'})]
self.assertEqual(obs_seqs, exp_seqs)
class ParseUc(TestPluginBase):
    # These tests and the test data below them are copied from the biom-format
# project temporarily to fix a bug in handling of sample ids with
# underscores in them (https://github.com/biocore/biom-format/issues/758).
    # This code will be contributed back upstream to the
# biom-format project, and will be removed from this plugin when a
# biom-format release is available that contains this fix.
package = 'q2_vsearch.tests'
def test_empty(self):
""" empty uc file returns empty Table
"""
actual = _parse_uc(uc_empty.split('\n'))
expected = biom.Table(np.array([[]]),
observation_ids=[],
sample_ids=[])
self.assertEqual(actual, expected)
def test_minimal(self):
""" single new seed observed
"""
actual = _parse_uc(uc_minimal.split('\n'))
expected = biom.Table(np.array([[1.0]]),
observation_ids=['f2_1539'],
sample_ids=['f2'])
self.assertEqual(actual, expected)
def test_lib_minimal(self):
""" single library seed observed
"""
actual = _parse_uc(uc_lib_minimal.split('\n'))
expected = biom.Table(np.array([[1.0]]),
observation_ids=['295053'],
sample_ids=['f2'])
self.assertEqual(actual, expected)
def test_invalid(self):
""" invalid query sequence identifier detected
"""
self.assertRaises(ValueError, _parse_uc, uc_invalid_id.split('\n'))
def test_seed_hits(self):
""" multiple new seeds observed
"""
actual = _parse_uc(uc_seed_hits.split('\n'))
expected = biom.Table(np.array([[2.0, 1.0], [0.0, 1.0]]),
observation_ids=['f2_1539', 'f3_44'],
sample_ids=['f2', 'f3'])
self.assertEqual(actual, expected)
def test_mixed_hits(self):
""" new and library seeds observed
"""
actual = _parse_uc(uc_mixed_hits.split('\n'))
expected = biom.Table(np.array([[2.0, 1.0], [0.0, 1.0], [1.0, 0.0]]),
observation_ids=['f2_1539', 'f3_44', '295053'],
sample_ids=['f2', 'f3'])
self.assertEqual(actual, expected)
# the following tests are new with respect to biom-format project 2.1.6
def test_underscore_in_sample_id(self):
""" single new seed observed for sample with underscores in id
"""
actual = _parse_uc(uc_minimal_w_underscores.split('\n'))
expected = biom.Table(np.array([[1.0]]),
observation_ids=['sample_id_w_underscores_42'],
sample_ids=['sample_id_w_underscores'])
self.assertEqual(actual, expected)
def test_uc_w_comments_and_blank_lines(self):
""" uc contains comments and blank lines
"""
actual = _parse_uc(uc_w_comments_and_blank_lines.split('\n'))
expected = biom.Table(np.array([[1.0]]),
observation_ids=['f2_1539'],
sample_ids=['f2'])
self.assertEqual(actual, expected)
# no hits or library seeds
uc_empty = """
"""
# label not in qiime post-split-libraries format
uc_invalid_id = """
S 0 133 * * * * * 1539 *
"""
# contains single new (de novo) seed hit
uc_minimal = """
S 0 133 * * * * * f2_1539 *
"""
# contains single new (de novo) seed hit
uc_w_comments_and_blank_lines = """# sdfsdfsdf
# dfasdfsdf
# sdffsdfsd
S 0 133 * * * * * f2_1539 *
# sdfsdfsdfsdfsdfsdsdfsf
# asdasddpeanutdfdffsdfsdfsdfsdsdfsd sdfsdfsdf sdfdsf
"""
# contains single seed hit for a sample with underscores in its id
uc_minimal_w_underscores = """
S 0 133 * * * * * sample_id_w_underscores_42 *
"""
# contains single library (reference) seed hit
uc_lib_minimal = """
L 3 1389 * * * * * 295053 *
H 3 133 100.0 + 0 0 519I133M737I f2_1539 295053
"""
# contains new seed (de novo) hits only
uc_seed_hits = """
S 0 133 * * * * * f2_1539 *
H 0 141 100.0 + 0 0 133M8D f3_42 f2_1539
H 0 141 100.0 + 0 0 133M8D f2_43 f2_1539
S 0 133 * * * * * f3_44 *
"""
# contains library (reference) and new seed (de novo) hits
uc_mixed_hits = """
S 0 133 * * * * * f2_1539 *
H 0 141 100.0 + 0 0 133M8D f3_42 f2_1539
H 0 141 100.0 + 0 0 133M8D f2_43 f2_1539
S 0 133 * * * * * f3_44 *
L 3 1389 * * * * * 295053 *
H 3 133 100.0 + 0 0 519I133M737I f2_1539 295053
"""
|
jakereps/q2-vsearch
|
q2_vsearch/tests/test_stats.py
|
<gh_stars>1-10
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import tempfile
import glob
from qiime2.plugin.testing import TestPluginBase
from qiime2 import Artifact
class StatsTests(TestPluginBase):
package = 'q2_vsearch.tests'
def setUp(self):
super().setUp()
self.input_seqs_paired = Artifact.import_data(
'SampleData[PairedEndSequencesWithQuality]',
self.get_data_path('demux-1'))
self.input_seqs_single = Artifact.import_data(
'SampleData[SequencesWithQuality]',
self.get_data_path('demux-1_se'))
self.viz = self.plugin.visualizers['fastq_stats']
def _test_fastq_stats(self, paired=False, threads=1):
default_filelist = ['fastq_stats_forward.txt',
'fastq_eestats2_forward.txt',
'fastq_eestats_forward.txt']
if (paired):
default_filelist.extend(['fastq_stats_reverse.txt',
'fastq_eestats2_reverse.txt',
'fastq_eestats_reverse.txt'])
default_filelist.sort()
with tempfile.TemporaryDirectory() as output_dir:
if (paired):
self.input_seqs = self.input_seqs_paired
else:
self.input_seqs = self.input_seqs_single
self.result = self.viz(self.input_seqs, threads)
self.result.visualization.export_data(output_dir)
pattern = output_dir + '/*.txt'
filelist = [os.path.basename(x) for x in glob.glob(pattern)]
filelist.sort()
self.assertListEqual(default_filelist, filelist)
for filename in filelist:
with open(os.path.join(output_dir, filename),
'r') as inputfile:
default = inputfile.readlines()
with open(os.path.join(output_dir, filename),
'r') as inputfile:
data = inputfile.readlines()
if (filename.startswith('fastq_stats_')):
default = default[3:-4]
data = data[3:-4]
self.assertListEqual(default, data)
def test_fastq_stats_single(self):
self._test_fastq_stats(paired=False, threads=1)
def test_fastq_stats_paired(self):
self._test_fastq_stats(paired=True, threads=1)
|
jakereps/q2-vsearch
|
q2_vsearch/plugin_setup.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import importlib
import qiime2.plugin
import q2_vsearch._cluster_features
import q2_vsearch._cluster_sequences
import q2_vsearch._join_pairs
import q2_vsearch._chimera
import q2_vsearch._stats
from q2_vsearch._type import UchimeStats
from q2_vsearch._format import UchimeStatsFmt, UchimeStatsDirFmt
from q2_types.feature_data import FeatureData, Sequence
from q2_types.feature_table import FeatureTable, Frequency
from q2_types.sample_data import SampleData
from q2_types.per_sample_sequences import (
Sequences, SequencesWithQuality, PairedEndSequencesWithQuality,
JoinedSequencesWithQuality)
citations = qiime2.plugin.Citations.load('citations.bib', package='q2_vsearch')
plugin = qiime2.plugin.Plugin(
name='vsearch',
version=q2_vsearch.__version__,
website='https://github.com/qiime2/q2-vsearch',
package='q2_vsearch',
user_support_text=None,
short_description='Plugin for clustering and dereplicating with vsearch.',
description=('This plugin wraps the vsearch application, and provides '
'methods for clustering and dereplicating features and '
'sequences.'),
citations=[citations['rognes2016vsearch']]
)
plugin.register_formats(UchimeStatsFmt, UchimeStatsDirFmt)
plugin.register_semantic_types(UchimeStats)
plugin.register_semantic_type_to_format(
UchimeStats,
artifact_format=UchimeStatsDirFmt)
plugin.methods.register_function(
function=q2_vsearch._cluster_features.cluster_features_de_novo,
inputs={
'table': FeatureTable[Frequency],
'sequences': FeatureData[Sequence]},
parameters={
'perc_identity': qiime2.plugin.Float % qiime2.plugin.Range(
0, 1, inclusive_start=False, inclusive_end=True),
'threads': qiime2.plugin.Int % qiime2.plugin.Range(
0, 256, inclusive_start=True, inclusive_end=True)
},
outputs=[
('clustered_table', FeatureTable[Frequency]),
('clustered_sequences', FeatureData[Sequence]),
],
input_descriptions={
'table': 'The feature table to be clustered.',
'sequences': 'The sequences corresponding to the features in table.',
},
parameter_descriptions={
'perc_identity': ('The percent identity at which clustering should be '
'performed. This parameter maps to vsearch\'s --id '
'parameter.'),
'threads': ('The number of threads to use for computation. Passing 0 '
'will launch one thread per CPU core.')
},
output_descriptions={
'clustered_table': 'The table following clustering of features.',
'clustered_sequences': 'Sequences representing clustered features.',
},
name='De novo clustering of features.',
description=('Given a feature table and the associated feature '
'sequences, cluster the features based on user-specified '
'percent identity threshold of their sequences. This is not '
'a general-purpose de novo clustering method, but rather is '
'intended to be used for clustering the results of '
'quality-filtering/dereplication methods, such as DADA2, or '
'for re-clustering a FeatureTable at a lower percent '
'identity than it was originally clustered at. When a group '
'of features in the input table are clustered into a single '
'feature, the frequency of that single feature in a given '
'sample is the sum of the frequencies of the features that '
'were clustered in that sample. Feature identifiers and '
'sequences will be inherited from the centroid feature '
'of each cluster. See the vsearch documentation for details '
'on how sequence clustering is performed.')
)
plugin.methods.register_function(
function=q2_vsearch._cluster_features.cluster_features_closed_reference,
inputs={
'table': FeatureTable[Frequency],
'sequences': FeatureData[Sequence],
'reference_sequences': FeatureData[Sequence]
},
parameters={
'perc_identity': qiime2.plugin.Float % qiime2.plugin.Range(
0, 1, inclusive_start=False, inclusive_end=True),
'strand': qiime2.plugin.Str % qiime2.plugin.Choices(['plus', 'both']),
'threads': qiime2.plugin.Int % qiime2.plugin.Range(
0, 256, inclusive_start=True, inclusive_end=True)
},
outputs=[
('clustered_table', FeatureTable[Frequency]),
('clustered_sequences', FeatureData[Sequence]),
('unmatched_sequences', FeatureData[Sequence]),
],
input_descriptions={
'table': 'The feature table to be clustered.',
'sequences': 'The sequences corresponding to the features in table.',
'reference_sequences': 'The sequences to use as cluster centroids.',
},
parameter_descriptions={
'perc_identity': ('The percent identity at which clustering should be '
'performed. This parameter maps to vsearch\'s --id '
'parameter.'),
'strand': ('Search plus (i.e., forward) or both (i.e., forward and '
'reverse complement) strands.'),
'threads': ('The number of threads to use for computation. Passing 0 '
'will launch one thread per CPU core.')
},
output_descriptions={
'clustered_table': 'The table following clustering of features.',
'clustered_sequences': 'The sequences representing clustered '
'features, relabeled by the reference IDs.',
'unmatched_sequences': 'The sequences which failed to match any '
'reference sequences. This output maps to '
'vsearch\'s --notmatched parameter.'
},
name='Closed-reference clustering of features.',
description=('Given a feature table and the associated feature '
'sequences, cluster the features against a reference '
'database based on user-specified '
'percent identity threshold of their sequences. This is not '
'a general-purpose closed-reference clustering method, but '
'rather is intended to be used for clustering the results of '
'quality-filtering/dereplication methods, such as DADA2, or '
'for re-clustering a FeatureTable at a lower percent '
'identity than it was originally clustered at. When a group '
'of features in the input table are clustered into a single '
'feature, the frequency of that single feature in a given '
'sample is the sum of the frequencies of the features that '
'were clustered in that sample. Feature identifiers '
'will be inherited from the centroid feature '
'of each cluster. See the vsearch documentation for details '
'on how sequence clustering is performed.')
)
plugin.pipelines.register_function(
function=q2_vsearch._cluster_features.cluster_features_open_reference,
inputs={
'table': FeatureTable[Frequency],
'sequences': FeatureData[Sequence],
'reference_sequences': FeatureData[Sequence]
},
parameters={
'perc_identity': qiime2.plugin.Float % qiime2.plugin.Range(
0, 1, inclusive_start=False, inclusive_end=True),
'strand': qiime2.plugin.Str % qiime2.plugin.Choices(['plus', 'both']),
'threads': qiime2.plugin.Int % qiime2.plugin.Range(
0, 256, inclusive_start=True, inclusive_end=True)
},
outputs=[
('clustered_table', FeatureTable[Frequency]),
('clustered_sequences', FeatureData[Sequence]),
('new_reference_sequences', FeatureData[Sequence]),
],
input_descriptions={
'table': 'The feature table to be clustered.',
'sequences': 'The sequences corresponding to the features in table.',
'reference_sequences': 'The sequences to use as cluster centroids.',
},
parameter_descriptions={
'perc_identity': ('The percent identity at which clustering should be '
'performed. This parameter maps to vsearch\'s --id '
'parameter.'),
'strand': ('Search plus (i.e., forward) or both (i.e., forward and '
'reverse complement) strands.'),
'threads': ('The number of threads to use for computation. Passing 0 '
'will launch one thread per CPU core.')
},
output_descriptions={
'clustered_table': 'The table following clustering of features.',
'clustered_sequences': 'Sequences representing clustered features.',
'new_reference_sequences': 'The new reference sequences. This can be '
'used for subsequent runs of '
'open-reference clustering for consistent '
'definitions of features across '
'open-reference feature tables.',
},
name='Open-reference clustering of features.',
description='Given a feature table and the associated feature sequences, '
'cluster the features against a reference database based on '
'user-specified percent identity threshold of their sequences.'
' Any sequences that don\'t match are then clustered de novo. '
'This is not a general-purpose clustering method, but rather '
'is intended to be used for clustering the results of '
'quality-filtering/dereplication methods, such as DADA2, or '
'for re-clustering a FeatureTable at a lower percent identity '
'than it was originally clustered at. When a group of '
'features in the input table are clustered into a single '
'feature, the frequency of that single feature in a given '
'sample is the sum of the frequencies of the features that '
'were clustered in that sample. Feature identifiers will be '
'inherited from the centroid feature of each cluster. For '
'features that match a reference sequence, the centroid '
'feature is that reference sequence, so its identifier will '
'become the feature identifier. The clustered_sequences '
'result will contain feature representative sequences that '
'are derived from the sequences input for all features in '
'clustered_table. This will always be the most abundant '
'sequence in the cluster. The new_reference_sequences result '
'will contain the entire reference database, plus feature '
'representative sequences for any de novo features. This is '
'intended to be used as a reference database in subsequent '
'iterations of cluster_features_open_reference, if '
'applicable. See the vsearch documentation for details on how '
'sequence clustering is performed.',
citations=[citations['rideout2014subsampled']]
)
plugin.methods.register_function(
function=q2_vsearch._cluster_sequences.dereplicate_sequences,
inputs={
'sequences': (SampleData[Sequences] |
SampleData[SequencesWithQuality] |
SampleData[JoinedSequencesWithQuality])
},
parameters={
'derep_prefix': qiime2.plugin.Bool,
},
outputs=[
('dereplicated_table', FeatureTable[Frequency]),
('dereplicated_sequences', FeatureData[Sequence]),
],
input_descriptions={
'sequences': 'The sequences to be dereplicated.',
},
parameter_descriptions={
'derep_prefix': ('Merge sequences with identical prefixes. If a '
'sequence is identical to the prefix of two or more '
'longer sequences, it is clustered with the shortest '
'of them. If they are equally long, it is clustered '
'with the most abundant.'),
},
output_descriptions={
'dereplicated_table': 'The table of dereplicated sequences.',
'dereplicated_sequences': 'The dereplicated sequences.',
},
name='Dereplicate sequences.',
description=('Dereplicate sequence data and create a feature table and '
'feature representative sequences. Feature identifiers '
'in the resulting artifacts will be the sha1 hash '
'of the sequence defining each feature. If clustering of '
'features into OTUs is desired, the resulting artifacts '
'can be passed to the cluster_features_* methods in this '
'plugin.')
)
plugin.methods.register_function(
function=q2_vsearch._join_pairs.join_pairs,
inputs={
'demultiplexed_seqs': SampleData[PairedEndSequencesWithQuality]
},
parameters={
'truncqual': qiime2.plugin.Int % qiime2.plugin.Range(0, None),
'minlen': qiime2.plugin.Int % qiime2.plugin.Range(0, None),
'maxns': qiime2.plugin.Int % qiime2.plugin.Range(0, None),
'allowmergestagger': qiime2.plugin.Bool,
'minovlen': qiime2.plugin.Int % qiime2.plugin.Range(0, None),
'maxdiffs': qiime2.plugin.Int % qiime2.plugin.Range(0, None),
'minmergelen': qiime2.plugin.Int % qiime2.plugin.Range(0, None),
'maxmergelen': qiime2.plugin.Int % qiime2.plugin.Range(0, None),
'maxee': qiime2.plugin.Float % qiime2.plugin.Range(0., None),
'qmin': qiime2.plugin.Int % qiime2.plugin.Range(
-5, 2, inclusive_start=True, inclusive_end=True),
'qminout': qiime2.plugin.Int % qiime2.plugin.Range(
-5, 2, inclusive_start=True, inclusive_end=True),
'qmax': qiime2.plugin.Int % qiime2.plugin.Range(
40, 41, inclusive_start=True, inclusive_end=True),
'qmaxout': qiime2.plugin.Int % qiime2.plugin.Range(
40, 41, inclusive_start=True, inclusive_end=True),
'threads': qiime2.plugin.Int % qiime2.plugin.Range(
0, 8, inclusive_start=True, inclusive_end=True)
},
outputs=[
('joined_sequences', SampleData[JoinedSequencesWithQuality])
],
input_descriptions={
'demultiplexed_seqs': ('The demultiplexed paired-end sequences to '
'be joined.'),
},
parameter_descriptions={
'truncqual': ('Truncate sequences at the first base with the '
'specified quality score value or lower.'),
'minlen': ('Sequences shorter than minlen after truncation are '
'discarded.'),
'maxns': ('Sequences with more than maxns N characters are '
'discarded.'),
'allowmergestagger': ('Allow joining of staggered read pairs.'),
'minovlen': ('Minimum overlap length of forward and reverse reads '
'for joining.'),
'maxdiffs': ('Maximum number of mismatches in the forward/reverse '
'read overlap for joining.'),
'minmergelen': ('Minimum length of the joined read to be retained.'),
'maxmergelen': ('Maximum length of the joined read to be retained.'),
'maxee': ('Maximum number of expected errors in the joined read '
'to be retained.'),
'qmin': ('The minimum allowed quality score in the input.'),
'qminout': ('The minimum allowed quality score to use in output.'),
'qmax': ('The maximum allowed quality score in the input.'),
'qmaxout': ('The maximum allowed quality score to use in output.'),
'threads': ('The number of threads to use for computation. Does '
'not scale much past 4 threads.')
},
output_descriptions={
'joined_sequences': ('The joined sequences.'),
},
name='Join paired-end reads.',
description=('Join paired-end sequence reads using vsearch\'s '
'merge_pairs function. The qmin, qminout, qmax, and qmaxout '
'parameters should only need to be modified when working '
'with older fastq sequence data. See the vsearch '
'documentation for details on how paired-end joining is '
'performed, and for more information on the parameters to '
'this method.')
)
plugin.methods.register_function(
function=q2_vsearch._chimera.uchime_ref,
inputs={
'sequences': FeatureData[Sequence],
'table': FeatureTable[Frequency],
'reference_sequences': FeatureData[Sequence]},
parameters={
'dn': qiime2.plugin.Float % qiime2.plugin.Range(0., None),
'mindiffs': qiime2.plugin.Int % qiime2.plugin.Range(1, None),
'mindiv': qiime2.plugin.Float % qiime2.plugin.Range(0., None),
'minh': qiime2.plugin.Float % qiime2.plugin.Range(
0., 1.0, inclusive_end=True),
'xn': qiime2.plugin.Float % qiime2.plugin.Range(
1., None, inclusive_start=False),
'threads': qiime2.plugin.Int % qiime2.plugin.Range(
0, 256, inclusive_start=True, inclusive_end=True)
},
outputs=[
('chimeras', FeatureData[Sequence]),
('nonchimeras', FeatureData[Sequence]),
('stats', UchimeStats)
],
input_descriptions={
'sequences': 'The feature sequences to be chimera-checked.',
'table': ('Feature table (used for computing total feature '
'abundances).'),
'reference_sequences': 'The non-chimeric reference sequences.'
},
parameter_descriptions={
'dn': ('No vote pseudo-count, corresponding to the parameter n in '
'the chimera scoring function.'),
'mindiffs': 'Minimum number of differences per segment.',
'mindiv': 'Minimum divergence from closest parent.',
'minh': ('Minimum score (h). Increasing this value tends to reduce '
'the number of false positives and to decrease sensitivity.'),
'xn': ('No vote weight, corresponding to the parameter beta in the '
'scoring function.'),
'threads': ('The number of threads to use for computation. Passing 0 '
'will launch one thread per CPU core.')
},
output_descriptions={
'chimeras': 'The chimeric sequences.',
'nonchimeras': 'The non-chimeric sequences.',
'stats': 'Summary statistics from chimera checking.'
},
name='Reference-based chimera filtering with vsearch.',
description=('Apply the vsearch uchime_ref method to identify chimeric '
'feature sequences. The results of this method can be used '
'to filter chimeric features from the corresponding feature '
'table. For additional details, please refer to the vsearch '
'documentation.')
)
plugin.methods.register_function(
function=q2_vsearch._chimera.uchime_denovo,
inputs={
'sequences': FeatureData[Sequence],
'table': FeatureTable[Frequency]},
parameters={
'dn': qiime2.plugin.Float % qiime2.plugin.Range(0., None),
'mindiffs': qiime2.plugin.Int % qiime2.plugin.Range(1, None),
'mindiv': qiime2.plugin.Float % qiime2.plugin.Range(0., None),
'minh': qiime2.plugin.Float % qiime2.plugin.Range(
0., 1.0, inclusive_end=True),
'xn': qiime2.plugin.Float % qiime2.plugin.Range(
1., None, inclusive_start=False)
},
outputs=[
('chimeras', FeatureData[Sequence]),
('nonchimeras', FeatureData[Sequence]),
('stats', UchimeStats)
],
input_descriptions={
'sequences': 'The feature sequences to be chimera-checked.',
'table': ('Feature table (used for computing total feature '
'abundances).'),
},
parameter_descriptions={
'dn': ('No vote pseudo-count, corresponding to the parameter n in '
'the chimera scoring function.'),
'mindiffs': 'Minimum number of differences per segment.',
'mindiv': 'Minimum divergence from closest parent.',
'minh': ('Minimum score (h). Increasing this value tends to reduce '
'the number of false positives and to decrease sensitivity.'),
'xn': ('No vote weight, corresponding to the parameter beta in the '
'scoring function.'),
},
output_descriptions={
'chimeras': 'The chimeric sequences.',
'nonchimeras': 'The non-chimeric sequences.',
'stats': 'Summary statistics from chimera checking.'
},
name='De novo chimera filtering with vsearch.',
description=('Apply the vsearch uchime_denovo method to identify chimeric '
'feature sequences. The results of this method can be used '
'to filter chimeric features from the corresponding feature '
'table. For additional details, please refer to the vsearch '
'documentation.')
)
plugin.visualizers.register_function(
function=q2_vsearch._stats.fastq_stats,
inputs={
'sequences': SampleData[
SequencesWithQuality | PairedEndSequencesWithQuality],
},
parameters={
'threads': qiime2.plugin.Int % qiime2.plugin.Range(
1, None) | qiime2.plugin.Str % qiime2.plugin.Choices(['auto'])
},
input_descriptions={
'sequences': 'Fastq sequences'
},
parameter_descriptions={
'threads': 'The number of threads used for computation.',
},
name='Fastq stats with vsearch.',
description='A fastq overview via vsearch\'s fastq_stats, fastq_eestats '
'and fastq_eestats2 utilities. Please see '
'https://github.com/torognes/vsearch for detailed '
'documentation of these tools.',
)
importlib.import_module('q2_vsearch._transformer')
|
jakereps/q2-vsearch
|
q2_vsearch/_cluster_features.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import tempfile
import subprocess
import sqlite3
import biom
import skbio
import pandas as pd
from qiime2 import Metadata
from q2_types.feature_data import DNAFASTAFormat
class VSearchError(Exception):
pass
def run_command(cmd, verbose=True):
print("Running external command line application. This may print "
"messages to stdout and/or stderr.")
print("The command being run is below. This command cannot "
"be manually re-run as it will depend on temporary files that "
"no longer exist.")
print("\nCommand:", end=' ')
print(" ".join(cmd), end='\n\n')
subprocess.run(cmd, check=True)
def _uc_to_sqlite(uc):
'''Parse uc-style file into a SQLite in-memory database.
This populates an in-memory database with the following schema (displayed
below with dummy data):
feature_id | cluster_id | count
-----------|------------|-------
feature1 | r1 | 204
feature2 | r2 | 4
feature3 | r1 | 15
feature4 | r2 | 24
feature5 | r2 | 16
'''
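    # Illustrative example (hypothetical labels): a seed record such as
    #   S ... f2_1539;size=3 *
    # maps to the row ('f2_1539', 'f2_1539', None), while a hit record such as
    #   H ... f3_42;size=2 f2_1539
    # maps to ('f3_42', 'f2_1539', 2), with the count parsed from ';size='.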
conn = sqlite3.connect(':memory:')
c = conn.cursor()
# The PK constraint ensures that there are no duplicate Feature IDs
c.execute('CREATE TABLE feature_cluster_map (feature_id TEXT PRIMARY KEY,'
'cluster_id TEXT NOT NULL, count INTEGER);')
c.execute('CREATE INDEX idx1 ON '
'feature_cluster_map(feature_id, cluster_id);')
conn.commit()
insert_stmt = 'INSERT INTO feature_cluster_map VALUES (?, ?, ?);'
for line in uc:
line = line.strip()
if len(line) == 0 or line.startswith(b'#'):
continue
else:
fields = line.split(b'\t')
if fields[0] == b'S':
sequence_id = fields[8].decode('utf-8').split(';')[0]
c.execute(insert_stmt, (sequence_id, sequence_id, None))
elif fields[0] == b'H':
centroid_id = fields[9].decode('utf-8').split(';')[0]
sequence_id = fields[8].decode('utf-8').split(';size=')
sequence_id, count = sequence_id[0], sequence_id[1]
c.execute(insert_stmt, (sequence_id, centroid_id, count))
else:
pass
conn.commit()
return conn
def _collapse_f_from_sqlite(conn):
c = conn.cursor()
# This query produces the following results (displayed below with dummy
# data):
# feature_id | cluster_id
# -----------|------------
# feature1 | r1
# feature2 | r2
# feature3 | r1
# feature4 | r2
    # feature5   | r2
c.execute('SELECT feature_id, cluster_id FROM feature_cluster_map;')
id_to_centroid = dict(c.fetchall())
if len(id_to_centroid) == 0:
raise ValueError("No sequence matches were identified by vsearch.")
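    # biom.Table.collapse calls this with (observation id, metadata); the
    # metadata argument is unused here, we only map each feature id onto the
    # id of its cluster centroid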
def collapse_f(id_, x):
return id_to_centroid[id_]
return collapse_f
def _fasta_from_sqlite(conn, input_fasta_fp, output_fasta_fp):
input_seqs = skbio.read(input_fasta_fp, format='fasta',
constructor=skbio.DNA)
c = conn.cursor()
# Create a second in-memory table with the following schema (displayed
# below with dummy data):
# feature_id | sequence_string
# -----------|------------------
# feature1 | ACGTACGTACGTACGT
# feature2 | GGGGAAAACCCCTTTT
# feature3 | TCAGAAAATTTTTCAG
# feature4 | AAAAAAAAAAAAAAAA
# feature5 | GGGGGGGGGGGGGGGG
c.execute('CREATE TABLE rep_seqs (feature_id TEXT PRIMARY KEY, '
'sequence_string TEXT NOT NULL);')
c.executemany(
'INSERT INTO rep_seqs VALUES (?, ?);',
[(seq.metadata['id'], str(seq)) for seq in input_seqs]
)
conn.commit()
# Preemptively sort the table to deal with tie-breaking, later.
# This is a table, not a view, because we want/need sqlite's rowid.
c.execute('CREATE TABLE sorted_feature_cluster_map AS '
'SELECT * FROM feature_cluster_map ORDER BY cluster_id ASC,'
'feature_id ASC;')
c.execute('CREATE INDEX idx2 ON '
'sorted_feature_cluster_map(cluster_id, count);')
conn.commit()
# The results from this query should look like the following (displayed
# below with dummy data):
# cluster_id | sequence_string
# -----------|------------------
# r1 | ACGTACGTACGTACGT
# r2 | AAAAAAAAAAAAAAAA
c.execute('''SELECT fcm.cluster_id, rs.sequence_string, MAX(fcm.count)
FROM sorted_feature_cluster_map fcm
INNER JOIN rep_seqs rs ON rs.feature_id = fcm.feature_id
GROUP BY fcm.cluster_id
ORDER BY fcm.cluster_id ASC;
''')
with open(output_fasta_fp, 'w') as output_seqs:
while True:
partial_results = c.fetchmany(size=100)
if partial_results:
output_seqs.writelines(
['>%s\n%s\n' % (i, s) for (i, s, _) in partial_results]
)
else:
break
def _fasta_with_sizes(input_fasta_fp, output_fasta_fp, table):
table_ids = table.ids(axis='observation')
sizes = {id_: size for id_, size in zip(table_ids,
table.sum(axis='observation'))}
output_fasta_f = open(output_fasta_fp, 'w')
sequence_ids = set()
for e in skbio.io.read(input_fasta_fp, constructor=skbio.DNA,
format='fasta'):
feature_id = e.metadata['id']
feature_seq = str(e)
sequence_ids.add(feature_id)
try:
feature_size = sizes[feature_id]
except KeyError:
raise ValueError('Feature %s is present in sequences, but not '
'in table. The set of features in sequences must '
'be identical to the set of features in table.'
% feature_id)
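        # emit the sequence with a ';size=N' abundance annotation, the
        # convention vsearch uses to carry per-feature abundances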
output_fasta_f.write('>%s;size=%d\n%s\n' %
(feature_id, feature_size, feature_seq))
output_fasta_f.close()
_error_on_nonoverlapping_ids(set(table_ids), sequence_ids,
check_extra_table_ids=True,
check_extra_sequence_ids=False)
def cluster_features_de_novo(sequences: DNAFASTAFormat, table: biom.Table,
perc_identity: float, threads: int = 1
) -> (biom.Table, DNAFASTAFormat):
clustered_sequences = DNAFASTAFormat()
with tempfile.NamedTemporaryFile() as fasta_with_sizes:
with tempfile.NamedTemporaryFile() as out_uc:
_fasta_with_sizes(str(sequences), fasta_with_sizes.name, table)
cmd = ['vsearch',
'--cluster_size', fasta_with_sizes.name,
'--id', str(perc_identity),
'--centroids', str(clustered_sequences),
'--uc', out_uc.name,
'--qmask', 'none', # ensures no lowercase DNA chars
'--xsize',
'--threads', str(threads),
'--minseqlength', '1',
'--fasta_width', '0']
run_command(cmd)
out_uc.seek(0)
conn = _uc_to_sqlite(out_uc)
collapse_f = _collapse_f_from_sqlite(conn)
table = table.collapse(collapse_f, norm=False, min_group_size=1,
axis='observation',
include_collapsed_metadata=False)
return table, clustered_sequences
def _error_on_nonoverlapping_ids(table_ids, sequence_ids,
check_extra_table_ids=True,
check_extra_sequence_ids=True):
if check_extra_table_ids:
extra_table_ids = table_ids - sequence_ids
if len(extra_table_ids):
raise ValueError('Some feature ids are present in table, but not '
'in sequences. The set of features in sequences '
'must be identical to the set of features in '
'table. Feature ids present in table but not '
'sequences are: %s' % ', '.join(extra_table_ids))
if check_extra_sequence_ids:
extra_sequence_ids = sequence_ids - table_ids
if len(extra_sequence_ids):
raise ValueError('Some feature ids are present in sequences, but '
'not in table. The set of features in sequences '
'must be identical to the set of features in '
'table. Feature ids present in sequences but not '
'table are: %s' % ', '.join(extra_sequence_ids))
def cluster_features_closed_reference(sequences: DNAFASTAFormat,
table: biom.Table,
reference_sequences: DNAFASTAFormat,
perc_identity: float,
strand: str = 'plus',
threads: int = 1
) -> (biom.Table, DNAFASTAFormat,
DNAFASTAFormat):
table_ids = set(table.ids(axis='observation'))
sequence_ids = {e.metadata['id'] for e in skbio.io.read(
str(sequences), constructor=skbio.DNA, format='fasta')}
_error_on_nonoverlapping_ids(table_ids, sequence_ids)
matched_seqs, unmatched_seqs = DNAFASTAFormat(), DNAFASTAFormat()
with tempfile.NamedTemporaryFile() as fasta_with_sizes, \
tempfile.NamedTemporaryFile() as out_uc, \
tempfile.NamedTemporaryFile() as tmp_unmatched_seqs:
_fasta_with_sizes(str(sequences), fasta_with_sizes.name, table)
cmd = ['vsearch',
'--usearch_global', fasta_with_sizes.name,
'--id', str(perc_identity),
'--db', str(reference_sequences),
'--uc', out_uc.name,
'--strand', str(strand),
'--qmask', 'none', # ensures no lowercase DNA chars
'--notmatched', tmp_unmatched_seqs.name,
'--threads', str(threads),
'--minseqlength', '1',
'--fasta_width', '0']
run_command(cmd)
out_uc.seek(0)
        # It is possible for there to be no unmatched sequences; if that is
        # the case, skip the following clean-up.
if os.path.getsize(tmp_unmatched_seqs.name) > 0:
            # We don't actually need to sort the unmatched sequences here;
            # running --sortbysize is just a way to apply --xsize, which
            # strips the ';size=' counts from the feature IDs. It would be
            # more ideal if --usearch_global, above, let us pass in --xsize,
            # but unfortunately it isn't supported.
cmd = ['vsearch',
'--sortbysize', tmp_unmatched_seqs.name,
'--xsize',
'--output', str(unmatched_seqs),
'--minseqlength', '1',
'--fasta_width', '0']
run_command(cmd)
try:
conn = _uc_to_sqlite(out_uc)
collapse_f = _collapse_f_from_sqlite(conn)
_fasta_from_sqlite(conn, str(sequences), str(matched_seqs))
except ValueError:
raise VSearchError('No matches were identified to '
'reference_sequences. This can happen if '
'sequences are not homologous to '
'reference_sequences, or if sequences are '
'not in the same orientation as reference_'
'sequences (i.e., if sequences are reverse '
'complemented with respect to reference '
'sequences). Sequence orientation can be '
'adjusted with the strand parameter.')
unmatched_ids = [e.metadata['id']
for e in skbio.io.read(open(str(unmatched_seqs)),
constructor=skbio.DNA,
format='fasta')]
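    # drop the unmatched features from the table, then collapse the remaining
    # features onto their matched reference (centroid) ids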
table.filter(ids_to_keep=unmatched_ids, invert=True, axis='observation',
inplace=True)
table = table.collapse(collapse_f, norm=False, min_group_size=1,
axis='observation',
include_collapsed_metadata=False)
return table, matched_seqs, unmatched_seqs
def cluster_features_open_reference(ctx, sequences, table, reference_sequences,
perc_identity, strand='plus', threads=1):
cluster_features_closed_reference = ctx.get_action(
'vsearch', 'cluster_features_closed_reference')
filter_features = ctx.get_action('feature_table', 'filter_features')
cluster_features_de_novo = ctx.get_action(
'vsearch', 'cluster_features_de_novo')
merge = ctx.get_action('feature_table', 'merge')
merge_seqs = ctx.get_action('feature_table', 'merge_seqs')
skipped_closed_ref = True
try:
closed_ref_table, rep_seqs, unmatched_seqs = \
cluster_features_closed_reference(
sequences=sequences, table=table,
reference_sequences=reference_sequences,
perc_identity=perc_identity,
strand=strand, threads=threads)
skipped_closed_ref = False
except VSearchError: # No matches
pass
# If cluster_features_closed_reference fails to match, we need to
# pass the source data into cluster_features_de_novo wholesale.
if skipped_closed_ref:
unmatched_seqs, closed_ref_table = sequences, table
# It is possible that all of the sequences matched the reference database,
# if that is the case, don't worry about running cluster_features_de_novo.
if unmatched_seqs.view(pd.Series).size > 0:
unmatched_seqs_md = unmatched_seqs.view(Metadata)
unmatched_table, = filter_features(table=table,
metadata=unmatched_seqs_md)
de_novo_table, de_novo_seqs = cluster_features_de_novo(
sequences=unmatched_seqs, table=unmatched_table,
perc_identity=perc_identity, threads=threads)
if skipped_closed_ref:
merged_reference_seqs, = merge_seqs(data=[reference_sequences,
de_novo_seqs])
outputs = (de_novo_table, de_novo_seqs, merged_reference_seqs)
else:
merged_table, = merge(
tables=[closed_ref_table, de_novo_table],
overlap_method='error_on_overlapping_feature')
merged_rep_seqs, = merge_seqs(data=[rep_seqs, de_novo_seqs])
merged_reference_seqs, = merge_seqs(data=[reference_sequences,
de_novo_seqs])
outputs = (merged_table, merged_rep_seqs, merged_reference_seqs)
else: # skipped de novo
outputs = (closed_ref_table, rep_seqs, reference_sequences)
return outputs
|
jakereps/q2-vsearch
|
q2_vsearch/tests/test_plugin_setup.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
from q2_vsearch.plugin_setup import plugin as vsearch_plugin
class PluginSetupTests(unittest.TestCase):
def test_plugin_setup(self):
self.assertEqual(vsearch_plugin.name, 'vsearch')
|
Neelakurinji123/kindle-weather-display-for-openwrt
|
openwrt/opt/kindle-weather-station/createSVGv2.py
|
<gh_stars>0
#!/usr/bin/env python3
# encoding=utf-8
# -*- coding: utf-8 -*-
# Written by : <EMAIL>
# Date : 16 February 2022
#
# This script was modified from:
# Author : <NAME> - http://www.iero.org
# Public domain source code
# Published March 2015
# Update October 2016
# This code creates an SVG image, ready for Kindle 600x800 screen.
# With weather information from Netatmo weather station and
# forecast from forecast.io.
# Please fill settings.json file
import time as t
import math
import sys
import json
import re
import math
from datetime import datetime, timedelta, date
from pytz import timezone
import pytz
import locale
import shutil
from decimal import Decimal, ROUND_HALF_EVEN, ROUND_HALF_UP
from subprocess import Popen
from OpenWeatherMapAPIv2 import OpenWeatherMap
settings = "settings.json"
svgfile = "/tmp/KindleStation.svg"
pngfile = "/tmp/KindleStation.png"
pngtmpfile = "/tmp/.KindleStation.png"
flatten_pngfile = "/tmp/KindleStation_flatten.png"
error_image = "./img/error_service_unavailable.png"
i18nfile = "i18n.json"
coverter = 'convert'
class Header:
def __init__(self, p):
self.p = p
def text(self):
tz = timezone(self.p.t_timezone)
p = self.p
curt_weather = self.p.current_weather()
t_now = self.p.t_now
sunrise_and_sunset = self.p.sunrise_and_sunset
svg_text = str()
svg_text += '''<?xml version="1.0" encoding="{}"?>
<svg xmlns="http://www.w3.org/2000/svg" height="800" width="600" version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink">\n'''.format(p.encoding)
svg_text += '<g font-family="{}">\n'.format(p.font)
#svg_text += '<g font-family="{}">\n'.format("Chalkboard")
#svg_text += '<g font-family="{}">\n'.format("Arial")
return svg_text
class Maintenant:
def __init__(self, p, base_x, base_y):
self.p = p
self.base_x = base_x
self.base_y = base_y
def text(self):
p = self.p
curt_weather = p.current_weather()
tz = timezone(p.t_timezone)
t_now = p.t_now
x = self.base_x
y = self.base_y
sunrise_and_sunset = p.sunrise_and_sunset
svg_text = str()
if sunrise_and_sunset == True:
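            # a zero timestamp is treated as "not reported" and rendered as n/a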
if curt_weather[11] == 0:
t_sunrise = "n/a"
else:
t_sunrise = str(datetime.fromtimestamp(curt_weather[11], tz).strftime("%H:%M"))
if curt_weather[12] == 0:
t_sunset = "n/a"
else:
t_sunset = str(datetime.fromtimestamp(curt_weather[12], tz).strftime("%H:%M"))
# localtime
maintenant = (str.lower(datetime.fromtimestamp(t_now, tz).strftime("%a, %d %b %H:%M")))
w = maintenant.split()
d = read_i18n(p)
w[0] = d["abbreviated_weekday"][w[0][:-1]] + ',' if not d == dict() else w[0]
w[2] = d["abbreviated_month"][w[2]] if not d == dict() else w[2]
svg_text += SVGtext("start", "30px", (x + 20), (y + 40), ' '.join(w)).svg()
svg_text += SVGtext("end", "30px", (x + 445), (y + 40), t_sunrise).svg()
svg_text += SVGtext("end", "30px", (x + 580),(y + 40),t_sunset).svg()
else:
maintenant = str.lower(datetime.fromtimestamp(t_now, tz).strftime("%a %Y/%m/%d %H:%M"))
w = maintenant.split()
d = read_i18n(p)
w[0] = d["abbreviated_weekday"][w[0]] if not d == dict() else w[0]
svg_text += SVGtext("start", "30px", (x + 20), (y + 40), p.city).svg()
svg_text += SVGtext("end", "30px", (x + 580), (y + 40), ' '.join(w)).svg()
return svg_text
def icon(self):
p = self.p
tz = timezone(p.t_timezone)
curt_weather = p.current_weather()
sunrise_and_sunset = self.p.sunrise_and_sunset
x = self.base_x
y = self.base_y
svg_icon = str()
if p.sunrise_and_sunset == True:
svg_icon += SVGtransform("(1.1,0,0,1.1," + str(x + 332) + "," + str(y + 14) + ")", p.header_icons['sunrise']).svg()
svg_icon += SVGtransform("(1.1,0,0,1.1," + str(x + 467) + "," + str(y + 14) + ")", p.header_icons['sunset']).svg()
return svg_icon
class CurrentWeather:
def add_curt_weather_prec(self):
p = self.p
curt_weather = p.current_weather()
x = self.base_x
y = self.base_y
svg_text = str()
# probability of precipitation
if (curt_weather[2] == 'Rain' or curt_weather[2] == 'Drizzle' or
curt_weather[2] == 'Snow' or curt_weather[2] == 'Sleet' or curt_weather[2] == 'Clouds'):
# r = Decimal(curt_weather[14]).quantize(Decimal('0.1'), rounding=ROUND_HALF_EVEN)
r = Decimal(curt_weather[15]).quantize(Decimal('0.1'), rounding=ROUND_HALF_EVEN)
if r == 0:
svg_text += SVGtext("end", "45px", (x + 200 - int(s_padding(r) * 0.64)), (y + 135), "n/a").svg()
else:
svg_text += SVGtext("end", "45px", (x + 195 - int(s_padding(r) * 0.64)), (y + 135), \
Decimal(float(r)).quantize(Decimal('0.1'), rounding=ROUND_HALF_UP)).svg()
return svg_text
def add_curt_weather_temp(self):
p = self.p
curt_weather = p.current_weather()
today_forecast = p.daily_forecast(0)
x = self.base_x
y = self.base_y
disc_offset = self.disc_offset
        wordwrap = self.wordwrap
svg_text = str()
# Temperature
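        # split the value into its integer part and first decimal digit so the
        # two can be drawn at different font sizes (large integer, small decimal)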
tempEntier = math.floor(curt_weather[5])
tempDecimale = 10 * (curt_weather[5] - tempEntier)
svg_text += SVGtext("end", "100px", (x + 155), (y + 315), int(tempEntier)).svg()
svg_text += SVGtext("start", "50px", (x + 150), (y + 310), "." + str(int(tempDecimale))).svg()
svg_text += SVGcircle((x + 170), (y + 245), 7, "black", 3, "none").svg()
svg_text += SVGtext("start", "35px", (x + 180), (y + 265), p.unit['temp']).svg()
# Max temp
svg_text += SVGtext("end", "35px", (x + 280), (y + 275), int(math.ceil(today_forecast[7]))).svg()
svg_text += SVGcircle((x + 285), (y + 255), 4, "black", 3, "none").svg()
svg_text += SVGtext("start", "25px", (x + 290), (y + 267), p.unit['temp']).svg()
# Line
svg_text += SVGline((x + 220), (x + 320), (y + 282), (y + 282), "fill:none;stroke:black;stroke-width:1px;").svg()
# Min temp
svg_text += SVGtext("end", "35px", (x + 280), (y + 315), int(math.ceil(today_forecast[6]))).svg()
svg_text += SVGcircle((x + 285), (y + 295), 4, "black", 3, "none").svg()
svg_text += SVGtext("start", "25px", (x + 290), (y + 307), p.unit['temp']).svg()
return svg_text
def add_curt_weather_pres(self):
p = self.p
curt_weather = p.current_weather()
x = self.base_x
y = self.base_y
# Pressure
svg_text = SVGtext("end", "30px", (x + 280 + self.disc_offset),(y + 370), str(round(curt_weather[6])) + p.unit['pressure']).svg()
return svg_text
def add_curt_weather_humi(self):
p = self.p
curt_weather = p.current_weather()
x = self.base_x
y = self.base_y
# Humidity
svg_text = SVGtext("end", "30px", (x + 155 + self.disc_offset), (y + 370), str(round(curt_weather[7])) + "%").svg()
return svg_text
def add_curt_weather_wind(self):
p = self.p
curt_weather = p.current_weather()
x = self.base_x
y = self.base_y
# Wind
svg_text = SVGtext("end", "30px", (x + 85 + self.disc_offset),(y + 370), str(int(curt_weather[8])) + p.unit['wind_speed']).svg()
return svg_text
def add_curt_weather_disc(self):
curt_weather = self.p.current_weather()
x = self.base_x
y = self.base_y
svg_text = str()
# Description
disc = text_split(length=self.wordwrap, text=curt_weather[3])
for w in disc:
svg_text += SVGtext("end", "30px", (x + 280 + self.disc_offset), (y + 410), w).svg()
y += 35
return svg_text
def add_curt_weather_icon(self):
x = self.base_x
y = self.base_y
svg_icon = SVGtransform("(4,0,0,4," + str(x - 30) + "," + str(y - 80) + ")", self.p.current_icons[0]).svg()
return svg_icon
def add_curt_weather_wind_icon(self):
p = self.p
curt_weather = p.current_weather()
x = self.base_x
y = self.base_y
r = p.current_icons['cardinal']
_x = x - 10 - len(str(int(curt_weather[8]))) * 17 + self.disc_offset
svg_icon = SVGtransform("(1.6,0,0,1.6," + str(_x) + "," + str(y + 336) + ")", r).svg()
return svg_icon
class CurrentWeatherNoAlerts(CurrentWeather):
def __init__(self, p, base_x, base_y, disc_offset, wordwrap):
self.p = p
self.base_x = base_x
self.base_y = base_y
self.disc_offset = disc_offset
self.wordwrap = wordwrap
def text(self):
prec = super(CurrentWeatherNoAlerts, self).add_curt_weather_prec()
temp = super(CurrentWeatherNoAlerts, self).add_curt_weather_temp()
pres = super(CurrentWeatherNoAlerts, self).add_curt_weather_pres()
humi = super(CurrentWeatherNoAlerts, self).add_curt_weather_humi()
wind = super(CurrentWeatherNoAlerts, self).add_curt_weather_wind()
disc = super(CurrentWeatherNoAlerts, self).add_curt_weather_disc()
svg_text = prec + temp + pres + humi + wind + disc
return svg_text
def icon(self):
p = self.p
curt_weather = p.current_weather()
disc_offset = 0
svg_icon = str()
svg_icon += super(CurrentWeatherNoAlerts, self).add_curt_weather_icon()
if int(curt_weather[8]) != 0:
svg_icon += super(CurrentWeatherNoAlerts, self).add_curt_weather_wind_icon()
return svg_icon
class CurrentWeatherAlerts(CurrentWeather):
def __init__(self, p, base_x, base_y, disc_offset, wordwrap):
self.p = p
self.base_x = base_x
self.base_y = base_y
self.disc_offset = disc_offset
self.wordwrap = wordwrap
def text(self):
base_x = self.base_x
base_y = self.base_y
svg_text = str()
svg_text += super(CurrentWeatherAlerts, self).add_curt_weather_prec()
self.base_x = 270
self.base_y = -160
svg_text += super(CurrentWeatherAlerts, self).add_curt_weather_temp()
self.base_x = 270
self.base_y = -165
svg_text += super(CurrentWeatherAlerts, self).add_curt_weather_pres()
self.base_x = 270
self.base_y = -165
svg_text += super(CurrentWeatherAlerts, self).add_curt_weather_humi()
self.base_x = 465
self.base_y = -125
svg_text += super(CurrentWeatherAlerts, self).add_curt_weather_wind()
self.base_x = 270
self.base_y = -165
svg_text += super(CurrentWeatherAlerts, self).add_curt_weather_disc()
return svg_text
def icon(self):
p = self.p
svg_icon = str()
curt_weather = p.current_weather()
self.base_x = -5
self.base_y = 45
svg_icon += super(CurrentWeatherAlerts, self).add_curt_weather_icon()
if int(curt_weather[8]) != 0:
self.base_x = 450
self.base_y = -125
svg_icon += super(CurrentWeatherAlerts, self).add_curt_weather_wind_icon()
return svg_icon
class HourlyWeather:
def __init__(self, p, base_x, base_y, h_hour, h_range, h_step, pitch):
self.p = p
self.base_x = base_x
self.base_y = base_y
self.h_hour = h_hour
self.h_range = h_range
self.h_step = h_step
self.pitch = pitch
# Hourly weather document area (base_x=370 ,base_y=40)
def text(self):
p = self.p
x = self.base_x
y = self.base_y
curt_weather = p.current_weather()
today_forecast = p.daily_forecast(0)
disc_offset = 0
wordwrap = 0
h_hour = self.h_hour
h_range = self.h_range
h_step = self.h_step
pitch = self.pitch
svg_text = str()
# 3h forecast
for i in range(h_hour, h_range, h_step):
hourly = p.hourly_forecast(i)
hrs = {3: "three hours later", 6: "six hours later", 9: "nine hours later"}
d = read_i18n(p)
if not d == dict():
for k in hrs.keys():
hrs[k] = d["hours"][hrs[k]]
svg_text += SVGtext("start", "25px", (x - 0), (y + 165), hrs[i]).svg()
svg_text += temp_unit(x=(x + 30), y=(y + 96), text=round(hourly[5]), unit=p.unit['temp'])
# probability of precipitation
w = hourly[2]
if w == 'Rain' or w == 'Drizzle' or w == 'Snow' or w == 'Sleet' or w == 'Clouds':
r = Decimal(hourly[7]).quantize(Decimal('0.1'), rounding=ROUND_HALF_EVEN)
if r == 0:
s1 = SVGtext("end", "25px", int(x + 140 - s_padding(r) * 0.357), (y + 92), 'n/a')
svg_text += s1.svg()
else:
s1 = SVGtext("end", "25px", int(x + 137 - s_padding(r) * 0.357), (y + 92), r)
svg_text += s1.svg()
y += pitch
return svg_text
def icon(self):
p = self.p
x = self.base_x
y = self.base_y
h_hour = self.h_hour
h_range = self.h_range
h_step = self.h_step
pitch = self.pitch
svg_icon = str()
for i in range(h_hour, h_range, h_step):
svg_icon += SVGtransform("(2.3,0,0,2.3," + str(x + 8) + "," + str(y - 32) + ")", p.hourly_icons[i]).svg()
y += pitch
return svg_icon
class DailyWeather:
def __init__(self, p, base_x, base_y, d_range, pitch):
self.p = p
        self.base_x = base_x
        self.base_y = base_y
        self.pitch = pitch
        self.d_range = d_range
def text(self):
p = self.p
x = self.base_x
y = self.base_y
daily = p.daily_forecast
pitch = self.pitch
d_range = self.d_range
disc_offset = 0
wordwrap = 0
svg_text = str()
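        # pasTemp is the pixels-per-degree scale: the 3-day low/high span is
        # mapped onto roughly 120px so each day's temperature bar lines up.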
minTemp = math.floor(min([daily(1)[6], daily(2)[6] , daily(3)[6]]))
maxTemp = math.ceil(max([daily(1)[7], daily(2)[7] , daily(3)[7]]))
pasTemp = 120 / (maxTemp-minTemp)
d = read_i18n(p)
# Drawing temp bars
for i in range(1, d_range):
forecast = p.daily_forecast(i)
tLow = math.floor(forecast[6])
tHigh = math.ceil(forecast[7])
jour = datetime.fromtimestamp(forecast[0], tz)
tMin = (int)(x + 355 + pasTemp * (tLow - minTemp))
tMax = (int)(x + 440 + pasTemp * (tHigh - minTemp))
w = str.lower(jour.strftime("%A"))
w = d["full_weekday"][w] if not d == dict() else w
svg_text += SVGtext("end", "35px", (x + 185), (y + 75), w).svg()
svg_text += temp_unit(x=tMin, y=(y + 75), text=int(tLow), unit=p.unit['temp'])
svg_text += temp_unit(x=int(tMax - s_padding(tHigh)), y=(y + 75), text=int(tHigh), unit=p.unit['temp'])
svg_text += SVGline(int(tMin + 40), int(tMax - 65), (y + 75 - 10), (y + 75 - 10), "fill:none;stroke:black;stroke-linecap:round;stroke-width:10px;").svg()
y += pitch
return svg_text
def icon(self):
p = self.p
x = self.base_x
y = self.base_y
pitch = self.pitch
d_range = self.d_range
disc_offset = 0
svg_icon = str()
for i in range(1, d_range):
svg_icon += SVGtransform("(1.9,0,0,1.9,{},{})".format((x + 160), (y -30)), p.daily_icons[i]).svg()
y += pitch
return svg_icon
class Alerts:
def __init__(self, p, base_x, base_y, max_y):
self.p = p
self.base_x = base_x
self.base_y = base_y
self.max_y = max_y
def text(self):
p = self.p
x = self.base_x
y = self.base_y
max_y = self.max_y
alerts = p.weather_alerts()
svg_text = str()
_c = text_split(length=35, text=alerts[0]['event'], start_text="ALERT: ")
for v in _c:
svg_text += SVGtext2("start", "bold", "30px", "20", y, str(v)).svg()
y += 40
x += 30
svg_text += SVGtext("start", "20px", x, y, "Description:").svg()
x += 10
length = 60
#length = 57
_c = alerts[0]['description']
_c = re.sub(r'\n', ' ', _c, flags=re.MULTILINE)
flag = True
_list = text_split(length=length, text=_c, match='\*')
for v in _list:
y += 30
if y > max_y:
v = v[:-2]
v += "..."
_text = SVGtext("start", "18px", x, y, str(v))
svg_text += _text.svg()
flag = False
break
else:
_text = SVGtext("start", "18px", x, y, str(v))
svg_text += _text.svg()
return svg_text
class DrawGraph:
def __init__(self, p, base_x, base_y, canvas, object):
self.p = p
self.base_x = base_x
self.base_y = base_y
self.canvas = canvas
self.object = object
def draw(self):
if self.object['type'] == "line":
res = DrawGraph.line_graph(self)
elif self.object['type'] == "bar":
res = DrawGraph.bar_graph(self)
elif self.object['type'] == "tile":
res = DrawGraph.tile(self)
return res
def line_graph(self):
p = self.p
x = self.base_x
y = self.base_y
w = self.canvas["width"]
h = self.canvas["height"]
bgcolor = self.canvas["bgcolor"]
axis = self.canvas["axis"]
axis_color = self.canvas["axis_color"]
grid = self.canvas["grid"]
grid_color = self.canvas["grid_color"]
stroke = self.object["stroke"]
stroke_color = self.object["stroke-color"]
fill = self.object["fill"]
stroke_linecap = self.object["stroke-linecap"]
label = bool(eval(self.object["label"]))
label_adjust = bool(eval(self.object["label_adjust"]))
name = self.object["name"]
start = self.object["start"]
end = self.object["end"]
step = self.object["step"]
basis = self.object["basis"]
svg = '<g font-family="{}">\n'.format(p.font)
d = read_i18n(p)
# Canvas
style = "fill:{};stroke:{};stroke-width:{}px;".format(bgcolor, bgcolor, (0))
svg += SVGrect(x=(x - 10), y=(y - h + 10), width=(w + 10), height=(h - 45), style=style).svg()
style = "fill:none;stroke:{};stroke-width:{}px;".format(axis_color, axis)
# Graph
points = str()
_text = str()
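        # t_step scales the temperature span onto ~45px of graph height,
        # falling back to 1 when the span is zero to avoid division by zero.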
if basis == "hour":
t_min = min([p.hourly_forecast(n)[5] for n in range(start, end, step)])
t_max = max([p.hourly_forecast(n)[5] for n in range(start, end, step)])
t_step = 45 / (t_max - t_min) if (t_max - t_min) != 0 else 1
elif basis == "day":
t_min = min([p.daily_forecast(n)[5] for n in range(start, end, step)])
t_max = max([p.daily_forecast(n)[5] for n in range(start, end, step)])
t_step = 45 / (t_max - t_min) if (t_max - t_min) != 0 else 1
for n in range(start, end, step):
if basis == "hour":
hourly = p.hourly_forecast(n)
heure = datetime.fromtimestamp(hourly[0], tz).strftime('%H')
_x = x + 10 + int((w - 22) / (end - start - 1)) * n
_y = y - (hourly[5] - t_min) * t_step - 45
points += "{},{} ".format(_x, _y)
points2 = points + "{},{} {},{}".format(_x, (y - 35), (x + 10), (y - 35))
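                # points2 closes the traced line down to the baseline so the
                # area under the curve can be filled as a polygon.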
if int(heure) % 3 == 0:
svg += SVGtext("end", "16px", (_x + 14), (_y - 9), "{} {}".format(round(int(hourly[5])), p.unit['temp'])).svg()
svg += SVGcircle((_x + 3), (_y - 20), 2, "black", 1, "none").svg()
if label == True and label_adjust == True:
svg += SVGtext("middle", "16px", _x, (y - 9), "{}:00".format(heure)).svg()
elif label == True and label_adjust == False:
svg += SVGtext("middle", "16px", _x, (y - 15), "{}:00".format(heure)).svg()
elif basis == "day":
daily = p.daily_forecast(n)
jour = str.lower(datetime.fromtimestamp(daily[0], tz).strftime('%a'))
jour = d["abbreviated_weekday"][jour] if not d == dict() else jour
_x = x + 25 + int((w - 50) / (end - start - 1)) * n
_y = y - (daily[5] - t_min) * t_step - 45
points += "{},{} ".format(_x, _y)
points2 = points + "{},{} {},{}".format(_x, (y - 35), (x + 25), (y - 35))
svg += SVGtext("end", "16px", (_x + 14), (_y - 9), "{} {}".format(int(daily[5]), p.unit['temp'])).svg()
svg += SVGcircle((_x + 3), (_y - 20), 2, "black", 1, "none").svg()
if label == True and label_adjust == True:
svg += SVGtext("middle", "16px", _x, (y - 9), "{}".format(jour)).svg()
elif label == True and label_adjust == False:
svg += SVGtext("middle", "16px", _x, (y - 15), "{}".format(jour)).svg()
style2 = "fill:{};stroke:{};stroke-width:{}px;stroke-linecap:{};".format(fill, fill, "0", stroke_linecap)
svg += SVGpolyline(points2, style2).svg()
style = "fill:none;stroke:{};stroke-width:{}px;stroke-linecap:{};".format(stroke_color, stroke, stroke_linecap)
svg += SVGpolyline(points, style).svg()
# Text
svg += SVGtext("start", "16px", x, (y - h + 27), name).svg()
svg += '</g>'
return svg
def bar_graph(self):
p = self.p
x = self.base_x
y = self.base_y
w = self.canvas["width"]
h = self.canvas["height"]
bgcolor = self.canvas["bgcolor"]
axis = self.canvas["axis"]
axis_color = self.canvas["axis_color"]
grid = self.canvas["grid"]
grid_color = self.canvas["grid_color"]
stroke = self.object["stroke"]
stroke_color = self.object["stroke-color"]
graph_fill = self.object["fill"]
stroke_linecap = self.object["stroke-linecap"]
label = bool(eval(self.object["label"]))
label_adjust = bool(eval(self.object["label_adjust"]))
name = self.object["name"]
start = self.object["start"]
end = self.object["end"]
step = self.object["step"]
basis = self.object["basis"]
l_sum = float()
d = read_i18n(p)
svg = '<g font-family="{}">\n'.format(p.font)
#data = p.daily_forecast(0)
#hourly = p.hourly_forecast(5)
# Canvas
style = "fill:{};stroke:{};stroke-width:{}px;".format(bgcolor, bgcolor, (0))
svg += SVGrect(x=(x - 10), y=(y - h + 10), width=(w + 10), height=(h - 45), style=style).svg()
if basis == "hour" and name == "precipitation":
# Graph
l_min = min([p.hourly_forecast(n)[10] if not p.hourly_forecast(n)[10] is None else 0 for n in range(start, end, step)])
l_max = max([p.hourly_forecast(n)[10] if not p.hourly_forecast(n)[10] is None else 0 for n in range(start, end, step)])
l_sum = round(sum([p.hourly_forecast(n)[10] if not p.hourly_forecast(n)[10] is None else 0 for n in range(start, end, step)]), 2)
if l_max >= 100:
l_step = 60 / (l_max - l_min) if (l_max - l_min) != 0 else 1
elif 100 > l_max >= 50:
l_step = 55 / (l_max - l_min) if (l_max - l_min) != 0 else 1
elif 50 > l_max >= 10:
l_step = 50 / (l_max - l_min) if (l_max - l_min) != 0 else 1
elif 10 > l_max >= 1:
l_step = 40 / 10
else:
l_step = 30
style = "fill:{};stroke:{};stroke-linecap:{};stroke-width:{}px;".format(graph_fill, stroke_color, stroke_linecap, stroke)
for n in range(start, end, step):
hourly = p.hourly_forecast(n)
heure = datetime.fromtimestamp(hourly[0], tz).strftime('%H')
_x = x + 10 + int((w - 22) / (end - start - 1)) * n
_y = y - (hourly[10] - l_min) * l_step - 35
svg += SVGline(_x, _x, (y - 35), _y, style).svg()
if l_max == hourly[10] and l_max != 0:
svg += SVGtext("middle", "16px", _x, (_y - 5), "max:{}".format(round(hourly[10],2))).svg()
style2 = "fill:{};stroke:{};stroke-linecap:{};stroke-width:{}px;".format(axis_color, axis_color, stroke_linecap, "1")
svg += SVGline(_x, _x, _y, (_y - 3), style2).svg()
if int(heure) % 3 == 0:
if label == True and label_adjust == True:
svg += SVGtext("middle", "16px", _x, (y - 9), "{}:00".format(heure)).svg()
elif label == True and label_adjust == False:
svg += SVGtext("middle", "16px", _x, (y - 15), "{}:00".format(heure)).svg()
elif basis == "day" and name == "precipitation":
# Graph
l_min = min([p.daily_forecast(n)[10] if not p.daily_forecast(n)[10] is None else 0 for n in range(start, end, step)])
l_max = max([p.daily_forecast(n)[10] if not p.daily_forecast(n)[10] is None else 0 for n in range(start, end, step)])
l_sum = round(sum([p.daily_forecast(n)[10] if not p.daily_forecast(n)[10] is None else 0 for n in range(start, end, step)]), 2)
if l_max >= 100:
l_step = 60 / (l_max - l_min) if (l_max - l_min) != 0 else 1
elif 100 > l_max >= 50:
l_step = 55 / (l_max - l_min) if (l_max - l_min) != 0 else 1
elif 50 > l_max >= 10:
l_step = 50 / (l_max - l_min) if (l_max - l_min) != 0 else 1
elif 10 > l_max >= 1:
l_step = 40 / 10
else:
l_step = 30
style = "fill:{};stroke:{};stroke-linecap:{};stroke-width:{}px;".format(graph_fill, stroke_color, stroke_linecap, stroke)
for n in range(start, end, step):
daily = p.daily_forecast(n)
jour = str.lower(datetime.fromtimestamp(daily[0], tz).strftime('%a'))
jour = d["abbreviated_weekday"][jour] if not d == dict() else jour
_x = x + 25 + int((w - 50) / (end - start - 1)) * n
_y = y - (daily[10] - l_min) * l_step - 35
svg += SVGline(_x, _x, (y - 35), _y, style).svg()
if l_max == daily[10] and l_max != 0:
svg += SVGtext("middle", "16px", _x, (_y - 5), "max:{}".format(round(daily[10],2))).svg()
style2 = "fill:{};stroke:{};stroke-linecap:{};stroke-width:{}px;".format(axis_color, axis_color, stroke_linecap, "1")
svg += SVGline(_x, _x, _y, (_y - 3), style2).svg()
if label == True and label_adjust == True:
svg += SVGtext("middle", "16px", _x, (y - 9), "{}".format(jour)).svg()
elif label == True and label_adjust == False:
svg += SVGtext("middle", "16px", _x, (y - 15), "{}".format(jour)).svg()
style = "fill:none;stroke:{};stroke-width:{}px;".format(axis_color, 1)
svg += SVGline(x1=(x - 10), x2=(x + w), y1=(y - 35), y2=(y - 35), style=style).svg()
# Text
svg += SVGtext("start", "16px", x, (y - h + 27), "{} (total: {} mm)".format(name, l_sum)).svg()
svg += '</g>'
return svg
def tile(self):
p = self.p
x = self.base_x
y = self.base_y
w = self.canvas["width"]
h = self.canvas["height"]
bgcolor = self.canvas["bgcolor"]
axis = self.canvas["axis"]
axis_color = self.canvas["axis_color"]
grid_y = self.canvas["grid"]
grid_y_color = self.canvas["grid_color"]
stroke = self.object["stroke"] if "stroke" in self.object else None
stroke_color = self.object["stroke-color"] if "stroke-color" in self.object else None
fill = self.object["fill"] if "fill" in self.object else None
stroke_linecap = self.object["stroke-linecap"] if "stroke-linecap" in self.object else None
label = bool(eval(self.object["label"]))
label_adjust = bool(eval(self.object["label_adjust"]))
name = self.object["name"]
start = self.object["start"]
end = self.object["end"]
step = self.object["step"]
basis = self.object["basis"]
        svg = '<g font-family="{}">\n'.format(p.font)
icons = str()
d = read_i18n(p)
tz = timezone(p.t_timezone)
t_now = p.t_now
# Canvas
style = "fill:{};stroke:{};stroke-width:{}px;".format(bgcolor, bgcolor, (0))
svg += SVGrect(x=(x - 10), y=(y - h + 10), width=(w + 10), height=(h - 45), style=style).svg()
style = "fill:none;stroke:{};stroke-width:{}px;".format(axis_color, axis)
# Graph
points = str()
_text = str()
for n in range(start, end, step):
if basis == "day" and name == "weather":
daily = p.daily_forecast(n)
jour = str.lower(datetime.fromtimestamp(daily[0], tz).strftime('%a'))
jour = d["abbreviated_weekday"][jour] if not d == dict() else jour
_x = x + 25 + int((w - 50) / (end - start - 1)) * n
_y = y - 45
icons += SVGtransform("(1.0,0,0,1.0,{},{})".format((_x - 53), (_y - 105)), p.daily_icons[n]).svg()
svg += SVGtext("start", "16px", (_x - 32), (_y - 10), "hi:").svg()
svg += SVGtext("end", "16px", (_x + 26), (_y - 10), "{} {}".format(round(daily[7]), p.unit['temp'])).svg()
svg += SVGcircle((_x + 15), (_y - 21), 2, "black", 1, "none").svg()
svg += SVGtext("start", "16px", (_x - 32), (_y + 7), "lo:").svg()
svg += SVGtext("end", "16px", (_x + 26), (_y + 7), "{} {}".format(round(daily[6]), p.unit['temp'])).svg()
svg += SVGcircle((_x + 15), (_y - 4), 2, "black", 1, "none").svg()
if n < (end - 1):
style = "fill:none;stroke:{};stroke-linecap:{};stroke-width:{}px;".format(grid_y_color, stroke_linecap, grid_y)
icons += SVGline((_x + 30), (_x + 30), (_y - h + 55), (_y + 10), style).svg()
if label == True and label_adjust == True:
svg += SVGtext("middle", "16px", _x, (y - 9), "{}".format(jour)).svg()
elif label == True and label_adjust == False:
svg += SVGtext("middle", "16px", _x, (y - 15), "{}".format(jour)).svg()
elif basis == "day" and name == "moon phase":
daily = p.daily_forecast(n)
jour = str.lower(datetime.fromtimestamp(daily[0], tz).strftime('%a'))
jour = d["abbreviated_weekday"][jour] if not d == dict() else jour
day = int(datetime.fromtimestamp(daily[0], tz).strftime('%-d'))
mon = int(datetime.fromtimestamp(daily[0], tz).strftime('%-m'))
yrs = int(datetime.fromtimestamp(daily[0], tz).strftime('%Y'))
lat = float(p.lat)
_x = x + 25 + int((w - 50) / (end - start - 1)) * n
_y = y - 45
                r = 14
# icon
style = "fill:{};stroke:{};stroke-width:{}px;".format(fill, stroke_color, 1)
icons += SVGcircle((_x - 3), (_y - 53), (r + 2), stroke_color, stroke, "none").svg()
# moon phase: 360d = 2pi(rad)
#lat = -1 # test
pi = math.pi
                rad = daily[20] * pi * 2 # One Call API moon_phase: 0 or 1 = new moon, 0.25 = first quarter, 0.5 = full moon, 0.75 = last quarter
c = 0.025
m = rad * c * math.cos(rad)
rx = _x - 3
ry = _y - 53
rp = r + 2
#rp = r - 2 # test
ra1 = 1 * rp
ra2 = (math.cos(rad) * rp)
ra3 = 1 * rp
def phase(rad):
if (2 * pi / 60) > rad >= 0 or (2 * pi / 60) > (pi * 2 - rad) >= 0:
res = 'n'
elif (2 * pi / 60) > abs(rad - pi * 0.5) >= 0:
res = '1'
elif (2 * pi / 60) > abs(rad - pi) >= 0:
res = 'f'
elif (2 * pi / 60) > abs(rad - pi * 1.5) >= 0:
res = '3'
else:
res = str()
return res
def ramadhan(day, mon, yrs):
ra = Gregorian(yrs, mon, day).to_hijri()
if ra.month_name() == "Ramadhan":
res = "r"
else:
res = str()
return res
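                # The lit part of the disc is one SVG path: a circular arc of
                # radius ra1 along the edge plus a terminator arc whose x-radius
                # ra2 follows cos(phase); the lat branches below mirror the
                # shape for northern vs. southern hemisphere observers.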
if lat >= 0:
if phase(rad) == "n":
px1 = math.cos(pi * 0.5 - m) * rp + rx
py1 = math.sin(pi * 0.5 - m ) * rp + ry
px2 = math.cos(pi * 0.5 - m) * rp + rx
py2 = -math.sin(pi * 0.5 - m) * rp + ry
dm = "M{} {} A{} {} 0 1 1 {} {} {} {} 0 0 1 {} {}z".format(px1, py1, ra1, ra1, px2, py2, ra2, ra3, px1, py1)
ps = phase(rad)
ra = ramadhan(day, mon, yrs) if p.ramadhan == True else str()
elif rad < pi * 0.5:
px1 = math.cos(pi * 0.5 - m) * rp + rx
py1 = math.sin(pi * 0.5 - m) * rp + ry
px2 = math.cos(pi * 0.5 - m) * rp + rx
py2 = -math.sin(pi * 0.5 - m) * rp + ry
dm = "M{} {} A{} {} 0 1 1 {} {} {} {} 0 0 1 {} {}z".format(px1, py1, ra1, ra1, px2, py2, ra2, ra3+1, px1, py1)
ps = phase(rad)
ra = ramadhan(day, mon, yrs) if p.ramadhan == True else str()
elif pi > rad >= pi * 0.5:
px1 = math.cos(pi * 0.5 + m) * rp + rx
py1 = math.sin(pi * 0.5 + m) * rp + ry
px2 = math.cos(pi * 0.5 + m) * rp + rx
py2 = -math.sin(pi * 0.5 + m) * rp + ry
dm = "M{} {} A{} {} 0 1 1 {} {} {} {} 0 0 0 {} {}z".format(px1, py1, ra1, ra1, px2, py2, ra2, ra3, px1, py1)
ps = phase(rad)
ra = ramadhan(day, mon, yrs) if p.ramadhan == True else str()
elif pi * 1.5 > rad >= pi:
px1 = math.cos(pi * 1.5 + m) * rp + rx
py1 = math.sin(pi * 1.5 + m) * rp + ry
px2 = math.cos(pi * 1.5 + m) * rp + rx
py2 = -math.sin(pi * 1.5 + m) * rp + ry
dm = "M{} {} A{} {} 0 1 1 {} {} {} {} 0 0 0 {} {}z".format(px1, py1, ra1, ra1, px2, py2, ra2, ra3, px1, py1)
ps = phase(rad)
ra = ramadhan(day, mon, yrs) if p.ramadhan == True else str()
else:
px1 = math.cos(pi * 1.5 - m) * rp + rx
py1 = math.sin(pi * 1.5 - m) * rp + ry
px2 = math.cos(pi * 1.5 - m) * rp + rx
py2 = -math.sin(pi * 1.5 - m) * rp + ry
dm = "M{} {} A{} {} 0 1 1 {} {} {} {} 0 0 1 {} {}z".format(px1, py1, ra1, ra1, px2, py2, ra2, ra3+1.75, px1, py1)
ps = phase(rad)
ra = ramadhan(day, mon, yrs) if p.ramadhan == True else str()
else:
if phase(rad) == "n":
px1 = math.cos(pi * 0.5 + m) * rp + rx
py1 = math.sin(pi * 0.5 + m) * rp + ry
px2 = math.cos(pi * 0.5 + m) * rp + rx
py2 = -math.sin(pi * 0.5 + m) * rp + ry
dm = "M{} {} A{} {} 0 1 1 {} {} {} {} 0 0 1 {} {}z".format(px1, py1, ra1, ra1, px2, py2, ra2, ra3, px1, py1)
ps = phase(rad)
ra = ramadhan(day, mon, yrs) if p.ramadhan == True else str()
elif rad < pi * 0.5:
px1 = math.cos(pi * 1.5 - m) * rp + rx
py1 = math.sin(pi * 1.5 - m) * rp + ry
px2 = math.cos(pi * 1.5 - m) * rp + rx
py2 = -math.sin(pi * 1.5 - m) * rp +ry
dm = "M{} {} A{} {} 0 1 1 {} {} {} {} 0 0 1 {} {}z".format(px1, py1, ra1, ra1, px2, py2, ra2, ra3+1, px1, py1)
ps = phase(rad)
ra = ramadhan(day, mon, yrs) if p.ramadhan == True else str()
elif pi > rad >= pi * 0.5:
px1 = math.cos(pi * 1.5 + m) * rp + rx
py1 = math.sin(pi * 1.5 + m) * rp + ry
px2 = math.cos(pi * 1.5 + m) * rp + rx
py2 = -math.sin(pi * 1.5 + m) * rp + ry
dm = "M{} {} A{} {} 0 1 1 {} {} {} {} 0 0 0 {} {}z".format(px1, py1, ra1, ra1, px2, py2, ra2, ra3, px1, py1)
ps = phase(rad)
ra = ramadhan(day, mon, yrs) if p.ramadhan == True else str()
elif pi * 1.5 > rad >= pi:
px1 = math.cos(pi * 0.5 + m) * rp + rx
py1 = math.sin(pi * 0.5 + m) * rp + ry
px2 = math.cos(pi * 0.5 + m) * rp + rx
py2 = -math.sin(pi * 0.5 + m) * rp + ry
dm = "M{} {} A{} {} 0 1 1 {} {} {} {} 0 0 0 {} {}z".format(px1, py1, ra1, ra1, px2, py2, ra2, ra3, px1, py1)
ps = phase(rad)
ra = ramadhan(day, mon, yrs) if p.ramadhan == True else str()
else:
px1 = math.cos(pi * 0.5 - m) * rp + rx
py1 = math.sin(pi * 0.5 - m) * rp + ry
px2 = math.cos(pi * 0.5 - m) * rp + rx
py2 = -math.sin(pi * 0.5 - m) * rp + ry
dm = "M{} {} A{} {} 0 1 1 {} {} {} {} 0 0 1 {} {}z".format(px1, py1, ra1, ra1, px2, py2, ra2, ra3+1.75, px1, py1)
ps = phase(rad)
ra = ramadhan(day, mon, yrs) if p.ramadhan == True else str()
icons += SVGpath(dm, style).svg() if ps != 'f' else ''
# moonrise and moonset time
#t_moonrise = str(daily[20]) # test
t_moonrise = "00:00" if daily[18] == 0 else str(datetime.fromtimestamp(daily[18], tz).strftime("%H:%M"))
t_moonset = "00:00" if daily[19] == 0 else str(datetime.fromtimestamp(daily[19], tz).strftime("%H:%M"))
svg += SVGtext("start", "16px", (_x - 32), (_y - 10), "r:").svg()
svg += SVGtext("end", "16px", (_x + 24), (_y - 10), "{}".format(t_moonrise)).svg()
svg += SVGtext("start", "16px", (_x - 32), (_y + 7), "s:").svg()
svg += SVGtext("end", "16px", (_x + 24), (_y + 7), "{}".format(t_moonset)).svg()
# moon phase and ramadhan
svg += SVGtext("start", "16px", (_x - 32), (_y - 68), "{}".format(ps)).svg()
svg += SVGtext("end", "16px", (_x + 24), (_y - 68), "{}".format(ra)).svg()
# grid
if n < (end - 1):
style = "fill:none;stroke:{};stroke-linecap:{};stroke-width:{}px;".format(grid_y_color, stroke_linecap, grid_y)
icons += SVGline((_x + 30), (_x + 30), (_y - h + 55), (_y + 10), style).svg()
# label
if label == True and label_adjust == True:
svg += SVGtext("middle", "16px", _x, (y - 9), "{}".format(jour)).svg()
elif label == True and label_adjust == False:
svg += SVGtext("middle", "16px", _x, (y - 15), "{}".format(jour)).svg()
# Text
#svg += SVGtext("start", "16px", x, (y - h + 27), "{}".format(name)).svg()
svg += '</g>'
svg += icons
return svg
# Reguler font
class SVGtext:
def __init__(self, anchor, fontsize, x, y, v):
self.anchor = anchor
self.fontsize = fontsize
self.x = x
self.y = y
self.v = v
def svg(self):
res = '<text style="text-anchor:{};" font-size="{}" x="{}" y="{}">{}</text>\n'.\
format(self.anchor, self.fontsize, self.x, self.y, self.v)
return res
# Bold font
class SVGtext2:
def __init__(self, anchor, fontweight, fontsize, x, y, v):
self.anchor = anchor
self.fontweight = fontweight
self.fontsize = fontsize
self.x = x
self.y = y
self.v = v
def svg(self):
res = '<text style="text-anchor:{};" font-weight="{}" font-size="{}" x="{}" y="{}">{}</text>\n'.\
format(self.anchor, self.fontweight, self.fontsize, self.x, self.y, self.v)
return res
class SVGcircle:
def __init__(self, cx, cy, r, stroke, width, fill):
self.cx = cx
self.cy = cy
self.r = r
self.stroke = stroke
self.width = width
self.fill = fill
def svg(self):
res = '<circle cx="{}" cy="{}" r="{}" stroke="{}" stroke-width="{}" fill="{}"/>\n'.\
format(self.cx, self.cy, self.r, self.stroke, self.width, self.fill)
return res
class SVGline:
def __init__(self, x1, x2, y1, y2, style):
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.style = style
def svg(self):
res = '<line x1="{}" x2="{}" y1="{}" y2="{}" style="{}"/>\n'.\
format(self.x1, self.x2, self.y1, self.y2, self.style)
return res
class SVGtransform:
def __init__(self, matrix, obj):
self.matrix = matrix
self.obj = obj
def svg(self):
res = '<g transform="matrix{}">{}</g>\n'.format(self.matrix, self.obj)
return res
class SVGpolyline:
def __init__(self, points, style):
self.points = points
self.style = style
def svg(self):
res = '<polyline points="{}" style="{}"/>\n'.format(self.points, self.style)
return res
class SVGrect:
def __init__(self, x, y, width, height, style):
self.x = x
self.y = y
self.width = width
self.height = height
self.style = style
def svg(self):
res = '<rect x="{}" y="{}" width="{}" height="{}" style="{}"/>\n'.format(self.x, self.y, self.width, self.height, self.style)
return res
class SVGpath:
def __init__(self, d, style):
self.d = d
self.style = style
def svg(self):
res = '<path d="{}" style="{}"/>\n'.format(self.d, self.style)
return res
def s_padding(x):
if x >= 100 : return -5
elif 100 > x >= 10 : return 10
elif 10 > x >= 0 : return 30
elif -10 < x < 0 : return 20
elif x <= -10 : return 0
def text_split(length, text, start_text="" , match=""):
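    # Greedy word wrapper: words accumulate until a line reaches `length`
    # characters; a word matching `match` (e.g. the '*' bullets in alert
    # descriptions) forces a break before it.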
text_list = text.split()
b1 = start_text
s = list()
n = int(0)
for v in text_list:
n += 1
b2 = b1
b1 += v + " "
if len(text_list) == n:
s += [b1 + "\n"]
elif re.match(r'{}'.format(match), v) and not match == '':
s += [b2 + "\n"]
b1 = v + " "
elif re.match(r'^\*$', v) and not match == '':
s += [b2 + "\n"]
b1 = v + " "
elif len(b1) < length:
continue
elif len(b1) >= length:
s += [b2 + "\n"]
b1 = v + " "
return s
def temp_unit(x, y, text, unit):
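    # Renders a temperature reading: the value, a small circle as the degree
    # mark, and the unit letter offset to its right.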
svg = str()
svg += SVGtext("end", "35px", x, y, text).svg()
svg += SVGcircle((x + 5), (y - 25), 4, "black", 2, "none").svg()
svg += SVGtext("start", "25px", (x + 10), (y - 10), unit).svg()
return svg
def read_i18n(p):
with open(i18nfile, 'r') as f:
try:
res = json.load(f)["locale"][p.t_locale]
        except Exception:
res = dict()
return res
def create_svg(p, t_now, tz, utc, svgfile, pngfile):
svg_header = str()
svg_text = str()
svg_draw = str()
svg_footer = str()
f_svg = open(svgfile,"w", encoding=p.encoding)
header = Header(p=p)
svg_header += header.text()
maintenant = Maintenant(p=p, base_x=0, base_y=0)
svg_text += maintenant.text()
svg_draw += maintenant.icon()
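    # Layout selection: with more than two graph objects the compact
    # (alerts-style) current-weather block is used so the graphs fit below;
    # otherwise the full current + 3-hourly layout is drawn, followed by
    # either graphs or the daily forecast.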
if p.graph == True and len(p.graph_object) > 2:
# Current weather
base_x = -5
base_y = 45
disc_offset = 0
wordwrap = 0
current = CurrentWeatherAlerts(p=p, base_x=base_x, base_y=base_y, disc_offset=disc_offset, wordwrap=wordwrap)
svg_text += current.text()
svg_draw += current.icon()
# Graph area x=0,600 y=240,800(240+140+140+140+140)
base_x = 40
#base_y = 380
base_y = 420
canvas = {"width": 530, "height": 140, "bgcolor": "rgb(220,220,220)", "axis": 0, \
"axis_color": "rgb(0,0,0)", "grid": 3, "grid_color": "rgb(255,255,255)"}
for obj in p.graph_object:
svg_draw += DrawGraph(p=p, base_x=base_x, base_y=base_y, canvas=canvas, object=obj).draw()
base_y += 130
else:
# Current weather
base_x = 5
base_y = 40
disc_offset = 35
wordwrap = 20
current = CurrentWeatherNoAlerts(p=p, base_x=base_x, base_y=base_y, disc_offset=disc_offset, wordwrap=wordwrap)
svg_text += current.text()
svg_draw += current.icon()
# Hourly weather
base_x = 370
base_y = 40
h_hour = 3
h_range = 12
h_step = 3
pitch = 155
hourly = HourlyWeather(p, base_x=base_x, base_y=base_y, h_hour=h_hour, h_range=h_range, h_step=h_step, pitch=pitch)
svg_text += hourly.text()
svg_draw += hourly.icon()
# Daily weather
# area x=0,600 y=520,800(520+140+140)
if p.graph == True and len(p.graph_object) <= 2:
base_x = 40
base_y = 660
canvas = {"width": 530, "height": 140, "bgcolor": "rgb(220,220,220)", "axis": 0, \
"axis_color": "rgb(0,0,0)", "grid": 3, "grid_color": "rgb(255,255,255)"}
for obj in p.graph_object:
svg_draw += DrawGraph(p=p, base_x=base_x, base_y=base_y, canvas=canvas, object=obj).draw()
base_y += 140
else:
base_x = 0
base_y = 500
d_range = 4
pitch = 90
daily= DailyWeather(p=p, base_x=base_x, base_y=base_y, d_range=d_range, pitch=pitch)
svg_text += daily.text()
svg_draw += daily.icon()
svg_text += '</g>\n'
svg_footer += '</svg>'
f_svg.write(svg_header + svg_text + svg_draw + svg_footer)
f_svg.close()
def create_alerts_svg(p, t_now, tz, utc, svgfile, pngfile):
svg_header = str()
svg_text = str()
svg_draw = str()
svg_footer = str()
f_svg = open(svgfile,"w", encoding=p.encoding)
header = Header(p=p)
svg_header += header.text()
maintenant = Maintenant(p=p, base_x=0, base_y=0)
svg_text += maintenant.text()
svg_draw += maintenant.icon()
# Current weather
base_x = -5
base_y = 45
disc_offset = 0
wordwrap = 0
current = CurrentWeatherAlerts(p=p, base_x=base_x, base_y=base_y, disc_offset=disc_offset, wordwrap=wordwrap)
svg_text += current.text()
svg_draw += current.icon()
base_x = 0
base_y = 340
max_y = 800
alerts = Alerts(p, base_x, base_y, max_y)
svg_text += alerts.text()
svg_text += '</g>\n'
svg_footer += '</svg>'
f_svg.write(svg_header + svg_text + svg_draw + svg_footer)
# close file
f_svg.close()
# image processing
def img_processing(p, svgfile, pngfile, pngtmpfile, mode):
if p.cloudconvert == False and (p.encoding == 'iso-8859-1' or p.encoding == 'iso-8859-5'):
if converter == 'convert':
args = ['convert', '-size', '600x800', '-background', 'white', '-depth', '8', svgfile, pngfile]
        elif converter == 'gm':
args = ['gm', 'convert', '-size', '600x800', '-background', 'white', '-depth', '8', \
'-resize', '600x800', '-colorspace', 'gray', '-type', 'palette', '-geometry', '600x800', \
svgfile, pngfile]
output = Popen(args)
elif p.cloudconvert == True:
# cloudconvert API
import cloudconvert
import json
with open('cloudconvert.json') as f:
data = json.load(f)
# print(data['api_key'])
cloudconvert.configure(api_key=data['api_key'], sandbox=False)
try:
# upload
job = cloudconvert.Job.create(payload={
'tasks': {
'upload-my-file': {
'operation': 'import/upload'
}
}
})
upload_task_id = job['tasks'][0]['id']
upload_task = cloudconvert.Task.find(id=upload_task_id)
res = cloudconvert.Task.upload(file_name=svgfile, task=upload_task)
res = cloudconvert.Task.find(id=upload_task_id)
# convert
job = cloudconvert.Job.create(payload={
"tasks": {
'convert-my-file': {
'operation': 'convert',
'input': res['id'],
'output_format': 'png',
'some_other_option': 'value'
},
'export-my-file': {
'operation': 'export/url',
'input': 'convert-my-file'
}
}
})
# download
exported_url_task_id = job['tasks'][1]['id']
res = cloudconvert.Task.wait(id=exported_url_task_id) # Wait for job completion
file = res.get("result").get("files")[0]
res = cloudconvert.download(filename=pngfile, url=file['url']) # download and return filename
except Exception as e:
print(e)
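    # ImageMagick post-processing: flatten the PNG, and for dark mode negate
    # it as well so the final image is inverted.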
if mode == 'darkmode':
args = ['convert', '-flatten', pngfile, pngtmpfile]
output = Popen(args)
t.sleep(3)
args = ['convert', '-negate', pngtmpfile, flatten_pngfile]
output = Popen(args)
elif mode == 'lightmode':
args = ['convert', '-flatten', pngfile, flatten_pngfile]
output = Popen(args)
else:
args = ['convert', '-flatten', pngfile, flatten_pngfile]
output = Popen(args)
#t.sleep(3)
if __name__ == "__main__":
# Using custom settings.xml
if len(sys.argv) > 1:
settings = sys.argv[1]
try:
p = OpenWeatherMap(settings)
except Exception as e:
shutil.copyfile(error_image, flatten_pngfile)
print(e)
exit(1)
curt_weather = p.current_weather()
# timezone setting
t_now = p.t_now
tz = timezone(p.t_timezone)
utc = pytz.utc
if p.darkmode == 'True':
mode = 'darkmode'
elif p.darkmode == 'Auto':
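        # 'Auto': curt_weather[11]/[12] appear to hold the sunrise/sunset
        # timestamps, so dark mode is used outside daylight hours.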
if curt_weather[11] > t_now or curt_weather[12] < t_now:
mode = 'darkmode'
else:
mode = 'lightmode'
elif p.darkmode == 'False':
mode = 'lightmode'
else:
mode = 'lightmode'
if p.ramadhan == True:
from hijri_converter import Hijri, Gregorian
# locale setting
#locale.setlocale(locale.LC_TIME, p.t_locale)
locale.setlocale(locale.LC_TIME, "en_US.utf-8")
if p.alerts == True and not (p.weather_alerts() is None):
create_alerts_svg(p=p, t_now=t_now, tz=tz, utc=utc, svgfile=svgfile, pngfile=pngfile)
else:
create_svg(p=p, t_now=t_now, tz=tz, utc=utc, svgfile=svgfile, pngfile=pngfile)
t.sleep(1)
try:
img_processing(p=p, svgfile=svgfile, pngfile=pngfile, pngtmpfile=pngtmpfile, mode=mode)
except Exception as e:
shutil.copyfile(error_image, flatten_pngfile)
print(e)
exit(1)
|
Neelakurinji123/kindle-weather-display-for-openwrt
|
openwrt/opt/kindle-weather-station/IconExtras.py
|
<gh_stars>0
#!/usr/bin/env python3
#
# Weather icons by <NAME> from the Noun Project
#
#def getDrizzle():
# # 45%, x=0px, y=-5px
# return '''<path d="" />'''
#
#def getThunderstorm():
# # 45%, x=0px, y=-5px
# return '''<path d="" />'''
#
#def getSleet():
# # 45%, x=0px, y=-5px
# return '''<path d="" />'''
#
#def getMist():
# # 45%, x=0px, y=-5px
# return '''<path d="" />'''
#def getSmoke():
# return '''<path d="" />'''
#
#def getHaze():
# return '''<path d="" />'''
#
#def getDust():
# return '''<path d="" />'''
#
#def getSand():
# return '''<path d="" />'''
#
#def getAsh():
# return '''<path d="" />'''
#def getSquall():
# # 45%, x=0px, y=-5px
# return '''<path d="" />'''
#
#def getTornado():
# # 45%, x=0px, y=-10px
# return '''<path d="" />'''
#
#def getCyclone():
# # 45%, x=0px, y=-17.5px
# return '''<path d="" />'''
#
#def getSnow2():
# # 45%, x=0px, y=-7.5px
# return '''<path d="" />'''
|
Neelakurinji123/kindle-weather-display-for-openwrt
|
openwrt/opt/kindle-weather-station/test/json_checker.py
|
<gh_stars>0
#!/usr/bin/env python3
# encoding=utf-8
# -*- coding: utf-8 -*-
import json
import sys
file = sys.argv[1] if len(sys.argv) >= 3 else None
if len(sys.argv) < 3:
print("usage:", sys.argv[0], "jsonfile", "key1", "[key2]", "[ key3]")
exit(0)
elif len(sys.argv) == 3:
key1 = sys.argv[2]
elif len(sys.argv) == 4:
key1 = sys.argv[2]
key2 = sys.argv[3]
elif len(sys.argv) == 5:
key1 = sys.argv[2]
key2 = sys.argv[3]
key3 = sys.argv[4]
with open(file, 'r') as f:
if len(sys.argv) == 3:
d = json.load(f)[key1]
print(d)
elif len(sys.argv) == 4:
d = json.load(f)[key1][key2]
print(d)
elif len(sys.argv) == 5:
d = json.load(f)[key1][key2][key3]
print(d)
|
RobSpectre/Caesar-Cipher
|
caesarcipher/cmdline.py
|
<filename>caesarcipher/cmdline.py
import logging
import argparse
from caesarcipher import CaesarCipher
from caesarcipher import CaesarCipherError
# Parser configuration
parser = argparse.ArgumentParser(description="Caesar Cipher - encode, decode "
"or crack messages with an "
"English alphabet offset.",
epilog="Written by <NAME> "
"Olympics London.\n"
"http://www.brooklynhacker.com")
parser.add_argument('message',
help="Message to be encoded, decoded or cracked.")
parser.add_argument('-e', '--encode', action="store_true",
help="Encode this message.")
parser.add_argument('-d', '--decode', action="store_true",
help="Decode this message.")
parser.add_argument('-c', '--crack', action="store_true",
help="Crack this ciphertext to find most likely message.")
parser.add_argument('-v', '--verbose', action="store_true",
help="Turn on verbose output.")
parser.add_argument('-o', '--offset',
help="Integer offset to encode/decode message against.")
parser.add_argument('-a', '--alphabet',
help="String of alphabet you want to use to apply the "
"cipher against.")
def main():
caesar_cipher = CaesarCipher()
parser.parse_args(namespace=caesar_cipher)
# Logging configuration
if caesar_cipher.verbose is True:
log_level = logging.DEBUG
log_format = "%(asctime)s - %(levelname)s: %(message)s"
else:
log_level = logging.INFO
log_format = "%(message)s"
logging.basicConfig(level=log_level, format=log_format)
# Non-required arguments and error conditions.
if caesar_cipher.offset:
caesar_cipher.offset = int(caesar_cipher.offset)
if caesar_cipher.offset is False and caesar_cipher.decode is True:
raise CaesarCipherError("Message cannot be decoded without "
"selecting an offset. Please try "
"again with -o switch.")
if caesar_cipher.encode is True and caesar_cipher.decode is True:
raise CaesarCipherError("Please select to encode or encode a message, "
"not both.")
# Required arguments.
if caesar_cipher.decode is True:
logging.info("Decoded message: {0}".format(caesar_cipher.decoded))
elif caesar_cipher.crack is True:
logging.info("Cracked message: {0}".format(caesar_cipher.cracked))
elif caesar_cipher.encode is True:
logging.info("Encoded message: {0}".format(caesar_cipher.encoded))
else:
logging.error("Please select a message to encode, decode or "
"crack. For more information, use --help.")
|
RobSpectre/Caesar-Cipher
|
caesarcipher/__init__.py
|
<reponame>RobSpectre/Caesar-Cipher<filename>caesarcipher/__init__.py<gh_stars>10-100
try:
from caesarcipher.caesarcipher import CaesarCipher
from caesarcipher.caesarcipher import CaesarCipherError
except ImportError:
from caesarcipher import CaesarCipher
from caesarcipher import CaesarCipherError
__title__ = 'caesarcipher'
__version__ = '1.0'
__author__ = '<NAME>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 <NAME>'
|
RobSpectre/Caesar-Cipher
|
caesarcipher/caesarcipher.py
|
<reponame>RobSpectre/Caesar-Cipher
from random import randrange
import string
import math
import logging
class CaesarCipher(object):
def __init__(self, message=None, encode=False, decode=False, offset=False,
crack=None, verbose=None, alphabet=None):
"""
A class that encodes, decodes and cracks strings using the Caesar shift
cipher.
Accepts messages in a string and encodes or decodes by shifting the
value of the letter by an arbitrary integer to a different letter in
the alphabet provided.
http://en.wikipedia.org/wiki/Caesar_cipher
Do not ever use this for real communication, but definitely use it for
fun events like the Hacker Olympics.
Attributes:
message: The string you wish to encode.
encode: A boolean indicating desire to encode the string, used as
command line script flag.
            decode: A boolean indicating desire to decode the string, used as
                command line script flag.
            crack: A boolean indicating desire to crack the string, used
                as command line script flag.
            verbose: A boolean indicating the desire to turn on debug output,
                used as command line script flag.
offset: Integer by which you want to shift the value of a letter.
alphabet: A tuple containing the ASCII alphabet in uppercase.
Examples:
Encode a string with a random letter offset.
>>> cipher = CaesarCipher('I want to encode this string.')
>>> cipher.encoded
'W kobh hc sbqcrs hvwg ghfwbu.'
Encode a string with a specific letter offset.
>>> cipher = CaesarCipher('I want to encode this string.',
... offset=14)
>>> cipher.encoded
'W kobh hc sbqcrs hvwg ghfwbu.'
Decode a string with a specific letter offset.
>>> cipher = CaesarCipher('W kobh hc sbqcrs hvwg ghfwbu.',
... offset=14)
>>> cipher.decoded
'I want to encode this string.'
Crack a string of ciphertext without knowing the letter offset.
>>> cipher = CaesarCipher('W kobh hc sbqcrs hvwg ghfwbu.')
>>> cipher.cracked
'I want to encode this string.'
"""
self.message = message
self.encode = encode
self.decode = decode
self.offset = offset
self.verbose = verbose
self.crack = crack
self.alphabet = alphabet
# Frequency of letters used in English, taken from Wikipedia.
# http://en.wikipedia.org/wiki/Letter_frequency
self.frequency = {
'a': 0.08167,
'b': 0.01492,
'c': 0.02782,
'd': 0.04253,
'e': 0.130001,
'f': 0.02228,
'g': 0.02015,
'h': 0.06094,
'i': 0.06966,
'j': 0.00153,
'k': 0.00772,
'l': 0.04025,
'm': 0.02406,
'n': 0.06749,
'o': 0.07507,
'p': 0.01929,
'q': 0.00095,
'r': 0.05987,
's': 0.06327,
't': 0.09056,
'u': 0.02758,
'v': 0.00978,
'w': 0.02360,
'x': 0.00150,
'y': 0.01974,
'z': 0.00074}
# Get ASCII alphabet if one is not provided by the user.
if alphabet is None:
self.alphabet = tuple(string.ascii_lowercase)
def cipher(self):
"""Applies the Caesar shift cipher.
Based on the attributes of the object, applies the Caesar shift cipher
to the message attribute. Accepts positive and negative integers as
offsets.
Required attributes:
message
offset
Returns:
String with cipher applied.
"""
# If no offset is selected, pick random one with sufficient distance
# from original.
if self.offset is False:
self.offset = randrange(5, 25)
logging.info("Random offset selected: {0}".format(self.offset))
logging.debug("Offset set: {0}".format(self.offset))
# Cipher
ciphered_message_list = list(self.message)
for i, letter in enumerate(ciphered_message_list):
if letter.isalpha():
# Use default upper and lower case characters if alphabet
# not supplied by user.
if letter.isupper():
alphabet = [character.upper()
for character in self.alphabet]
else:
alphabet = self.alphabet
logging.debug("Letter: {0}".format(letter))
logging.debug("Alphabet: {0}".format(alphabet))
value = alphabet.index(letter)
cipher_value = value + self.offset
if cipher_value > 25 or cipher_value < 0:
cipher_value = cipher_value % 26
logging.debug("Cipher value: {0}".format(cipher_value))
ciphered_message_list[i] = alphabet[cipher_value]
logging.debug("Ciphered letter: {0}".format(letter))
self.message = ''.join(ciphered_message_list)
return self.message
def calculate_entropy(self, entropy_string):
"""Calculates the entropy of a string based on known frequency of
English letters.
Args:
entropy_string: A str representing the string to calculate.
Returns:
            A positive float with the total entropy score of the string (lower
            scores indicate more English-like text).
"""
total = 0
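        # Each alphabetic character adds -log2(P(letter)); frequent English
        # letters contribute little, rare ones a lot, so English-like text
        # ends up with the lowest total.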
for char in entropy_string:
if char.isalpha():
prob = self.frequency[char.lower()]
total += - math.log(prob) / math.log(2)
logging.debug("Entropy score: {0}".format(total))
return total
@property
def cracked(self):
"""Attempts to crack ciphertext using frequency of letters in English.
Returns:
String of most likely message.
"""
logging.info("Cracking message: {0}".format(self.message))
entropy_values = {}
attempt_cache = {}
message = self.message
for i in range(25):
self.message = message
self.offset = i * -1
logging.debug("Attempting crack with offset: "
"{0}".format(self.offset))
test_cipher = self.cipher()
logging.debug("Attempting plaintext: {0}".format(test_cipher))
entropy_values[i] = self.calculate_entropy(test_cipher)
attempt_cache[i] = test_cipher
sorted_by_entropy = sorted(entropy_values, key=entropy_values.get)
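        # The lowest-scoring (most English-like) trial decryption wins; keep
        # its text and the (negated) offset that produced it.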
self.offset = sorted_by_entropy[0] * -1
cracked_text = attempt_cache[sorted_by_entropy[0]]
self.message = cracked_text
logging.debug("Entropy scores: {0}".format(entropy_values))
logging.debug("Lowest entropy score: "
"{0}".format(str(entropy_values[sorted_by_entropy[0]])))
logging.debug("Most likely offset: {0}".format(self.offset))
logging.debug("Most likely message: {0}".format(cracked_text))
return cracked_text
@property
def encoded(self):
"""Encodes message using Caesar shift cipher
Returns:
String encoded with cipher.
"""
logging.info("Encoding message: {0}".format(self.message))
return self.cipher()
@property
def decoded(self):
"""Decodes message using Caesar shift cipher
Inverse operation of encoding, applies negative offset to Caesar shift
cipher.
Returns:
String decoded with cipher.
"""
logging.info("Decoding message: {0}".format(self.message))
self.offset = self.offset * -1
return self.cipher()
class CaesarCipherError(Exception):
def __init__(self, message):
logging.error("ERROR: {0}".format(message))
logging.error("Try running with --help for more information.")
|
RobSpectre/Caesar-Cipher
|
tests/test_caesarcipher.py
|
<gh_stars>10-100
import unittest
from caesarcipher import CaesarCipher
from caesarcipher import CaesarCipherError
class CaesarCipherEncodeTest(unittest.TestCase):
def test_encode_with_known_offset(self):
message = "Twilio"
test_cipher = CaesarCipher(message, encode=True, offset=1)
self.assertEquals(test_cipher.encoded, "Uxjmjp")
def test_encode_long_phrase_with_known_offset(self):
message = "The quick brown fox jumps over the lazy dog."
test_cipher = CaesarCipher(message, encode=True, offset=7)
self.assertEquals(test_cipher.encoded,
"Aol xbpjr iyvdu mve qbtwz vcly aol shgf kvn.")
def test_encode_with_mirror_offset(self):
message = "The quick brown fox jumps over the lazy dog."
test_cipher = CaesarCipher(message, encode=True, offset=26)
self.assertEquals(test_cipher.encoded,
"The quick brown fox jumps over the lazy dog.")
def test_encode_with_offset_greater_than_alphabet_length(self):
message = "The quick brown fox jumps over the lazy dog."
test_cipher = CaesarCipher(message, encode=True, offset=28)
self.assertEquals(test_cipher.encoded,
"Vjg swkem dtqyp hqz lworu qxgt vjg ncba fqi.")
def test_encode_with_very_large_offset(self):
message = "The quick brown fox jumps over the lazy dog."
test_cipher = CaesarCipher(message, encode=True, offset=10008)
self.assertEquals(test_cipher.encoded,
"Rfc osgai zpmul dmv hsknq mtcp rfc jyxw bme.")
def test_encode_decode_consistent(self):
message = "The quick brown fox jumps over the lazy dog."
setup_cipher = CaesarCipher(message, encode=True, offset=14)
encoded_message = setup_cipher.encoded
test_cipher = CaesarCipher(encoded_message, decode=True, offset=14)
self.assertEquals(message, test_cipher.decoded)
def test_encode_with_arbitrary_alphabet(self):
message = "The quick brown fox jumps over the lazy dog."
alphabet = 'ueyplkizjgncdbqshoaxmrwftv'
test_cipher = CaesarCipher(message, offset=7, alphabet=alphabet)
self.assertEquals('Kfj rzbad mytpo ltu szenw tijy kfj cvqg xth.',
test_cipher.encoded)
class CaesarCipherDecodeTest(unittest.TestCase):
def test_decode_with_known_offset(self):
message = "UXJMJP"
test_cipher = CaesarCipher(message, encode=True, offset=1)
self.assertEquals(test_cipher.decoded, "TWILIO")
def test_decode_long_phrase_with_known_offset(self):
message = "AOL XBPJR IYVDU MVE QBTWZ VCLY AOL SHGF KVN."
test_cipher = CaesarCipher(message, decode=True, offset=7)
self.assertEquals(test_cipher.decoded,
"THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG.")
def test_decode_with_offset_greater_than_alphabet_length(self):
message = "VJG SWKEM DTQYP HQZ LWORU QXGT VJG NCBA FQI."
test_cipher = CaesarCipher(message, decode=True, offset=28)
self.assertEquals(test_cipher.decoded,
"THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG.")
def test_decode_with_very_large_offset(self):
message = "RFC OSGAI ZPMUL DMV HSKNQ MTCP RFC JYXW BME."
test_cipher = CaesarCipher(message, decode=True, offset=10008)
self.assertEquals(test_cipher.decoded,
"THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG.")
def test_encode_decode_persistence(self):
message = "The quick brown fox jumps over the lazy dog."
test_cipher = CaesarCipher(message, encode=True, offset=14)
test_cipher.encoded
self.assertEquals(message, test_cipher.decoded)
def test_decode_with_arbitrary_alphabet(self):
message = "Kfj rzbad mytpo ltu szenw tijy kfj cvqg xth."
alphabet = 'ueyplkizjgncdbqshoaxmrwftv'
test_cipher = CaesarCipher(message, offset=7, alphabet=alphabet)
self.assertEquals('The quick brown fox jumps over the lazy dog.',
test_cipher.decoded)
class CaesarCipherRegressionTest(unittest.TestCase):
def test_all_offsets(self):
message = "The quick brown fox jumps over the lazy dog."
for i in range(0, 100):
test_cipher = CaesarCipher(message, encode=True, offset=i)
test_cipher.encoded
self.assertEquals(message, test_cipher.decoded)
class CaesarCipherErrorTest(unittest.TestCase):
def test_caesar_cipher_error(self):
def raiseCaesarCipherError():
raise CaesarCipherError("This test is bullshit to hit 100%"
" coverage.")
self.assertRaises(CaesarCipherError, raiseCaesarCipherError)
class CaesarCipherCrackTest(unittest.TestCase):
def test_calculate_entropy_zero_offset(self):
message = "The quick brown fox jumps over the lazy dog."
test_cipher = CaesarCipher(message, crack=True)
confirmed_entropy_value = 179.14217305030957
test_entropy_value = test_cipher.calculate_entropy(message)
self.assertEquals(confirmed_entropy_value, test_entropy_value)
def test_crack(self):
ciphertext = "Rfc osgai zpmul dmv hsknq mtcp rfc jyxw bme."
plaintext = "The quick brown fox jumps over the lazy dog."
test_crack = CaesarCipher(ciphertext, crack=True)
self.assertEquals(plaintext, test_crack.cracked)
def test_crack_one_word(self):
ciphertext = "Yxo"
plaintext = "One"
test_crack = CaesarCipher(ciphertext, crack=True)
self.assertEquals(plaintext, test_crack.cracked)
def test_crack_difficult_word(self):
message = "A quixotic issue to test."
test_cipher = CaesarCipher(message).encoded
cracked_text = CaesarCipher(test_cipher).cracked
self.assertEquals(message, cracked_text)
class CaesarCipherCrackRegressionTest(unittest.TestCase):
def test_lots_of_cracks(self):
plaintexts = [
"London calling to the faraway towns",
"Now war is declared and battle come down",
"London calling to the underworld",
"Come out of the cupboard, you boys and girls",
"London calling, now don't look to us",
"<NAME> has bitten the dust",
"London calling, see we ain't got no swing",
"'Cept for the ring of that truncheon thing",
"The ice age is coming, the sun is zooming in",
"Meltdown expected, the wheat is growin' thin",
"Engines stop running, but I have no fear",
"Cause London is drowning, and I, I live by the river",
"London calling to the imitation zone",
"Forget it, brother, you can go it alone",
"London calling to the zombies of death",
"Quit holding out and draw another breath",
"London calling and I don't want to shout",
"But when we were talking I saw you nodding out",
"London calling, see we ain't got no high",
"Except for that one with the yellowy eye",
"Now get this",
"London calling, yes, I was there, too",
"An' you know what they said? Well, some of it was true!",
"London calling at the top of the dial",
"And after all this, won't you give me a smile?",
"I never felt so much a' like a'like a'like",
"When they kick at your front door",
"How you gonna come?",
"With your hands on your head",
"Or on the trigger of your gun",
"When the law break in",
"How you gonna go?",
"Shot down on the pavement",
"Or waiting on death row",
"You can crush us",
"You can bruise us",
"But you'll have to answer to",
"Oh, the guns of Brixton",
"The money feels good",
"And your life you like it well",
"But surely your time will come",
"As in heaven, as in hell",
"You see, he feels like Ivan",
"Born under the Brixton sun",
"His game is called survivin'",
"At the end of the harder they come",
"You know it means no mercy",
"They caught him with a gun",
"No need for the Black Maria",
"Goodbye to the Brixton sun",
"You can crush us",
"You can bruise us",
"Yes, even shoot us",
"But oh-the guns of Brixton",
"Shot down on the pavement",
"Waiting in death row",
"His game is called survivin'",
"As in heaven as in hell",
"Anybody who makes speeches written ",
"by someone else is just a robot."]
ciphertexts = [
"Cfeufe trcczex kf kyv wrirnrp kfnej",
"Tuc cgx oy jkirgxkj gtj hgzzrk iusk juct",
"Twvlwv kittqvo bw bpm cvlmzewztl",
"Lxvn xdc xo cqn ldykxjam, hxd kxhb jwm praub",
"Bedted sqbbydw, dem ted'j beea je ki",
"Yqxwh Knjcunvjwrj qjb krccnw cqn mdbc",
"Hkjzkj ywhhejc, oaa sa wej'p ckp jk osejc",
"'Lnyc oxa cqn arwp xo cqjc cadwlqnxw cqrwp",
"Lzw auw syw ak ugeafy, lzw kmf ak rggeafy af",
"Rjqyitbs jcujhyji, ymj bmjfy nx lwtbns' ymns",
"Oxqsxoc cdyz bexxsxq, led S rkfo xy pokb",
"Usmkw Dgfvgf ak vjgofafy, sfv A, A danw tq lzw janwj",
"Cfeufe trcczex kf kyv zdzkrkzfe qfev",
"Oxapnc rc, kaxcqna, hxd ljw px rc juxwn",
"Twvlwv kittqvo bw bpm hwujqma wn lmibp",
"Mqep dkhzejc kqp wjz znws wjkpdan xnawpd",
"Gjiyji xvggdib viy D yji'o rvio oj ncjpo",
"Mfe hspy hp hpcp elwvtyr T dlh jzf yzootyr zfe",
"Jmlbml ayjjgle, qcc uc ygl'r emr lm fgef",
"Votvgk wfi kyrk fev nzky kyv pvccfnp vpv",
"Stb ljy ymnx",
"Ehgwhg vteebgz, rxl, B ptl maxkx, mhh",
"Iv' gwc svwe epib bpmg aiql? Emtt, awum wn qb eia bzcm!",
"Svukvu jhsspun ha aol avw vm aol kphs",
"Reu rwkvi rcc kyzj, nfe'k pfl xzmv dv r jdzcv?",
"E jaran bahp ok iqyd w' hega w'hega w'hega",
"Lwtc iwtn zxrz pi ndjg ugdci sddg",
"Yfn pfl xfeer tfdv?",
"Lxiw ndjg wpcsh dc ndjg wtps",
"Il ih nby nlcaayl iz siol aoh",
"Bmjs ymj qfb gwjfp ns",
"Mtb dtz ltssf lt?",
"Hwdi sdlc dc iwt epktbtci",
"Tw bfnynsl ts ijfym wtb",
"Qgm usf ujmkz mk",
"Gwc kiv jzcqam ca",
"Jcb gwc'tt pidm bw ivaemz bw",
"Mf, rfc eslq md Zpgvrml",
"Kyv dfevp wvvcj xffu",
"Wjz ukqn heba ukq hega ep sahh",
"Rkj ikhubo oekh jycu mybb secu",
"Xp fk ebxsbk, xp fk ebii",
"Hxd bnn, qn onnub urtn Rejw",
"Uhkg ngwxk max Ukbqmhg lng",
"Opz nhtl pz jhsslk zbycpcpu'",
"Fy ymj jsi tk ymj mfwijw ymjd htrj",
"Fvb ruvd pa tlhuz uv tlyjf",
"Znke igamnz nos cozn g mat",
"Yz yppo qzc esp Mwlnv Xlctl",
"Zhhwurx mh max Ukbqmhg lng",
"Vlr zxk zorpe rp",
"Oek sqd rhkyiu ki",
"Lrf, rira fubbg hf",
"Kdc xq-cqn pdwb xo Kargcxw",
"Dsze ozhy zy esp algpxpye",
"Osalafy af vwslz jgo",
"Efp dxjb fp zxiiba prosfsfk'",
"Rj ze yvrmve rj ze yvcc",
"Ylwzmbw ufm kyicq qnccafcq upgrrcl ",
"ur lhfxhgx xelx bl cnlm t khuhm."]
for i, ciphertext in enumerate(ciphertexts):
test_cipher = CaesarCipher(ciphertext, crack=True)
self.assertEquals(plaintexts[i], test_cipher.cracked)
|
RobSpectre/Caesar-Cipher
|
setup.py
|
<reponame>RobSpectre/Caesar-Cipher
from caesarcipher import __version__
scripts = ['bin/caesarcipher']
setup_args = {
'name': 'caesarcipher',
'version': __version__,
'url': 'https://github.com/RobSpectre/Caesar-Cipher',
'description': 'A Python package and command line script for encoding, '
'decoding and cracking Caesar ciphers.',
'long_description': open('README.rst').read(),
'author': '<NAME>',
'author_email': '<EMAIL>',
'license': 'MIT',
'packages': ['caesarcipher', 'tests'],
'scripts': ['bin/caesarcipher'],
'include_package_data': True,
'classifiers': [
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Environment :: Console',
'Topic :: Security :: Cryptography',
]
}
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(**setup_args)
|
mepesh/eqapp_rev_2
|
eqapp_rev_2.py
|
<reponame>mepesh/eqapp_rev_2
from flask import Flask, redirect, render_template
from datetime import datetime
import csv,random
app = Flask(__name__)
#creating csv file
with open('templates/eqdata.csv', 'w', newline='') as fwrite:
writer = csv.writer(fwrite)
for i in range(1,10):
writer.writerow((i,datetime.now().strftime("%Y-%m-%d"),
datetime.now().strftime('%H:%M:%S'),'apple','ball','cat',
random.uniform(4,8)))
@app.route('/')
def main():
return render_template('eqMain.html')
@app.route('/index')
def tab1():
return render_template('tab1_index.html')
@app.route('/eqhis')
def history():
return render_template('tab2_eqhis.html')
@app.route('/eqrealdata')
def real():
#reading the created csv file
    with open('templates/eqdata.csv', 'r', newline='') as fread:
        reader = csv.reader(fread)
        data = {}
        for a in reader:
            data[a[0]] = [a[1], a[2], a[3], a[4], a[5], a[6]]
    #publishing the values to web
    return render_template('tab3_eqrealdata.html', result=data)
@app.route('/about')
def about():
return render_template('tab4_about.html')
if __name__ == '__main__':
app.run(debug=True)
|
agtbaskara/apriltag-node-stream
|
camera_calibration.py
|
<reponame>agtbaskara/apriltag-node-stream<filename>camera_calibration.py
import cv2
import numpy as np
import os
import glob
# Defining the dimensions of checkerboard
CHECKERBOARD = (6,9)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Creating vector to store vectors of 3D points for each checkerboard image
objpoints = []
# Creating vector to store vectors of 2D points for each checkerboard image
imgpoints = []
# Defining the world coordinates for 3D points
objp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
objp[0,:,:2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
prev_img_shape = None
# Extracting path of individual image stored in a given directory
images = glob.glob('./calibration_data/*.jpg')
for fname in images:
img = cv2.imread(fname)
img = cv2.resize(img, (640,480))
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chess board corners
# If desired number of corners are found in the image then ret = true
ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
"""
    If the desired number of corners are detected,
    we refine the pixel coordinates and display
    them on the images of the checkerboard
"""
if ret == True:
objpoints.append(objp)
# refining pixel coordinates for given 2d points.
corners2 = cv2.cornerSubPix(gray, corners, (11,11),(-1,-1), criteria)
imgpoints.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, CHECKERBOARD, corners2, ret)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
h,w = img.shape[:2]
"""
Performing camera calibration by
passing the value of known 3D points (objpoints)
and corresponding pixel coordinates of the
detected corners (imgpoints)
"""
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
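# mtx is the 3x3 intrinsic camera matrix, dist the lens distortion
# coefficients, and rvecs/tvecs the per-image rotation/translation of the board.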
print("Camera matrix : \n")
print(mtx)
print("dist : \n")
print(dist)
print("rvecs : \n")
print(rvecs)
print("tvecs : \n")
print(tvecs)
|
agtbaskara/apriltag-node-stream
|
aprlitag_test.py
|
import cv2
import glob
from pupil_apriltags import Detector
fx = 472.13107208
fy = 472.3044311
cx = 322.23325564
cy = 238.25801953
tag_size = 0.07
camera_params = ([fx, fy, cx, cy])
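# camera_params = [fx, fy, cx, cy] from a prior calibration (see
# camera_calibration.py); pupil_apriltags combines them with tag_size
# (the tag edge length, in the units you want pose translations in).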
at_detector = Detector(families='tag36h11',
nthreads=1,
quad_decimate=1.0,
quad_sigma=0.0,
refine_edges=1,
decode_sharpening=0.25,
debug=0)
images = glob.glob('./sample_data/*.jpg')
for fname in images:
img = cv2.imread(fname)
img = cv2.resize(img, (640,480))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
tags = at_detector.detect(gray, estimate_tag_pose=True, camera_params=camera_params, tag_size=tag_size)
print(tags)
|
agtbaskara/apriltag-node-stream
|
program_server.py
|
<reponame>agtbaskara/apriltag-node-stream<filename>program_server.py<gh_stars>0
# run this program on the Mac to display image streams from multiple RPis
import cv2
import imagezmq
from pupil_apriltags import Detector
fx = 472.13107208
fy = 472.3044311
cx = 322.23325564
cy = 238.25801953
tag_size = 0.055
camera_params = ([fx, fy, cx, cy])
at_detector = Detector(families='tag36h11',
nthreads=1,
quad_decimate=1.0,
quad_sigma=0.0,
refine_edges=1,
decode_sharpening=0.25,
debug=0)
image_hub = imagezmq.ImageHub()
while True: # show streamed images until Ctrl-C
node_name, img = image_hub.recv_image()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
tags = at_detector.detect(gray, estimate_tag_pose=True, camera_params=camera_params, tag_size=tag_size)
if len(tags):
print("Rotation Matrix:")
print(tags[0].pose_R)
print("Translation Matrix:")
print(tags[0].pose_t)
# Draw Bounding Box
corner0 = (int(tags[0].corners[0][0]), int(tags[0].corners[0][1]))
corner1 = (int(tags[0].corners[1][0]), int(tags[0].corners[1][1]))
corner2 = (int(tags[0].corners[2][0]), int(tags[0].corners[2][1]))
corner3 = (int(tags[0].corners[3][0]), int(tags[0].corners[3][1]))
img = cv2.line(img, corner0, corner1, (180, 105, 255), 5)
img = cv2.line(img, corner1, corner2, (180, 105, 255), 5)
img = cv2.line(img, corner2, corner3, (180, 105, 255), 5)
img = cv2.line(img, corner3, corner0, (180, 105, 255), 5)
cv2.imshow(node_name, img) # 1 window for each RPi
cv2.waitKey(1)
image_hub.send_reply(b'OK')
|
agtbaskara/apriltag-node-stream
|
program_node.py
|
<filename>program_node.py
# run this program on each RPi to send a labelled image stream
import socket
import time
from imutils.video import VideoStream
import imagezmq
import cv2
server_address = "tcp://127.0.0.1:5555"
sender = imagezmq.ImageSender(connect_to=server_address)
node_name = socket.gethostname() # send RPi hostname with each image
camera = VideoStream().start()
time.sleep(2.0) # allow camera sensor to warm up
print("Start Stream")
while True: # send images as stream until Ctrl-C
image = camera.read()
image = cv2.resize(image, (640,480))
sender.send_image(node_name, image)
|
dhimmel/schema
|
tools/rnc-validate-format.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import subprocess
import argparse
from shutil import which
from glob import glob
# to validate and pretty print the rnc schema files before commit:
# tools/rnc-validate-format.py
parser = argparse.ArgumentParser(description="process csl schema files")
parser.add_argument("--commit", action="store_true", help="commit the file(s)")
args, unknown = parser.parse_known_args()
rncdir = os.path.join("schemas", "styles")
def rnc_format(rncfile):
    if which("trang") is None:
        raise SystemExit("trang command not found; please install")
rng_new = rncfile + "_new.rng"
# round-trip the rnc file through trang
subprocess.run(["trang", rncfile, rng_new])
subprocess.run(["trang", rng_new, rncfile])
# remove the intermediate files
tempfiles = glob(os.path.join(rncdir, "csl*.rng"))
for file in tempfiles:
os.remove(file)
def rnc_pre_commit():
# this will only work as part of a pre-commit process, where it will run
# rnc_format on any staged rnc files, and then restage
cmd = ["git", "diff", "--name-only", "--cached"]
changed_files = subprocess.check_output(cmd, text=True).splitlines()
for changed_file in changed_files:
if changed_file.endswith(".rnc"):
rnc_format(changed_file)
subprocess.run(["git", "add", changed_file])
def main():
if args.commit:
rnc_pre_commit()
else:
rnc_format(os.path.join("schemas", "styles", "csl.rnc"))
print("\n====> rnc schema files validated and formatted .....")
if __name__ == "__main__":
main()
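# Illustrative sketch: one way to wire the --commit flow above into git is to make
# .git/hooks/pre-commit an executable script along these lines (the hook path and the
# invocation are assumptions for illustration only):
#
#   #!/usr/bin/env python3
#   import subprocess
#   import sys
#   sys.exit(subprocess.call(["python3", "tools/rnc-validate-format.py", "--commit"]))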
|
msimonin/test_oslo_messaging
|
bar_chart.py
|
<filename>bar_chart.py
import numpy as np
import matplotlib
matplotlib.use('tkagg')
matplotlib.rcParams.update({'font.size': 20})
import matplotlib.pyplot as plt
# data to plot
latency = ['0.3ms', '10ms', '50ms']
# create plot
def draw(rpc_call, rpc_cast, latency, filename):
fig, ax = plt.subplots()
index = np.arange(len(latency))
bar_width = 0.35
opacity = 0.8
rects1 = plt.bar(index, rpc_call, bar_width,
alpha=opacity,
color = '#ff3c00',
label='RPC call')
rects2 = plt.bar(index + bar_width, rpc_cast, bar_width,
alpha=opacity,
color='#ff997a',
label='RPC cast')
plt.xlabel('')
plt.ylabel('msg/s')
plt.title('Latency impact on RPCs')
plt.xticks(index + bar_width, latency)
plt.legend(loc=7)
plt.tight_layout()
#plt.show()
plt.savefig(filename, facecolor='white', transparent=True)
rpc_call = [218., 62., 16.]
mcall = max(rpc_call)
#rpc_call = [x/mcall * 100 for x in rpc_call]
rpc_cast = [1062., 84., 18.]
mcast = max(rpc_cast)
#rpc_cast = [x/mcast * 100 for x in rpc_cast]
draw(rpc_call, rpc_cast, latency, 'far_client.png')
rpc_call = [215., 37., 14.]
mcall = max(rpc_call)
#rpc_call = [x/mcall * 100 for x in rpc_call]
rpc_cast = [1129., 1029., 1170.]
mcast = max(rpc_cast)
#rpc_cast = [x/mcast * 100 for x in rpc_cast]
draw(rpc_call, rpc_cast, latency, 'far_server.png')
|
bhilkanchan/Twitter-Bots
|
allbots.py
|
import tweepy
import time
import os
from datetime import date
from dotenv import load_dotenv
load_dotenv()
API_KEY = os.environ["API_KEY"]
API_KEY_SECRET = os.environ["API_KEY_SECRET"]
ACCESS_TOKEN = os.environ["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = os.environ["ACCESS_TOKEN_SECRET"]
auth = tweepy.OAuthHandler(API_KEY, API_KEY_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True)
countfollow = 0
countmessage = 0
requestcount = 0
#Add your username here
myusername = ""
#Customize the message
messagetext = "Hey"
# bot to follow and message users who tweet in a specific area
def followmessage(querytext, geocodetext):
global countmessage
global countfollow
global requestcount
    # use a Cursor to page through the results, 100 per page
cursor = tweepy.Cursor(api.search_tweets, q=querytext, geocode=geocodetext)
for page in cursor.pages(20):
for tweet in page:
try:
if(requestcount <= 99000):
# Get the friendship status between you and the user
friendship = api.get_friendship(source_screen_name=myusername, target_screen_name=tweet.user.screen_name)
requestcount = requestcount+1
# Check that you are not following the user and user's messaging option is open
if(friendship[0].following == False and countfollow <= 950 and friendship[0].can_dm == True):
api.create_friendship(screen_name=tweet.user.screen_name)
requestcount = requestcount+1
print("Request count is:", requestcount)
print("followed:", tweet.user.screen_name)
countfollow = countfollow+1
print('Follow count:', countfollow)
api.send_direct_message(recipient_id=tweet.user.id, text=messagetext)
requestcount = requestcount+1
print("Request count is:", requestcount)
print("sent message to:", tweet.user.screen_name)
countmessage = countmessage+1
print('Message count:', countmessage)
time.sleep(20)
else:
break
except tweepy.HTTPException as e:
print(e)
                if(226 in e.api_codes):
time.sleep(600)
else:
time.sleep(5)
countlike = 0
# bot to like the tweets of users with specific words
def liketweetbot(querytext):
global countlike
global requestcount
for tweet in api.search_tweets(q=querytext):
try:
if(requestcount <= 99000):
tweetid = api.get_status(tweet.id)
requestcount = requestcount+1
if(tweetid.favorited == False and countlike < 950):
api.create_favorite(tweet.id)
requestcount = requestcount+1
print("Request count is:", requestcount)
print("Liked tweet of user:", tweet.user.screen_name)
countlike = countlike+1
print('Tweet like count:', countlike)
time.sleep(30)
else:
break
except tweepy.HTTPException as e:
print(e)
            if(226 in e.api_codes):
time.sleep(600)
else:
time.sleep(5)
countretweet = 0
# bot to retweet the tweets of users with specific words
def retweetbot(querytext):
global countretweet
global requestcount
for tweet in api.search_tweets(q=querytext):
try:
if(requestcount <= 99000):
setretweeted = api.get_status(tweet.id)
requestcount = requestcount+1
if(setretweeted.retweeted == False and countretweet < 950):
api.retweet(tweet.id)
requestcount = requestcount+1
print("retweeted", tweet.id)
countretweet = countretweet+1
time.sleep(60)
else:
break
except tweepy.HTTPException as e:
print(e)
            if(226 in e.api_codes):
time.sleep(600)
else:
time.sleep(5)
def main():
# Geocode of New York is added here, change the geocode to your required place
followmessage(" ", "40.730610,-73.935242,5mi")
time.sleep(300)
if __name__ == "__main__":
main()
# Insert query to like the tweet
liketweetbot(" ")
time.sleep(100)
# Insert query to retweet the tweet
retweetbot(" ")
time.sleep(100)
print("message count:", countmessage, date.today())
print("follow count:", countfollow, date.today())
print("Like count:", countlike, date.today())
print("retweet count:", countretweet, date.today())
|
diana-gv/django-social-network
|
social_network/__init__.py
|
# coding=utf-8
import logging
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from social_graph import Graph
from .signals import (
follower_relationship_created,
follower_relationship_destroyed,
friendship_created,
social_group_comment_created,
social_group_photo_created
)
from .utils import (
followed_by_edge,
follower_of_edge,
member_of_edge,
integrated_by_edge,
friendship_edge,
)
logger = logging.getLogger(__name__)
User = get_user_model()
Manager = User._default_manager
graph = Graph()
SOCIAL_GROUP_COMMENT_EVENT_TYPE_NAME = 'social_group_comment'
SOCIAL_GROUP_SHARED_LINK_EVENT_TYPE_NAME = 'social_group_shared_link'
SOCIAL_GROUP_PHOTO_EVENT_TYPE_NAME = 'social_group_photo'
SERVER_SUCCESS_MESSAGE = _(u"Your request has been successfully processed.")
SERVER_ERROR_MESSAGE = _(u"An error has occurred while processing your request.")
##---------------------------------Inject functionality to Django User model---------------------------###
def get_site(self):
return Site.objects.get_current()
setattr(User, 'get_site', get_site)
def followers(self):
return graph.edge_count(self, followed_by_edge(), self.get_site())
setattr(User, 'followers', followers)
def following(self):
return graph.edge_count(self, follower_of_edge(), self.get_site())
setattr(User, 'following', following)
def follower_list(self):
count = self.followers()
return [node for node, attributes, time in graph.edge_range(self, followed_by_edge(), 0, count, self.get_site())]
setattr(User, 'follower_list', follower_list)
def following_list(self):
count = self.following()
return [node for node, attributes, time in graph.edge_range(self, follower_of_edge(), 0, count, self.get_site())]
setattr(User, 'following_list', following_list)
def followed_by(self, user):
return graph.edge_get(self, followed_by_edge(), user, self.get_site()) is not None
setattr(User, 'followed_by', followed_by)
def follow(self, user):
_edge = graph.edge(self, user, follower_of_edge(), self.get_site(), {})
if _edge:
follower_relationship_created.send(sender=self.__class__, followed=user, user=self)
return _edge
setattr(User, 'follow', follow)
def stop_following(self, user):
_deleted = graph.no_edge(self, user, follower_of_edge(), self.get_site())
if _deleted:
follower_relationship_destroyed.send(sender=self.__class__, followed=user, user=self)
return _deleted
setattr(User, 'stop_following', stop_following)
def friend_of(self, user):
return graph.edge_get(self, friendship_edge(), user, self.get_site()) is not None
setattr(User, 'friend_of', friend_of)
def friends(self):
return graph.edge_count(self, friendship_edge(), self.get_site())
setattr(User, 'friends', friends)
def friend_list(self):
return [node for node, attributes, time in graph.edge_range(self, friendship_edge(), self.get_site())]
setattr(User, 'friend_list', friend_list)
def make_friend_of(self, user):
_edge = graph.edge(self, user, friendship_edge(), self.get_site(), {})
if _edge:
friendship_created.send(sender=self.__class__, friend=user, user=self)
return _edge
setattr(User, 'make_friend_of', make_friend_of)
def social_groups(self):
return graph.edge_count(self, member_of_edge(), self.get_site())
setattr(User, 'social_groups', social_groups)
def social_group_list(self):
count = self.social_groups()
return [group for group, attributes, time in graph.edge_range(self, member_of_edge(), 0, count, self.get_site())]
setattr(User, 'social_group_list', social_group_list)
def specific_role_social_group_list(self, role):
count = self.social_groups()
return [group for group, attributes, time in graph.edge_range(self, member_of_edge(), 0, count, self.get_site())
if attributes['role'] == role]
setattr(User, 'specific_role_social_group_list', specific_role_social_group_list)
def is_member_of(self, group):
return graph.edge_get(group, integrated_by_edge(), self, group.site) is not None
setattr(User, 'is_member_of', is_member_of)
def is_admin_of(self, group):
return self in group.administrators.all()
setattr(User, 'is_admin_of', is_admin_of)
def is_creator_of(self, group):
return self == group.creator
setattr(User, 'is_creator_of', is_creator_of)
def join(self, group):
return group.add_member(self)
setattr(User, 'join', join)
def followed_by_users(self, user):
follower_of = follower_of_edge()
count = graph.edge_count(user, follower_of)
ids = [node.pk for node, attributes, time in graph.edge_range(user, follower_of, 0, count)]
return self.get_queryset().filter(pk__in=ids)
setattr(Manager.__class__, 'followed_by', followed_by_users)
def members_of(self, group):
integrated_by = integrated_by_edge()
count = graph.edge_count(group, integrated_by)
ids = [node.pk for node, attributes, time in graph.edge_range(group, integrated_by, 0, count)]
return self.get_queryset().filter(pk__in=ids)
setattr(Manager.__class__, 'members_of', members_of)
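# Illustrative sketch: once the methods above are attached, application code can call them
# directly on User instances; a hypothetical session (the user names are assumptions) might be:
#
#   alice = User.objects.get(username='alice')
#   bob = User.objects.get(username='bob')
#   alice.follow(bob)        # writes a follower edge and sends follower_relationship_created
#   alice.following()        # -> number of users alice follows
#   bob.followed_by(alice)   # -> True, assuming the inverse "Followed by" edge is in place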
|
diana-gv/django-social-network
|
social_network/views.py
|
<gh_stars>1-10
import json
import logging
from django.contrib.sites.models import Site
from django.http import HttpResponse
from django.http.response import HttpResponseBadRequest
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, ListView, View, DetailView, TemplateView, UpdateView
from . import SERVER_SUCCESS_MESSAGE, User, Manager, SERVER_ERROR_MESSAGE
from .utils import intmin
from .models import FriendRequest, SocialGroup, GroupMembershipRequest, GroupFeedItem
from .forms import (
FriendRequestForm,
SocialGroupForm,
GroupCommentForm,
GroupMembershipRequestForm,
GroupPhotoForm,
FeedCommentForm,
GroupSharedLinkForm)
logger = logging.getLogger(__name__)
class JSONResponseEnabledMixin(object):
"""
A mixin that can be used to render a JSON response.
"""
json_response_class = HttpResponse
json_enabled_methods = ['post']
def dispatch(self, request, *args, **kwargs):
if request.method.lower() in self.json_enabled_methods:
self.json_enabled = True
else:
self.json_enabled = False
return super(JSONResponseEnabledMixin, self).dispatch(request, *args, **kwargs)
def render_to_json(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
response_kwargs['content_type'] = 'application/json'
return self.json_response_class(
json.dumps(context),
**response_kwargs
)
def render_to_response(self, context, **response_kwargs):
if self.json_enabled:
return self.render_to_json(context, **response_kwargs)
else:
return super(JSONResponseEnabledMixin, self).render_to_response(context, **response_kwargs)
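# Illustrative sketch: the mixin above is meant to be combined with any class-based view;
# a hypothetical minimal composition (the view name 'PingView' is an assumption, not part
# of this app) could look like:
#
#   class PingView(JSONResponseEnabledMixin, View):
#       def post(self, request, *args, **kwargs):
#           return self.render_to_json({'result': True})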
class BaseFriendRequestCreateView(CreateView):
form_class = FriendRequestForm
template_name = 'social_network/friend/request.html'
def get_context_data(self, **kwargs):
context = super(BaseFriendRequestCreateView, self).get_context_data(**kwargs)
context.update({
'receiver': self.kwargs['receiver']
})
return context
def get_form_kwargs(self):
kwargs = super(BaseFriendRequestCreateView, self).get_form_kwargs()
kwargs['initial'] = {
'from_user': self.request.user,
'to_user': Manager.get(pk=self.kwargs['receiver']),
}
return kwargs
class FriendRequestCreateView(JSONResponseEnabledMixin, BaseFriendRequestCreateView):
def form_valid(self, form):
self.object = form.save()
return self.render_to_json({
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
}, status=201)
class FriendRequestListView(ListView):
template_name = 'social_network/friend/list/main.html'
def get_queryset(self):
self.queryset = FriendRequest.objects.filter(to_user__pk=self.kwargs['receiver'], accepted=False)
return super(FriendRequestListView, self).get_queryset()
def get_context_data(self, **kwargs):
context = super(FriendRequestListView, self).get_context_data(**kwargs)
user = Manager.get(pk=self.kwargs['receiver'])
context.update({
'friends': user.friend_list()
})
return context
class AcceptFriendRequestView(JSONResponseEnabledMixin, View):
def post(self, request, *args, **kwargs):
try:
FriendRequest.objects.get(pk=kwargs['pk']).accept(self.request.user)
return self.render_to_json({
'result': True,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
})
except (FriendRequest.DoesNotExist, Exception) as e:
return HttpResponseBadRequest()
class DenyFriendRequestView(JSONResponseEnabledMixin, View):
def post(self, request, *args, **kwargs):
try:
result = FriendRequest.objects.get(pk=kwargs['pk']).deny(self.request.user)
if not result:
raise
return self.render_to_json({
'result': result,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
})
except (FriendRequest.DoesNotExist, Exception) as e:
return HttpResponseBadRequest()
class FriendshipButtonsTemplateView(TemplateView):
template_name = 'social_network/buttons/_friendship_buttons.html'
def get_context_data(self, **kwargs):
context = super(FriendshipButtonsTemplateView, self).get_context_data(**kwargs)
context.update({
'profile_user': Manager.get(pk=self.kwargs['profile'])
})
return context
class SocialGroupListView(ListView):
template_name = 'social_network/group/detail/members.html'
paginate_by = 10
def get_queryset(self):
self.queryset = SocialGroup.on_site.all()
return super(SocialGroupListView, self).get_queryset()
class BaseSocialGroupCreateView(CreateView):
form_class = SocialGroupForm
template_name = 'social_network/group/form.html'
def get_form_kwargs(self):
kwargs = super(BaseSocialGroupCreateView, self).get_form_kwargs()
kwargs['initial'] = {
'creator': self.request.user,
'site': Site.objects.get_current()
}
return kwargs
class SocialGroupCreateView(JSONResponseEnabledMixin, BaseSocialGroupCreateView):
def form_valid(self, form):
self.object = form.save()
return self.render_to_json({
'result': True,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
}, status=201)
class BaseSocialGroupUpdateView(UpdateView):
queryset = SocialGroup.on_site.all()
pk_url_kwarg = 'group'
form_class = SocialGroupForm
template_name = 'social_network/group/form.html'
def get_form_kwargs(self):
kwargs = super(BaseSocialGroupUpdateView, self).get_form_kwargs()
kwargs['initial'] = {
'creator': self.request.user,
'site': Site.objects.get_current()
}
return kwargs
class SocialGroupUpdateView(JSONResponseEnabledMixin, BaseSocialGroupUpdateView):
def form_valid(self, form):
self.object = form.save()
return self.render_to_json({
'result': True,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
}, status=201)
class SocialGroupUserList(ListView):
template_name = 'social_network/group/list/main.html'
def get_queryset(self):
self.queryset = SocialGroup.on_site.integrated_by(self.request.user)
return super(SocialGroupUserList, self).get_queryset()
def get_context_data(self, **kwargs):
context = super(SocialGroupUserList, self).get_context_data(**kwargs)
context.update({
'owner': int(self.kwargs['user'])
})
return context
class SocialGroupDetailView(DetailView):
model = SocialGroup
template_name = 'social_network/group/detail/main.html'
context_object_name = 'group'
class BaseSocialGroupRequestCreateView(CreateView):
form_class = GroupMembershipRequestForm
template_name = 'social_network/group/request.html'
def get_context_data(self, **kwargs):
context = super(BaseSocialGroupRequestCreateView, self).get_context_data(**kwargs)
context.update({
'group': self.kwargs['group']
})
return context
def get_form_kwargs(self):
kwargs = super(BaseSocialGroupRequestCreateView, self).get_form_kwargs()
kwargs['initial'] = {
'requester': self.request.user,
'group': SocialGroup.objects.get(pk=self.kwargs['group'])
}
return kwargs
class SocialGroupRequestCreateView(JSONResponseEnabledMixin, BaseSocialGroupRequestCreateView):
def form_valid(self, form):
self.object = form.save()
return self.render_to_json({
'result': True,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE),
            'sentLabel': force_text(_(u"Request Sent"))
}, status=201)
class SocialGroupRequestAcceptView(JSONResponseEnabledMixin, View):
def post(self, request, *args, **kwargs):
try:
result = GroupMembershipRequest.objects.get(pk=kwargs['pk']).accept(self.request.user)
if not result:
raise
return self.render_to_json({
'result': result,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
})
except (GroupMembershipRequest.DoesNotExist, Exception) as e:
return HttpResponseBadRequest()
class SocialGroupRequestDenyView(JSONResponseEnabledMixin, View):
def post(self, request, *args, **kwargs):
try:
result = GroupMembershipRequest.objects.get(pk=kwargs['pk']).deny(self.request.user)
if not result:
raise
return self.render_to_json({
'result': result,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
})
except (GroupMembershipRequest.DoesNotExist, Exception) as e:
return HttpResponseBadRequest()
class SocialGroupJoinView(JSONResponseEnabledMixin, View):
def post(self, request, *args, **kwargs):
try:
result = self.request.user.join(SocialGroup.objects.get(pk=kwargs['group']))
if not result:
raise
return self.render_to_json({
'result': result,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
})
except (SocialGroup.DoesNotExist, Exception) as e:
return HttpResponseBadRequest()
class BaseGroupPostCreateView(CreateView):
def get_context_data(self, **kwargs):
context = super(BaseGroupPostCreateView, self).get_context_data(**kwargs)
context.update({
'group': self.kwargs['group']
})
return context
def get_form_kwargs(self):
kwargs = super(BaseGroupPostCreateView, self).get_form_kwargs()
kwargs['initial'] = {
'creator': self.request.user,
'group': SocialGroup.objects.get(pk=self.kwargs['group']),
}
return kwargs
class GroupPostCreateView(JSONResponseEnabledMixin):
def form_valid(self, form):
self.object = form.save()
return self.render_to_json({
'result': True,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
}, status=201)
class BaseGroupCommentCreateView(BaseGroupPostCreateView):
form_class = GroupCommentForm
template_name = 'social_network/group/comment.html'
class GroupCommentCreateView(GroupPostCreateView, BaseGroupCommentCreateView):
pass
class BaseGroupLinkCreateView(BaseGroupPostCreateView):
form_class = GroupSharedLinkForm
template_name = 'social_network/group/link.html'
class GroupLinkCreateView(GroupPostCreateView, BaseGroupLinkCreateView):
pass
class BaseGroupPhotoCreateView(BaseGroupPostCreateView):
form_class = GroupPhotoForm
template_name = 'social_network/group/photo.html'
class GroupPhotoCreateView(GroupPostCreateView, BaseGroupPhotoCreateView):
pass
class SocialGroupFeedView(ListView):
template_name = 'social_network/group/detail/feed.html'
def get_queryset(self):
self.queryset = GroupFeedItem.on_site.filter(group=self.kwargs.get('group')).order_by('-event__date')
return super(SocialGroupFeedView, self).get_queryset()
class SocialGroupMembershipRequestsList(ListView):
template_name = 'social_network/group/detail/requests.html'
def get_queryset(self):
self.queryset = GroupMembershipRequest.objects.filter(
group__pk=self.kwargs['group'],
accepted=False,
denied=False
)
return super(SocialGroupMembershipRequestsList, self).get_queryset()
def get_context_data(self, **kwargs):
context = super(SocialGroupMembershipRequestsList, self).get_context_data(**kwargs)
context['group'] = self.kwargs['group']
return context
class SocialGroupMembersList(ListView):
template_name = 'social_network/group/detail/members.html'
def get_queryset(self):
self.group = SocialGroup.objects.get(pk=self.kwargs['group'])
self.queryset = Manager.filter(pk__in=[user.pk for user in self.group.member_list])
return super(SocialGroupMembersList, self).get_queryset()
def get_context_data(self, **kwargs):
context = super(SocialGroupMembersList, self).get_context_data(**kwargs)
context.update({
'roles': self.group.member_role_list,
})
return context
class MembershipButtonsTemplateView(TemplateView):
template_name = 'social_network/buttons/_membership_buttons.html'
def get_context_data(self, **kwargs):
context = super(MembershipButtonsTemplateView, self).get_context_data(**kwargs)
context.update({
'group': SocialGroup.objects.get(pk=self.kwargs['group'])
})
return context
class BaseFeedCommentCreateView(CreateView):
template_name = 'social_network/userfeed/comment.html'
form_class = FeedCommentForm
def get_context_data(self, **kwargs):
context = super(BaseFeedCommentCreateView, self).get_context_data(**kwargs)
context.update({
'receiver': self.kwargs['receiver']
})
return context
def get_form_kwargs(self):
kwargs = super(BaseFeedCommentCreateView, self).get_form_kwargs()
kwargs['initial'] = {
'receiver': Manager.get(pk=self.kwargs['receiver']),
'creator': self.request.user
}
return kwargs
class FeedCommentCreateView(JSONResponseEnabledMixin, BaseFeedCommentCreateView):
def form_valid(self, form):
self.object = form.save()
return self.render_to_json({
'result': True,
'comment_id': self.object.pk,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
}, status=201)
class FollowerRelationshipToggleView(JSONResponseEnabledMixin, View):
def post(self, request, *args, **kwargs):
try:
pk = request.POST['pk']
user = Manager.get(pk=pk)
if user.followed_by(request.user):
request.user.stop_following(user)
tooltip = _(u"Follow")
toggle_status = False
else:
request.user.follow(user)
tooltip = _(u"Stop Following")
toggle_status = True
followers = user.followers()
return self.render_to_json({
'result': True,
'toggle_status': toggle_status,
'counter': followers,
'counterStr': intmin(followers),
'tooltip': force_text(tooltip)
})
except Exception as e:
logger.exception(e)
return self.render_to_json({'result': False})
class FollowerRelationshipCreateView(JSONResponseEnabledMixin, View):
def post(self, request, *args, **kwargs):
try:
request.user.follow(Manager.get(pk=kwargs['followed']))
return self.render_to_json({
'result': True,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
}, status=201)
except (User.DoesNotExist, Exception):
return HttpResponseBadRequest()
class FollowerRelationshipDestroyView(JSONResponseEnabledMixin, View):
def post(self, request, *args, **kwargs):
try:
request.user.stop_following(Manager.get(pk=kwargs['followed']))
return self.render_to_json({
'result': True,
'successMsg': force_text(SERVER_SUCCESS_MESSAGE)
})
except (User.DoesNotExist, Exception):
return HttpResponseBadRequest()
|
diana-gv/django-social-network
|
social_network/utils.py
|
# coding=utf-8
import random
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from notifications.models import EventType
from social_graph import EdgeType
try:
from hashlib import sha1 as sha_constructor, md5 as md5_constructor
except ImportError:
pass
#---------------------NOTIFICATIONS---------------------------------
def group_comment_event_type():
comment_event_type = cache.get('SOCIAL_NETWORK_COMMENT_EVENT_TYPE')
if comment_event_type is not None:
return comment_event_type
try:
from . import SOCIAL_GROUP_COMMENT_EVENT_TYPE_NAME
comment_event_type = EventType.objects.get(name=SOCIAL_GROUP_COMMENT_EVENT_TYPE_NAME)
cache.set('SOCIAL_NETWORK_COMMENT_EVENT_TYPE', comment_event_type)
return comment_event_type
except ObjectDoesNotExist as e:
pass # TODO Log this
def group_shared_link_event_type():
shared_link = cache.get('SOCIAL_NETWORK_SHARED_LINK_EVENT_TYPE')
if shared_link is not None:
return shared_link
try:
from . import SOCIAL_GROUP_SHARED_LINK_EVENT_TYPE_NAME
shared_link = EventType.objects.get(name=SOCIAL_GROUP_SHARED_LINK_EVENT_TYPE_NAME)
cache.set('SOCIAL_NETWORK_SHARED_LINK_EVENT_TYPE', shared_link)
return shared_link
except ObjectDoesNotExist as e:
pass # TODO Log this
def group_photo_event_type():
photo_event_type = cache.get('SOCIAL_NETWORK_PHOTO_EVENT_TYPE')
if photo_event_type is not None:
return photo_event_type
try:
from . import SOCIAL_GROUP_PHOTO_EVENT_TYPE_NAME
photo_event_type = EventType.objects.get(name=SOCIAL_GROUP_PHOTO_EVENT_TYPE_NAME)
cache.set('SOCIAL_NETWORK_PHOTO_EVENT_TYPE', photo_event_type)
return photo_event_type
except ObjectDoesNotExist as e:
pass # TODO Log this
#---------------------EDGES-----------------------------------------
def friendship_edge():
_friendship = cache.get('FRIENDSHIP_EDGE_TYPE')
if _friendship is not None:
return _friendship
try:
_friendship = EdgeType.objects.get(name="Friendship")
cache.set('FRIENDSHIP_EDGE_TYPE', _friendship)
return _friendship
except ObjectDoesNotExist as e:
pass # TODO Log this
def integrated_by_edge():
_integrated_by = cache.get('INTEGRATED_BY_EDGE_TYPE')
if _integrated_by is not None:
return _integrated_by
try:
_integrated_by = EdgeType.objects.get(name="Integrated by")
cache.set('INTEGRATED_BY_EDGE_TYPE', _integrated_by)
return _integrated_by
except ObjectDoesNotExist as e:
pass # TODO Log this
def member_of_edge():
_member_of = cache.get('MEMBER_OF_EDGE_TYPE')
if _member_of is not None:
return _member_of
try:
_member_of = EdgeType.objects.get(name="Member")
cache.set('MEMBER_OF_EDGE_TYPE', _member_of)
return _member_of
except ObjectDoesNotExist as e:
pass # TODO Log this
def follower_of_edge():
_follower_of = cache.get('FOLLOWER_OF_EDGE_TYPE')
if _follower_of is not None:
return _follower_of
try:
_follower_of = EdgeType.objects.get(name="Follower")
cache.set('FOLLOWER_OF_EDGE_TYPE', _follower_of)
return _follower_of
except ObjectDoesNotExist:
pass
def followed_by_edge():
_followed_by = cache.get('FOLLOWED_BY_EDGE_TYPE')
if _followed_by is not None:
return _followed_by
try:
_followed_by = EdgeType.objects.get(name="Followed by")
cache.set('FOLLOWED_BY_EDGE_TYPE', _followed_by)
return _followed_by
except ObjectDoesNotExist:
pass
#---------------------GENERAL-----------------------------------------
def generate_sha1(string, salt=None):
"""
Generates a sha1 hash for supplied string. Doesn't need to be very secure
because it's not used for password checking. We got Django for that.
:param string:
The string that needs to be encrypted.
:param salt:
Optionally define your own salt. If none is supplied, will use a random
string of 5 characters.
:return: Tuple containing the salt and hash.
"""
if not isinstance(string, (str, unicode)):
string = str(string)
if isinstance(string, unicode):
string = string.encode("utf-8")
if not salt:
salt = sha_constructor(str(random.random())).hexdigest()[:5]
hash = sha_constructor(salt+string).hexdigest()
return (salt, hash)
# A tuple of standard large number to their converters
intword_converters = (
(3, lambda number: _('%(value)dK')),
(6, lambda number: _('%(value)dM')),
(9, lambda number: _('%(value)dG')),
)
def intmin(value):
"""
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < 1000:
return value
for exponent, converter in intword_converters:
large_number = 10 ** exponent
if value < large_number * 1000:
new_value = value / large_number
tpl = "+%s" if value > large_number else "%s"
return tpl % converter(new_value) % {'value': new_value}
return value
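# Illustrative examples of intmin's behaviour, assuming the Python 2 integer division used above:
#
#   intmin(950)      # -> 950 (below 1000, returned unchanged)
#   intmin(1000)     # -> '1K'
#   intmin(2500)     # -> '+2K' (values above the 10**3 threshold get a '+' prefix)
#   intmin(3500000)  # -> '+3M'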
|
diana-gv/django-social-network
|
social_network/migrations/0005_auto__add_field_friendrequest_denied.py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FriendRequest.denied'
db.add_column(u'social_network_friendrequest', 'denied',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FriendRequest.denied'
db.delete_column(u'social_network_friendrequest', 'denied')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('<PASSWORD>', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'notifications.action': {
'Meta': {'object_name': 'Action'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'read_as': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'notifications.event': {
'Meta': {'object_name': 'Event'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
'extra_data': ('notifications.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True'}),
'target_pk': ('django.db.models.fields.TextField', [], {}),
'target_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event'", 'to': u"orm['contenttypes.ContentType']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.EventType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': u"orm['auth.User']"})
},
'notifications.eventtype': {
'Meta': {'object_name': 'EventType'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Action']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.EventTypeCategory']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immediate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'read_as': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'target_type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'notifications.eventtypecategory': {
'Meta': {'object_name': 'EventTypeCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'read_as': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'notifications.notificationtemplateconfig': {
'Meta': {'unique_together': "(('event_type', 'transport', 'context'),)", 'object_name': 'NotificationTemplateConfig'},
'context': ('django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'}),
'data': ('notifications.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.EventType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'single_template_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'template_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'transport': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Transport']"})
},
'notifications.transport': {
'Meta': {'object_name': 'Transport'},
'allows_context': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allows_freq_config': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allows_subscription': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cls': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'delete_sent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'social_network.feedcomment': {
'Meta': {'object_name': 'FeedComment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feed_comments'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'receiver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feed_received_comments'", 'to': u"orm['auth.User']"})
},
'social_network.friendrequest': {
'Meta': {'object_name': 'FriendRequest'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'denied': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_outgoing_friend_requests'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_incoming_friend_requests'", 'to': u"orm['auth.User']"})
},
'social_network.groupcomment': {
'Meta': {'object_name': 'GroupComment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'(app_label)s_groupcomment_set_post'", 'to': u"orm['auth.User']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'(app_label)s_groupcomment_set_post'", 'to': "orm['social_network.SocialGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'social_network.groupfeeditem': {
'Meta': {'object_name': 'GroupFeedItem'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Event']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['social_network.SocialGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'template_config': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.NotificationTemplateConfig']"})
},
'social_network.groupimage': {
'Meta': {'object_name': 'GroupImage'},
'comment': ('django.db.models.fields.TextField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'(app_label)s_groupimage_set_post'", 'to': u"orm['auth.User']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'(app_label)s_groupimage_set_post'", 'to': "orm['social_network.SocialGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'social_network.groupmembershiprequest': {
'Meta': {'object_name': 'GroupMembershipRequest'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'acceptor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'accepted_group_memberships'", 'null': 'True', 'to': u"orm['auth.User']"}),
'denied': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aspirants'", 'to': "orm['social_network.SocialGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'requested_group_memberships'", 'to': u"orm['auth.User']"})
},
'social_network.socialgroup': {
'Meta': {'object_name': 'SocialGroup'},
'administrators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'groups_administrated_by'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups_created_by'", 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"})
}
}
complete_apps = ['social_network']
|
diana-gv/django-social-network
|
social_network/management/__init__.py
|
# coding=utf-8
from django.db.models.signals import post_syncdb
from django.conf import settings
from .. import models as social_app
AUTOCONFIGURE_NOTIFICATIONS = getattr(settings, 'SOCIAL_NETWORK_AUTOCONFIGURE_NOTIFICATIONS', True)
COMMENT_ACTION_READ_AS = getattr(settings, 'SOCIAL_NETWORK_COMMENT_ACTION_READ_AS', 'Comment')
SHARED_LINK_ACTION_READ_AS = getattr(settings, 'SOCIAL_NETWORK_SHARED_LINK_ACTION_READ_AS', 'Link Post')
PHOTO_ACTION_READ_AS = getattr(settings, 'SOCIAL_NETWORK_PHOTO_ACTION_READ_AS', 'Image Post')
COMMENT_EVENT_READ_AS = getattr(settings, 'SOCIAL_NETWORK_COMMENT_EVENT_READ_AS', 'Community Comment')
SHARED_LINK_EVENT_READ_AS = getattr(settings, 'SOCIAL_NETWORK_SHARED_LINK_EVENT_READ_AS', 'Community Shared Link')
PHOTO_EVENT_READ_AS = getattr(settings, 'SOCIAL_NETWORK_PHOTO_EVENT_READ_AS', 'Community Image Post')
def create_edge_types(**kwargs):
from social_graph import EdgeType, EdgeTypeAssociation
#FRIENDSHIP EDGES
friendship, created = EdgeType.objects.get_or_create(name="Friendship", defaults={
'read_as': 'is friends with'
})
EdgeTypeAssociation.objects.get_or_create(direct=friendship, inverse=friendship)
# FOLLOWER EDGES
follower_of, created = EdgeType.objects.get_or_create(name="Follower", defaults={
'read_as': 'is follower of'
})
followed_by, created = EdgeType.objects.get_or_create(name="Followed by", defaults={
'read_as': 'is followed by'
})
EdgeTypeAssociation.objects.get_or_create(direct=follower_of, inverse=followed_by)
# GROUP EDGES
member_of, created = EdgeType.objects.get_or_create(name="Member", defaults={
'read_as': 'is member of'
})
integrated_by, created = EdgeType.objects.get_or_create(name="Integrated by", defaults={
'read_as': 'is integrated by'
})
EdgeTypeAssociation.objects.get_or_create(direct=member_of, inverse=integrated_by)
post_syncdb.connect(create_edge_types, sender=social_app)
def configure_notifications(**kwargs):
from notifications.models import Transport, Action, EventType, EventTypeCategory, AttendantRole, EventAttendantsConfig, NotificationTemplateConfig
from .. import SOCIAL_GROUP_COMMENT_EVENT_TYPE_NAME, SOCIAL_GROUP_SHARED_LINK_EVENT_TYPE_NAME, SOCIAL_GROUP_PHOTO_EVENT_TYPE_NAME
group_transport, created = Transport.objects.get_or_create(
name='group_transport',
cls='social_network.transports.GroupFeedTransport',
defaults={
'allows_context': False,
'allows_freq_config': False,
'delete_sent': False
}
)
category, created = EventTypeCategory.objects.get_or_create(
name='social_network_category',
defaults={'read_as': 'Social'}
)
group_attendant_role, created = AttendantRole.objects.get_or_create(role='owner', defaults={'priority': 1})
comment, created = Action.objects.get_or_create(name='comment', defaults={'read_as': COMMENT_ACTION_READ_AS})
shared_link, created = Action.objects.get_or_create(name='shared_link', defaults={'read_as': SHARED_LINK_ACTION_READ_AS})
photo, created = Action.objects.get_or_create(name='photo', defaults={'read_as': PHOTO_ACTION_READ_AS})
comment_event, created = EventType.objects.get_or_create(
name=SOCIAL_GROUP_COMMENT_EVENT_TYPE_NAME,
action=comment,
target_type='social_network.socialgroup',
defaults={
'read_as': COMMENT_EVENT_READ_AS,
'category': category,
'immediate': True
}
)
shared_link_event, created = EventType.objects.get_or_create(
name=SOCIAL_GROUP_SHARED_LINK_EVENT_TYPE_NAME,
action=shared_link,
target_type='social_network.socialgroup',
defaults={
'read_as': SHARED_LINK_EVENT_READ_AS,
'category': category,
'immediate': True
}
)
photo_event, created = EventType.objects.get_or_create(
name=SOCIAL_GROUP_PHOTO_EVENT_TYPE_NAME,
action=photo,
target_type='social_network.socialgroup',
defaults={
'read_as': PHOTO_EVENT_READ_AS,
'category': category,
'immediate': True
}
)
comment_attendants_config, created = EventAttendantsConfig.objects.get_or_create(
event_type=comment_event,
transport=group_transport,
defaults={
'get_attendants_methods': [
{'source': 'target_obj', 'type': 'property', 'value': "group,owner"}
]
}
)
shared_link_attendants_config, created = EventAttendantsConfig.objects.get_or_create(
event_type=shared_link_event,
transport=group_transport,
defaults={
'get_attendants_methods': [
{'source': 'target_obj', 'type': 'property', 'value': "group,owner"}
]
}
)
photo_attendants_config, created = EventAttendantsConfig.objects.get_or_create(
event_type=photo_event,
transport=group_transport,
defaults={
'get_attendants_methods': [
{'source': 'target_obj', 'type': 'property', 'value': "group,owner"}
]
}
)
comment_template_config, created = NotificationTemplateConfig.objects.get_or_create(
event_type=comment_event,
transport=group_transport,
defaults={
'template_path': 'social_network/group/feed/comment.html'
}
)
shared_link_template_config, created = NotificationTemplateConfig.objects.get_or_create(
event_type=shared_link_event,
transport=group_transport,
defaults={
'template_path': 'social_network/group/feed/shared_link.html'
}
)
photo_template_config, created = NotificationTemplateConfig.objects.get_or_create(
event_type=photo_event,
transport=group_transport,
defaults={
'template_path': 'social_network/group/feed/photo.html'
}
)
if AUTOCONFIGURE_NOTIFICATIONS:
post_syncdb.connect(configure_notifications, sender=social_app)
|
diana-gv/django-social-network
|
social_network/management/commands/clearcache.py
|
<gh_stars>1-10
# coding=utf-8
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
def handle_noargs(self, **options):
from social_graph.api import Graph
graph = Graph()
graph.clear_cache()
|
diana-gv/django-social-network
|
social_network/transports.py
|
# coding=utf-8
from notifications.transports import BaseTransport
from .models import GroupFeedItem, SocialGroup
class GroupFeedTransport(BaseTransport):
@staticmethod
def send_notification(group, role, event, template, delay=False):
if group is None or not isinstance(group, SocialGroup) or event is None or template is None:
return
try:
feed_item = GroupFeedItem(group=group, event=event, template_config=template)
feed_item.save()
except:
pass # TODO Log
|
diana-gv/django-social-network
|
social_network/admin.py
|
# coding=utf-8
from django.contrib import admin
from .models import SocialGroup
class SocialGroupAdmin(admin.ModelAdmin):
readonly_fields = ['name']
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
admin.site.register(SocialGroup, SocialGroupAdmin)
|
diana-gv/django-social-network
|
social_network/forms.py
|
# coding=utf-8
from django import forms
from django.core.exceptions import ValidationError
from . import Manager
from .models import (
FriendRequest,
SocialGroup,
GroupComment,
GroupMembershipRequest,
GroupImage,
FeedComment,
GroupSharedLink)
class FriendRequestForm(forms.ModelForm):
class Meta:
model = FriendRequest
widgets = {
'from_user': forms.widgets.HiddenInput,
'to_user': forms.widgets.HiddenInput,
}
class SocialGroupForm(forms.ModelForm):
administrators = forms.ModelMultipleChoiceField(Manager.all(), required=False)
class Meta:
model = SocialGroup
widgets = {
'creator': forms.widgets.HiddenInput,
'site': forms.widgets.HiddenInput
}
class GroupPostForm(forms.ModelForm):
class Meta(object):
widgets = {
'creator': forms.widgets.HiddenInput,
'group': forms.widgets.HiddenInput
}
def clean(self):
if not self.cleaned_data['group'].has_member(self.cleaned_data['creator']):
raise ValidationError("Only members can post in groups")
return self.cleaned_data
class GroupCommentForm(GroupPostForm):
class Meta(GroupPostForm.Meta):
model = GroupComment
class GroupSharedLinkForm(GroupPostForm):
class Meta(GroupPostForm.Meta):
model = GroupSharedLink
class GroupPhotoForm(GroupPostForm):
class Meta(GroupPostForm.Meta):
model = GroupImage
class GroupMembershipRequestForm(forms.ModelForm):
class Meta:
model = GroupMembershipRequest
widgets = {
'requester': forms.widgets.HiddenInput,
'group': forms.widgets.HiddenInput
}
def clean(self):
if GroupMembershipRequest.objects.filter(
requester=self.cleaned_data['requester'],
group=self.cleaned_data['group'],
accepted=False,
denied=False,
).exists():
raise ValidationError('Pre-existing group membership request from this user to this group.')
return self.cleaned_data
class FeedCommentForm(forms.ModelForm):
class Meta:
model = FeedComment
widgets = {
'creator': forms.widgets.HiddenInput,
'receiver': forms.widgets.HiddenInput,
}
|
diana-gv/django-social-network
|
social_network/signals.py
|
<reponame>diana-gv/django-social-network<filename>social_network/signals.py
# coding=utf-8
from django.dispatch import Signal
friend_request_created = Signal(providing_args=['instance', 'user', 'receiver'])
friendship_created = Signal(providing_args=['friend', 'user'])
social_group_created = Signal(providing_args=['instance', 'user'])
social_group_comment_created = Signal(providing_args=['instance', 'user'])
social_group_shared_link_created = Signal(providing_args=['instance', 'user'])
social_group_photo_created = Signal(providing_args=['instance', 'user'])
social_group_membership_request_created = Signal(providing_args=['instance', 'user', 'group'])
social_group_member_added = Signal(providing_args=['group', 'member', 'user'])
feed_comment_created = Signal(providing_args=['instance', 'user'])
follower_relationship_created = Signal(providing_args=['followed', 'user'])
follower_relationship_destroyed = Signal(providing_args=['followed', 'user'])
|
diana-gv/django-social-network
|
social_network/management/commands/configuresocialnetwork.py
|
<filename>social_network/management/commands/configuresocialnetwork.py
# coding=utf-8
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
def handle_noargs(self, **options):
from ...management import configure_notifications, create_edge_types
configure_notifications()
create_edge_types()
|
diana-gv/django-social-network
|
setup.py
|
<gh_stars>1-10
from setuptools import setup, find_packages
setup(
name="django-social-network",
url="http://github.com/dgvicente/django-social-network/",
author="<NAME>",
author_email="<EMAIL>",
version="0.2.4",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
description="Social Network for Django",
install_requires=['django>=1.6.1', 'celery>=3.1.4', 'django-social-graph>=0.1.8',
'django-notifications>=0.1.6'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
"Environment :: Web Environment",
"Framework :: Django",
],
)
|
diana-gv/django-social-network
|
social_network/templatetags/social_network_tags.py
|
# coding=utf-8
from django import template
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from ..models import FriendRequest, SocialGroup, GroupMembershipRequest
register = template.Library()
#--------------------------------------FOLLOWER TAGS---------------------------------------------
@register.filter
def followed_by(user1, user2):
"""
Returns whether user1 is followed by user2 or not.
"""
if not user1 or not user2 or user1.is_anonymous() or user2.is_anonymous():
return False
return user1.followed_by(user2)
@register.filter
def is_follower_of(user1, user2):
"""
Returns whether user1 follows user2 or not.
:param user1: An User instance.
:param user2: An User instance.
"""
if not user1 or not user2 or user1.is_anonymous() or user2.is_anonymous():
return False
return user2.followed_by(user1)
@register.filter
def followers_count(user):
"""
Returns user followers count
:param user: An User instance
"""
if not user or user.is_anonymous():
return 0
return user.followers()
@register.filter
def followed_count(user):
"""
    Returns how many users the given user is following
:param user: An User instance
"""
if not user or user.is_anonymous():
return 0
return user.following()
#--------------------------------------FRIENDSHIP TAGS-------------------------------------------
def process_user_param(user):
if not user:
return False
if isinstance(user, User):
return user
else:
try:
return User.objects.get(pk=int(user))
except:
return False
@register.filter
def is_friends_with(user1, user2):
"""
Returns whether user1 and user2 are friends or not.
:param user1: An User instance.
:param user2: An User instance.
"""
if not user1 or not user2 or user1.is_anonymous() or user2.is_anonymous():
return False
return user1.friend_of(user2)
@register.filter
def has_requested_friendship_to(user1, user2):
"""
Returns True if user1 has requested friendship to user2, False otherwise.
:param user1: An User instance.
:param user2: An User instance.
"""
if not user1 or not user2 or user1.is_anonymous() or user2.is_anonymous() or user1 == user2:
return False
return FriendRequest.objects.filter(from_user=user1, to_user=user2, accepted=False).exists()
@register.filter
def friends_count(user):
"""
Returns how many users have a "friendship" relationship with given user
:param user: An User instance.
"""
user_obj = process_user_param(user)
if not user_obj:
return 0
return user_obj.friends()
#--------------------------------------GROUPS TAGS-------------------------------------------
def process_group_param(group):
if not group:
return False
if isinstance(group, SocialGroup):
return group
else:
try:
return SocialGroup.objects.get(pk=int(group))
except:
return False
role_dict = {
'creator': _(u"Creator"),
'admin': _(u"Administrator"),
'member': _(u"Member")
}
@register.filter
def relationship_with(group, user):
"""
    Returns the display name of the relationship between the group and the given user
:param group: A SocialGroup instance.
:param user: An User instance.
"""
user_obj = process_user_param(user)
group_obj = process_group_param(group)
if not user_obj or not group_obj:
return None
return role_dict[group_obj.relationship_with(user_obj)[1]]
@register.assignment_tag
def has_creator(group, user):
"""
Returns True if user is the creator, False otherwise
:param user: An User instance.
:param group: A SocialGroup instance.
"""
user_obj = process_user_param(user)
group_obj = process_group_param(group)
if not user_obj or not group_obj:
return False
return group_obj.creator == user_obj
@register.assignment_tag
def has_admin(group, user):
"""
    Returns True if the user is in the group's list of administrators or is the creator, False otherwise
    :param user: A User instance.
:param group: A SocialGroup instance.
"""
user_obj = process_user_param(user)
group_obj = process_group_param(group)
if not user_obj or not group_obj:
return False
return group_obj.has_admin(user_obj)
@register.assignment_tag
def is_group_member(user, group):
"""
    Returns True if the user is a group member, False otherwise
    :param user: A User instance.
:param group: A SocialGroup instance.
"""
user_obj = process_user_param(user)
group_obj = process_group_param(group)
if not user_obj or not group_obj:
return False
return group_obj.has_member(user_obj)
@register.assignment_tag
def has_requested_membership(user, group):
"""
    Returns True if the user has requested membership in the group, False otherwise.
    :param user: A User instance.
    :param group: A SocialGroup instance.
"""
user_obj = process_user_param(user)
group_obj = process_group_param(group)
if not user_obj or not group_obj:
return False
return GroupMembershipRequest.objects.filter(
requester=user_obj,
group=group_obj,
accepted=False,
denied=False
).exists()
@register.simple_tag(takes_context=True)
def render_user_rol(context, user):
return role_dict[context['roles'][user.pk]]
@register.filter
def groups_count(user):
"""
Returns the total count of how many groups the user is a member of
"""
user_obj = process_user_param(user)
if not user_obj:
return 0
return user_obj.social_groups()
|
diana-gv/django-social-network
|
social_network/models.py
|
#coding: utf-8
from django.contrib.auth import get_user_model
from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save, m2m_changed
from django.db.models import permalink
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from notifications.models import Event, NotificationTemplateConfig
from social_graph import Graph
from .signals import (
social_group_member_added,
social_group_created,
friend_request_created,
social_group_membership_request_created,
social_group_photo_created,
social_group_comment_created,
feed_comment_created,
social_group_shared_link_created)
from .utils import integrated_by_edge, member_of_edge, generate_sha1, group_comment_event_type, group_photo_event_type, group_shared_link_event_type
User = get_user_model()
graph = Graph()
class FriendRequest(models.Model):
from_user = models.ForeignKey(User, related_name='user_outgoing_friend_requests', verbose_name=_(u'Requester'))
to_user = models.ForeignKey(User, related_name='user_incoming_friend_requests', verbose_name=_(u'Receiver'))
message = models.TextField(null=True, blank=True, verbose_name=_(u'Message'))
accepted = models.BooleanField(default=False, verbose_name=_(u'Accepted'))
denied = models.BooleanField(default=False, verbose_name=_(u'Denied'))
class Meta:
app_label = 'social_network'
def __unicode__(self):
return self.to_user.get_full_name() or self.to_user.get_username()
def accept(self, by_user):
if by_user != self.to_user or self.denied or self.accepted:
return False
if self.to_user.make_friend_of(self.from_user):
self.accepted = True
self.save()
return True
else:
raise Exception("A problem has occurred while trying to create a friendship edge.")
def deny(self, by_user):
if by_user != self.to_user or self.accepted or self.denied:
return False
self.denied = True
self.save()
return True
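# Illustrative usage sketch (added comment, not part of the original module);
# `alice` and `bob` stand for two existing User instances:
#   req = FriendRequest.objects.create(from_user=alice, to_user=bob, message="Hi!")
#   req.accept(by_user=bob)   # creates the friendship edge and marks the request accepted
#   req.deny(by_user=bob)     # returns False now, since the request was already accepted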
@receiver(post_save, sender=FriendRequest, dispatch_uid='post_save_friend_request')
def post_save_friend_request(sender, instance, created, **kwargs):
if created:
friend_request_created.send(
sender=FriendRequest,
user=instance.from_user,
instance=instance,
receiver=instance.to_user
)
class SocialGroupManagerMixin(object):
def integrated_by(self, user):
member_of = member_of_edge()
count = graph.edge_count(user, member_of)
ids = [node.pk for node, attributes, time in graph.edge_range(user, member_of, 0, count)]
return self.get_queryset().filter(pk__in=ids)
class SocialGroupManager(SocialGroupManagerMixin, models.Manager):
pass
class SocialGroupCurrentSiteManager(SocialGroupManagerMixin, CurrentSiteManager):
pass
class SocialGroup(models.Model):
creator = models.ForeignKey(User, related_name='groups_created_by', verbose_name=_(u'Creator'))
name = models.CharField(max_length=255, verbose_name=_(u'Name'))
description = models.TextField(verbose_name=_(u'Description'))
closed = models.BooleanField(default=False, verbose_name=_(u'Closed'))
administrators = models.ManyToManyField(User, related_name='groups_administrated_by',
verbose_name=_(u'Administrators'), null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
site = models.ForeignKey(Site)
def images_upload(self, filename):
salt, hash = generate_sha1(self.id)
return 'site-%s/groups/%s/%s/%s/%s' % (
self.site, '%s_%s' % (self._meta.app_label, self._meta.object_name.lower()), self.creator.pk, hash,
filename)
image = models.ImageField(verbose_name=_(u'Image'), upload_to=images_upload, null=True, blank=True, max_length=500)
objects = SocialGroupManager()
on_site = SocialGroupCurrentSiteManager()
class Meta:
app_label = 'social_network'
def __init__(self, *args, **kwargs):
super(SocialGroup, self).__init__(*args, **kwargs)
if not self.pk and not self.site_id:
self.site_id = Site.objects.get_current().pk
@permalink
def get_absolute_url(self):
return "social_group_details", [self.pk]
@property
def members(self):
return graph.edge_count(self, integrated_by_edge())
@property
def member_list(self):
edge = integrated_by_edge()
count = graph.edge_count(self, edge)
return [user for user, attributes, time in graph.edge_range(self, edge, 0, count)]
def specific_role_member_list(self, role):
edge = integrated_by_edge()
count = graph.edge_count(self, edge)
return [user for user, attributes, time in graph.edge_range(self, edge, 0, count) if attributes['role'] == role]
@property
def member_role_list(self):
edge = integrated_by_edge()
count = graph.edge_count(self, edge)
return dict([(user.pk, attributes.get('role', 'member'))
for user, attributes, time in graph.edge_range(self, edge, 0, count)])
def has_admin(self, user):
return user == self.creator or user in self.administrators.all()
def has_member(self, user):
return graph.edge_get(user, member_of_edge(), self) is not None
def relationship_with(self, user):
edge = graph.edge_get(user, member_of_edge(), self)
return user, edge.attributes.get('role', 'member') if edge else None
def add_member(self, member, acceptor=None):
if not acceptor:
acceptor = member
if self.closed and not self.has_admin(acceptor):
return False
_edge = graph.edge(member, self, member_of_edge(), self.site, {'role': 'member'})
if _edge:
social_group_member_added.send(
sender=SocialGroup,
group=self,
member=member,
user=acceptor
)
return True
else:
raise Exception("A problem has occurred while trying to create a membership edge.")
def __unicode__(self):
return self.name
@receiver(post_save, sender=SocialGroup, dispatch_uid='post_save_social_group')
def post_save_social_group(sender, instance, created, **kwargs):
if created:
# add creator to members
graph.edge(instance.creator, instance, member_of_edge(), instance.site, {'role': 'creator'})
social_group_created.send(sender=SocialGroup, instance=instance, user=instance.creator)
@receiver(m2m_changed, sender=SocialGroup.administrators.through, dispatch_uid='post_m2m_changed_social_group')
def post_m2m_changed_social_group(sender, instance, action, reverse, model, pk_set, **kwargs):
if action not in ('post_add', 'post_remove', 'post_clear'):
return
member_of = member_of_edge()
if not reverse: # the call has modified the direct relationship SocialGroup.administrators
group = instance
if action == 'post_clear':
for admin in group.specific_role_member_list('admin'):
graph.no_edge(admin, group, member_of, group.site)
else:
admins = model.objects.filter(pk__in=list(pk_set))
if action == 'post_add':
for admin in admins:
graph.edge(admin, group, member_of, group.site, {'role': 'admin'})
elif action == 'post_remove':
for admin in admins:
graph.no_edge(admin, group, member_of, group.site)
else: # the call has modified the reverse relationship: User.groups_administrated_by
admin = instance
if action == 'post_clear':
for group in admin.specific_role_social_group_list('admin'):
graph.no_edge(admin, group, member_of, group.site)
else:
groups = model.objects.filter(pk__in=list(pk_set))
if action == 'post_add':
for group in groups:
graph.edge(admin, group, member_of, group.site, {'role': 'admin'})
elif action == 'post_remove':
for group in groups:
graph.no_edge(admin, group, member_of, group.site)
class GroupMembershipRequest(models.Model):
requester = models.ForeignKey(User, related_name='requested_group_memberships', verbose_name=_(u'Requester'))
group = models.ForeignKey(SocialGroup, related_name='aspirants', verbose_name=_(u'Group'))
message = models.TextField(null=True, blank=True, verbose_name=_(u'Message'))
accepted = models.BooleanField(default=False, verbose_name=_(u'Accepted'))
denied = models.BooleanField(default=False, verbose_name=_(u'Denied'))
acceptor = models.ForeignKey(
User, related_name='accepted_group_memberships', verbose_name=_(u'Decider'), null=True, blank=True
)
class Meta:
app_label = 'social_network'
def accept(self, by_user):
if not by_user.is_admin_of(self.group) or self.denied or self.accepted:
return False
if self.group.add_member(self.requester, by_user):
self.accepted = True
self.acceptor = by_user
self.save()
return True
else:
return False
def deny(self, by_user):
if not by_user.is_admin_of(self.group) or self.accepted or self.denied:
return False
self.denied = True
self.acceptor = by_user
self.save()
return True
@receiver(post_save, sender=GroupMembershipRequest, dispatch_uid='post_save_group_membership_request')
def post_save_group_membership_request(sender, instance, created, **kwargs):
if created:
social_group_membership_request_created.send(
sender=GroupMembershipRequest,
instance=instance,
user=instance.requester,
group=instance.group
)
class GroupPost(models.Model):
    creator = models.ForeignKey(User, related_name='%(app_label)s_%(class)s_set_post')
    group = models.ForeignKey(SocialGroup, related_name='%(app_label)s_%(class)s_set_post')
comment = models.TextField()
class Meta:
abstract = True
class GroupComment(GroupPost):
class Meta:
app_label = 'social_network'
@receiver(post_save, sender=GroupComment, dispatch_uid='post_save_group_comment')
def post_save_group_comment(sender, instance, created, **kwargs):
if created:
social_group_comment_created.send(sender=GroupComment, user=instance.creator, instance=instance)
@receiver(social_group_comment_created, sender=GroupComment)
def social_network_group_comment(instance, user, **kwargs):
from notifications import create_event
create_event(user, group_comment_event_type(), instance, _(u'A comment has been posted in a group'))
class GroupSharedLink(GroupPost):
url = models.URLField()
class Meta:
app_label = 'social_network'
@receiver(post_save, sender=GroupSharedLink, dispatch_uid='post_save_group_shared_link')
def post_save_group_shared_link(sender, instance, created, **kwargs):
if created:
social_group_shared_link_created.send(sender=GroupSharedLink, user=instance.creator, instance=instance)
@receiver(social_group_shared_link_created, sender=GroupSharedLink)
def social_network_group_shared_link(instance, user, **kwargs):
from notifications import create_event
create_event(user, group_shared_link_event_type(), instance, _(u'A link has been shared in a group'))
class GroupImage(GroupPost):
def images_upload(self, filename):
salt, hash = generate_sha1(self.id)
return 'site-%s/groups_images/%s/%s/%s/%s' % (
settings.SITE_ID, '%s_%s' % (self._meta.app_label, self._meta.object_name.lower()), self.creator.pk, hash,
filename)
image = models.ImageField(verbose_name=_(u'Image'), upload_to=images_upload, null=True, blank=True, max_length=500)
class Meta:
app_label = 'social_network'
@receiver(post_save, sender=GroupImage, dispatch_uid='post_save_group_image')
def post_save_group_image(sender, instance, created, **kwargs):
if created:
social_group_photo_created.send(sender=GroupImage, user=instance.creator, instance=instance)
@receiver(social_group_photo_created, sender=GroupImage)
def social_network_group_photo(instance, user, **kwargs):
from notifications import create_event
create_event(user, group_photo_event_type(), instance, _(u'A photo has been posted in a group'))
class GroupFeedItem(models.Model):
group = models.ForeignKey(SocialGroup)
event = models.ForeignKey(Event)
template_config = models.ForeignKey(NotificationTemplateConfig)
site = models.ForeignKey(Site)
objects = models.Manager()
on_site = CurrentSiteManager()
class Meta:
app_label = 'social_network'
def __init__(self, *args, **kwargs):
super(GroupFeedItem, self).__init__(*args, **kwargs)
if not self.pk and not self.site_id:
self.site_id = self.event.site_id or Site.objects.get_current().pk
class FeedComment(models.Model):
creator = models.ForeignKey(User, related_name='feed_comments')
receiver = models.ForeignKey(User, related_name='feed_received_comments')
comment = models.TextField()
class Meta:
app_label = 'social_network'
@receiver(post_save, sender=FeedComment, dispatch_uid='post_save_feed_comment')
def post_save_feed_comment(sender, instance, created, **kwargs):
if created:
feed_comment_created.send(sender=FeedComment, user=instance.creator, instance=instance)
|
diana-gv/django-social-network
|
social_network/urls.py
|
<reponame>diana-gv/django-social-network
# coding=utf-8
from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
import views
urlpatterns = patterns(
'',
url(
r'^group/all/$',
views.SocialGroupListView.as_view(),
name='social_groups'
),
url(
r'^group/create/$',
login_required(views.BaseSocialGroupCreateView.as_view()),
name='social_group_create'
),
url(
r'^group/create/ajax/$',
login_required(views.SocialGroupCreateView.as_view()),
name='social_group_create_ajax'
),
url(
r'^group/(?P<pk>\d+)/$',
views.SocialGroupDetailView.as_view(),
name='social_group_details'
),
url(
r'^group/(?P<group>\d+)/members/$',
views.SocialGroupMembersList.as_view(),
name='social_group_members_list'
),
url(
r'^group/(?P<group>\d+)/edit/$',
login_required(views.BaseSocialGroupUpdateView.as_view()),
name='social_group_edit'
),
url(
r'^group/(?P<group>\d+)/edit/ajax/$',
login_required(views.SocialGroupUpdateView.as_view()),
name='social_group_edit_ajax'
),
url(
r'^group/list/(?P<user>\d+)/$',
views.SocialGroupUserList.as_view(),
name='social_group_user_list'
),
url(
r'^group/(?P<group>\d+)/comment/$',
login_required(views.BaseGroupCommentCreateView.as_view()),
name='social_group_comment_create'
),
url(
r'^group/(?P<group>\d+)/comment/ajax/$',
login_required(views.GroupCommentCreateView.as_view()),
name='social_group_comment_create_ajax'
),
url(
r'^group/(?P<group>\d+)/link/$',
login_required(views.BaseGroupLinkCreateView.as_view()),
name='social_group_link_create'
),
url(
r'^group/(?P<group>\d+)/link/ajax/$',
login_required(views.GroupLinkCreateView.as_view()),
name='social_group_link_create_ajax'
),
url(
r'^group/(?P<group>\d+)/photo/$',
login_required(views.BaseGroupPhotoCreateView.as_view()),
name='social_group_photo_create'
),
url(
r'^group/(?P<group>\d+)/photo/ajax/$',
login_required(views.GroupPhotoCreateView.as_view()),
name='social_group_photo_create_ajax'
),
url(
r'^group/(?P<group>\d+)/feed/$',
views.SocialGroupFeedView.as_view(),
name='social_group_feed'
),
url(
r'^group/(?P<group>\d+)/request_membership/$',
login_required(views.BaseSocialGroupRequestCreateView.as_view()),
name='social_group_request_create'
),
url(
r'^group/(?P<group>\d+)/request_membership/ajax/$',
login_required(views.SocialGroupRequestCreateView.as_view()),
name='social_group_request_create_ajax'
),
url(
r'^group/(?P<group>\d+)/requests/$',
login_required(views.SocialGroupMembershipRequestsList.as_view()),
name='social_group_request_list'
),
url(
r'^group/requests/(?P<pk>\d+)/accept/$',
login_required(views.SocialGroupRequestAcceptView.as_view()),
name='social_group_request_accept'
),
url(
r'^group/requests/(?P<pk>\d+)/deny/$',
login_required(views.SocialGroupRequestDenyView.as_view()),
name='social_group_request_deny'
),
url(
r'^group/(?P<group>\d+)/join/$',
login_required(views.SocialGroupJoinView.as_view()),
name='social_group_join'
),
url(
r'^group/(?P<group>\d+)/buttons/$',
login_required(views.MembershipButtonsTemplateView.as_view()),
name='social_group_membership_buttons'
),
url(
r'^user/(?P<receiver>\d+)/comment/$',
login_required(views.BaseFeedCommentCreateView.as_view()),
name="profile_comment_create"
),
url(
r'^user/(?P<receiver>\d+)/comment/ajax/$',
login_required(views.FeedCommentCreateView.as_view()),
name="profile_comment_create_ajax"
),
url(
r'^user/toggle_follow/$',
login_required(views.FollowerRelationshipToggleView.as_view()),
name="toggle_follower_relationship"
),
url(
r'^user/(?P<followed>\d+)/follow/$',
login_required(views.FollowerRelationshipCreateView.as_view()),
name="follower_relationship_create"
),
url(
r'^user/(?P<followed>\d+)/stop_following/$',
login_required(views.FollowerRelationshipDestroyView.as_view()),
name="follower_relationship_destroy"
),
url(
r'^user/(?P<receiver>\d+)/request_friendship/$',
login_required(views.BaseFriendRequestCreateView.as_view()),
name='friend_request_create'
),
url(
r'^user/(?P<receiver>\d+)/request_friendship/ajax/$',
login_required(views.FriendRequestCreateView.as_view()),
name='friend_request_create_ajax'
),
url(
r'^user/(?P<receiver>\d+)/friend_requests/$',
login_required(views.FriendRequestListView.as_view()),
name='friend_request_list'
),
url(
r'^user/friend_requests/(?P<pk>\d+)/accept/$',
login_required(views.AcceptFriendRequestView.as_view()),
name='friend_request_accept'
),
url(
r'^user/friend_requests/(?P<pk>\d+)/deny/$',
login_required(views.DenyFriendRequestView.as_view()),
name='friend_request_deny'
),
url(
r'^user/(?P<profile>\d+)/buttons/$',
login_required(views.FriendshipButtonsTemplateView.as_view()),
name='friendship_buttons'
),
)
|
rolfengelhard/iq-success-metrics2
|
releasefiles/reports2/create-components-quarantined.py
|
import json
import requests
import os
import os.path
import sys
iqurl = sys.argv[1]
iquser = sys.argv[2]
iqpwd = sys.argv[3]
jsonfile = 'componentsinquarantine.json'
csvfile = 'componentsinquarantine.csv'
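# Hedged invocation note (added comment, not in the original script): the three
# positional arguments read above are the IQ server URL, user and password, e.g.
#   python create-components-quarantined.py <iq-server-url> <user> <password>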
def get_metrics():
req = requests.get('{}/api/v2/reports/components/quarantined'.format(iqurl), auth=(iquser, iqpwd), verify=False)
if req.status_code == 200:
res = req.json()
else:
res = "Error fetching data"
return res
def writeToCsvFile(componentsQuarantined):
components = componentsQuarantined['componentsInQuarantine']
with open(csvfile, 'w') as fd:
fd.write("Repository,Format,PackageUrl,QuarantineTime,PolicyName,ThreatLevel\n")
for component in components:
repository = component["repository"]["publicId"]
format = component["repository"]["format"]
componentsList = component["components"]
for comp in componentsList:
packageUrl = comp["component"]["packageUrl"]
quarantineTime = comp["component"]["quarantineTime"]
if "policyViolations" in comp:
policyViolations = comp["policyViolations"]
for policyViolation in policyViolations:
policyName = policyViolation["policyName"]
threatLevel = policyViolation["threatLevel"]
line = repository + "," + format + "," + packageUrl + "," + quarantineTime + "," + policyName + "," + str(threatLevel) + "\n"
fd.write(line)
return
def main():
componentsQuarantined = get_metrics()
with open(jsonfile, 'w') as fd:
json.dump(componentsQuarantined, fd)
print(jsonfile)
writeToCsvFile(componentsQuarantined)
print(csvfile)
if __name__ == '__main__':
main()
|
rolfengelhard/iq-success-metrics2
|
releasefiles/reports2/create-policy-violations-data.py
|
import json
import requests
import os
import os.path
import sys
iqurl = sys.argv[1]
iquser = sys.argv[2]
iqpwd = sys.argv[3]
jsonfile = 'policyviolations.json'
csvfile = 'policyviolations.csv'
def getNexusIqData(api):
url = "{}{}" . format(iqurl, api)
req = requests.get(url, auth=(iquser, iqpwd), verify=False)
if req.status_code == 200:
res = req.json()
else:
res = "Error fetching data"
return res
def getCVEValue(d):
cve = "none"
if type(d) is dict:
cve = d["value"]
return(cve)
def getPolicyIds(data):
policyIds = ""
policies = data['policies']
for policy in policies:
name = policy["name"]
id = policy["id"]
if name == "Security-Critical" or name == "Security-High" or name == "Security-Medium" or name == "Security-Malicious" or name == "License-Banned" or name == "License-None" or name == "License-Copyleft":
policyIds += "p=" + id + "&"
result = policyIds.rstrip('&')
return result
def writeToCsvFile(policyViolations):
applicationViolations = policyViolations['applicationViolations']
with open(csvfile, 'w') as fd:
fd.write("PolicyName,CVE,ApplicationName,OpenTime,Component,Stage\n")
for applicationViolation in applicationViolations:
applicationPublicId = applicationViolation["application"]["publicId"]
policyViolations = applicationViolation["policyViolations"]
for policyViolation in policyViolations:
stage = policyViolation["stageId"]
openTime = policyViolation["openTime"]
policyName = policyViolation["policyName"]
packageUrl = policyViolation["component"]["packageUrl"]
constraintViolations = policyViolation["constraintViolations"]
for constraintViolation in constraintViolations:
values = ""
reasons = constraintViolation["reasons"]
for reason in reasons:
v = getCVEValue(reason["reference"])
values += v+":"
values = values[:-1]
line = policyName + "," + values + "," + applicationPublicId + "," + openTime + "," + packageUrl + "," + stage + "\n"
fd.write(line)
return
def main():
policies = getNexusIqData('/api/v2/policies')
policyIds = getPolicyIds(policies)
policyViolations = getNexusIqData("/api/v2/policyViolations?" + policyIds)
with open(jsonfile, 'w') as fd:
json.dump(policyViolations, fd)
print(jsonfile)
writeToCsvFile(policyViolations)
print(csvfile)
if __name__ == '__main__':
main()
|
rolfengelhard/iq-success-metrics2
|
releasefiles/reports2/create-waiver-data.py
|
<filename>releasefiles/reports2/create-waiver-data.py<gh_stars>0
import json
import requests
import os
import os.path
import sys
iqurl = sys.argv[1]
iquser = sys.argv[2]
iqpwd = sys.argv[3]
jsonfile = 'componentwaivers.json'
csvfile = 'componentwaivers.csv'
def get_metrics():
req = requests.get('{}/api/v2/reports/components/waivers'.format(iqurl), auth=(iquser, iqpwd), verify=False)
if req.status_code == 200:
res = req.json()
else:
res = "Error fetching data"
return res
def writeToCsvFile(componentWaivers):
waiverList = []
applicationWaivers = componentWaivers['applicationWaivers']
repositoryWaivers = componentWaivers['repositoryWaivers']
with open(csvfile, 'w') as fd:
fd.write("ApplicationName,Stage,PackageUrl,PolicyName,ThreatLevel,Comment,CreateDate,ExpiryTime\n")
for waiver in applicationWaivers:
applicationName = waiver['application']['publicId']
stages = waiver['stages']
for stage in stages:
stageId = stage['stageId']
componentPolicyViolations = stage['componentPolicyViolations']
for componentPolicyViolation in componentPolicyViolations:
packageUrl = componentPolicyViolation["component"]["packageUrl"]
waivedPolicyViolations = componentPolicyViolation['waivedPolicyViolations']
for waivedPolicyViolation in waivedPolicyViolations:
policyName = waivedPolicyViolation['policyName']
threatLevel = waivedPolicyViolation['threatLevel']
comment = waivedPolicyViolation['policyWaiver']['comment']
createDate = waivedPolicyViolation['policyWaiver']['createTime']
expiryTime = ""
if "\n" in comment:
comment = comment.replace("\n", "-")
if "," in comment:
comment = comment.replace(",", "|")
line = applicationName + "," + stageId + "," + packageUrl + "," + policyName + "," + str(threatLevel) + "," + comment + "," + createDate + "," + expiryTime + "\n"
fd.write(line)
fd.close()
with open(csvfile, 'a') as fd:
for waiver in repositoryWaivers:
name = waiver['repository']['publicId']
stages = waiver['stages']
for stage in stages:
stageId = stage['stageId']
componentPolicyViolations = stage['componentPolicyViolations']
for componentPolicyViolation in componentPolicyViolations:
packageUrl = componentPolicyViolation["component"]["packageUrl"]
waivedPolicyViolations = componentPolicyViolation['waivedPolicyViolations']
for waivedPolicyViolation in waivedPolicyViolations:
policyName = waivedPolicyViolation['policyName']
threatLevel = waivedPolicyViolation['threatLevel']
comment = waivedPolicyViolation['policyWaiver']['comment']
createDate = waivedPolicyViolation['policyWaiver']['createTime']
line = name + "," + stageId + "," + packageUrl + "," + policyName + "," + str(threatLevel) + "," + comment + "," + createDate + ",\n"
fd.write(line)
return
def main():
componentWaivers = get_metrics()
with open(jsonfile, 'w') as fd:
json.dump(componentWaivers, fd, indent=4)
print(jsonfile)
writeToCsvFile(componentWaivers)
print(csvfile)
if __name__ == '__main__':
main()
|
rolfengelhard/iq-success-metrics2
|
releasefiles/reports2/create-application-evaluations-data.py
|
<gh_stars>1-10
import json
import requests
import os
import os.path
import sys
iqurl = sys.argv[1]
iquser = sys.argv[2]
iqpwd = sys.argv[3]
jsonfile = 'applicationevaluations.json'
csvfile = 'applicationevaluations.csv'
def get_metrics():
req = requests.get('{}/api/v2/reports/applications'.format(iqurl), auth=(iquser, iqpwd), verify=False)
if req.status_code == 200:
res = req.json()
else:
res = "Error fetching data"
return res
def getApplicationName(urlPath):
l = urlPath.split('/')
return(l[3])
def writeToCsvFile(applicationEvaluations):
with open(csvfile, 'w') as fd:
fd.write("ApplicationName,EvaluationDate,Stage\n")
for applicationEvaluation in applicationEvaluations:
stage = applicationEvaluation["stage"]
evaluationDate = applicationEvaluation["evaluationDate"]
applicationId = applicationEvaluation["applicationId"]
applicationName = getApplicationName(applicationEvaluation["reportDataUrl"])
line = applicationName + "," + evaluationDate + "," + stage + "\n"
fd.write(line)
return
def main():
applicationEvaluations = get_metrics()
with open(jsonfile, 'w') as fd:
json.dump(applicationEvaluations, fd)
print(jsonfile)
writeToCsvFile(applicationEvaluations)
print(csvfile)
if __name__ == '__main__':
main()
|
junkainiu/codewars
|
leo/python/lv4/sum_of_factors.py
|
<reponame>junkainiu/codewars<filename>leo/python/lv4/sum_of_factors.py
def sum_for_list(lst):
result = []
an = []
for index,elem in enumerate(lst):
i = 2
while abs(elem) > 1:
while elem%i == 0:
elem = elem/i
if not [i,index] in result:
result.append([i,index])
i += 1
keys = sorted(list(set([a[0] for a in result])))
for elem in keys:
r = 0
for value in result:
if value[0] == elem:
r += lst[value[1]]
an.append([elem,r])
return an
sum_for_list([15, 21, 24, 30, -45])
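# Worked check (added comment, not in the original file): for the sample call above
# the primes dividing at least one element are 2, 3, 5 and 7, giving
# [[2, 54], [3, 45], [5, 0], [7, 21]]  (e.g. 2 divides 24 and 30, and 24 + 30 = 54).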
|
junkainiu/codewars
|
leo/python/lv4/explosiv_sum.py
|
def exp_sum(n, start=1):
    # Count the partitions of n into parts that are each >= start.
    if n == 0:
        return 1
    if start > n:
        return 0
    total = 0
    for i in xrange(start, n + 1):
        total += exp_sum(n - i, i)
    return total
print exp_sum(3, 1)
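# Worked check (added comment): exp_sum(4) should be 5, matching the five
# partitions of 4: 4, 3+1, 2+2, 2+1+1 and 1+1+1+1.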
|
junkainiu/codewars
|
leo/python/lv3/chemstry.py
|
import re
from collections import defaultdict
atom_pattern = re.compile(r'[A-Z]')
sub_atom_pattern = re.compile(r'[a-z]')
num_pattern = re.compile(r'\d')
br_start_pattern = re.compile(r'\[|\{|\(')
br_end_pattern = re.compile(r'\]|\}|\)')
def get_num(formula, i):
r = re.match(r'\d+', formula[i:])
len = r.span()[1] - r.span()[0]
result = int(r.group())
return result,len
def calculate_br_pos(formula):
result = []
pair_pattern = re.compile(r'\[\w+\]|\{\w+\}|\(\w+\)')
while pair_pattern.search(formula):
br_pos = pair_pattern.search(formula).span()
br_len = br_pos[1] - br_pos[0]
result.append(br_pos)
formula = pair_pattern.sub('a'*br_len, formula, 1)
return result
def parse_molecule(formula):
i = 0
result = defaultdict(dict)
br_position = calculate_br_pos(formula)
while i<len(formula):
try:
if atom_pattern.match(formula[i]):
name = formula[i]
if sub_atom_pattern.match(formula[i+1]):
name = formula[i] + formula[i+1]
if num_pattern.match(formula[i+2]):
num,num_len = get_num(formula, i+2)
result[name].update({i+2 : num})
i = i+2+num_len
else:
result[name].update({i+1 : 1})
i += 2
elif num_pattern.match(formula[i+1]):
num,num_len = get_num(formula, i+1)
result[name].update({i+1 : num})
i = i + 1 + num_len
else:
result[name].update({i : 1})
i += 1
elif br_start_pattern.match(formula[i]):
i += 1
elif br_end_pattern.match(formula[i]):
if num_pattern.match(formula[i+1]):
num,num_len = get_num(formula, i+1)
result = add_br_end_num(result, i+1, num, br_position)
i += 1 + num_len
else:
result = add_br_end_num(result, i+1, 1, br_position)
i += 1
else:
i += 1
except IndexError:
if num_pattern.match(formula[len(formula)-1]):
if atom_pattern.match(formula[i]):
num,num_len = get_num(formula, i+1)
                    result[name].update({i+1 : num})
i = i + 1 + num_len
elif br_end_pattern.match(formula[i]):
num,num_len = get_num(formula, i+1)
result = add_br_end_num(result, i+1, num, br_position)
i += 1 + num_len
else:
i += 1
elif br_end_pattern.match(formula[len(formula)-1]):
if atom_pattern.match(formula[i]):
result[name].update({i : 1})
i += 1
else:
i += 1
elif sub_atom_pattern.match(formula[len(formula)-1]):
result[name].update({i+1 : 1})
i += 2
elif atom_pattern.match(formula[len(formula)-1]):
result[name].update({i : 1})
i += 1
continue
result = calculate_result(result)
print result
return result
def add_br_end_num(result, i, v, br_position):
for elem in br_position:
if elem[1] == i:
for value in result.itervalues():
for key in value.keys():
if key > elem[0] and key < elem[1]:
value[key] = int(value[key]) * int(v)
return result
def calculate_result(result):
result = {key:sum([int(v) for v in value.itervalues()]) for key,value in result.iteritems()}
return result
|
junkainiu/codewars
|
leo/python/lv4/sum_of_money.py
|
def count_change(money, coins):
    # Bottom-up DP: ways[m] = number of ways to form amount m with the coins seen so far.
    ways = [1] + [0] * money
    for coin in sorted(coins):
        for amount in xrange(coin, money + 1):
            ways[amount] += ways[amount - coin]
    return ways[money]
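# Worked check (added comment): count_change(4, [1, 2]) == 3, matching the
# combinations 1+1+1+1, 1+1+2 and 2+2.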
|
junkainiu/codewars
|
leo/python/lv3/fib.py
|
def mul(a, b):
r = [[0, 0], [0, 0]]
    r[0][0] = a[0][0] * b[0][0] + a[0][1] * b[1][0]
    r[0][1] = a[0][0] * b[0][1] + a[0][1] * b[1][1]
    r[1][0] = a[1][0] * b[0][0] + a[1][1] * b[1][0]
    r[1][1] = a[1][0] * b[0][1] + a[1][1] * b[1][1]
    return r
def _fib(n):
if n == 0:
return [[1, 0], [0, 1]]
if n == 1:
return [[1, 1], [1, 0]]
if n % 2 == 0 :
return mul(_fib((n / 2)), _fib((n / 2)))
else:
return mul(mul(_fib((n-1) / 2), _fib((n-1) / 2)), [[1, 1], [1, 0]])
def fib(n):
if n<0:
return _fib(-n)[1][0] * (1 if n%2 == 1 else -1)
else:
return _fib(n)[1][0]
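# Minimal demo sketch (added, not part of the original file): exercises the
# matrix-exponentiation fib() on a few values, including negative indices.
if __name__ == '__main__':
    for n in (-8, -1, 0, 1, 2, 10):
        print n, fib(n)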
|
svcvit/Live-data-generator
|
src/ws/app.py
|
<reponame>svcvit/Live-data-generator
"""
ws tool for excel and pdf
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
import datetime
import time
import pandas as pd
from .tool import tools
def get_localtime():
now = datetime.datetime.now()
# offset = datetime.timedelta(hours=2)
start = now.strftime('%Y-%m-%d')+" 15:00:00"
end = now.strftime('%Y-%m-%d')+" 18:00:00"
return [start,end]
class WS(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
self.get_location()
self.tool = tools(self.resources)
main_box = toga.Box(style=Pack(padding=10))
container = toga.OptionContainer()
container_box1 = self.container_box1()
        # add the generator tab to the option container
container.add('观看数据生成器', container_box1)
main_box.add(container)
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
def get_location(self):
self.resources = self.factory.paths.app / self.factory.paths.Path("resources/")
def generate_table(self):
pass
def container_box1(self):
label_style = Pack(flex=1)
style_row = Pack(direction=ROW, flex=1,padding=10)
style_column = Pack(direction=COLUMN)
style_flex = Pack(flex=1,
# font_family='monospace', font_size=14,height=20
)
self.now = get_localtime()
province_list = self.tool.get_province()
self.start_datetime = toga.MultilineTextInput(style=style_flex,initial=self.now[0])
self.end_datetime = toga.MultilineTextInput(style=style_flex,initial=self.now[1])
self.export_number = toga.NumberInput(style=style_flex, min_value=0, default=300)
self.city = toga.Selection(items=province_list,style = Pack(flex=1))
row1 = toga.Box(
style=style_row,
children=[
toga.Label("输入开始时间", style=label_style),
self.start_datetime]
)
row2 = toga.Box(
style=style_row,
children=[
toga.Label("输入结束时间", style=label_style),
self.end_datetime]
)
row3 = toga.Box(
style=style_row,
children=[
toga.Label("姓名隐私保护", style=label_style),
toga.Selection(items=["姓名加**","昵称"],style = Pack(flex=1))
]
)
row4 = toga.Box(
style=style_row,
children=[
toga.Label("观看直播主要城市", style=label_style),
self.city
]
)
row5 = toga.Box(
style=style_row,
children=[
toga.Label("大概生成数量", style=label_style),
self.export_number]
)
row6 = toga.Box(
style=style_row,
children=[
toga.Button('生成表格',style=style_flex,on_press=self.button_click_01),
]
)
box = toga.Box(
style=style_column,
children=[row1, row2,row4,row5,row6]
)
return box
    # Button handlers
    # (originally labelled: centralized procurement task allocation sheet)
def button_click_01(self, widget):
province =self.city.value
start = str(self.start_datetime.value)
end = str(self.end_datetime.value)
count = int(self.export_number.value)
        # read the input values
date_time = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
fname = f'直播观看数据-{date_time}.xlsx'
output = self.tool.generate_dataset(province=province,count=count,rate=2/10,start=start,end=end)
try:
save_path = self.main_window.save_file_dialog(
"保存文件",
suggested_filename=fname)
if save_path is not None:
file_name_out = save_path
with pd.ExcelWriter(file_name_out, engine='xlsxwriter') as writer:
output.to_excel(writer, sheet_name='Sheet1',index=False)
formatObj = writer.book.add_format({'num_format': 'hh:mm:ss'})
writer.book.sheetnames['Sheet1'].set_column('A:A',14, formatObj)
writer.book.sheetnames['Sheet1'].set_column('B:D',20, formatObj)
writer.book.sheetnames['Sheet1'].set_column('E:E',12, formatObj)
writer.book.sheetnames['Sheet1'].set_column('G:H',20, formatObj)
self.main_window.info_dialog('提示', '数据导出成功')
else:
self.main_window.info_dialog('提示', '取消数据导出')
except ValueError:
self.main_window.info_dialog('提示', '取消数据导出')
def main():
return WS()
|
svcvit/Live-data-generator
|
src/ws/tool.py
|
import pandas as pd
from faker import Faker
import random
import os
import numpy as np
import datetime
import arrow
class tools():
def __init__(self,path):
self.path = str(path)
def get_province(self):
ip_data = pd.read_feather(self.path+'/ip_data.feather')
return list(ip_data['province'].unique())
def get_datetime(self,input_datetime = '2022-01-19 15:00:00',count=300):
mid_datetime = arrow.get(input_datetime).naive
s = list(np.random.randint(-10*60,10*60,size=count))
datatime_list = []
for item in s:
offset = datetime.timedelta(seconds=int(item))
time = mid_datetime + offset
datatime_list.append(time)
return datatime_list
def createRandomString(self,len):
result = []
for i in range (len):
raw = ""
range1 = range(58, 65) # between 0~9 and A~Z
range2 = range(91, 97) # between A~Z and a~z
i = 0
while i < 12:
seed = random.randint(48, 122)
if ((seed in range1) or (seed in range2)):
continue
raw += chr(seed)
i += 1
result.append(raw)
return result
def long2ip(self,long):
floor_list=[]
yushu=long
for i in reversed(range(4)): #3,2,1,0
res=divmod(yushu,256**i)
floor_list.append(str(res[0]))
yushu=res[1]
        return '.'.join(floor_list)
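    # Worked example (added comment): long2ip(16777472) == '1.0.1.0', since
    # 16777472 = 1*256**3 + 0*256**2 + 1*256 + 0.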
def get_fakename(self,number=300):
result =[]
fake = Faker(['zh_CN'])
for _ in range(number):
result.append(fake.name())
return result
def get_nickname(self,number=300):
table = pd.read_excel(self.path+'/nickname.xlsx')
result = random.sample(list(table['nickname']), number)
return result
def get_ramdon_ip(self,ip=16777472):
offset = random.randint(1,254)
ip_address = ip+offset
return self.long2ip(ip_address)
def generate_dataset(self,province="上海市",count=300,rate=2/10,start='2022-01-19 15:00:00',end='2022-01-19 18:00:00'):
ip_data = pd.read_feather(self.path+'/ip_data.feather')
selected_ip = ip_data[ip_data['province']==province]
out_selected_ip = ip_data[ip_data['province']!=province]
if len(selected_ip) >= count:
            # random sampling
order = np.random.randint(0,len(selected_ip),size=count)
            # pick DataFrame rows using the random sample
newDf = selected_ip.take(order)
else:
loop = int(count/len(selected_ip))
newDf = selected_ip
for i in range(loop):
newDf = pd.concat([newDf,selected_ip],sort=False)
out_numbner = int(count*rate)
order_out = np.random.randint(0,len(out_selected_ip),size=out_numbner)
newDf_out = out_selected_ip.take(order_out)
newDf = pd.concat([newDf,newDf_out],sort=False)
newDf['ip'] = newDf['ip_start_num'].apply(self.get_ramdon_ip)
result = newDf[['province','city','location','ip']]
ramdom_result = result.take(np.random.permutation(len(result))).reset_index(drop=True)
nickname = self.get_nickname(len(ramdom_result))
enter_time = self.get_datetime(start,len(ramdom_result))
out_time = self.get_datetime(end,len(ramdom_result))
id = self.createRandomString(len(ramdom_result))
df_nickname = pd.DataFrame({'id':id,'nickname':nickname,'enter_time':enter_time,'out_time':out_time}).reset_index(drop=True)
new = pd.concat([ramdom_result,df_nickname],axis=1, sort=False)
new['during_time'] = new['out_time']-new['enter_time']
new['during_time'] = new['during_time'].dt.seconds/(24*60*60)
new['enter_time'] = new['enter_time'].dt.tz_localize(None)
new['out_time'] = new['out_time'].dt.tz_localize(None)
output =new.rename(columns={"id":"ID", "nickname":"昵称", "enter_time":"进入时间",
"out_time":"退出时间","during_time":"在线时长","province":"省份","city":"城市","ip":"IP地址"
})
output=output[['ID','昵称','进入时间','退出时间','在线时长','省份','城市','IP地址']]
return output
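# Hedged worked note (added comment): with the defaults count=300 and rate=2/10,
# generate_dataset first draws ~300 rows whose IPs fall in the chosen province and
# then appends int(300 * 0.2) = 60 rows from other provinces before shuffling and
# attaching nicknames, enter/exit timestamps and random IDs.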
|
adrian7123/django-gerenciador-de-funcionarios
|
myproject/urls.py
|
<reponame>adrian7123/django-gerenciador-de-funcionarios
"""my_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from my_app import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home, name="home"),
path('funcionarios/', views.funcionarios, name="funcionarios"),
path('funcionario/cadastro', views.cadastroFuncionario, name="cadastroFunc"),
path('funcionario/cadastrar', views.cadastrarFuncionario, name="cadastrarFunc"),
path('funcionario/deletar/<int:id>', views.deletarFuncionario, name="deletarFunc"),
path('funcionario/editar/<int:id>', views.editarFuncionario, name="editarFunc"),
    path('funcionario/editado/<int:id>', views.editado, name="editado")
]
|
adrian7123/django-gerenciador-de-funcionarios
|
my_app/admin.py
|
<filename>my_app/admin.py
from django.contrib import admin
from my_app.models import Funcionarios
# Register your models here.
admin.site.register(Funcionarios)
|
adrian7123/django-gerenciador-de-funcionarios
|
my_app/views.py
|
from django.shortcuts import render, redirect
from .models import Funcionarios
# Create your views here.
def home(req):
return render(req, 'views/home.html')
def funcionarios(req):
func = Funcionarios.objects.all()
context = {
'req': req,
'func': func
}
return render(req, 'views/funcionarios/index.html',
context)
def cadastroFuncionario(req):
context = {
'req': req
}
return render(req,
'views/funcionarios/cadastrar.html',
context )
def cadastrarFuncionario(req):
newFunc = Funcionarios.objects.create(
nome = req.POST['nome'],
sobrenome = req.POST['sobrenome'],
cargo = req.POST['cargo'],
empresa = req.POST['empresa']
)
newFunc.save()
return redirect('funcionarios')
def deletarFuncionario(req, id):
existFunc = Funcionarios.objects.get(id=id)
existFunc.delete()
return redirect('funcionarios')
def editarFuncionario(req, id):
funcionario = Funcionarios.objects.get(id=id)
context = {
'req': req,
'func': funcionario
}
return render(req, 'views/funcionarios/editar.html', context)
def editado(req, id):
func = Funcionarios.objects.get(id=id)
func.nome = req.POST['nome']
func.sobrenome = req.POST['sobrenome']
func.empresa =req.POST['empresa']
func.cargo = req.POST['cargo']
func.save()
return redirect('funcionarios')
|
adrian7123/django-gerenciador-de-funcionarios
|
my_app/models.py
|
from django.db import models
# Create your models here.
class Funcionarios(models.Model):
nome = models.CharField(max_length=30)
sobrenome = models.CharField(max_length=30)
cargo = models.CharField(max_length=30)
empresa = models.CharField(max_length=30)
def __str__(self):
return self.nome
|
pplewka/zwo
|
src/db.py
|
<filename>src/db.py
import sqlite3
from parser import Document
from typing import Sequence, Callable, TypeVar
DB_NAME = "nyt.sqlite"
STATEMENT_CACHE = 100000
STATS_FUNCS = dict()
DBConnection = sqlite3.Connection
T = TypeVar('T')
def chunks(seq: Sequence[T], n: int = 1000) -> Sequence[Sequence[T]]:
# we batch 1000 sql commands instead of 10. 10 was extremely slow
"""Divide an iterable into chunks of size n"""
for i in range(0, len(seq), n):
yield seq[i:i + n]
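# Illustrative sketch (added comment): chunking a short sequence with n=2
#   >>> list(chunks([1, 2, 3, 4, 5], n=2))
#   [[1, 2], [3, 4], [5]]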
def compute_statistics(connection: DBConnection) -> None:
"""Compute the dfs, dsl, d statistics and write them to the db"""
for f in STATS_FUNCS.values():
f(connection)
def create_db(db_name: str = DB_NAME) -> DBConnection:
"""Creates a new database with given name. Only the empty tables docs and tfs will be present after this."""
connection = open_db(db_name)
connection.execute("""
CREATE TABLE docs
(did INTEGER PRIMARY KEY,
title TEXT NOT NULL,
url TEXT NOT NULL)
""")
connection.execute("""
CREATE TABLE tfs
(did INTEGER,
term TEXT NOT NULL,
tf INTEGER)
""")
connection.execute("""
CREATE TABLE boost
(did INTEGER,
date INTEGER,
page INTEGER
)""")
print(f"[+] Created db {DB_NAME}")
return connection
def open_db(db_name: str = DB_NAME) -> DBConnection:
"""Opens the database with given name"""
return sqlite3.connect(db_name, cached_statements=STATEMENT_CACHE)
def insert_documents(connection: DBConnection, documents: Sequence[Document]) -> None:
"""Inserts all documents into the docs table"""
max_ = len(documents)
current = 0
print() # print an extra line, because we will delete lines with printing \r
for chunk in chunks(documents):
connection.execute("BEGIN TRANSACTION")
for doc in chunk:
# python doesn't support prepared statements, but instead has a builtin sql cache
connection.execute(
"INSERT INTO docs(did, title, url) VALUES (?, ?, ?)", doc.convert_to_tuple())
current += 1
print(f"\r[{current}/{max_}] doc done", end='')
connection.execute("COMMIT")
def insert_boost(connection: DBConnection, documents: Sequence[Document]) -> None:
"""Inserts all values into the boost table"""
max_ = len(documents)
current = 0
print() # print an extra line, because we will delete lines with printing \r
for chunk in chunks(documents):
connection.execute("BEGIN TRANSACTION")
for doc in chunk:
connection.execute(
"INSERT INTO boost(did, date, page) VALUES (?, ?, ?)", (doc.id, doc.date, doc.page))
connection.execute("COMMIT")
current += len(chunk)
print(f"\r[{current}/{max_}] boost done", end='')
print()
def insert_tfs(connection: DBConnection, documents: Sequence[Document]) -> None:
"""Inserts all term frequencies into the tfs table"""
max_ = len(documents)
current = 0
print() # print an extra line, because we will delete lines with printing \r
for chunk in chunks(documents):
rows = (d.get_tfs_rows() for d in chunk)
connection.execute("BEGIN TRANSACTION")
for row in rows:
connection.executemany(
"INSERT INTO tfs(did, term, tf) VALUES (?, ?, ?)", row)
connection.execute("COMMIT")
current += len(chunk)
print(f"\r[{current}/{max_}] doc-tfs done", end='')
print()
def get_headline(connection: DBConnection, did: int):
"""Retrieves the headline of a article in the db"""
return connection.execute("SELECT title FROM docs WHERE did=:did", (did,)).fetchone()[0]
def get_url(connection: DBConnection, did: int):
"""Retrieves the headline of a article in the db"""
return connection.execute("SELECT url FROM docs WHERE did=:did", (did,)).fetchone()[0]
def get_max_page(connection: DBConnection) -> int:
"""Retrieves the maximum page of a article in the db"""
return connection.execute("SELECT max_page FROM max_page").fetchone()[0]
def collection_statistic(func: Callable) -> Callable:
"""Decorator Function to mark a function as computing statistics."""
STATS_FUNCS[func.__name__] = func
return func
@collection_statistic
def create_and_insert_dls(connection: DBConnection) -> None:
"""Creates and fills the table dls with document ids and the length of the document"""
print("\n[-] creating table dls", end="")
connection.execute("""
CREATE TABLE dls AS
SELECT did, SUM(tf) AS len FROM tfs GROUP BY did
""")
print("\r[+] creating table dls")
@collection_statistic
def create_and_insert_dfs(connection: DBConnection) -> None:
"""Creates and fills the table dfs with terms and their document frequencies"""
print("\n[-] creating table dfs", end="")
connection.execute("""
CREATE TABLE dfs AS
SELECT term, COUNT(tf) AS df FROM tfs GROUP BY term
""")
print("\r[+] creating table dfs")
@collection_statistic
def create_and_insert_d(connection: DBConnection) -> None:
"""Create and fills the table d with the total number of documents in the collection"""
print("\n[-] creating table d", end="")
connection.execute("""
CREATE TABLE d AS
SELECT COUNT(DISTINCT did) AS size FROM tfs
""")
print("\r[+] creating table d")
@collection_statistic
def create_and_insert_max_page(connection: DBConnection) -> None:
"""Create and fills the table max_page with the maximum page number in the collection"""
print("\n[-] creating table max_page", end="")
connection.execute("""
CREATE TABLE max_page AS
SELECT MAX(page) AS max_page from boost""")
print("\r[+] creating table max_page")
|
pplewka/zwo
|
src/parser.py
|
<filename>src/parser.py
import re
import xml.etree.ElementTree as XML
from collections import Counter
from dataclasses import dataclass
from typing import Sequence, Union, Iterable, Tuple, List
from pathlib import Path
import constants
@dataclass
class Document:
"""Definition of a document"""
id: int
title: str
url: str
abstract: str
content: Sequence[str]
content_counter: Counter
title_counter: Counter
abstract_counter: Counter
date: int
page: int
def __init__(self, id: int, title: str, url: str, content: Sequence[str], abstract: str, date: str, page: int):
self.id = int(id)
self.title = title
self.url = url
self.abstract = abstract
self.content = content
# count term frequencies on initialization, saving us a lot of time later.
self.content_counter = Counter(self.content)
self.title_counter = Counter(Parser.tokenize([self.title]))
self.abstract_counter = Counter(Parser.tokenize([self.abstract]))
self.date = int(date.replace("T", ""))
self.page = int(page)
def __repr__(self):
from pprint import pformat
return pformat(vars(self), indent=2, compact=True)
def convert_to_tuple(self) -> Tuple:
"""Converts the document into a tuple, ready to be inserted into the docs table"""
return self.id, self.title, self.url
def get_tfs_rows(self) -> Iterable:
"""Returns all rows for the tfs table of this document"""
for term in self.content_counter.keys():
yield (self.id, term, self.content_counter[term] * constants.TUNABLE_WEIGHT_CONTENT +
self.title_counter[term] * constants.TUNABLE_WEIGHT_TITLE + self.abstract_counter[
term] * constants.TUNABLE_WEIGHT_ABSTRACT)
class Parser:
"""Parser class taking an XML file and turning it into a Document object"""
# The regex consists of two parts. With [^a-zA-Z0-9 ]+ we match all the characters that we want to remove,
# such as full stops, non - alphanumeric characters etc. Next, we use a negative lookbehind to find acronyms and
# exclude them from the matches. An acronym as consisting at least two upper or lowercase characters, each followed
# by a dot, followed by a whitespace.
__TOKENIZE_REGEX = r'[^a-zA-Z0-9 ]+(?<!( |\.)[a-zA-Z]\.)'
__COMPILED_REGEX = re.compile(__TOKENIZE_REGEX)
@staticmethod
def parse(path: Path) -> Document:
"""Takes a XML Document and parses it to a Document. Performs tokenization for the content as well"""
with path.open('r') as input_:
prospective_doc = _nytcorpus_to_document(XML.parse(input_))
content = Parser.tokenize(prospective_doc[3])
return Document(prospective_doc[0], prospective_doc[1], prospective_doc[2], content, prospective_doc[4],
prospective_doc[5], prospective_doc[6])
@staticmethod
def tokenize(content: List[str]) -> List[str]:
# Begin tokenizing
# Replace non alphanumeric while keeping abbreviations with whitespace.
# Uses a "negative" regex, matching only the things that need to be removed,
# instead of finding the things to keep.
cleaned = (re.sub(Parser.__COMPILED_REGEX, " ", s) for s in content)
# Lowercase and split at whitespace.
return ' '.join([par.lower() for par in cleaned]).split()
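# Illustrative sketch (added comment, not part of the original module):
#   >>> Parser.tokenize(["Hello, World!"])
#   ['hello', 'world']
# punctuation runs are replaced by spaces before lowercasing and splitting.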
def _nytcorpus_to_document(root: Union[XML.Element, XML.ElementTree]) -> Tuple[int, str, str, List[str], str, str, int]:
""" Simple XML parsing function that extracts our Document object from a given news article.
The content field of the returned document will not be tokenized.
    It is still a sequence of strings, each string representing a new paragraph.
"""
from sys import stderr
head = root.find("./head")
body = root.find("./body")
docdata = head.find("./docdata")
pubdata = head.find("./pubdata")
id_ = "-1" # fallback value
date = ""
page = -1
try:
id_ = docdata.find("./doc-id").get('id-string')
title = head.find("./title")
if title is None:
print("Document {} had no title.".format(id_), file=stderr)
title = "NO TITLE FOUND"
else:
title = title.text
abstract = body.find("./body.head/abstract/p")
if abstract is None:
abstract = ""
else:
abstract = abstract.text
if abstract is None:
abstract = ""
url = pubdata.get('ex-ref')
date = pubdata.get("date.publication")
try:
page = int(head.find("./meta[@name='print_page_number']").get("content"))
except AttributeError:
page = 100
# Already cleans out all the HTML Elements
content = [*[par.text for par in body.findall(
"./body.content/*[@class='full_text']/p")]] # , *
# [par.text for par in body.findall(
# "./body.content/*[@class='lead_paragraph']/p")]]
except AttributeError as attr:
# We can't do much if finding a url or the content fails.
print("Attribute error for document ID: " + id_, file=stderr)
print(attr, file=stderr)
return int(id_), "Error", "Error", [], "Error", "", -1
return int(id_), title, url, content, abstract, date, page
|
pplewka/zwo
|
src/query_processing.py
|
import sqlite3
import heapq
from datetime import datetime
from math import log
from typing import List
from dataclasses import dataclass, field
from constants import ADDITIVE_CONSTANT_DATE_BOOST, TUNABLE_WEIGHT_DATE, TUNABLE_WEIGHT_PAGE
from db import get_max_page
from parser import Parser
from posting_list import InvertedIndex
DBConnection = sqlite3.Connection
@dataclass(order=True)
class Accumulator:
did: int = field(compare=False)
score: float
def __eq__(self, other):
return self.did == other.did
def __add__(self, other): # Overload "+" Operator
if self == other:
return Accumulator(self.did, self.score + other.score)
else:
raise ValueError
def __iadd__(self, other): # Overload "+=" assignment
if isinstance(other, Accumulator):
self.score += other.score
return self
else:
self.score += other
return self
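# Illustrative sketch (added comment): accumulators for the same document id sum scores,
#   acc = Accumulator(did=7, score=1.5)
#   acc += Accumulator(did=7, score=2.0)   # acc.score == 3.5
# while ordering (for heapq / sorted) compares only the score field.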
class QueryProcessor:
def __init__(self, connection: DBConnection):
self.index = InvertedIndex(connection)
self.collection_size = self.index.getSize()
self.max_page = get_max_page(connection)
self.first_day = datetime(2000, 1, 1)
self.max_days = 366
def process(self, query: str, k: int = -1) -> List[Accumulator]:
"""Process a query string and return the weighted results.
@param: query - the query string
@param: k - number of top k results to return, if empty, default of -1 is used, indicating all results.
"""
terms = Parser.tokenize([query])
results = dict()
# print(f'Processing terms: {terms}')
for t in terms:
try:
df = self.index.getDF(t)
except TypeError:
continue
plist = self.index.getIndexList(t)
term_specific_constant = log(self.collection_size / df)
for posting in plist:
boost_value = (1 - TUNABLE_WEIGHT_PAGE * (posting.page / self.max_page)) * self.get_date_boost(
posting.date)
acc = self.score(posting.did, posting.tf, term_specific_constant, boost_value)
try: # Try to sum up the values
results[posting.did] += acc
except KeyError: # No posting for this did has been seen yet.
results[posting.did] = acc
if k == -1:
return sorted(results.values(), reverse=True)
return heapq.nlargest(k, results.values())
def get_date_boost(self, date: int) -> float:
str_date = str(date)
year = int(str_date[0:4])
month = int(str_date[4:6])
day = int(str_date[6:8])
date_obj = datetime(year, month, day)
diff = date_obj - self.first_day
return ((diff.days / self.max_days) + ADDITIVE_CONSTANT_DATE_BOOST) * TUNABLE_WEIGHT_DATE
@staticmethod
def score(did: int, tf: int, term_specific_constant: float, boost_value: float):
return Accumulator(did=did, score=tf * term_specific_constant * boost_value)
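# Hedged usage sketch (added comment); assumes an nyt.sqlite database already built
# by __main__.py and the open_db helper from db.py:
#   connection = open_db()
#   processor = QueryProcessor(connection)
#   for acc in processor.process("new york marathon", k=5):
#       print(acc.did, acc.score)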
|
pplewka/zwo
|
src/posting_list.py
|
<filename>src/posting_list.py
import sqlite3
import heapq
from dataclasses import dataclass, field
from typing import List
DBConnection = sqlite3.Connection
@dataclass(order=True, frozen=True)
class Posting:
"""Definition of a single posting"""
did: int
tf: int = field(compare=False)
page: int = field(compare=False)
date: int = field(compare=False)
def __repr__(self):
return f'{self.did}|{self.tf}'
class InvertedIndex:
connection: DBConnection
def __init__(self, connection: DBConnection):
self.connection = connection
def getIndexList(self, term: str) -> List[Posting]:
h = []
for did, tf in self.connection.execute("SELECT did, tf FROM tfs where term = ?", (term,)):
heapq.heappush(h, Posting(did, tf, self.getPage(did), self.getDate(did)))
return [heapq.heappop(h) for _ in range(len(h))]
def getDF(self, term: str) -> int:
"""Return the document frequency for a given term."""
r = self.connection.execute("""
SELECT df from dfs WHERE term = ?
""", (term,))
return int(r.fetchone()[0])
def getPage(self, did: int) -> int:
"""Return the page of a document"""
return self.connection.execute("SELECT page FROM boost WHERE did = ?", (did,)).fetchone()[0]
def getDate(self, did: int) -> int:
"""Return the date of a document"""
return self.connection.execute("SELECT date FROM boost WHERE did = ?", (did,)).fetchone()[0]
def getSize(self) -> int:
"""Return the size of the document collection"""
r = self.connection.execute("""
SELECT size from d
""")
return int(r.fetchone()[0])
def getLength(self, did: int) -> int:
"""Return the length (number of term occurances) for a document identifier."""
r = self.connection.execute("""
SELECT len from dls WHERE did=:did
""", (did,))
return int(r.fetchone()[0])
def create_indices(connection: DBConnection):
print("\n[-] creating index tfs_idx", end="")
connection.execute("""
CREATE INDEX tfs_idx ON tfs(term, did)
""")
print("\r[+] creating index tfs_idx")
print("\n[-] creating index docs_idx", end="")
connection.execute("""
CREATE INDEX docs_idx ON docs(did)
""")
print("\r[+] creating index docs_idx")
print("\n[-] creating index dfs_idx", end="")
connection.execute("""
CREATE INDEX dfs_idx ON dfs(term, df)
""")
print("\r[+] creating index dfs_idx")
print("\n[-] creating index dls_idx", end="")
connection.execute("""
CREATE INDEX dls_idx ON dls(did, len)
""")
print("\r[+] creating index dls_idx")
print("\n[-] creating index boost_idx", end="")
connection.execute("""
CREATE INDEX boost_idx ON boost(did, date, page)
""")
print("\r[+] creating index boost_idx")
|
pplewka/zwo
|
src/constants.py
|
<gh_stars>1-10
TUNABLE_WEIGHT_TITLE = 3
TUNABLE_WEIGHT_CONTENT = 1
TUNABLE_WEIGHT_ABSTRACT = 2
TUNABLE_WEIGHT_DATE = 0.7
TUNABLE_WEIGHT_PAGE = 0.9
ADDITIVE_CONSTANT_DATE_BOOST = .5
|
pplewka/zwo
|
src/__main__.py
|
import os
import time
from posting_list import InvertedIndex, create_indices
from query_processing import QueryProcessor
from importer import Importer
from db import *
def parse_dir(directory: str) -> None:
"""Takes a directory path and parses all Documents inside this path, writing the results to a file."""
docs = Importer.import_dir(directory)
with open_db() as connection:
insert_documents(connection, docs)
insert_tfs(connection, docs)
insert_boost(connection, docs)
compute_statistics(connection)
if __name__ == "__main__":
print(r'''
.-. .-. .-. .-.
| \ / | | /\ |
/, ,_ `'-. .-'` _ˎ ˎ\
.-|\ /`\ '. .' /`\ /|-.
.' 0/ | 0\ \_ `". ."` _/ /0 | |0 '.
.-' _,/ '--'.'|#''---' '---''#|'.'--' \ˎ_ '-.____________________________________
`--' | / \# #/: \ | '--` | |
| / \# #/. \ | | HAPPY SEARCHING |
\ ;|\ .\# #/ ./|; / / |
|' ' // \ ::\# #/ ::/ \ ' '| /_____________________________________|
\ /` \ ':\# #/ ':/ `\ /
`"` \.. \# #/ ../ `"`
/| \::. \# #/. ./
______________________________________________________/ | \:: \# #/:: /
| | \' .:\# #/:::: /
| | \ :::\# #/::: /
| WELCOME TO ZWO | \ '::\# #/::' /
| - A Search Engine for the New York Times Corpus 2000 | \ \##/ /
| | \ /
|________________________________________________________| | |''')
if os.path.isfile("nyt.sqlite"):
with open_db() as connection:
query = input("\nSearch: ")
processor = QueryProcessor(connection)
time_stamp = time.time()
accumulators = processor.process(query, k=10)
elapsed_time = time.time() - time_stamp
print(f'\nHere are the Top-10 results for {query}')
print(f'Found in {round(elapsed_time, 2)} seconds.\n')
for i, acc in enumerate(accumulators):
print(f'{i+1}.\tscore: {int(acc.score)}\turl: {get_url(connection, acc.did)}\ttitle: {get_headline(connection, acc.did)}')
else:
create_db()
dir = input("Please tell me the path to the diretory of the nyt corpus: ")
parse_dir(dir)
with open_db() as connection:
create_indices(connection)
|
pplewka/zwo
|
src/importer.py
|
import os
from pathlib import Path
from typing import List
from parser import Parser, Document
class Importer:
"""Importer class used for finding all xml files in a given directory"""
@staticmethod
def import_dir(path: str) -> List[Document]:
"""
Searches for all xml files in a directory and all its subdirectories,
prints them with their size and returns a list with their paths
:param path: a directory
:return: a list with all paths of xml files
"""
results: List[Document] = []
for dirname, _, files in os.walk(path):
for f in files:
p = Path(dirname).joinpath(f)
if p.suffix == ".xml":
print(f"File: {p} with size {p.stat().st_size} bytes")
results.append(Parser.parse(p)) # we don't insert them right now, we insert them in chunks later
return results
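# Illustrative usage sketch (added; the path is an example only):
#
#   docs = Importer.import_dir("data/nyt_corpus")
#   print(f"parsed {len(docs)} documents")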
|
Gerrystev/efficientnet-simple-baseline
|
pose_estimation/output/mpii/pose_efficient_50/256x256_d256x3_adam_lr1e-3/pose_efficient.py
|
<reponame>Gerrystev/efficientnet-simple-baseline<filename>pose_estimation/output/mpii/pose_efficient_50/256x256_d256x3_adam_lr1e-3/pose_efficient.py<gh_stars>0
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import torch
import torch.nn as nn
from collections import OrderedDict
from math import ceil
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
base_model = [
# expand_ratio, channels, repeats, stride, kernel_size
[1, 16, 1, 1, 3],
[6, 24, 2, 2, 3],
[6, 40, 2, 2, 5],
[6, 80, 3, 2, 3],
[6, 112, 3, 1, 5],
[6, 192, 4, 2, 5],
[6, 320, 1, 1, 3],
]
phi_values = {
# tuple of: (phi_value, resolution, drop_rate)
"b0": (0, 224, 0.2), # alpha, beta, gamma, depth = alpha ** phi
"b1": (0.5, 240, 0.2),
"b2": (1, 260, 0.3),
"b3": (2, 300, 0.3),
"b4": (3, 380, 0.4),
"b5": (4, 456, 0.4),
"b6": (5, 528, 0.5),
"b7": (6, 600, 0.5),
}
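# Added note: calculate_factors() below applies the EfficientNet compound-scaling
# rule with alpha = 1.2 and beta = 1.1, so "b0" (phi = 0) keeps
# depth_factor = width_factor = 1.0, while e.g. "b2" (phi = 1) gives
# depth_factor = 1.2 ** 1 = 1.2 and width_factor = 1.1 ** 1 = 1.1.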
class CNNBlock(nn.Module):
def __init__(
self, in_channels, out_channels, kernel_size, stride, padding, groups=1
):
super(CNNBlock, self).__init__()
self.cnn = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=groups,
bias=False,
)
self.bn = nn.BatchNorm2d(out_channels)
self.silu = nn.SiLU() # SiLU <-> Swish
def forward(self, x):
return self.silu(self.bn(self.cnn(x)))
class SqueezeExcitation(nn.Module):
def __init__(self, in_channels, reduced_dim):
super(SqueezeExcitation, self).__init__()
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(1), # C x H x W -> C x 1 x 1
nn.Conv2d(in_channels, reduced_dim, 1),
nn.SiLU(),
nn.Conv2d(reduced_dim, in_channels, 1),
nn.Sigmoid(),
)
def forward(self, x):
return x * self.se(x)
class InvertedResidualBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
expand_ratio,
reduction=4, # squeeze excitation
survival_prob=0.8, # for stochastic depth
):
super(InvertedResidualBlock, self).__init__()
        self.survival_prob = survival_prob
self.use_residual = in_channels == out_channels and stride == 1
hidden_dim = in_channels * expand_ratio
self.expand = in_channels != hidden_dim
reduced_dim = int(in_channels / reduction)
if self.expand:
self.expand_conv = CNNBlock(
in_channels, hidden_dim, kernel_size=3, stride=1, padding=1,
)
self.conv = nn.Sequential(
CNNBlock(
hidden_dim, hidden_dim, kernel_size, stride, padding, groups=hidden_dim,
),
SqueezeExcitation(hidden_dim, reduced_dim),
nn.Conv2d(hidden_dim, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
)
def stochastic_depth(self, x):
if not self.training:
return x
binary_tensor = torch.rand(x.shape[0], 1, 1, 1, device=x.device) < self.survival_prob
return torch.div(x, self.survival_prob) * binary_tensor
def forward(self, inputs):
x = self.expand_conv(inputs) if self.expand else inputs
if self.use_residual:
return self.stochastic_depth(self.conv(x)) + inputs
else:
return self.conv(x)
class PoseEfficientNet(nn.Module):
def __init__(self, version, cfg, **kwargs):
# properties for deconv layer
self.inplanes = 64
extra = cfg.MODEL.EXTRA
self.deconv_with_bias = extra.DECONV_WITH_BIAS
super(PoseEfficientNet, self).__init__()
width_factor, depth_factor, dropout_rate = self.calculate_factors(version)
last_channels = ceil(1280 * width_factor)
self.pool = nn.AdaptiveAvgPool2d(1)
self.features = self.create_features(width_factor, depth_factor, last_channels)
# used for deconv layers
self.deconv_layers = self._make_deconv_layer(
extra.NUM_DECONV_LAYERS,
extra.NUM_DECONV_FILTERS,
extra.NUM_DECONV_KERNELS,
)
self.final_layer = nn.Conv2d(
in_channels=extra.NUM_DECONV_FILTERS[-1],
out_channels=cfg.MODEL.NUM_JOINTS,
kernel_size=extra.FINAL_CONV_KERNEL,
stride=1,
padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0
)
def calculate_factors(self, version, alpha=1.2, beta=1.1):
phi, res, drop_rate = phi_values[version]
depth_factor = alpha ** phi
width_factor = beta ** phi
return width_factor, depth_factor, drop_rate
def create_features(self, width_factor, depth_factor, last_channels):
channels = int(32 * width_factor)
features = [CNNBlock(3, channels, 3, stride=2, padding=1)]
in_channels = channels
for expand_ratio, channels, repeats, stride, kernel_size in base_model:
out_channels = 4*ceil(int(channels*width_factor) / 4)
layers_repeats = ceil(repeats * depth_factor)
for layer in range(layers_repeats):
features.append(
InvertedResidualBlock(
in_channels,
out_channels,
expand_ratio=expand_ratio,
stride = stride if layer == 0 else 1,
kernel_size=kernel_size,
padding=kernel_size//2, # if k=1:pad=0, k=3:pad=1, k=5:pad=2
)
)
in_channels = out_channels
self.inplanes = last_channels
features.append(
CNNBlock(in_channels, last_channels, kernel_size=1, stride=1, padding=0)
)
return nn.Sequential(*features)
def forward(self, x):
x = self.features(x)
x = self.deconv_layers(x)
x = self.final_layer(x)
return x
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
layers.append(
nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias))
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def init_weights(self, pretrained=''):
os.chdir("..")
print(os.getcwd())
if os.path.isfile(pretrained):
logger.info('=> init deconv weights from normal distribution')
for name, m in self.deconv_layers.named_modules():
if isinstance(m, nn.ConvTranspose2d):
logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
logger.info('=> init {}.weight as 1'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
logger.info('=> init final conv weights from normal distribution')
            for name, m in self.final_layer.named_modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
nn.init.constant_(m.bias, 0)
# pretrained_state_dict = torch.load(pretrained)
logger.info('=> loading pretrained model {}'.format(pretrained))
# self.load_state_dict(pretrained_state_dict, strict=False)
checkpoint = torch.load(pretrained)
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict_old = checkpoint['state_dict']
state_dict = OrderedDict()
# delete 'module.' because it is saved from DataParallel module
for key in state_dict_old.keys():
if key.startswith('module.'):
# state_dict[key[7:]] = state_dict[key]
# state_dict.pop(key)
state_dict[key[7:]] = state_dict_old[key]
else:
state_dict[key] = state_dict_old[key]
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(pretrained))
self.load_state_dict(state_dict, strict=False)
else:
            logger.error('=> imagenet pretrained model does not exist')
            logger.error('=> please download it first')
            raise ValueError('imagenet pretrained model does not exist')
def get_pose_net(cfg, is_train, **kwargs):
version = 'b0'
model = PoseEfficientNet(version, cfg, **kwargs)
# if is_train and cfg.MODEL.INIT_WEIGHTS:
# model.init_weights(cfg.MODEL.PRETRAINED)
return model
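# Illustrative shape-check sketch (added; not part of the original file).
# PoseEfficientNet reads cfg.MODEL.EXTRA.* and cfg.MODEL.NUM_JOINTS in __init__,
# so a minimal stand-in config is enough for a forward pass; all field values
# below are assumptions, not the project's real experiment config.
if __name__ == '__main__':
    from types import SimpleNamespace
    extra = SimpleNamespace(DECONV_WITH_BIAS=False,
                            NUM_DECONV_LAYERS=3,
                            NUM_DECONV_FILTERS=[256, 256, 256],
                            NUM_DECONV_KERNELS=[4, 4, 4],
                            FINAL_CONV_KERNEL=1)
    cfg = SimpleNamespace(MODEL=SimpleNamespace(EXTRA=extra, NUM_JOINTS=16))
    net = get_pose_net(cfg, is_train=False)
    net.eval()  # disable stochastic depth for a deterministic check
    heatmaps = net(torch.randn(1, 3, 256, 256))
    print(heatmaps.shape)  # (1, 16, 64, 64) with this input size and deconv setup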
|
Gerrystev/efficientnet-simple-baseline
|
pose_estimation/detect.py
|
<reponame>Gerrystev/efficientnet-simple-baseline
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import cv2
import numpy as np
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import _init_paths
from core.config import config
from core.config import update_config
from core.inference import get_final_preds
from utils.utils import create_logger
from utils.transforms import get_affine_transform
from utils.transforms import flip_back
import models
def _box2cs(box, image_width, image_height):
x, y, w, h = box[:4]
return _xywh2cs(x, y, w, h, image_width, image_height)
def _xywh2cs(x, y, w, h, image_width, image_height):
center = np.zeros((2), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
aspect_ratio = image_width * 1.0 / image_height
pixel_std = 200
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
scale = np.array(
[w * 1.0 / pixel_std, h * 1.0 / pixel_std],
dtype=np.float32)
if center[0] != -1:
scale = scale * 1.25
return center, scale
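# Added note: the center/scale convention above appears to follow the MPII
# annotation format -- "scale" is expressed in units of pixel_std = 200 px,
# which is why detect_cv2() below divides the image dimensions by 200.0 when it
# builds the scale array directly.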
def draw_skeleton(preds, img):
pred = preds[0,:, 0:2] + 1.0
pred = np.round(pred).astype(int)
    # extract keypoints following the MPII 16-joint order (the upper-neck joint, index 8, is not drawn)
pelvis_point = tuple(pred[6])
thorax_point = tuple(pred[7])
left_shoulder_point = tuple(pred[13])
left_elbow_point = tuple(pred[14])
left_wrist_point = tuple(pred[15])
right_shoulder_point = tuple(pred[12])
right_elbow_point = tuple(pred[11])
right_wrist_point = tuple(pred[10])
left_hip_point = tuple(pred[3])
left_knee_point = tuple(pred[4])
left_ankle_point = tuple(pred[5])
right_hip_point = tuple(pred[2])
right_knee_point = tuple(pred[1])
right_ankle_point = tuple(pred[0])
head_point = tuple(pred[9])
# draw line to make a skeleton
# color (argument 4 is BGR)
# thickness in px
thickness = 5
img_skel = cv2.line(img, pelvis_point, thorax_point, (203, 192, 255), thickness)
img_skel = cv2.line(img_skel, thorax_point, left_shoulder_point, (0, 165, 255), thickness)
img_skel = cv2.line(img_skel, left_shoulder_point, left_elbow_point, (128, 0, 128), thickness)
img_skel = cv2.line(img_skel, left_elbow_point, left_wrist_point, (0, 75, 150), thickness)
img_skel = cv2.line(img_skel, thorax_point, right_shoulder_point, (0, 255, 255), thickness)
img_skel = cv2.line(img_skel, right_shoulder_point, right_elbow_point, (0, 255, 0), thickness)
img_skel = cv2.line(img_skel, right_elbow_point, right_wrist_point, (0, 0, 255), thickness)
img_skel = cv2.line(img_skel, pelvis_point, left_hip_point, (33, 0, 133), thickness)
img_skel = cv2.line(img_skel, left_hip_point, left_knee_point, (0, 76, 255), thickness)
img_skel = cv2.line(img_skel, left_knee_point, left_ankle_point, (0, 255, 0), thickness)
img_skel = cv2.line(img_skel, pelvis_point, right_hip_point, (248, 0, 252), thickness)
img_skel = cv2.line(img_skel, right_hip_point, right_knee_point, (0, 196, 92), thickness)
img_skel = cv2.line(img_skel, right_knee_point, right_ankle_point, (0, 238, 255), thickness)
img_skel = cv2.line(img_skel, head_point, thorax_point, (255, 0, 0), thickness)
cv2.imwrite('predictions.jpg', img_skel)
def detect_cv2(model, imgfile, flip_pairs):
img = cv2.imread(imgfile)
data_numpy = cv2.imread(imgfile, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
if data_numpy is None:
print('=> fail to read {}'.format(imgfile))
raise ValueError('Fail to read {}'.format(imgfile))
c = np.array([data_numpy.shape[1] / 2.0, data_numpy.shape[0] / 2.0], dtype='float32')
max_wh = max([data_numpy.shape[1] / 200.0, data_numpy.shape[0] / 200.0])
s = np.array([max_wh, max_wh], dtype='float32')
r = 0
# c = np.array([img.shape[0]/2.0, img.shape[1]/2.0], dtype='float32')
# s = np.array([img.shape[1]/200.0, img.shape[1]/200.0], dtype='float32')
# r = 0
trans = get_affine_transform(c, s, r, config.MODEL.IMAGE_SIZE)
input = cv2.warpAffine(
data_numpy,
trans,
(int(config.MODEL.IMAGE_SIZE[0]), int(config.MODEL.IMAGE_SIZE[1])),
flags=cv2.INTER_LINEAR)
# vis transformed image
cv2.imshow('image', input)
cv2.waitKey(0)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
input = transform(input).unsqueeze(0)
# switch to evaluate mode
model.eval()
with torch.no_grad():
# compute output heatmap
output = model(input)
# compute coordinate
preds, maxvals = get_final_preds(
config, output.clone().cpu().numpy(), np.asarray([c]), np.asarray([s]))
# plot
image = data_numpy.copy()
for mat in preds[0]:
x, y = int(mat[0]), int(mat[1])
cv2.circle(image, (x, y), 2, (255, 0, 0), 2)
# vis result
cv2.imshow('res', image)
cv2.waitKey(0)
# # ===================================
# convert_tensor = transforms.ToTensor()
# input = convert_tensor(img_rgb)
#
# # add one more dimension for tensor input
# input = input[None, :, :, :]
# with torch.no_grad():
# output = model(input)
#
# # this part is ugly, because pytorch has not supported negative index
# # input_flipped = model(input[:, :, :, ::-1])
# input_flipped = np.flip(input.cpu().numpy(), 3).copy()
# input_flipped = torch.from_numpy(input_flipped).cuda()
# output_flipped = model(input_flipped)
# output_flipped = flip_back(output_flipped.cpu().numpy(),
# flip_pairs)
# output_flipped = torch.from_numpy(output_flipped.copy()).cuda()
#
# # feature is not aligned, shift flipped heatmap for higher accuracy
# if config.TEST.SHIFT_HEATMAP:
# output_flipped[:, :, :, 1:] = \
# output_flipped.clone()[:, :, :, 0:-1]
# # output_flipped[:, :, :, 0] = 0
#
# output = (output + output_flipped) * 0.5
#
# c = np.array([[img.shape[0]/2.0, img.shape[1]/2.0]], dtype='float32')
# s = np.array([[img.shape[1]/200.0, img.shape[1]/200.0]], dtype='float32')
#
# preds, maxvals = get_final_preds(
# config, output.clone().cpu().numpy(), c, s)
draw_skeleton(preds, img)
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
args, rest = parser.parse_known_args()
# update config
update_config(args.cfg)
# training
parser.add_argument('--frequent',
help='frequency of logging',
default=config.PRINT_FREQ,
type=int)
parser.add_argument('--gpus',
help='gpus',
type=str)
parser.add_argument('--workers',
help='num of dataloader workers',
type=int)
parser.add_argument('--model-file',
help='model state file',
type=str)
parser.add_argument('--use-detect-bbox',
help='use detect bbox',
action='store_true')
parser.add_argument('--flip-test',
help='use flip test',
action='store_true')
parser.add_argument('--post-process',
help='use post process',
action='store_true')
parser.add_argument('--shift-heatmap',
help='shift heatmap',
action='store_true')
parser.add_argument('--coco-bbox-file',
help='coco detection bbox file',
type=str)
parser.add_argument('--imgfile',
help='cropped person image file',
type=str)
args = parser.parse_args()
return args
def reset_config(config, args):
if args.gpus:
config.GPUS = args.gpus
if args.workers:
config.WORKERS = args.workers
if args.use_detect_bbox:
config.TEST.USE_GT_BBOX = not args.use_detect_bbox
if args.flip_test:
config.TEST.FLIP_TEST = args.flip_test
if args.post_process:
config.TEST.POST_PROCESS = args.post_process
if args.shift_heatmap:
config.TEST.SHIFT_HEATMAP = args.shift_heatmap
if args.model_file:
config.TEST.MODEL_FILE = args.model_file
if args.coco_bbox_file:
config.TEST.COCO_BBOX_FILE = args.coco_bbox_file
def main():
args = parse_args()
reset_config(config, args)
logger, final_output_dir, tb_log_dir = create_logger(
config, args.cfg, 'valid')
logger.info(pprint.pformat(args))
logger.info(pprint.pformat(config))
# cudnn related setting
cudnn.benchmark = config.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = config.CUDNN.ENABLED
model = eval('models.'+config.MODEL.NAME+'.get_pose_net')(
config, is_train=False
)
gpus = [int(i) for i in config.GPUS.split(',')]
model = torch.nn.DataParallel(model, device_ids=gpus).cuda()
if config.TEST.MODEL_FILE:
logger.info('=> loading model from {}'.format(config.TEST.MODEL_FILE))
model.load_state_dict(torch.load(config.TEST.MODEL_FILE))
else:
model_state_file = os.path.join(final_output_dir,
'final_state.pth.tar')
logger.info('=> loading model from {}'.format(model_state_file))
model.load_state_dict(torch.load(model_state_file))
# get flip_pairs from dataset.___.flip_pairs
# in this case took flip_pairs from mpii
flip_pairs = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]
detect_cv2(model, args.imgfile, flip_pairs)
if __name__ == '__main__':
main()
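# Illustrative invocation (added note; the config and checkpoint paths are
# assumptions, only the flag names come from parse_args() above):
#
#   python pose_estimation/detect.py \
#       --cfg <experiment yaml, e.g. a 256x256_d256x3_adam_lr1e-3 config> \
#       --model-file <path to final_state.pth.tar> \
#       --imgfile <cropped person image>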
|
davincif/Amila-RTS
|
world_map/wmap.py
|
import pygame
from world_map.camp import Camp
from fuzy_prob.prob import Prob
from fuzy_prob import prob
class Wmap():
preprint = None
__map_x_size = 10
__map_y_size = 10
__is_generated = False
    __matrix = []  # world map matrix
def __init__(self):
pass
def init(self):
print(self.preprint + 'wmap from world_map...')
def quit(self):
print(self.preprint + 'wmap from world_map...')
def def_map_xy(self, mapx, mapy):
# argument type check
if(self.__is_generated):
raise Exception('map already generated')
elif(type(mapx) != int or type(mapy) != int):
            raise TypeError('arguments must be int, but they are: ' + str(type(mapx)) + ' and ' + str(type(mapy)))
elif(mapx <= 0 or mapy <= 0):
raise Exception('map size should be positive')
self.__map_x_size = mapx
self.__map_y_size = mapy
def generate_map(self):
if(self.__is_generated):
raise Exception('map is already generated')
self.__is_generated = True
print('generating world map...', end='')
for ix in range(self.__map_x_size):
line = []
for iy in range(self.__map_y_size):
coin = prob.flit_a_coin()
line.append(coin)
self.__matrix.append(line)
print(' done', end='\n\n')
world_map = Wmap()
if __name__ == '__main__':
    world_map.generate_map()
|
davincif/Amila-RTS
|
world_map/bioma_types.py
|
<reponame>davincif/Amila-RTS
from enum import Enum
class BiomaType(Enum):
HOT_DISERT = 1
COLD_DISERT = 2
HILLY = 3
PLAINS = 4
FLOREST = 5
SWAMPY = 6
ARID = 7
TUNDRA = 8
PRAIRIE = 9
|